summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVitaly Buka <vitalybuka@google.com>2024-03-31 22:51:36 -0700
committerVitaly Buka <vitalybuka@google.com>2024-03-31 22:51:36 -0700
commit9c3b69f8aecf03b5e111eaf984f4e1ba675e5198 (patch)
tree8d4d72ab0216b68b420a8e80311108cbbd1ebc50
parent3a8f27c68dd76ecd9299fa5d267b5c3623919337 (diff)
parent37d6e5b7a555e8c85c3e34803a710725c26857c7 (diff)
Created using spr 1.3.4 [skip ci]
-rwxr-xr-x.ci/monolithic-linux.sh6
-rwxr-xr-x.ci/monolithic-windows.sh6
-rw-r--r--.github/CODEOWNERS20
-rw-r--r--.github/workflows/issue-write.yml128
-rw-r--r--.github/workflows/pr-code-format.yml22
-rw-r--r--.github/workflows/scorecard.yml2
-rw-r--r--bolt/docs/BAT.md76
-rw-r--r--bolt/include/bolt/Core/AddressMap.h1
-rw-r--r--bolt/include/bolt/Core/BinaryContext.h3
-rw-r--r--bolt/include/bolt/Core/BinaryData.h1
-rw-r--r--bolt/include/bolt/Core/BinaryDomTree.h1
-rw-r--r--bolt/include/bolt/Core/BinaryFunction.h11
-rw-r--r--bolt/include/bolt/Core/BinaryLoop.h2
-rw-r--r--bolt/include/bolt/Core/BinarySection.h1
-rw-r--r--bolt/include/bolt/Core/DebugData.h1
-rw-r--r--bolt/include/bolt/Core/DebugNames.h13
-rw-r--r--bolt/include/bolt/Core/FunctionLayout.h1
-rw-r--r--bolt/include/bolt/Core/MCPlus.h3
-rw-r--r--bolt/include/bolt/Core/MCPlusBuilder.h23
-rw-r--r--bolt/include/bolt/Passes/BinaryPasses.h1
-rw-r--r--bolt/include/bolt/Passes/CacheMetrics.h1
-rw-r--r--bolt/include/bolt/Passes/DominatorAnalysis.h1
-rw-r--r--bolt/include/bolt/Passes/ReachingDefOrUse.h2
-rw-r--r--bolt/include/bolt/Passes/ReachingInsns.h1
-rw-r--r--bolt/include/bolt/Passes/ReorderUtils.h1
-rw-r--r--bolt/include/bolt/Profile/BoltAddressTranslation.h125
-rw-r--r--bolt/include/bolt/Profile/DataAggregator.h7
-rw-r--r--bolt/include/bolt/Profile/ProfileReaderBase.h2
-rw-r--r--bolt/include/bolt/Profile/ProfileYAMLMapping.h1
-rw-r--r--bolt/include/bolt/Rewrite/DWARFRewriter.h2
-rw-r--r--bolt/include/bolt/Rewrite/MetadataManager.h1
-rw-r--r--bolt/include/bolt/Rewrite/RewriteInstance.h1
-rw-r--r--bolt/include/bolt/RuntimeLibs/RuntimeLibrary.h1
-rw-r--r--bolt/include/bolt/Utils/NameShortener.h1
-rw-r--r--bolt/lib/Core/BinaryContext.cpp49
-rw-r--r--bolt/lib/Core/BinaryFunction.cpp37
-rw-r--r--bolt/lib/Core/DIEBuilder.cpp7
-rw-r--r--bolt/lib/Core/DebugData.cpp3
-rw-r--r--bolt/lib/Core/DebugNames.cpp109
-rw-r--r--bolt/lib/Core/FunctionLayout.cpp13
-rw-r--r--bolt/lib/Core/HashUtilities.cpp1
-rw-r--r--bolt/lib/Core/MCPlusBuilder.cpp37
-rw-r--r--bolt/lib/Passes/BinaryPasses.cpp20
-rw-r--r--bolt/lib/Passes/CMOVConversion.cpp1
-rw-r--r--bolt/lib/Passes/FixRISCVCallsPass.cpp8
-rw-r--r--bolt/lib/Passes/FixRelaxationPass.cpp8
-rw-r--r--bolt/lib/Passes/FrameOptimizer.cpp1
-rw-r--r--bolt/lib/Passes/Hugify.cpp1
-rw-r--r--bolt/lib/Passes/Inliner.cpp1
-rw-r--r--bolt/lib/Passes/ShrinkWrapping.cpp1
-rw-r--r--bolt/lib/Passes/SplitFunctions.cpp1
-rw-r--r--bolt/lib/Passes/TailDuplication.cpp2
-rw-r--r--bolt/lib/Passes/ValidateInternalCalls.cpp1
-rw-r--r--bolt/lib/Profile/BoltAddressTranslation.cpp177
-rw-r--r--bolt/lib/Profile/CMakeLists.txt1
-rw-r--r--bolt/lib/Profile/DataAggregator.cpp198
-rw-r--r--bolt/lib/Profile/DataReader.cpp1
-rw-r--r--bolt/lib/Profile/Heatmap.cpp1
-rw-r--r--bolt/lib/Profile/YAMLProfileWriter.cpp40
-rw-r--r--bolt/lib/Rewrite/BinaryPassManager.cpp2
-rw-r--r--bolt/lib/Rewrite/DWARFRewriter.cpp46
-rw-r--r--bolt/lib/Rewrite/JITLinkLinker.cpp4
-rw-r--r--bolt/lib/Rewrite/LinuxKernelRewriter.cpp504
-rw-r--r--bolt/lib/Rewrite/MachORewriteInstance.cpp35
-rw-r--r--bolt/lib/Rewrite/RewriteInstance.cpp51
-rw-r--r--bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp3
-rw-r--r--bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp3
-rw-r--r--bolt/lib/Target/X86/X86MCPlusBuilder.cpp20
-rw-r--r--bolt/lib/Utils/CommandLineOpts.cpp4
-rw-r--r--bolt/test/X86/Inputs/blarge_new.preagg.txt81
-rw-r--r--bolt/test/X86/Inputs/blarge_new.yaml1648
-rw-r--r--bolt/test/X86/Inputs/blarge_new_bat.preagg.txt79
-rw-r--r--bolt/test/X86/bolt-address-translation-yaml.test64
-rw-r--r--bolt/test/X86/bolt-address-translation.test2
-rw-r--r--bolt/test/X86/dwarf4-label-low-pc.s263
-rw-r--r--bolt/test/X86/dwarf5-debug-names-cross-cu.s712
-rw-r--r--bolt/test/X86/dwarf5-label-low-pc.s36
-rw-r--r--bolt/test/X86/linux-alt-instruction.s15
-rw-r--r--bolt/test/X86/linux-bug-table.s25
-rw-r--r--bolt/test/X86/linux-orc.s4
-rw-r--r--bolt/test/X86/linux-parainstructions.s2
-rw-r--r--bolt/test/X86/linux-static-keys.s67
-rw-r--r--bolt/test/X86/yaml-secondary-entry-discriminator.s74
-rw-r--r--bolt/tools/bat-dump/bat-dump.cpp11
-rw-r--r--bolt/tools/heatmap/heatmap.cpp12
-rw-r--r--bolt/unittests/Core/BinaryContext.cpp13
-rw-r--r--bolt/unittests/Core/MCPlusBuilder.cpp13
-rw-r--r--clang-tools-extra/clang-tidy/ClangTidy.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/IncDecInConditionsCheck.cpp8
-rw-r--r--clang-tools-extra/clang-tidy/google/IntegerTypesCheck.cpp41
-rw-r--r--clang-tools-extra/clang-tidy/google/IntegerTypesCheck.h3
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseUsingCheck.cpp24
-rw-r--r--clang-tools-extra/clang-tidy/readability/StaticDefinitionInAnonymousNamespaceCheck.h6
-rw-r--r--clang-tools-extra/clang-tidy/utils/IncludeSorter.cpp2
-rw-r--r--clang-tools-extra/clangd/ClangdLSPServer.cpp2
-rw-r--r--clang-tools-extra/clangd/InlayHints.cpp5
-rw-r--r--clang-tools-extra/clangd/Protocol.cpp31
-rw-r--r--clang-tools-extra/clangd/Protocol.h47
-rw-r--r--clang-tools-extra/clangd/support/Trace.h4
-rw-r--r--clang-tools-extra/clangd/test/inlayHints.test6
-rw-r--r--clang-tools-extra/clangd/tool/Check.cpp8
-rw-r--r--clang-tools-extra/clangd/unittests/InlayHintTests.cpp9
-rw-r--r--clang-tools-extra/docs/ReleaseNotes.rst35
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/bugprone/inc-dec-in-conditions.cpp10
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp43
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/readability/static-definition-in-anonymous-namespace.cpp11
-rw-r--r--clang/CMakeLists.txt35
-rw-r--r--clang/cmake/caches/CrossWinToARMLinux.cmake42
-rw-r--r--clang/cmake/caches/HLSL.cmake2
-rw-r--r--clang/docs/ClangFormat.rst1
-rw-r--r--clang/docs/ClangFormatStyleOptions.rst145
-rw-r--r--clang/docs/LanguageExtensions.rst116
-rw-r--r--clang/docs/ReleaseNotes.rst62
-rw-r--r--clang/docs/UsersManual.rst60
-rw-r--r--clang/docs/analyzer/checkers.rst218
-rw-r--r--clang/include/clang-c/Index.h1
-rw-r--r--clang/include/clang/AST/DeclBase.h5
-rw-r--r--clang/include/clang/AST/DeclContextInternals.h2
-rw-r--r--clang/include/clang/AST/FormatString.h2
-rw-r--r--clang/include/clang/AST/TextNodeDumper.h1
-rw-r--r--clang/include/clang/AST/Type.h108
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h29
-rw-r--r--clang/include/clang/Analysis/PathDiagnostic.h8
-rw-r--r--clang/include/clang/Basic/Attr.td21
-rw-r--r--clang/include/clang/Basic/AttrDocs.td14
-rw-r--r--clang/include/clang/Basic/Builtins.td18
-rw-r--r--clang/include/clang/Basic/BuiltinsAMDGPU.def11
-rw-r--r--clang/include/clang/Basic/DiagnosticDriverKinds.td3
-rw-r--r--clang/include/clang/Basic/DiagnosticGroups.td8
-rw-r--r--clang/include/clang/Basic/DiagnosticInstallAPIKinds.td5
-rw-r--r--clang/include/clang/Basic/DiagnosticParseKinds.td3
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td29
-rw-r--r--clang/include/clang/Basic/Features.def1
-rw-r--r--clang/include/clang/Basic/LangStandard.h5
-rw-r--r--clang/include/clang/Basic/Specifiers.h43
-rw-r--r--clang/include/clang/Basic/SyncScope.h3
-rw-r--r--clang/include/clang/Basic/TargetInfo.h15
-rw-r--r--clang/include/clang/Driver/Options.td17
-rw-r--r--clang/include/clang/Driver/Types.def1
-rw-r--r--clang/include/clang/Format/Format.h17
-rw-r--r--clang/include/clang/Frontend/CompilerInstance.h36
-rw-r--r--clang/include/clang/Frontend/FrontendActions.h14
-rw-r--r--clang/include/clang/InstallAPI/DylibVerifier.h33
-rw-r--r--clang/include/clang/InstallAPI/HeaderFile.h66
-rw-r--r--clang/include/clang/InstallAPI/MachO.h2
-rw-r--r--clang/include/clang/Interpreter/Interpreter.h8
-rw-r--r--clang/include/clang/Interpreter/Value.h1
-rw-r--r--clang/include/clang/Lex/ModuleMap.h15
-rw-r--r--clang/include/clang/Lex/Preprocessor.h18
-rw-r--r--clang/include/clang/Lex/PreprocessorOptions.h35
-rw-r--r--clang/include/clang/Sema/Sema.h3
-rw-r--r--clang/include/clang/StaticAnalyzer/Checkers/Checkers.td12
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h9
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h12
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h8
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h72
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h4
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h16
-rw-r--r--clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h20
-rw-r--r--clang/lib/APINotes/APINotesManager.cpp5
-rw-r--r--clang/lib/APINotes/APINotesWriter.cpp2
-rw-r--r--clang/lib/AST/APValue.cpp3
-rw-r--r--clang/lib/AST/ASTContext.cpp40
-rw-r--r--clang/lib/AST/Decl.cpp2
-rw-r--r--clang/lib/AST/DeclBase.cpp8
-rw-r--r--clang/lib/AST/ExprConstant.cpp78
-rw-r--r--clang/lib/AST/FormatString.cpp29
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.cpp8
-rw-r--r--clang/lib/AST/Interp/ByteCodeStmtGen.h1
-rw-r--r--clang/lib/AST/Interp/EvaluationResult.cpp2
-rw-r--r--clang/lib/AST/Interp/Program.cpp2
-rw-r--r--clang/lib/AST/ItaniumMangle.cpp1
-rw-r--r--clang/lib/AST/JSONNodeDumper.cpp2
-rw-r--r--clang/lib/AST/MicrosoftMangle.cpp9
-rw-r--r--clang/lib/AST/ScanfFormatString.cpp4
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp13
-rw-r--r--clang/lib/AST/Type.cpp33
-rw-r--r--clang/lib/AST/TypePrinter.cpp21
-rw-r--r--clang/lib/Analysis/CFG.cpp4
-rw-r--r--clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp2
-rw-r--r--clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp38
-rw-r--r--clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp45
-rw-r--r--clang/lib/Analysis/FlowSensitive/Transfer.cpp61
-rw-r--r--clang/lib/Analysis/PathDiagnostic.cpp7
-rw-r--r--clang/lib/Analysis/UnsafeBufferUsage.cpp8
-rw-r--r--clang/lib/Basic/LangStandards.cpp3
-rw-r--r--clang/lib/Basic/TargetInfo.cpp1
-rw-r--r--clang/lib/Basic/Targets/AArch64.cpp7
-rw-r--r--clang/lib/Basic/Targets/AArch64.h1
-rw-r--r--clang/lib/Basic/Targets/ARM.cpp6
-rw-r--r--clang/lib/Basic/Targets/ARM.h2
-rw-r--r--clang/lib/Basic/Targets/LoongArch.cpp2
-rw-r--r--clang/lib/Basic/Targets/LoongArch.h1
-rw-r--r--clang/lib/Basic/Targets/Mips.h2
-rw-r--r--clang/lib/Basic/Targets/PPC.h1
-rw-r--r--clang/lib/Basic/Targets/RISCV.cpp11
-rw-r--r--clang/lib/Basic/Targets/RISCV.h2
-rw-r--r--clang/lib/Basic/Targets/SystemZ.h1
-rw-r--r--clang/lib/Basic/Targets/VE.h1
-rw-r--r--clang/lib/Basic/Targets/WebAssembly.h1
-rw-r--r--clang/lib/Basic/Targets/X86.h1
-rw-r--r--clang/lib/CodeGen/ABIInfo.cpp4
-rw-r--r--clang/lib/CodeGen/ABIInfoImpl.cpp14
-rw-r--r--clang/lib/CodeGen/Address.h195
-rw-r--r--clang/lib/CodeGen/CGAtomic.cpp53
-rw-r--r--clang/lib/CodeGen/CGBlocks.cpp34
-rw-r--r--clang/lib/CodeGen/CGBlocks.h3
-rw-r--r--clang/lib/CodeGen/CGBuilder.h234
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp387
-rw-r--r--clang/lib/CodeGen/CGCUDANV.cpp46
-rw-r--r--clang/lib/CodeGen/CGCXXABI.cpp21
-rw-r--r--clang/lib/CodeGen/CGCXXABI.h14
-rw-r--r--clang/lib/CodeGen/CGCall.cpp186
-rw-r--r--clang/lib/CodeGen/CGCall.h1
-rw-r--r--clang/lib/CodeGen/CGClass.cpp76
-rw-r--r--clang/lib/CodeGen/CGCleanup.cpp110
-rw-r--r--clang/lib/CodeGen/CGCleanup.h2
-rw-r--r--clang/lib/CodeGen/CGCoroutine.cpp4
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.cpp10
-rw-r--r--clang/lib/CodeGen/CGDecl.cpp71
-rw-r--r--clang/lib/CodeGen/CGException.cpp19
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp248
-rw-r--r--clang/lib/CodeGen/CGExprAgg.cpp29
-rw-r--r--clang/lib/CodeGen/CGExprCXX.cpp121
-rw-r--r--clang/lib/CodeGen/CGExprComplex.cpp2
-rw-r--r--clang/lib/CodeGen/CGExprConstant.cpp12
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp23
-rw-r--r--clang/lib/CodeGen/CGLoopInfo.h7
-rw-r--r--clang/lib/CodeGen/CGNonTrivialStruct.cpp8
-rw-r--r--clang/lib/CodeGen/CGObjC.cpp43
-rw-r--r--clang/lib/CodeGen/CGObjCGNU.cpp42
-rw-r--r--clang/lib/CodeGen/CGObjCMac.cpp101
-rw-r--r--clang/lib/CodeGen/CGObjCRuntime.cpp6
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp200
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.h5
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp76
-rw-r--r--clang/lib/CodeGen/CGRecordLayoutBuilder.cpp351
-rw-r--r--clang/lib/CodeGen/CGStmt.cpp8
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp87
-rw-r--r--clang/lib/CodeGen/CGVTables.cpp9
-rw-r--r--clang/lib/CodeGen/CGValue.h250
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.cpp78
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.h276
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp192
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.cpp10
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.h6
-rw-r--r--clang/lib/CodeGen/CodeGenTypes.cpp2
-rw-r--r--clang/lib/CodeGen/ItaniumCXXABI.cpp52
-rw-r--r--clang/lib/CodeGen/MicrosoftCXXABI.cpp58
-rw-r--r--clang/lib/CodeGen/SwiftCallingConv.cpp2
-rw-r--r--clang/lib/CodeGen/TargetInfo.h5
-rw-r--r--clang/lib/CodeGen/Targets/ARM.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/LoongArch.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/NVPTX.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/PPC.cpp13
-rw-r--r--clang/lib/CodeGen/Targets/RISCV.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/Sparc.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/SystemZ.cpp9
-rw-r--r--clang/lib/CodeGen/Targets/X86.cpp7
-rw-r--r--clang/lib/CodeGen/Targets/XCore.cpp2
-rw-r--r--clang/lib/Driver/Driver.cpp49
-rw-r--r--clang/lib/Driver/ToolChains/AIX.cpp6
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp11
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp21
-rw-r--r--clang/lib/Driver/ToolChains/HLSL.cpp4
-rw-r--r--clang/lib/Driver/ToolChains/MSVC.cpp8
-rw-r--r--clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp1
-rw-r--r--clang/lib/Format/Format.cpp3
-rw-r--r--clang/lib/Format/FormatToken.h1
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp64
-rw-r--r--clang/lib/Format/WhitespaceManager.cpp13
-rw-r--r--clang/lib/Format/WhitespaceManager.h3
-rw-r--r--clang/lib/Frontend/CompilerInstance.cpp46
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp32
-rw-r--r--clang/lib/Frontend/FrontendAction.cpp10
-rw-r--r--clang/lib/Frontend/FrontendActions.cpp8
-rw-r--r--clang/lib/Frontend/FrontendOptions.cpp1
-rw-r--r--clang/lib/Headers/avxintrin.h42
-rw-r--r--clang/lib/Headers/emmintrin.h166
-rw-r--r--clang/lib/Headers/hlsl/hlsl_intrinsics.h161
-rw-r--r--clang/lib/Headers/mmintrin.h12
-rw-r--r--clang/lib/Headers/smmintrin.h4
-rw-r--r--clang/lib/Headers/xmmintrin.h142
-rw-r--r--clang/lib/InstallAPI/CMakeLists.txt1
-rw-r--r--clang/lib/InstallAPI/DylibVerifier.cpp220
-rw-r--r--clang/lib/InstallAPI/Frontend.cpp2
-rw-r--r--clang/lib/InstallAPI/HeaderFile.cpp51
-rw-r--r--clang/lib/InstallAPI/Visitor.cpp9
-rw-r--r--clang/lib/Interpreter/IncrementalExecutor.cpp33
-rw-r--r--clang/lib/Interpreter/IncrementalExecutor.h9
-rw-r--r--clang/lib/Interpreter/IncrementalParser.cpp18
-rw-r--r--clang/lib/Interpreter/Interpreter.cpp26
-rw-r--r--clang/lib/Interpreter/Value.cpp10
-rw-r--r--clang/lib/Lex/ModuleMap.cpp66
-rw-r--r--clang/lib/Lex/PPLexerChange.cpp12
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp8
-rw-r--r--clang/lib/Parse/ParseExpr.cpp2
-rw-r--r--clang/lib/Parse/ParseExprCXX.cpp9
-rw-r--r--clang/lib/Parse/ParseInit.cpp2
-rw-r--r--clang/lib/Sema/Sema.cpp16
-rw-r--r--clang/lib/Sema/SemaAPINotes.cpp41
-rw-r--r--clang/lib/Sema/SemaChecking.cpp193
-rw-r--r--clang/lib/Sema/SemaCodeComplete.cpp5
-rw-r--r--clang/lib/Sema/SemaConcept.cpp16
-rw-r--r--clang/lib/Sema/SemaDecl.cpp223
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp7
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp11
-rw-r--r--clang/lib/Sema/SemaExpr.cpp5
-rw-r--r--clang/lib/Sema/SemaExprCXX.cpp2
-rw-r--r--clang/lib/Sema/SemaInit.cpp32
-rw-r--r--clang/lib/Sema/SemaObjCProperty.cpp4
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp6
-rw-r--r--clang/lib/Sema/SemaOverload.cpp44
-rw-r--r--clang/lib/Sema/SemaSYCL.cpp2
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp26
-rw-r--r--clang/lib/Sema/SemaTemplateDeduction.cpp4
-rw-r--r--clang/lib/Sema/SemaType.cpp10
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp21
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp35
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp23
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp192
-rw-r--r--clang/lib/StaticAnalyzer/Core/BugReporter.cpp36
-rw-r--r--clang/lib/StaticAnalyzer/Core/CallEvent.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerContext.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp1
-rw-r--r--clang/lib/StaticAnalyzer/Core/MemRegion.cpp22
-rw-r--r--clang/lib/StaticAnalyzer/Core/ProgramState.cpp32
-rw-r--r--clang/lib/StaticAnalyzer/Core/RegionStore.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp4
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp63
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp35
-rw-r--r--clang/test/APINotes/Inputs/APINotes/SomeOtherKit.apinotes8
-rw-r--r--clang/test/APINotes/Inputs/BrokenHeaders/APINotes.apinotes5
-rw-r--r--clang/test/APINotes/Inputs/BrokenHeaders/SomeBrokenLib.h6
-rw-r--r--clang/test/APINotes/Inputs/BrokenHeaders2/APINotes.apinotes7
-rw-r--r--clang/test/APINotes/Inputs/BrokenHeaders2/SomeBrokenLib.h6
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Headers/FrameworkWithActualPrivateModule.h1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.private.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.h2
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Headers/FrameworkWithWrongCase.h1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/PrivateHeaders/FrameworkWithWrongCase_Private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Headers/FrameworkWithWrongCasePrivate.h1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.private.modulemap1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/PrivateHeaders/FrameworkWithWrongCasePrivate_Private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Headers/LayeredKit.h11
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.apinotes9
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.h7
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SimpleKit.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit.apinotes74
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit_private.apinotes15
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Headers/SomeKitForNullAnnotation.h55
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.private.modulemap8
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module_private.modulemap8
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_Private.h16
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_PrivateForNullAnnotation.h17
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_private.apinotes15
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/APINotes/SomeOtherKit.apinotes8
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.apinotes8
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.h9
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit.h1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit_Private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.private.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.apinotes4
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.h1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private_private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.apinotes156
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.h137
-rw-r--r--clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Modules/module.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/Headers/APINotes.apinotes18
-rw-r--r--clang/test/APINotes/Inputs/Headers/BrokenTypes.apinotes10
-rw-r--r--clang/test/APINotes/Inputs/Headers/BrokenTypes.h8
-rw-r--r--clang/test/APINotes/Inputs/Headers/ExportAs.apinotes5
-rw-r--r--clang/test/APINotes/Inputs/Headers/ExportAs.h1
-rw-r--r--clang/test/APINotes/Inputs/Headers/ExportAsCore.h1
-rw-r--r--clang/test/APINotes/Inputs/Headers/ExternCtx.apinotes15
-rw-r--r--clang/test/APINotes/Inputs/Headers/ExternCtx.h11
-rw-r--r--clang/test/APINotes/Inputs/Headers/HeaderLib.apinotes37
-rw-r--r--clang/test/APINotes/Inputs/Headers/HeaderLib.h19
-rw-r--r--clang/test/APINotes/Inputs/Headers/InstancetypeModule.apinotes10
-rw-r--r--clang/test/APINotes/Inputs/Headers/InstancetypeModule.h10
-rw-r--r--clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase.h1
-rw-r--r--clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate.h1
-rw-r--r--clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate_Private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase_Private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Headers/Namespaces.apinotes53
-rw-r--r--clang/test/APINotes/Inputs/Headers/Namespaces.h39
-rw-r--r--clang/test/APINotes/Inputs/Headers/PrivateLib.apinotes4
-rw-r--r--clang/test/APINotes/Inputs/Headers/PrivateLib.h1
-rw-r--r--clang/test/APINotes/Inputs/Headers/PrivateLib_private.apinotes1
-rw-r--r--clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes9
-rw-r--r--clang/test/APINotes/Inputs/Headers/SwiftImportAs.h6
-rw-r--r--clang/test/APINotes/Inputs/Headers/Templates.apinotes5
-rw-r--r--clang/test/APINotes/Inputs/Headers/Templates.h9
-rw-r--r--clang/test/APINotes/Inputs/Headers/module.modulemap45
-rw-r--r--clang/test/APINotes/Inputs/Headers/module.private.modulemap5
-rw-r--r--clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.apinotes65
-rw-r--r--clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.h1
-rw-r--r--clang/test/APINotes/Inputs/yaml-reader-errors/module.modulemap3
-rw-r--r--clang/test/APINotes/availability.m48
-rw-r--r--clang/test/APINotes/broken_types.m19
-rw-r--r--clang/test/APINotes/case-for-private-apinotes-file.c22
-rw-r--r--clang/test/APINotes/export-as.c8
-rw-r--r--clang/test/APINotes/extern-context.cpp23
-rw-r--r--clang/test/APINotes/instancetype.m9
-rw-r--r--clang/test/APINotes/module-cache.m66
-rw-r--r--clang/test/APINotes/namespaces.cpp69
-rw-r--r--clang/test/APINotes/nullability.c21
-rw-r--r--clang/test/APINotes/nullability.m46
-rw-r--r--clang/test/APINotes/objc-forward-declarations.m12
-rw-r--r--clang/test/APINotes/objc_designated_inits.m17
-rw-r--r--clang/test/APINotes/properties.m42
-rw-r--r--clang/test/APINotes/retain-count-convention.m38
-rw-r--r--clang/test/APINotes/search-order.m25
-rw-r--r--clang/test/APINotes/swift-import-as.cpp16
-rw-r--r--clang/test/APINotes/templates.cpp9
-rw-r--r--clang/test/APINotes/top-level-private-modules.c8
-rw-r--r--clang/test/APINotes/types.m28
-rw-r--r--clang/test/APINotes/versioned-multi.c69
-rw-r--r--clang/test/APINotes/versioned.m187
-rw-r--r--clang/test/APINotes/yaml-convert-diags.c6
-rw-r--r--clang/test/APINotes/yaml-parse-diags.c6
-rw-r--r--clang/test/APINotes/yaml-reader-errors.m5
-rw-r--r--clang/test/AST/ast-dump-invalid.cpp9
-rw-r--r--clang/test/Analysis/ArrayDelete.cpp2
-rw-r--r--clang/test/Analysis/Inputs/system-header-simulator-cxx.h11
-rw-r--r--clang/test/Analysis/analyzer-display-progress.cpp42
-rw-r--r--clang/test/Analysis/analyzer-display-progress.m31
-rw-r--r--clang/test/Analysis/analyzer-note-analysis-entry-points.cpp75
-rw-r--r--clang/test/Analysis/cxx23-static-operator.cpp38
-rw-r--r--clang/test/Analysis/getline-cpp.cpp15
-rw-r--r--clang/test/Analysis/getline-unixapi.c322
-rw-r--r--clang/test/Analysis/inlining/false-positive-suppression.cpp17
-rw-r--r--clang/test/Analysis/stream.c73
-rw-r--r--clang/test/C/C11/n1282.c20
-rw-r--r--clang/test/C/C11/n1365.c60
-rw-r--r--clang/test/C/C2x/n2350.c10
-rw-r--r--clang/test/C/C99/Inputs/nested-include.h3
-rw-r--r--clang/test/C/C99/block-scopes.c34
-rw-r--r--clang/test/C/C99/digraphs.c90
-rw-r--r--clang/test/C/C99/n590.c390
-rw-r--r--clang/test/C/C99/n696.c22
-rw-r--r--clang/test/C/drs/dr0xx.c4
-rw-r--r--clang/test/C/drs/dr290.c20
-rw-r--r--clang/test/C/drs/dr4xx.c17
-rw-r--r--clang/test/C/drs/dr5xx.c2
-rw-r--r--clang/test/ClangScanDeps/modules-extension.c33
-rw-r--r--clang/test/ClangScanDeps/modules-extern-unrelated.m1
-rw-r--r--clang/test/CodeCompletion/member-access.cpp20
-rw-r--r--clang/test/CodeGen/CSKY/csky-abi.c16
-rw-r--r--clang/test/CodeGen/LoongArch/abi-lp64d.c4
-rw-r--r--clang/test/CodeGen/PowerPC/aix-altivec-vaargs.c4
-rw-r--r--clang/test/CodeGen/PowerPC/aix-vaargs.c14
-rw-r--r--clang/test/CodeGen/PowerPC/ppc64le-varargs-f128.c18
-rw-r--r--clang/test/CodeGen/RISCV/riscv-func-attr-target-err.c22
-rw-r--r--clang/test/CodeGen/RISCV/riscv-func-attr-target.c33
-rw-r--r--clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c34
-rw-r--r--clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp32
-rw-r--r--clang/test/CodeGen/RISCV/riscv-vector-callingconv.c17
-rw-r--r--clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp35
-rw-r--r--clang/test/CodeGen/RISCV/riscv32-vararg.c40
-rw-r--r--clang/test/CodeGen/RISCV/riscv64-vararg.c16
-rw-r--r--clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c4
-rw-r--r--clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c12
-rw-r--r--clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/rvv-error.c2
-rw-r--r--clang/test/CodeGen/WebAssembly/wasm-varargs.c16
-rw-r--r--clang/test/CodeGen/X86/va-arg-sse.c4
-rw-r--r--clang/test/CodeGen/X86/x86_64-vaarg.c69
-rw-r--r--clang/test/CodeGen/aapcs-align.cpp4
-rw-r--r--clang/test/CodeGen/aapcs-bitfield-access-unit.c231
-rw-r--r--clang/test/CodeGen/aapcs-bitfield.c1920
-rw-r--r--clang/test/CodeGen/aapcs64-align.cpp8
-rw-r--r--clang/test/CodeGen/aarch64-ABI-align-packed.c14
-rw-r--r--clang/test/CodeGen/aarch64-mixed-target-attributes.c278
-rw-r--r--clang/test/CodeGen/aarch64-varargs.c2
-rw-r--r--clang/test/CodeGen/arm-bitfield-alignment.c23
-rw-r--r--clang/test/CodeGen/arm-varargs.c2
-rw-r--r--clang/test/CodeGen/arm64-be-bitfield.c20
-rw-r--r--clang/test/CodeGen/attr-counted-by-debug-info.c18
-rw-r--r--clang/test/CodeGen/attr-counted-by.c26
-rw-r--r--clang/test/CodeGen/attr-target-clones-aarch64.c94
-rw-r--r--clang/test/CodeGen/attr-target-version.c368
-rw-r--r--clang/test/CodeGen/bitfield-2.c6
-rw-r--r--clang/test/CodeGen/bitfield-access-pad.c396
-rw-r--r--clang/test/CodeGen/bitfield-access-unit.c302
-rw-r--r--clang/test/CodeGen/builtins.c204
-rw-r--r--clang/test/CodeGen/cfi-check-attrs.c5
-rw-r--r--clang/test/CodeGen/cfi-check-fail.c2
-rw-r--r--clang/test/CodeGen/debug-info-bitfield-0-struct.c6
-rw-r--r--clang/test/CodeGen/debug-info-cc.c7
-rw-r--r--clang/test/CodeGen/flexible-array-init.c89
-rw-r--r--clang/test/CodeGen/flexible-array-init.cpp24
-rw-r--r--clang/test/CodeGen/hexagon-linux-vararg.c2
-rw-r--r--clang/test/CodeGen/mips-varargs.c16
-rw-r--r--clang/test/CodeGen/no-bitfield-type-align.c10
-rw-r--r--clang/test/CodeGen/pr53127.cpp4
-rw-r--r--clang/test/CodeGen/struct-x86-darwin.c69
-rw-r--r--clang/test/CodeGen/tbaa-struct.cpp2
-rw-r--r--clang/test/CodeGen/ubsan-builtin-checks.c6
-rw-r--r--clang/test/CodeGen/varargs-with-nonzero-default-address-space.c46
-rw-r--r--clang/test/CodeGen/xcore-abi.c2
-rw-r--r--clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp28
-rw-r--r--clang/test/CodeGenCXX/attr-target-version.cpp62
-rw-r--r--clang/test/CodeGenCXX/auto-var-init.cpp27
-rw-r--r--clang/test/CodeGenCXX/bitfield-access-empty.cpp150
-rw-r--r--clang/test/CodeGenCXX/bitfield-access-tail.cpp115
-rw-r--r--clang/test/CodeGenCXX/bitfield-ir.cpp101
-rw-r--r--clang/test/CodeGenCXX/bitfield.cpp101
-rw-r--r--clang/test/CodeGenCXX/ext-int.cpp12
-rw-r--r--clang/test/CodeGenCXX/ibm128-declarations.cpp4
-rw-r--r--clang/test/CodeGenCXX/mangle-ms-back-references.cpp13
-rw-r--r--clang/test/CodeGenCXX/x86_64-vaarg.cpp52
-rw-r--r--clang/test/CodeGenHLSL/builtins/RWBuffer-elementtype.hlsl2
-rw-r--r--clang/test/CodeGenHLSL/builtins/bitreverse.hlsl155
-rw-r--r--clang/test/CodeGenHLSL/builtins/ceil.hlsl13
-rw-r--r--clang/test/CodeGenHLSL/builtins/dot.hlsl28
-rw-r--r--clang/test/CodeGenHLSL/builtins/floor.hlsl13
-rw-r--r--clang/test/CodeGenHLSL/builtins/pow.hlsl13
-rw-r--r--clang/test/CodeGenHLSL/builtins/reversebits.hlsl75
-rw-r--r--clang/test/CodeGenHLSL/builtins/round.hlsl48
-rw-r--r--clang/test/CodeGenHLSL/builtins/sqrt.hlsl78
-rw-r--r--clang/test/CodeGenHLSL/builtins/wave_get_lane_index_do_while.hlsl40
-rw-r--r--clang/test/CodeGenHLSL/builtins/wave_get_lane_index_simple.hlsl14
-rw-r--r--clang/test/CodeGenHLSL/builtins/wave_get_lane_index_subcall.hlsl21
-rw-r--r--clang/test/CodeGenOpenCL/amdgpu-printf.cl9
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx11-err.cl19
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w32-err.cl9
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w64-err.cl9
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w32.cl38
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w64.cl38
-rw-r--r--clang/test/Driver/aarch64-ptrauth.c5
-rw-r--r--clang/test/Driver/aarch64-sve.c9
-rw-r--r--clang/test/Driver/clang-offload-bundler-asserts-on.c2
-rw-r--r--clang/test/Driver/clang-offload-bundler-standardize.c2
-rw-r--r--clang/test/Driver/clang-offload-bundler.c2
-rw-r--r--clang/test/Driver/darwin-ld-reexports.c21
-rw-r--r--clang/test/Driver/fat-archive-unbundle-ext.c2
-rw-r--r--clang/test/Driver/linker-wrapper-image.c8
-rw-r--r--clang/test/Driver/modules-print-library-module-manifest-path.cpp19
-rw-r--r--clang/test/Driver/msvc-link.c20
-rw-r--r--clang/test/Driver/riscv-profiles.c324
-rw-r--r--clang/test/Driver/toc-conf.c2
-rw-r--r--clang/test/Driver/unsupported-option-gpu.c1
-rw-r--r--clang/test/Format/fail-on-incomplete.cpp4
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI1.h1
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI2.h1
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Basic.h103
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/External.h19
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Simple.h45
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/SimpleAPI.h1
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivate.h5
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivateSPI.h2
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/Simple.yaml3196
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI.h3
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI2.h7
-rw-r--r--clang/test/InstallAPI/Inputs/Simple/SimpleInternalSPI.h5
-rw-r--r--clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/AAA.h3
-rw-r--r--clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/SpecialUmbrella.h1
-rw-r--r--clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/AAA_Private.h3
-rw-r--r--clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h1
-rw-r--r--clang/test/InstallAPI/diagnostics-cpp.test2
-rw-r--r--clang/test/InstallAPI/diagnostics-dsym.test39
-rw-r--r--clang/test/InstallAPI/extra-exclude-headers.test207
-rw-r--r--clang/test/InstallAPI/linker-symbols.test440
-rw-r--r--clang/test/InstallAPI/mismatching-objc-class-symbols.test269
-rw-r--r--clang/test/InstallAPI/symbol-flags.test290
-rw-r--r--clang/test/InstallAPI/umbrella-headers-unix.test40
-rw-r--r--clang/test/InstallAPI/umbrella-headers.test48
-rw-r--r--clang/test/Lexer/has_extension_cxx.cpp5
-rw-r--r--clang/test/Modules/codegen.test2
-rw-r--r--clang/test/Modules/no-local-decl-in-reduced-bmi.cppm33
-rw-r--r--clang/test/OpenMP/atomic_capture_codegen.cpp8
-rw-r--r--clang/test/OpenMP/atomic_read_codegen.c4
-rw-r--r--clang/test/OpenMP/atomic_update_codegen.cpp8
-rw-r--r--clang/test/OpenMP/atomic_write_codegen.c8
-rw-r--r--clang/test/OpenMP/bug54082.c4
-rw-r--r--clang/test/OpenMP/declare_reduction_messages.cpp14
-rw-r--r--clang/test/OpenMP/openmp_check.cpp2
-rw-r--r--clang/test/Options/enable_16bit_types_validation.hlsl25
-rw-r--r--clang/test/Options/enable_16bit_types_validation_spirv.hlsl14
-rw-r--r--clang/test/Parser/cxx03-lambda-extension.cpp5
-rw-r--r--clang/test/Parser/cxx0x-lambda-expressions.cpp116
-rw-r--r--clang/test/Parser/cxx2b-lambdas.cpp45
-rw-r--r--clang/test/Parser/objcxx-lambda-expressions-neg.mm9
-rw-r--r--clang/test/ParserHLSL/group_shared.hlsl4
-rw-r--r--clang/test/Preprocessor/aarch64-target-features.c2
-rw-r--r--clang/test/Sema/aarch64-sme-func-attrs.c8
-rw-r--r--clang/test/Sema/attr-target-clones-aarch64.c2
-rw-r--r--clang/test/Sema/attr-target-version.c8
-rw-r--r--clang/test/Sema/builtin-popcountg.c23
-rw-r--r--clang/test/Sema/constant-builtins-2.c116
-rw-r--r--clang/test/Sema/constant-builtins-all-args-evaluated.cpp34
-rw-r--r--clang/test/Sema/count-builtins.c87
-rw-r--r--clang/test/Sema/flexible-array-in-union.c187
-rw-r--r--clang/test/Sema/format-strings-signedness-fixit.c98
-rw-r--r--clang/test/Sema/format-strings-signedness.c222
-rw-r--r--clang/test/Sema/transparent-union.c4
-rw-r--r--clang/test/Sema/warn-cast-function-type-strict.c12
-rw-r--r--clang/test/SemaCXX/attr-target-version.cpp11
-rw-r--r--clang/test/SemaCXX/cxx20-ctad-type-alias.cpp12
-rw-r--r--clang/test/SemaCXX/cxx2a-template-lambdas.cpp26
-rw-r--r--clang/test/SemaCXX/lambda-expressions.cpp64
-rw-r--r--clang/test/SemaCXX/lambda-implicit-this-capture.cpp1
-rw-r--r--clang/test/SemaCXX/lambda-invalid-capture.cpp1
-rw-r--r--clang/test/SemaCXX/namespace-alias.cpp2
-rw-r--r--clang/test/SemaCXX/new-delete.cpp7
-rw-r--r--clang/test/SemaCXX/warn-cast-function-type-strict.cpp10
-rw-r--r--clang/test/SemaCXX/warn-exit-time-destructors.cpp10
-rw-r--r--clang/test/SemaHLSL/BuiltIns/half-float-only-errors.hlsl17
-rw-r--r--clang/test/SemaHLSL/BuiltIns/pow-errors.hlsl6
-rw-r--r--clang/test/SemaHLSL/BuiltIns/reversebits-errors.hlsl12
-rw-r--r--clang/test/SemaObjC/attr-objc-NSObject.m23
-rw-r--r--clang/test/SemaTemplate/concepts-GH86757.cpp13
-rw-r--r--clang/test/SemaTemplate/concepts-friends.cpp26
-rw-r--r--clang/test/SemaTemplate/concepts.cpp10
-rw-r--r--clang/test/SemaTemplate/ctad.cpp2
-rw-r--r--clang/test/SemaTemplate/deduction-guide.cpp9
-rw-r--r--clang/tools/clang-format/ClangFormat.cpp13
-rwxr-xr-xclang/tools/clang-format/clang-format-diff.py10
-rw-r--r--clang/tools/clang-installapi/ClangInstallAPI.cpp2
-rw-r--r--clang/tools/clang-installapi/InstallAPIOpts.td46
-rw-r--r--clang/tools/clang-installapi/Options.cpp202
-rw-r--r--clang/tools/clang-installapi/Options.h33
-rw-r--r--clang/tools/libclang/CXType.cpp1
-rw-r--r--clang/unittests/AST/DeclPrinterTest.cpp73
-rw-r--r--clang/unittests/AST/DeclTest.cpp16
-rw-r--r--clang/unittests/Analysis/FlowSensitive/DeterminismTest.cpp4
-rw-r--r--clang/unittests/Analysis/FlowSensitive/TransferTest.cpp33
-rw-r--r--clang/unittests/Analysis/FlowSensitive/UncheckedOptionalAccessModelTest.cpp69
-rw-r--r--clang/unittests/Format/FormatTest.cpp77
-rw-r--r--clang/unittests/Format/FormatTestTableGen.cpp32
-rw-r--r--clang/unittests/Format/QualifierFixerTest.cpp36
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp16
-rw-r--r--clang/unittests/Interpreter/CMakeLists.txt1
-rw-r--r--clang/unittests/Interpreter/InterpreterExtensionsTest.cpp141
-rw-r--r--clang/unittests/Interpreter/InterpreterTest.cpp6
-rw-r--r--clang/unittests/Lex/PPDependencyDirectivesTest.cpp11
-rw-r--r--clang/unittests/StaticAnalyzer/CMakeLists.txt2
-rw-r--r--clang/unittests/StaticAnalyzer/IsCLibraryFunctionTest.cpp84
-rw-r--r--clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp145
-rw-r--r--clang/utils/TableGen/MveEmitter.cpp2
-rw-r--r--clang/utils/TableGen/RISCVVEmitter.cpp4
-rwxr-xr-xclang/utils/analyzer/exploded-graph-rewriter.py31
-rw-r--r--clang/www/analyzer/alpha_checks.html20
-rw-r--r--clang/www/analyzer/available_checks.html27
-rw-r--r--clang/www/c_dr_status.html4
-rw-r--r--clang/www/c_status.html29
-rwxr-xr-xclang/www/cxx_status.html42
-rw-r--r--cmake/Modules/GetDarwinLinkerVersion.cmake19
-rw-r--r--compiler-rt/CMakeLists.txt10
-rw-r--r--compiler-rt/cmake/Modules/CompilerRTCompile.cmake2
-rw-r--r--compiler-rt/cmake/config-ix.cmake2
-rw-r--r--compiler-rt/include/sanitizer/linux_syscall_hooks.h16
-rw-r--r--compiler-rt/lib/asan/CMakeLists.txt3
-rw-r--r--compiler-rt/lib/asan/asan_interceptors.cpp4
-rw-r--r--compiler-rt/lib/asan/tests/asan_noinst_test.cpp14
-rw-r--r--compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp2
-rw-r--r--compiler-rt/lib/hwasan/hwasan_interceptors.cpp4
-rw-r--r--compiler-rt/lib/interception/interception_win.cpp11
-rw-r--r--compiler-rt/lib/msan/msan_interceptors.cpp2
-rw-r--r--compiler-rt/lib/msan/msan_linux.cpp4
-rw-r--r--compiler-rt/lib/msan/tests/CMakeLists.txt6
-rw-r--r--compiler-rt/lib/sanitizer_common/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc12
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc26
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h16
-rw-r--r--compiler-rt/lib/scudo/standalone/combined.h31
-rw-r--r--compiler-rt/lib/scudo/standalone/fuchsia.cpp7
-rw-r--r--compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp7
-rw-r--r--compiler-rt/lib/scudo/standalone/primary64.h5
-rw-r--r--compiler-rt/lib/scudo/standalone/report_linux.cpp25
-rw-r--r--compiler-rt/lib/scudo/standalone/secondary.h10
-rw-r--r--compiler-rt/lib/scudo/standalone/stack_depot.h2
-rw-r--r--compiler-rt/lib/scudo/standalone/string_utils.cpp148
-rw-r--r--compiler-rt/lib/scudo/standalone/string_utils.h13
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/combined_test.cpp78
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/strings_test.cpp43
-rw-r--r--compiler-rt/lib/scudo/standalone/tests/vector_test.cpp44
-rw-r--r--compiler-rt/lib/scudo/standalone/tsd.h6
-rw-r--r--compiler-rt/lib/scudo/standalone/vector.h21
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp79
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp10
-rw-r--r--compiler-rt/lib/tsan/tests/CMakeLists.txt2
-rw-r--r--compiler-rt/lib/ubsan/CMakeLists.txt1
-rw-r--r--compiler-rt/test/lit.common.cfg.py4
-rw-r--r--compiler-rt/test/lit.common.configured.in1
-rw-r--r--compiler-rt/test/tsan/Linux/signal_in_futex_wait.cpp113
-rw-r--r--compiler-rt/test/tsan/signal_errno.cpp6
-rw-r--r--compiler-rt/test/tsan/signal_in_mutex_lock.cpp71
-rw-r--r--compiler-rt/test/tsan/signal_reset.cpp8
-rw-r--r--compiler-rt/test/tsan/signal_sync.cpp4
-rw-r--r--compiler-rt/test/tsan/signal_thread.cpp4
-rw-r--r--compiler-rt/test/tsan/signal_thread2.cpp4
-rw-r--r--flang/include/flang/Common/Version.h6
-rw-r--r--flang/include/flang/Common/api-attrs.h (renamed from flang/include/flang/Runtime/api-attrs.h)4
-rw-r--r--flang/include/flang/Common/idioms.h4
-rw-r--r--flang/include/flang/Common/optional.h2
-rw-r--r--flang/include/flang/Common/real.h3
-rw-r--r--flang/include/flang/Common/reference-wrapper.h2
-rw-r--r--flang/include/flang/Common/restorer.h11
-rw-r--r--flang/include/flang/Common/template.h2
-rw-r--r--flang/include/flang/Common/uint128.h3
-rw-r--r--flang/include/flang/Common/unwrap.h2
-rw-r--r--flang/include/flang/Common/variant.h30
-rw-r--r--flang/include/flang/Common/visit.h7
-rw-r--r--flang/include/flang/Decimal/binary-floating-point.h51
-rw-r--r--flang/include/flang/Decimal/decimal.h53
-rw-r--r--flang/include/flang/Frontend/CodeGenOptions.h6
-rw-r--r--flang/include/flang/Frontend/LangOptions.h6
-rw-r--r--flang/include/flang/ISO_Fortran_binding_wrapper.h4
-rw-r--r--flang/include/flang/Lower/ConvertVariable.h6
-rw-r--r--flang/include/flang/Lower/OpenMP.h12
-rw-r--r--flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h248
-rw-r--r--flang/include/flang/Optimizer/CodeGen/TypeConverter.h4
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIRAttr.td16
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIROps.td25
-rw-r--r--flang/include/flang/Parser/dump-parse-tree.h1
-rw-r--r--flang/include/flang/Parser/parse-tree-visitor.h12
-rw-r--r--flang/include/flang/Parser/parse-tree.h6
-rw-r--r--flang/include/flang/Parser/tools.h1
-rw-r--r--flang/include/flang/Runtime/entry-names.h2
-rw-r--r--flang/include/flang/Runtime/io-api.h16
-rw-r--r--flang/include/flang/Runtime/iostat.h3
-rw-r--r--flang/include/flang/Runtime/memory.h15
-rw-r--r--flang/include/flang/Runtime/reduce.h257
-rw-r--r--flang/include/flang/Runtime/reduction.h2
-rw-r--r--flang/include/flang/Runtime/type-code.h1
-rw-r--r--flang/include/flang/Semantics/tools.h6
-rw-r--r--flang/lib/Evaluate/check-expression.cpp10
-rw-r--r--flang/lib/Evaluate/constant.cpp2
-rw-r--r--flang/lib/Lower/Bridge.cpp48
-rw-r--r--flang/lib/Lower/ConvertConstant.cpp22
-rw-r--r--flang/lib/Lower/ConvertExpr.cpp10
-rw-r--r--flang/lib/Lower/ConvertExprToHLFIR.cpp30
-rw-r--r--flang/lib/Lower/ConvertVariable.cpp48
-rw-r--r--flang/lib/Lower/HostAssociations.cpp9
-rw-r--r--flang/lib/Lower/OpenACC.cpp72
-rw-r--r--flang/lib/Lower/OpenMP/ClauseProcessor.cpp243
-rw-r--r--flang/lib/Lower/OpenMP/ClauseProcessor.h36
-rw-r--r--flang/lib/Lower/OpenMP/ClauseT.h714
-rw-r--r--flang/lib/Lower/OpenMP/Clauses.cpp788
-rw-r--r--flang/lib/Lower/OpenMP/Clauses.h253
-rw-r--r--flang/lib/Lower/OpenMP/DataSharingProcessor.cpp8
-rw-r--r--flang/lib/Lower/OpenMP/DataSharingProcessor.h2
-rw-r--r--flang/lib/Lower/OpenMP/OpenMP.cpp1101
-rw-r--r--flang/lib/Lower/OpenMP/ReductionProcessor.cpp6
-rw-r--r--flang/lib/Lower/OpenMP/Utils.cpp19
-rw-r--r--flang/lib/Lower/OpenMP/Utils.h3
-rw-r--r--flang/lib/Lower/PFTBuilder.cpp9
-rw-r--r--flang/lib/Optimizer/Builder/IntrinsicCall.cpp8
-rw-r--r--flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp34
-rw-r--r--flang/lib/Optimizer/CodeGen/CMakeLists.txt1
-rw-r--r--flang/lib/Optimizer/CodeGen/CodeGen.cpp486
-rw-r--r--flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp315
-rw-r--r--flang/lib/Optimizer/Dialect/FIRAttr.cpp3
-rw-r--r--flang/lib/Parser/Fortran-parsers.cpp31
-rw-r--r--flang/lib/Parser/tools.cpp4
-rw-r--r--flang/lib/Parser/unparse.cpp4
-rw-r--r--flang/lib/Semantics/check-call.cpp3
-rw-r--r--flang/lib/Semantics/check-declarations.cpp9
-rw-r--r--flang/lib/Semantics/resolve-directives.cpp9
-rw-r--r--flang/lib/Semantics/resolve-names.cpp106
-rw-r--r--flang/lib/Semantics/tools.cpp25
-rw-r--r--flang/runtime/CMakeLists.txt29
-rw-r--r--flang/runtime/buffer.cpp6
-rw-r--r--flang/runtime/buffer.h42
-rw-r--r--flang/runtime/complex-reduction.c22
-rw-r--r--flang/runtime/complex-reduction.h45
-rw-r--r--flang/runtime/connection.cpp15
-rw-r--r--flang/runtime/connection.h29
-rw-r--r--flang/runtime/derived.h2
-rw-r--r--flang/runtime/descriptor-io.cpp7
-rw-r--r--flang/runtime/descriptor-io.h36
-rw-r--r--flang/runtime/edit-input.cpp90
-rw-r--r--flang/runtime/edit-input.h28
-rw-r--r--flang/runtime/edit-output.cpp84
-rw-r--r--flang/runtime/edit-output.h76
-rw-r--r--flang/runtime/emit-encoded.h7
-rw-r--r--flang/runtime/environment.h2
-rw-r--r--flang/runtime/external-unit.cpp336
-rw-r--r--flang/runtime/file.cpp22
-rw-r--r--flang/runtime/file.h12
-rw-r--r--flang/runtime/format-implementation.h16
-rw-r--r--flang/runtime/format.cpp2
-rw-r--r--flang/runtime/format.h39
-rw-r--r--flang/runtime/freestanding-tools.h27
-rw-r--r--flang/runtime/internal-unit.cpp31
-rw-r--r--flang/runtime/internal-unit.h26
-rw-r--r--flang/runtime/io-api.cpp62
-rw-r--r--flang/runtime/io-error.cpp24
-rw-r--r--flang/runtime/io-error.h36
-rw-r--r--flang/runtime/io-stmt.cpp36
-rw-r--r--flang/runtime/io-stmt.h412
-rw-r--r--flang/runtime/iostat.cpp4
-rw-r--r--flang/runtime/lock.h23
-rw-r--r--flang/runtime/memory.cpp12
-rw-r--r--flang/runtime/namelist.cpp15
-rw-r--r--flang/runtime/namelist.h3
-rw-r--r--flang/runtime/non-tbp-dio.h2
-rw-r--r--flang/runtime/numeric-templates.h9
-rw-r--r--flang/runtime/pointer.cpp2
-rw-r--r--flang/runtime/pseudo-unit.cpp169
-rw-r--r--flang/runtime/reduce.cpp526
-rw-r--r--flang/runtime/reduction-templates.h55
-rw-r--r--flang/runtime/stat.h2
-rw-r--r--flang/runtime/terminator.h4
-rw-r--r--flang/runtime/tools.cpp29
-rw-r--r--flang/runtime/tools.h37
-rw-r--r--flang/runtime/unit.cpp330
-rw-r--r--flang/runtime/unit.h206
-rw-r--r--flang/runtime/utf.cpp6
-rw-r--r--flang/runtime/utf.h14
-rw-r--r--flang/test/Fir/boxproc-2.fir7
-rw-r--r--flang/test/Lower/CUDA/cuda-data-transfer.cuf57
-rw-r--r--flang/test/Lower/HLFIR/cray-pointers.f90114
-rw-r--r--flang/test/Lower/HLFIR/procedure-pointer-component-structure-constructor.f9071
-rw-r--r--flang/test/Lower/OpenACC/acc-kernels-loop.f90131
-rw-r--r--flang/test/Lower/OpenACC/acc-loop.f902
-rw-r--r--flang/test/Lower/OpenACC/acc-parallel-loop.f90135
-rw-r--r--flang/test/Lower/OpenACC/acc-private.f9026
-rw-r--r--flang/test/Lower/OpenACC/acc-serial-loop.f90123
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-multi.f9081
-rw-r--r--flang/test/Lower/cray-pointer.f904
-rw-r--r--flang/test/Parser/unrecognized-dir.f904
-rw-r--r--flang/test/Semantics/OpenMP/do20.f9018
-rw-r--r--flang/test/Semantics/deferred01.f9028
-rw-r--r--flang/test/Semantics/init01.f902
-rw-r--r--flang/test/Semantics/resolve61.f909
-rw-r--r--flang/test/Semantics/resolve81.f901
-rw-r--r--flang/test/Semantics/structconst09.f9037
-rw-r--r--flang/test/Transforms/stack-arrays.fir9
-rw-r--r--flang/unittests/Runtime/Reduction.cpp37
-rw-r--r--libc/cmake/modules/LLVMLibCCompileOptionRules.cmake9
-rw-r--r--libc/cmake/modules/prepare_libc_gpu_build.cmake5
-rw-r--r--libc/config/config.json10
-rw-r--r--libc/config/linux/aarch64/entrypoints.txt33
-rw-r--r--libc/config/linux/api.td4
-rw-r--r--libc/config/linux/arm/entrypoints.txt24
-rw-r--r--libc/config/linux/riscv/entrypoints.txt33
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt59
-rw-r--r--libc/config/windows/entrypoints.txt24
-rw-r--r--libc/docs/configure.rst3
-rw-r--r--libc/docs/dev/code_style.rst2
-rw-r--r--libc/docs/dev/printf_behavior.rst4
-rw-r--r--libc/docs/dev/undefined_behavior.rst8
-rw-r--r--libc/docs/gpu/rpc.rst15
-rw-r--r--libc/docs/math/index.rst511
-rw-r--r--libc/docs/stdio.rst2
-rw-r--r--libc/include/CMakeLists.txt9
-rw-r--r--libc/include/arpa/inet.h.def2
-rw-r--r--libc/include/assert.h.def2
-rw-r--r--libc/include/ctype.h.def2
-rw-r--r--libc/include/dirent.h.def2
-rw-r--r--libc/include/errno.h.def4
-rw-r--r--libc/include/fcntl.h.def4
-rw-r--r--libc/include/features.h.def4
-rw-r--r--libc/include/fenv.h.def4
-rw-r--r--libc/include/float.h.def2
-rw-r--r--libc/include/gpu/rpc.h.def4
-rw-r--r--libc/include/inttypes.h.def4
-rw-r--r--libc/include/limits.h.def2
-rw-r--r--libc/include/llvm-libc-macros/containerof-macro.h2
-rw-r--r--libc/include/llvm-libc-macros/math-macros.h47
-rw-r--r--libc/include/llvm-libc-macros/sys-queue-macros.h4
-rw-r--r--libc/include/llvm-libc-types/CMakeLists.txt10
-rw-r--r--libc/include/llvm-libc-types/__mutex_type.h2
-rw-r--r--libc/include/llvm-libc-types/cookie_io_functions_t.h6
-rw-r--r--libc/include/llvm-libc-types/fd_set.h2
-rw-r--r--libc/include/llvm-libc-types/fsblkcnt_t.h14
-rw-r--r--libc/include/llvm-libc-types/fsfilcnt_t.h14
-rw-r--r--libc/include/llvm-libc-types/mtx_t.h2
-rw-r--r--libc/include/llvm-libc-types/once_flag.h2
-rw-r--r--libc/include/llvm-libc-types/pthread_attr_t.h2
-rw-r--r--libc/include/llvm-libc-types/pthread_mutex_t.h2
-rw-r--r--libc/include/llvm-libc-types/pthread_once_t.h2
-rw-r--r--libc/include/llvm-libc-types/pthread_t.h2
-rw-r--r--libc/include/llvm-libc-types/siginfo_t.h8
-rw-r--r--libc/include/llvm-libc-types/sigset_t.h2
-rw-r--r--libc/include/llvm-libc-types/stack_t.h2
-rw-r--r--libc/include/llvm-libc-types/struct_dirent.h4
-rw-r--r--libc/include/llvm-libc-types/struct_epoll_event.h2
-rw-r--r--libc/include/llvm-libc-types/struct_rlimit.h2
-rw-r--r--libc/include/llvm-libc-types/struct_rusage.h2
-rw-r--r--libc/include/llvm-libc-types/struct_sched_param.h6
-rw-r--r--libc/include/llvm-libc-types/struct_sigaction.h4
-rw-r--r--libc/include/llvm-libc-types/struct_sockaddr.h2
-rw-r--r--libc/include/llvm-libc-types/struct_sockaddr_un.h2
-rw-r--r--libc/include/llvm-libc-types/struct_stat.h20
-rw-r--r--libc/include/llvm-libc-types/struct_statvfs.h29
-rw-r--r--libc/include/llvm-libc-types/struct_termios.h6
-rw-r--r--libc/include/llvm-libc-types/struct_timespec.h2
-rw-r--r--libc/include/llvm-libc-types/struct_timeval.h4
-rw-r--r--libc/include/llvm-libc-types/thrd_t.h2
-rw-r--r--libc/include/math.h.def6
-rw-r--r--libc/include/pthread.h.def2
-rw-r--r--libc/include/sched.h.def4
-rw-r--r--libc/include/search.h.def2
-rw-r--r--libc/include/setjmp.h.def2
-rw-r--r--libc/include/signal.h.def4
-rw-r--r--libc/include/spawn.h.def2
-rw-r--r--libc/include/stdbit.h.def4
-rw-r--r--libc/include/stdckdint.h.def4
-rw-r--r--libc/include/stdfix.h.def4
-rw-r--r--libc/include/stdint.h.def2
-rw-r--r--libc/include/stdio.h.def6
-rw-r--r--libc/include/stdlib.h.def4
-rw-r--r--libc/include/string.h.def4
-rw-r--r--libc/include/strings.h.def2
-rw-r--r--libc/include/sys/auxv.h.def4
-rw-r--r--libc/include/sys/epoll.h.def2
-rw-r--r--libc/include/sys/ioctl.h.def4
-rw-r--r--libc/include/sys/mman.h.def4
-rw-r--r--libc/include/sys/prctl.h.def2
-rw-r--r--libc/include/sys/queue.h2
-rw-r--r--libc/include/sys/random.h.def4
-rw-r--r--libc/include/sys/resource.h.def4
-rw-r--r--libc/include/sys/select.h.def4
-rw-r--r--libc/include/sys/sendfile.h.def2
-rw-r--r--libc/include/sys/socket.h.def4
-rw-r--r--libc/include/sys/stat.h.def4
-rw-r--r--libc/include/sys/statvfs.h.def16
-rw-r--r--libc/include/sys/time.h.def6
-rw-r--r--libc/include/sys/types.h.def2
-rw-r--r--libc/include/sys/utsname.h.def2
-rw-r--r--libc/include/sys/wait.h.def4
-rw-r--r--libc/include/termios.h.def4
-rw-r--r--libc/include/threads.h.def2
-rw-r--r--libc/include/time.h.def4
-rw-r--r--libc/include/uchar.h.def2
-rw-r--r--libc/include/unistd.h.def6
-rw-r--r--libc/include/wchar.h.def4
-rw-r--r--libc/spec/posix.td30
-rw-r--r--libc/spec/stdc.td78
-rw-r--r--libc/src/__support/CPP/CMakeLists.txt7
-rw-r--r--libc/src/__support/CPP/atomic.h74
-rw-r--r--libc/src/__support/CPP/bit.h25
-rw-r--r--libc/src/__support/CPP/iterator.h35
-rw-r--r--libc/src/__support/CPP/type_traits.h1
-rw-r--r--libc/src/__support/CPP/type_traits/add_pointer.h1
-rw-r--r--libc/src/__support/CPP/type_traits/decay.h1
-rw-r--r--libc/src/__support/CPP/type_traits/is_constant_evaluated.h21
-rw-r--r--libc/src/__support/CPP/type_traits/is_destructible.h3
-rw-r--r--libc/src/__support/CPP/type_traits/is_function.h3
-rw-r--r--libc/src/__support/CPP/type_traits/is_lvalue_reference.h3
-rw-r--r--libc/src/__support/CPP/type_traits/is_reference.h3
-rw-r--r--libc/src/__support/CPP/type_traits/is_rvalue_reference.h3
-rw-r--r--libc/src/__support/CPP/type_traits/is_trivially_copyable.h1
-rw-r--r--libc/src/__support/CPP/type_traits/is_trivially_destructible.h5
-rw-r--r--libc/src/__support/CPP/type_traits/remove_all_extents.h3
-rw-r--r--libc/src/__support/FPUtil/BasicOperations.h191
-rw-r--r--libc/src/__support/FPUtil/CMakeLists.txt4
-rw-r--r--libc/src/__support/FPUtil/FEnvImpl.h1
-rw-r--r--libc/src/__support/FPUtil/NearestIntegerOperations.h136
-rw-r--r--libc/src/__support/FPUtil/gpu/FMA.h8
-rw-r--r--libc/src/__support/File/file.h4
-rw-r--r--libc/src/__support/OSUtil/CMakeLists.txt27
-rw-r--r--libc/src/__support/OSUtil/baremetal/CMakeLists.txt6
-rw-r--r--libc/src/__support/OSUtil/baremetal/io.cpp22
-rw-r--r--libc/src/__support/OSUtil/baremetal/io.h7
-rw-r--r--libc/src/__support/OSUtil/baremetal/quick_exit.cpp (renamed from libc/src/__support/OSUtil/baremetal/quick_exit.h)13
-rw-r--r--libc/src/__support/OSUtil/darwin/CMakeLists.txt1
-rw-r--r--libc/src/__support/OSUtil/darwin/quick_exit.h26
-rw-r--r--libc/src/__support/OSUtil/gpu/CMakeLists.txt1
-rw-r--r--libc/src/__support/OSUtil/gpu/quick_exit.cpp9
-rw-r--r--libc/src/__support/OSUtil/linux/CMakeLists.txt5
-rw-r--r--libc/src/__support/OSUtil/linux/quick_exit.cpp (renamed from libc/src/__support/OSUtil/linux/quick_exit.h)11
-rw-r--r--libc/src/__support/OSUtil/quick_exit.h15
-rw-r--r--libc/src/__support/UInt.h54
-rw-r--r--libc/src/__support/char_vector.h8
-rw-r--r--libc/src/__support/fixedvector.h8
-rw-r--r--libc/src/__support/macros/config.h26
-rw-r--r--libc/src/__support/macros/optimization.h1
-rw-r--r--libc/src/__support/macros/sanitizer.h3
-rw-r--r--libc/src/__support/math_extras.h9
-rw-r--r--libc/src/__support/memory_size.h2
-rw-r--r--libc/src/math/CMakeLists.txt65
-rw-r--r--libc/src/math/canonicalize.h18
-rw-r--r--libc/src/math/canonicalizef.h18
-rw-r--r--libc/src/math/canonicalizef128.h20
-rw-r--r--libc/src/math/canonicalizel.h18
-rw-r--r--libc/src/math/docs/add_math_function.md2
-rw-r--r--libc/src/math/fmaximum.h19
-rw-r--r--libc/src/math/fmaximum_mag.h19
-rw-r--r--libc/src/math/fmaximum_mag_num.h (renamed from libc/src/__support/OSUtil/gpu/quick_exit.h)10
-rw-r--r--libc/src/math/fmaximum_mag_numf.h19
-rw-r--r--libc/src/math/fmaximum_mag_numf128.h21
-rw-r--r--libc/src/math/fmaximum_mag_numl.h19
-rw-r--r--libc/src/math/fmaximum_magf.h19
-rw-r--r--libc/src/math/fmaximum_magf128.h21
-rw-r--r--libc/src/math/fmaximum_magl.h19
-rw-r--r--libc/src/math/fmaximum_num.h19
-rw-r--r--libc/src/math/fmaximum_numf.h19
-rw-r--r--libc/src/math/fmaximum_numf128.h21
-rw-r--r--libc/src/math/fmaximum_numl.h19
-rw-r--r--libc/src/math/fmaximumf.h19
-rw-r--r--libc/src/math/fmaximumf128.h21
-rw-r--r--libc/src/math/fmaximuml.h19
-rw-r--r--libc/src/math/fminimum.h19
-rw-r--r--libc/src/math/fminimum_mag.h19
-rw-r--r--libc/src/math/fminimum_mag_num.h19
-rw-r--r--libc/src/math/fminimum_mag_numf.h19
-rw-r--r--libc/src/math/fminimum_mag_numf128.h21
-rw-r--r--libc/src/math/fminimum_mag_numl.h19
-rw-r--r--libc/src/math/fminimum_magf.h19
-rw-r--r--libc/src/math/fminimum_magf128.h21
-rw-r--r--libc/src/math/fminimum_magl.h19
-rw-r--r--libc/src/math/fminimum_num.h19
-rw-r--r--libc/src/math/fminimum_numf.h19
-rw-r--r--libc/src/math/fminimum_numf128.h21
-rw-r--r--libc/src/math/fminimum_numl.h19
-rw-r--r--libc/src/math/fminimumf.h19
-rw-r--r--libc/src/math/fminimumf128.h21
-rw-r--r--libc/src/math/fminimuml.h19
-rw-r--r--libc/src/math/fromfp.h18
-rw-r--r--libc/src/math/fromfpf.h18
-rw-r--r--libc/src/math/fromfpf128.h20
-rw-r--r--libc/src/math/fromfpl.h18
-rw-r--r--libc/src/math/fromfpx.h18
-rw-r--r--libc/src/math/fromfpxf.h18
-rw-r--r--libc/src/math/fromfpxf128.h20
-rw-r--r--libc/src/math/fromfpxl.h18
-rw-r--r--libc/src/math/generic/CMakeLists.txt639
-rw-r--r--libc/src/math/generic/canonicalize.cpp19
-rw-r--r--libc/src/math/generic/canonicalizef.cpp19
-rw-r--r--libc/src/math/generic/canonicalizef128.cpp19
-rw-r--r--libc/src/math/generic/canonicalizel.cpp20
-rw-r--r--libc/src/math/generic/fmaximum.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_mag.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_mag_num.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_mag_numf.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_mag_numf128.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_mag_numl.cpp20
-rw-r--r--libc/src/math/generic/fmaximum_magf.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_magf128.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_magl.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_num.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_numf.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_numf128.cpp19
-rw-r--r--libc/src/math/generic/fmaximum_numl.cpp19
-rw-r--r--libc/src/math/generic/fmaximumf.cpp19
-rw-r--r--libc/src/math/generic/fmaximumf128.cpp19
-rw-r--r--libc/src/math/generic/fmaximuml.cpp19
-rw-r--r--libc/src/math/generic/fminimum.cpp19
-rw-r--r--libc/src/math/generic/fminimum_mag.cpp19
-rw-r--r--libc/src/math/generic/fminimum_mag_num.cpp19
-rw-r--r--libc/src/math/generic/fminimum_mag_numf.cpp19
-rw-r--r--libc/src/math/generic/fminimum_mag_numf128.cpp19
-rw-r--r--libc/src/math/generic/fminimum_mag_numl.cpp20
-rw-r--r--libc/src/math/generic/fminimum_magf.cpp19
-rw-r--r--libc/src/math/generic/fminimum_magf128.cpp19
-rw-r--r--libc/src/math/generic/fminimum_magl.cpp19
-rw-r--r--libc/src/math/generic/fminimum_num.cpp19
-rw-r--r--libc/src/math/generic/fminimum_numf.cpp19
-rw-r--r--libc/src/math/generic/fminimum_numf128.cpp19
-rw-r--r--libc/src/math/generic/fminimum_numl.cpp19
-rw-r--r--libc/src/math/generic/fminimumf.cpp19
-rw-r--r--libc/src/math/generic/fminimumf128.cpp19
-rw-r--r--libc/src/math/generic/fminimuml.cpp19
-rw-r--r--libc/src/math/generic/fromfp.cpp19
-rw-r--r--libc/src/math/generic/fromfpf.cpp19
-rw-r--r--libc/src/math/generic/fromfpf128.cpp20
-rw-r--r--libc/src/math/generic/fromfpl.cpp20
-rw-r--r--libc/src/math/generic/fromfpx.cpp19
-rw-r--r--libc/src/math/generic/fromfpxf.cpp19
-rw-r--r--libc/src/math/generic/fromfpxf128.cpp20
-rw-r--r--libc/src/math/generic/fromfpxl.cpp20
-rw-r--r--libc/src/math/generic/ufromfp.cpp19
-rw-r--r--libc/src/math/generic/ufromfpf.cpp19
-rw-r--r--libc/src/math/generic/ufromfpf128.cpp20
-rw-r--r--libc/src/math/generic/ufromfpl.cpp20
-rw-r--r--libc/src/math/generic/ufromfpx.cpp19
-rw-r--r--libc/src/math/generic/ufromfpxf.cpp19
-rw-r--r--libc/src/math/generic/ufromfpxf128.cpp20
-rw-r--r--libc/src/math/generic/ufromfpxl.cpp20
-rw-r--r--libc/src/math/ufromfp.h18
-rw-r--r--libc/src/math/ufromfpf.h18
-rw-r--r--libc/src/math/ufromfpf128.h20
-rw-r--r--libc/src/math/ufromfpl.h18
-rw-r--r--libc/src/math/ufromfpx.h18
-rw-r--r--libc/src/math/ufromfpxf.h18
-rw-r--r--libc/src/math/ufromfpxf128.h20
-rw-r--r--libc/src/math/ufromfpxl.h18
-rw-r--r--libc/src/stdio/CMakeLists.txt7
-rw-r--r--libc/src/stdio/linux/CMakeLists.txt12
-rw-r--r--libc/src/stdio/linux/rename.cpp28
-rw-r--r--libc/src/stdio/printf_core/converter_utils.h16
-rw-r--r--libc/src/stdio/printf_core/core_structs.h9
-rw-r--r--libc/src/stdio/printf_core/int_converter.h5
-rw-r--r--libc/src/stdio/printf_core/parser.h69
-rw-r--r--libc/src/stdio/printf_core/write_int_converter.h2
-rw-r--r--libc/src/stdio/rename.h18
-rw-r--r--libc/src/stdlib/CMakeLists.txt25
-rw-r--r--libc/src/stdlib/_Exit.cpp3
-rw-r--r--libc/src/stdlib/atexit.cpp31
-rw-r--r--libc/src/stdlib/exit.cpp11
-rw-r--r--libc/src/stdlib/str_from_util.h2
-rw-r--r--libc/src/stdlib/strfromd.cpp39
-rw-r--r--libc/src/stdlib/strfromd.h21
-rw-r--r--libc/src/stdlib/strfromf.cpp3
-rw-r--r--libc/src/stdlib/strfromf.h2
-rw-r--r--libc/src/stdlib/strfroml.cpp44
-rw-r--r--libc/src/stdlib/strfroml.h21
-rw-r--r--libc/src/stdlib/strtod.cpp2
-rw-r--r--libc/src/stdlib/strtof.cpp2
-rw-r--r--libc/src/stdlib/strtold.cpp2
-rw-r--r--libc/src/string/memory_utils/generic/builtin.h8
-rw-r--r--libc/src/string/memory_utils/utils.h5
-rw-r--r--libc/src/sys/CMakeLists.txt1
-rw-r--r--libc/src/sys/statvfs/CMakeLists.txt17
-rw-r--r--libc/src/sys/statvfs/fstatvfs.h20
-rw-r--r--libc/src/sys/statvfs/linux/CMakeLists.txt36
-rw-r--r--libc/src/sys/statvfs/linux/fstatvfs.cpp26
-rw-r--r--libc/src/sys/statvfs/linux/statfs_utils.h95
-rw-r--r--libc/src/sys/statvfs/linux/statvfs.cpp28
-rw-r--r--libc/src/sys/statvfs/statvfs.h20
-rw-r--r--libc/test/UnitTest/CMakeLists.txt1
-rw-r--r--libc/test/UnitTest/FPMatcher.h10
-rw-r--r--libc/test/UnitTest/PrintfMatcher.cpp6
-rw-r--r--libc/test/src/__support/CPP/bit_test.cpp9
-rw-r--r--libc/test/src/__support/arg_list_test.cpp2
-rw-r--r--libc/test/src/__support/fixedvector_test.cpp16
-rw-r--r--libc/test/src/__support/math_extras_test.cpp4
-rw-r--r--libc/test/src/math/smoke/CMakeLists.txt674
-rw-r--r--libc/test/src/math/smoke/CanonicalizeTest.h209
-rw-r--r--libc/test/src/math/smoke/FMaximumMagNumTest.h101
-rw-r--r--libc/test/src/math/smoke/FMaximumMagTest.h89
-rw-r--r--libc/test/src/math/smoke/FMaximumNumTest.h100
-rw-r--r--libc/test/src/math/smoke/FMaximumTest.h88
-rw-r--r--libc/test/src/math/smoke/FMinimumMagNumTest.h101
-rw-r--r--libc/test/src/math/smoke/FMinimumMagTest.h89
-rw-r--r--libc/test/src/math/smoke/FMinimumNumTest.h100
-rw-r--r--libc/test/src/math/smoke/FMinimumTest.h88
-rw-r--r--libc/test/src/math/smoke/FromfpTest.h528
-rw-r--r--libc/test/src/math/smoke/FromfpxTest.h690
-rw-r--r--libc/test/src/math/smoke/UfromfpTest.h462
-rw-r--r--libc/test/src/math/smoke/UfromfpxTest.h551
-rw-r--r--libc/test/src/math/smoke/canonicalize_test.cpp13
-rw-r--r--libc/test/src/math/smoke/canonicalizef128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/canonicalizef_test.cpp13
-rw-r--r--libc/test/src/math/smoke/canonicalizel_test.cpp19
-rw-r--r--libc/test/src/math/smoke/fmaximum_mag_num_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_mag_numf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_mag_numf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_mag_numl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_mag_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_magf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_magf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_magl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_num_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_numf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_numf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_numl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximum_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximumf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximumf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fmaximuml_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_mag_num_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_mag_numf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_mag_numf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_mag_numl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_mag_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_magf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_magf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_magl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_num_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_numf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_numf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_numl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimum_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimumf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimumf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fminimuml_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfp_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpx_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpxf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpxf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/fromfpxl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfp_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpl_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpx_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpxf128_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpxf_test.cpp13
-rw-r--r--libc/test/src/math/smoke/ufromfpxl_test.cpp13
-rw-r--r--libc/test/src/stdio/CMakeLists.txt15
-rw-r--r--libc/test/src/stdio/printf_core/parser_test.cpp36
-rw-r--r--libc/test/src/stdio/rename_test.cpp51
-rw-r--r--libc/test/src/stdio/sprintf_test.cpp87
-rw-r--r--libc/test/src/stdlib/CMakeLists.txt32
-rw-r--r--libc/test/src/stdlib/StrfromTest.h500
-rw-r--r--libc/test/src/stdlib/strfromd_test.cpp13
-rw-r--r--libc/test/src/stdlib/strfromf_test.cpp98
-rw-r--r--libc/test/src/stdlib/strfroml_test.cpp13
-rw-r--r--libc/test/src/sys/CMakeLists.txt1
-rw-r--r--libc/test/src/sys/statvfs/CMakeLists.txt3
-rw-r--r--libc/test/src/sys/statvfs/linux/CMakeLists.txt29
-rw-r--r--libc/test/src/sys/statvfs/linux/fstatvfs_test.cpp42
-rw-r--r--libc/test/src/sys/statvfs/linux/statvfs_test.cpp47
-rw-r--r--libc/utils/gpu/loader/Loader.h8
-rw-r--r--libc/utils/gpu/loader/amdgpu/Loader.cpp47
-rw-r--r--libc/utils/gpu/loader/nvptx/Loader.cpp39
-rw-r--r--libc/utils/gpu/server/llvmlibc_rpc_server.h29
-rw-r--r--libc/utils/gpu/server/rpc_server.cpp117
-rw-r--r--libclc/CMakeLists.txt19
-rw-r--r--libclc/cmake/CMakeCLCInformation.cmake1
-rw-r--r--libcxx/benchmarks/CMakeLists.txt1
-rw-r--r--libcxx/benchmarks/algorithms/mismatch.bench.cpp40
-rw-r--r--libcxx/docs/DesignDocs/NodiscardPolicy.rst42
-rw-r--r--libcxx/docs/ReleaseNotes/19.rst5
-rw-r--r--libcxx/docs/Status/Cxx23Issues.csv4
-rw-r--r--libcxx/docs/UsingLibcxx.rst12
-rw-r--r--libcxx/docs/index.rst1
-rw-r--r--libcxx/include/CMakeLists.txt6
-rw-r--r--libcxx/include/__algorithm/copy.h6
-rw-r--r--libcxx/include/__algorithm/copy_backward.h6
-rw-r--r--libcxx/include/__algorithm/copy_move_common.h39
-rw-r--r--libcxx/include/__algorithm/mismatch.h109
-rw-r--r--libcxx/include/__algorithm/move.h6
-rw-r--r--libcxx/include/__algorithm/move_backward.h6
-rw-r--r--libcxx/include/__algorithm/ranges_ends_with.h6
-rw-r--r--libcxx/include/__algorithm/ranges_starts_with.h8
-rw-r--r--libcxx/include/__algorithm/simd_utils.h123
-rw-r--r--libcxx/include/__bit/bit_cast.h9
-rw-r--r--libcxx/include/__bit/countr.h13
-rw-r--r--libcxx/include/__chrono/tzdb_list.h22
-rw-r--r--libcxx/include/__config42
-rw-r--r--libcxx/include/__format/container_adaptor.h4
-rw-r--r--libcxx/include/__format/escaped_output_table.h2
-rw-r--r--libcxx/include/__format/extended_grapheme_cluster_table.h2
-rw-r--r--libcxx/include/__format/parser_std_format_spec.h3
-rw-r--r--libcxx/include/__format/width_estimation_table.h2
-rw-r--r--libcxx/include/__fwd/deque.h26
-rw-r--r--libcxx/include/__fwd/memory.h25
-rw-r--r--libcxx/include/__fwd/queue.h31
-rw-r--r--libcxx/include/__fwd/sstream.h1
-rw-r--r--libcxx/include/__fwd/stack.h26
-rw-r--r--libcxx/include/__fwd/string.h4
-rw-r--r--libcxx/include/__fwd/vector.h26
-rw-r--r--libcxx/include/__memory/allocator.h16
-rw-r--r--libcxx/include/__ranges/as_rvalue_view.h16
-rw-r--r--libcxx/include/__ranges/repeat_view.h5
-rw-r--r--libcxx/include/__ranges/to.h4
-rw-r--r--libcxx/include/__ranges/zip_view.h8
-rw-r--r--libcxx/include/__string/char_traits.h11
-rw-r--r--libcxx/include/__system_error/errc.h70
-rw-r--r--libcxx/include/__type_traits/apply_cv.h50
-rw-r--r--libcxx/include/cerrno13
-rw-r--r--libcxx/include/deque4
-rw-r--r--libcxx/include/format2
-rw-r--r--libcxx/include/iosfwd5
-rw-r--r--libcxx/include/libcxx.imp6
-rw-r--r--libcxx/include/module.modulemap15
-rw-r--r--libcxx/include/queue7
-rw-r--r--libcxx/include/stack4
-rw-r--r--libcxx/include/tuple28
-rw-r--r--libcxx/include/vector3
-rw-r--r--libcxx/modules/CMakeLists.txt15
-rw-r--r--libcxx/src/include/tzdb/tzdb_list_private.h11
-rw-r--r--libcxx/src/random.cpp4
-rw-r--r--libcxx/src/tzdb_list.cpp24
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.pass.cpp (renamed from libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx2a.pass.cpp)33
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.verify.cpp42
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.depr_in_cxx17.verify.cpp4
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.pass.cpp (renamed from libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.pass.cpp)31
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.verify.cpp23
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.verify.cpp28
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.depr_in_cxx17.verify.cpp4
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp (renamed from libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx2a.pass.cpp)53
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.verify.cpp77
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.pass.cpp (renamed from libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx2a.pass.cpp)15
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.verify.cpp32
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/allocator_types.cxx20.pass.cpp (renamed from libcxx/test/libcxx/depr/depr.default.allocator/allocator_types.cxx2a.pass.cpp)24
-rw-r--r--libcxx/test/libcxx/depr/depr.default.allocator/enable_removed_allocator_members.deprecated.verify.cpp20
-rw-r--r--libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp2
-rw-r--r--libcxx/test/libcxx/input.output/filesystems/convert_file_time.pass.cpp2
-rw-r--r--libcxx/test/libcxx/time/time.zone/time.zone.db/rules.pass.cpp2
-rw-r--r--libcxx/test/libcxx/time/time.zone/time.zone.db/zones.pass.cpp2
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx03.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx11.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx14.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx17.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx20.csv1
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx23.csv4
-rw-r--r--libcxx/test/libcxx/transitive_includes/cxx26.csv4
-rw-r--r--libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_allocator_void_no_members.verify.cpp25
-rw-r--r--libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_with_removed_members.compile.pass.cpp22
-rw-r--r--libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch.pass.cpp242
-rw-r--r--libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch_pred.pass.cpp119
-rw-r--r--libcxx/test/std/containers/sequences/deque/types.pass.cpp25
-rw-r--r--libcxx/test/std/containers/sequences/list/types.pass.cpp13
-rw-r--r--libcxx/test/std/containers/sequences/vector/types.pass.cpp25
-rw-r--r--libcxx/test/std/containers/sequences/vector/vector.cons/deduct.verify.cpp29
-rw-r--r--libcxx/test/std/depr.cerro/cerrno.syn.verify.cpp37
-rw-r--r--libcxx/test/std/depr.cerro/system.error.syn.verify.cpp28
-rw-r--r--libcxx/test/std/diagnostics/syserr/errc.pass.cpp2
-rw-r--r--libcxx/test/std/experimental/simd/simd.class/simd_ctor_broadcast.pass.cpp4
-rw-r--r--libcxx/test/std/experimental/simd/simd.class/simd_ctor_conversion.pass.cpp4
-rw-r--r--libcxx/test/std/experimental/simd/simd.class/simd_ctor_load.pass.cpp6
-rw-r--r--libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_ctor_conversion.pass.cpp4
-rw-r--r--libcxx/test/std/experimental/simd/simd.reference/reference_assignment.pass.cpp6
-rw-r--r--libcxx/test/std/experimental/simd/test_utils.h12
-rw-r--r--libcxx/test/std/numerics/numeric.ops/numeric.ops.sat/saturate_cast.pass.cpp2
-rw-r--r--libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_token_pred.pass.cpp2
-rw-r--r--libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_token_pred.pass.cpp2
-rw-r--r--libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_token_pred.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/tuple/tuple.tuple/tuple.apply/make_from_tuple.pass.cpp88
-rw-r--r--libcxx/test/std/utilities/variant/variant.get/get_if_index.pass.cpp55
-rw-r--r--libcxx/test/std/utilities/variant/variant.get/get_if_type.pass.cpp55
-rw-r--r--libcxx/test/std/utilities/variant/variant.get/get_index.pass.cpp121
-rw-r--r--libcxx/test/std/utilities/variant/variant.get/get_type.pass.cpp125
-rw-r--r--libcxx/test/std/utilities/variant/variant.helpers/variant_alternative.pass.cpp10
-rw-r--r--libcxx/test/std/utilities/variant/variant.variant/variant.assign/T.pass.cpp29
-rw-r--r--libcxx/test/std/utilities/variant/variant.variant/variant.ctor/T.pass.cpp70
-rw-r--r--libcxx/test/std/utilities/variant/variant.variant/variant.ctor/default.pass.cpp6
-rw-r--r--libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_index_args.pass.cpp58
-rw-r--r--libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_type_args.pass.cpp59
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp33
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp32
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit/visit.pass.cpp30
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit/visit_return_type.pass.cpp30
-rw-r--r--libcxx/test/support/deduction_guides_sfinae_checks.h1
-rw-r--r--libcxx/test/support/variant_test_helpers.h3
-rw-r--r--libcxx/utils/ci/Dockerfile9
-rw-r--r--libcxx/utils/ci/buildkite-pipeline.yml6
-rwxr-xr-xlibcxx/utils/ci/oss-fuzz.sh2
-rwxr-xr-xlibcxx/utils/generate_escaped_output_table.py2
-rwxr-xr-xlibcxx/utils/generate_extended_grapheme_cluster_table.py2
-rw-r--r--libcxx/utils/generate_width_estimation_table.py2
-rw-r--r--libcxx/utils/libcxx/test/features.py1
-rw-r--r--libcxx/utils/libcxx/test/params.py2
-rw-r--r--libcxxabi/test/forced_unwind4.pass.cpp4
-rw-r--r--lld/COFF/Config.h8
-rw-r--r--lld/COFF/Driver.cpp17
-rw-r--r--lld/COFF/DriverUtils.cpp25
-rw-r--r--lld/ELF/Arch/Mips.cpp6
-rw-r--r--lld/ELF/Arch/X86_64.cpp7
-rw-r--r--lld/ELF/Config.h2
-rw-r--r--lld/ELF/DWARF.h2
-rw-r--r--lld/ELF/Driver.cpp55
-rw-r--r--lld/ELF/InputFiles.cpp30
-rw-r--r--lld/ELF/InputFiles.h5
-rw-r--r--lld/ELF/InputSection.cpp4
-rw-r--r--lld/ELF/LinkerScript.cpp48
-rw-r--r--lld/ELF/LinkerScript.h21
-rw-r--r--lld/ELF/Relocations.cpp24
-rw-r--r--lld/ELF/ScriptParser.cpp13
-rw-r--r--lld/ELF/SymbolTable.cpp4
-rw-r--r--lld/ELF/SymbolTable.h3
-rw-r--r--lld/ELF/Symbols.cpp6
-rw-r--r--lld/ELF/SyntheticSections.cpp45
-rw-r--r--lld/ELF/SyntheticSections.h7
-rw-r--r--lld/ELF/Writer.cpp50
-rw-r--r--lld/MachO/ConcatOutputSection.cpp6
-rw-r--r--lld/MachO/Config.h1
-rw-r--r--lld/MachO/Driver.cpp60
-rw-r--r--lld/MachO/InputSection.cpp49
-rw-r--r--lld/MachO/InputSection.h5
-rw-r--r--lld/MachO/MapFile.cpp22
-rw-r--r--lld/MachO/ObjC.cpp16
-rw-r--r--lld/MachO/ObjC.h2
-rw-r--r--lld/MachO/Options.td12
-rw-r--r--lld/MachO/SymbolTable.cpp2
-rw-r--r--lld/MachO/SyntheticSections.cpp261
-rw-r--r--lld/MachO/SyntheticSections.h70
-rw-r--r--lld/MachO/Writer.cpp10
-rw-r--r--lld/MinGW/Driver.cpp2
-rw-r--r--lld/MinGW/Options.td2
-rw-r--r--lld/test/COFF/export.test43
-rw-r--r--lld/test/COFF/exportas.test88
-rw-r--r--lld/test/ELF/aarch64-gnu-ifunc-nonpreemptable.s12
-rw-r--r--lld/test/ELF/aarch64-gnu-ifunc.s3
-rw-r--r--lld/test/ELF/allow-multiple-definition.s3
-rw-r--r--lld/test/ELF/allow-shlib-undefined.s2
-rw-r--r--lld/test/ELF/arm-gnu-ifunc.s3
-rw-r--r--lld/test/ELF/common-gc2.s8
-rw-r--r--lld/test/ELF/driver.test48
-rw-r--r--lld/test/ELF/executable-undefined-ignoreall.s2
-rw-r--r--lld/test/ELF/gc-sections-with-provide.s60
-rw-r--r--lld/test/ELF/gnu-ifunc-dyntags.s8
-rw-r--r--lld/test/ELF/gnu-ifunc-i386.s3
-rw-r--r--lld/test/ELF/linkerscript/symbolreferenced.s51
-rw-r--r--lld/test/ELF/lto/libcall-archive.ll6
-rw-r--r--lld/test/ELF/pack-dyn-relocs-ifunc.s49
-rw-r--r--lld/test/ELF/ppc32-ifunc-nonpreemptible-pic.s8
-rw-r--r--lld/test/ELF/relro-non-contiguous-script-data.s6
-rw-r--r--lld/test/ELF/riscv-ifunc-nonpreemptible.s20
-rw-r--r--lld/test/ELF/riscv-tlsdesc-relax.s8
-rw-r--r--lld/test/ELF/riscv-tlsdesc.s27
-rw-r--r--lld/test/ELF/riscv-undefined-weak.s18
-rw-r--r--lld/test/ELF/shlib-undefined-local.s5
-rw-r--r--lld/test/ELF/static-with-export-dynamic.s32
-rw-r--r--lld/test/ELF/systemz-ifunc-nonpreemptible.s2
-rw-r--r--lld/test/ELF/weak-undef.s9
-rw-r--r--lld/test/ELF/x86-64-dyn-rel-error.s2
-rw-r--r--lld/test/ELF/x86-64-gotpc-relax-too-far.s12
-rw-r--r--lld/test/MachO/objc-relative-method-lists-simple.s250
-rw-r--r--lld/test/MachO/silent-ignore.s2
-rw-r--r--lld/test/MinGW/driver.test3
-rw-r--r--lldb/docs/use/python-reference.rst28
-rw-r--r--lldb/include/lldb/Core/Disassembler.h2
-rw-r--r--lldb/include/lldb/Core/Progress.h41
-rw-r--r--lldb/include/lldb/Symbol/LineEntry.h5
-rw-r--r--lldb/include/lldb/Symbol/UnwindTable.h4
-rw-r--r--lldb/include/lldb/Utility/Scalar.h5
-rw-r--r--lldb/include/lldb/Utility/SupportFile.h3
-rw-r--r--lldb/source/API/SBLineEntry.cpp10
-rw-r--r--lldb/source/API/SBThread.cpp2
-rw-r--r--lldb/source/API/SystemInitializerFull.cpp14
-rw-r--r--lldb/source/Breakpoint/BreakpointResolver.cpp2
-rw-r--r--lldb/source/Breakpoint/BreakpointResolverFileLine.cpp7
-rw-r--r--lldb/source/Commands/CommandObjectBreakpoint.cpp4
-rw-r--r--lldb/source/Commands/CommandObjectSource.cpp14
-rw-r--r--lldb/source/Commands/CommandObjectThread.cpp2
-rw-r--r--lldb/source/Core/Address.cpp2
-rw-r--r--lldb/source/Core/Disassembler.cpp8
-rw-r--r--lldb/source/Core/FormatEntity.cpp2
-rw-r--r--lldb/source/Core/IOHandlerCursesGUI.cpp11
-rw-r--r--lldb/source/Core/Module.cpp9
-rw-r--r--lldb/source/Core/Progress.cpp114
-rw-r--r--lldb/source/Core/SourceManager.cpp2
-rw-r--r--lldb/source/Host/common/Alarm.cpp84
-rw-r--r--lldb/source/Interpreter/OptionArgParser.cpp60
-rw-r--r--lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp2
-rw-r--r--lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp6
-rw-r--r--lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp2
-rw-r--r--lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp12
-rw-r--r--lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.h1
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp17
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp4
-rw-r--r--lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp1
-rw-r--r--lldb/source/Symbol/CompileUnit.cpp2
-rw-r--r--lldb/source/Symbol/Function.cpp4
-rw-r--r--lldb/source/Symbol/LineEntry.cpp17
-rw-r--r--lldb/source/Symbol/LineTable.cpp4
-rw-r--r--lldb/source/Symbol/SymbolContext.cpp4
-rw-r--r--lldb/source/Symbol/UnwindTable.cpp45
-rw-r--r--lldb/source/Target/StackFrame.cpp3
-rw-r--r--lldb/source/Target/StackFrameList.cpp4
-rw-r--r--lldb/source/Target/Thread.cpp8
-rw-r--r--lldb/source/Target/TraceDumper.cpp4
-rw-r--r--lldb/source/Utility/Scalar.cpp42
-rw-r--r--lldb/test/API/commands/target/modules/lookup/Makefile4
-rw-r--r--lldb/test/API/commands/target/modules/lookup/TestImageLookupPCExpression.py27
-rw-r--r--lldb/test/API/commands/target/modules/lookup/main.c15
-rw-r--r--lldb/test/API/functionalities/type_find_first/Makefile2
-rw-r--r--lldb/test/API/functionalities/type_find_first/TestFindFirstType.py27
-rw-r--r--lldb/test/API/functionalities/type_find_first/main.cpp5
-rw-r--r--lldb/test/API/functionalities/type_find_first/other.cpp4
-rw-r--r--lldb/test/CMakeLists.txt17
-rw-r--r--lldb/unittests/Core/ProgressReportTest.cpp39
-rw-r--r--lldb/unittests/SymbolFile/PDB/SymbolFilePDBTests.cpp2
-rw-r--r--lldb/unittests/Utility/ScalarTest.cpp58
-rw-r--r--llvm/bindings/ocaml/debuginfo/debuginfo_ocaml.c15
-rw-r--r--llvm/bindings/ocaml/debuginfo/llvm_debuginfo.ml10
-rw-r--r--llvm/bindings/ocaml/debuginfo/llvm_debuginfo.mli10
-rw-r--r--llvm/bindings/ocaml/llvm/llvm.ml2
-rw-r--r--llvm/bindings/ocaml/llvm/llvm.mli6
-rw-r--r--llvm/bindings/ocaml/llvm/llvm_ocaml.c9
-rw-r--r--llvm/bindings/ocaml/llvm/llvm_ocaml.h1
-rw-r--r--llvm/docs/AMDGPUUsage.rst41
-rw-r--r--llvm/docs/CommandGuide/llvm-debuginfo-analyzer.rst4
-rw-r--r--llvm/docs/CommandGuide/llvm-objcopy.rst13
-rw-r--r--llvm/docs/DirectX/DXILArchitecture.rst13
-rw-r--r--llvm/docs/GlobalISel/GenericOpcode.rst19
-rw-r--r--llvm/docs/InstCombineContributorGuide.md556
-rw-r--r--llvm/docs/InstrProfileFormat.rst129
-rw-r--r--llvm/docs/LangRef.rst246
-rw-r--r--llvm/docs/ReleaseNotes.rst12
-rw-r--r--llvm/docs/RemoveDIsDebugInfo.md11
-rw-r--r--llvm/docs/SPIRVUsage.rst334
-rw-r--r--llvm/docs/UserGuides.rst5
-rw-r--r--llvm/include/llvm-c/Core.h18
-rw-r--r--llvm/include/llvm-c/DebugInfo.h60
-rw-r--r--llvm/include/llvm/ADT/SCCIterator.h6
-rw-r--r--llvm/include/llvm/Analysis/InlineCost.h3
-rw-r--r--llvm/include/llvm/Analysis/MemoryBuiltins.h8
-rw-r--r--llvm/include/llvm/Analysis/MemoryLocation.h7
-rw-r--r--llvm/include/llvm/AsmParser/LLToken.h1
-rw-r--r--llvm/include/llvm/BinaryFormat/COFF.h6
-rw-r--r--llvm/include/llvm/BinaryFormat/DXContainer.h16
-rw-r--r--llvm/include/llvm/BinaryFormat/Dwarf.def2
-rw-r--r--llvm/include/llvm/Bitcode/LLVMBitCodes.h7
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h8
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h4
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h1
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h27
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/Utils.h12
-rw-r--r--llvm/include/llvm/CodeGen/MachineFunction.h5
-rw-r--r--llvm/include/llvm/CodeGen/MachineInstr.h2
-rw-r--r--llvm/include/llvm/CodeGen/MachinePassManager.h189
-rw-r--r--llvm/include/llvm/CodeGen/MachineScheduler.h9
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h2
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h50
-rw-r--r--llvm/include/llvm/CodeGen/TargetRegisterInfo.h31
-rw-r--r--llvm/include/llvm/DebugInfo/LogicalView/Readers/LVCodeViewReader.h2
-rw-r--r--llvm/include/llvm/DebugInfo/LogicalView/Readers/LVDWARFReader.h2
-rw-r--r--llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h2
-rw-r--r--llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h2
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/ClauseT.h1268
-rw-r--r--llvm/include/llvm/IR/BasicBlock.h2
-rw-r--r--llvm/include/llvm/IR/CallingConv.h3
-rw-r--r--llvm/include/llvm/IR/Constants.h6
-rw-r--r--llvm/include/llvm/IR/DiagnosticHandler.h4
-rw-r--r--llvm/include/llvm/IR/GlobalValue.h1
-rw-r--r--llvm/include/llvm/IR/IRBuilder.h20
-rw-r--r--llvm/include/llvm/IR/InstrTypes.h29
-rw-r--r--llvm/include/llvm/IR/Instructions.h35
-rw-r--r--llvm/include/llvm/IR/IntrinsicInst.h13
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td21
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAMDGPU.td119
-rw-r--r--llvm/include/llvm/IR/IntrinsicsDirectX.td10
-rw-r--r--llvm/include/llvm/IR/Mangler.h4
-rw-r--r--llvm/include/llvm/IR/PassManager.h37
-rw-r--r--llvm/include/llvm/IR/PatternMatch.h87
-rw-r--r--llvm/include/llvm/IR/ProfDataUtils.h3
-rw-r--r--llvm/include/llvm/MC/ConstantPools.h9
-rw-r--r--llvm/include/llvm/MC/DXContainerPSVInfo.h25
-rw-r--r--llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h3
-rw-r--r--llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h4
-rw-r--r--llvm/include/llvm/MC/MCRegisterInfo.h21
-rw-r--r--llvm/include/llvm/MC/MCStreamer.h2
-rw-r--r--llvm/include/llvm/MC/StringTableBuilder.h8
-rw-r--r--llvm/include/llvm/ObjCopy/CommonConfig.h1
-rw-r--r--llvm/include/llvm/Object/COFF.h41
-rw-r--r--llvm/include/llvm/Object/COFFImportFile.h22
-rw-r--r--llvm/include/llvm/Object/DXContainer.h16
-rw-r--r--llvm/include/llvm/Object/ELFObjectFile.h16
-rw-r--r--llvm/include/llvm/Object/ELFTypes.h101
-rw-r--r--llvm/include/llvm/ObjectYAML/DXContainerYAML.h5
-rw-r--r--llvm/include/llvm/Passes/MachinePassRegistry.def2
-rw-r--r--llvm/include/llvm/Passes/PassBuilder.h7
-rw-r--r--llvm/include/llvm/Passes/TargetPassRegistry.inc194
-rw-r--r--llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h17
-rw-r--r--llvm/include/llvm/ProfileData/InstrProf.h14
-rw-r--r--llvm/include/llvm/ProfileData/InstrProfWriter.h11
-rw-r--r--llvm/include/llvm/ProfileData/MemProf.h42
-rw-r--r--llvm/include/llvm/Support/BalancedPartitioning.h5
-rw-r--r--llvm/include/llvm/Support/DXILABI.h18
-rw-r--r--llvm/include/llvm/Support/FormattedStream.h51
-rw-r--r--llvm/include/llvm/Support/TargetOpcodes.def5
-rw-r--r--llvm/include/llvm/Target/GenericOpcodes.td22
-rw-r--r--llvm/include/llvm/Target/GlobalISel/Combine.td16
-rw-r--r--llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td4
-rw-r--r--llvm/include/llvm/Target/Target.td17
-rw-r--r--llvm/include/llvm/TextAPI/DylibReader.h9
-rw-r--r--llvm/include/llvm/TextAPI/Record.h17
-rw-r--r--llvm/include/llvm/TextAPI/Utils.h7
-rw-r--r--llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h154
-rw-r--r--llvm/include/llvm/Transforms/Scalar/Float2Int.h2
-rw-r--r--llvm/include/llvm/Transforms/Utils/CodeExtractor.h2
-rw-r--r--llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h1
-rw-r--r--llvm/include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h30
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp2
-rw-r--r--llvm/lib/Analysis/InlineCost.cpp2
-rw-r--r--llvm/lib/Analysis/InlineOrder.cpp5
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp6
-rw-r--r--llvm/lib/Analysis/LazyValueInfo.cpp4
-rw-r--r--llvm/lib/Analysis/ReplayInlineAdvisor.cpp4
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp130
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp4
-rw-r--r--llvm/lib/AsmParser/LLLexer.cpp1
-rw-r--r--llvm/lib/AsmParser/LLParser.cpp21
-rw-r--r--llvm/lib/Bitcode/Reader/BitcodeReader.cpp22
-rw-r--r--llvm/lib/Bitcode/Writer/BitcodeWriter.cpp5
-rw-r--r--llvm/lib/CodeGen/AtomicExpandPass.cpp3
-rw-r--r--llvm/lib/CodeGen/ExpandLargeFpConvert.cpp11
-rw-r--r--llvm/lib/CodeGen/FinalizeISel.cpp3
-rw-r--r--llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp24
-rw-r--r--llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp155
-rw-r--r--llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp30
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp54
-rw-r--r--llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp120
-rw-r--r--llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp3
-rw-r--r--llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp34
-rw-r--r--llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp63
-rw-r--r--llvm/lib/CodeGen/GlobalISel/Utils.cpp73
-rw-r--r--llvm/lib/CodeGen/LowLevelTypeUtils.cpp2
-rw-r--r--llvm/lib/CodeGen/MIRParser/MILexer.cpp2
-rw-r--r--llvm/lib/CodeGen/MIRParser/MILexer.h2
-rw-r--r--llvm/lib/CodeGen/MIRParser/MIParser.cpp8
-rw-r--r--llvm/lib/CodeGen/MIRPrinter.cpp4
-rw-r--r--llvm/lib/CodeGen/MachineCopyPropagation.cpp34
-rw-r--r--llvm/lib/CodeGen/MachineInstr.cpp58
-rw-r--r--llvm/lib/CodeGen/MachineOperand.cpp13
-rw-r--r--llvm/lib/CodeGen/MachineScheduler.cpp113
-rw-r--r--llvm/lib/CodeGen/MachineVerifier.cpp20
-rw-r--r--llvm/lib/CodeGen/RegisterBankInfo.cpp7
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp278
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/FastISel.cpp25
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp90
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp70
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp2
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp34
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp60
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp9
-rw-r--r--llvm/lib/CodeGen/TargetRegisterInfo.cpp36
-rw-r--r--llvm/lib/CodeGen/TypePromotion.cpp2
-rw-r--r--llvm/lib/DebugInfo/LogicalView/Readers/LVDWARFReader.cpp4
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h42
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h2
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp13
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp18
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/JITLink.cpp3
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp6
-rw-r--r--llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp6
-rw-r--r--llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp19
-rw-r--r--llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp4
-rw-r--r--llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp2
-rw-r--r--llvm/lib/Frontend/Offloading/OffloadWrapper.cpp70
-rw-r--r--llvm/lib/IR/AsmWriter.cpp8
-rw-r--r--llvm/lib/IR/BasicBlock.cpp2
-rw-r--r--llvm/lib/IR/ConstantRange.cpp4
-rw-r--r--llvm/lib/IR/Constants.cpp4
-rw-r--r--llvm/lib/IR/Core.cpp21
-rw-r--r--llvm/lib/IR/DebugInfo.cpp121
-rw-r--r--llvm/lib/IR/Globals.cpp7
-rw-r--r--llvm/lib/IR/Instruction.cpp32
-rw-r--r--llvm/lib/IR/Instructions.cpp68
-rw-r--r--llvm/lib/IR/LLVMContextImpl.h2
-rw-r--r--llvm/lib/IR/Mangler.cpp52
-rw-r--r--llvm/lib/IR/Operator.cpp4
-rw-r--r--llvm/lib/IR/ProfDataUtils.cpp48
-rw-r--r--llvm/lib/IR/Verifier.cpp9
-rw-r--r--llvm/lib/InterfaceStub/ELFObjHandler.cpp2
-rw-r--r--llvm/lib/LTO/LTO.cpp4
-rw-r--r--llvm/lib/MC/ConstantPools.cpp9
-rw-r--r--llvm/lib/MC/DXContainerPSVInfo.cpp75
-rw-r--r--llvm/lib/MC/ELFObjectWriter.cpp103
-rw-r--r--llvm/lib/MC/MCDwarf.cpp7
-rw-r--r--llvm/lib/MC/MCParser/MCTargetAsmParser.cpp6
-rw-r--r--llvm/lib/MC/MCRegisterInfo.cpp12
-rw-r--r--llvm/lib/MC/MCStreamer.cpp2
-rw-r--r--llvm/lib/MC/WinCOFFObjectWriter.cpp4
-rw-r--r--llvm/lib/MC/XCOFFObjectWriter.cpp23
-rw-r--r--llvm/lib/ObjCopy/ConfigManager.cpp8
-rw-r--r--llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp3
-rw-r--r--llvm/lib/ObjCopy/ELF/ELFObject.cpp15
-rw-r--r--llvm/lib/Object/COFFImportFile.cpp32
-rw-r--r--llvm/lib/Object/DXContainer.cpp15
-rw-r--r--llvm/lib/Object/ELF.cpp6
-rw-r--r--llvm/lib/Object/OffloadBinary.cpp5
-rw-r--r--llvm/lib/ObjectYAML/DXContainerEmitter.cpp3
-rw-r--r--llvm/lib/ObjectYAML/DXContainerYAML.cpp15
-rw-r--r--llvm/lib/ObjectYAML/ELFEmitter.cpp58
-rw-r--r--llvm/lib/Passes/PassBuilder.cpp116
-rw-r--r--llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp35
-rw-r--r--llvm/lib/ProfileData/InstrProfReader.cpp42
-rw-r--r--llvm/lib/ProfileData/InstrProfWriter.cpp40
-rw-r--r--llvm/lib/ProfileData/MemProf.cpp30
-rw-r--r--llvm/lib/ProfileData/RawMemProfReader.cpp7
-rw-r--r--llvm/lib/Support/BalancedPartitioning.cpp4
-rw-r--r--llvm/lib/Support/FormattedStream.cpp3
-rw-r--r--llvm/lib/Support/RISCVISAInfo.cpp64
-rw-r--r--llvm/lib/Support/Windows/Path.inc4
-rw-r--r--llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp59
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp42
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.cpp5
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td3
-rw-r--r--llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp4
-rw-r--r--llvm/lib/Target/AArch64/AArch64PassRegistry.def (renamed from bolt/lib/Profile/ProfileReaderBase.cpp)21
-rw-r--r--llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedAmpere1.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td2
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetMachine.cpp6
-rw-r--r--llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp2
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp6
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp20
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp12
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPU.td14
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp40
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h11
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp96
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp7
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp32
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp23
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h8
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp40
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def73
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp9
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td90
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp124
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp196
-rw-r--r--llvm/lib/Target/AMDGPU/BUFInstructions.td5
-rw-r--r--llvm/lib/Target/AMDGPU/FLATInstructions.td12
-rw-r--r--llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSubtarget.h4
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.cpp98
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.h54
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp425
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h33
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/AMDGPU/MIMGInstructions.td1
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp131
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.h2
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstructions.td8
-rw-r--r--llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp16
-rw-r--r--llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp30
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/SMInstructions.td13
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp51
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h10
-rw-r--r--llvm/lib/Target/AMDGPU/VINTERPInstructions.td6
-rw-r--r--llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp550
-rw-r--r--llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp1
-rw-r--r--llvm/lib/Target/ARM/Thumb1FrameLowering.cpp11
-rw-r--r--llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp2
-rw-r--r--llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp2
-rw-r--r--llvm/lib/Target/BPF/BPFISelLowering.cpp9
-rw-r--r--llvm/lib/Target/BPF/BPFPassRegistry.def32
-rw-r--r--llvm/lib/Target/BPF/BPFTargetMachine.cpp25
-rw-r--r--llvm/lib/Target/BPF/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp2
-rw-r--r--llvm/lib/Target/DirectX/DXIL.td35
-rw-r--r--llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp82
-rw-r--r--llvm/lib/Target/DirectX/DXILOpBuilder.cpp26
-rw-r--r--llvm/lib/Target/DirectX/DXILOpBuilder.h5
-rw-r--r--llvm/lib/Target/DirectX/DXILOpLowering.cpp55
-rw-r--r--llvm/lib/Target/DirectX/DirectXPassRegistry.def29
-rw-r--r--llvm/lib/Target/DirectX/DirectXTargetMachine.cpp20
-rw-r--r--llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonPassRegistry.def21
-rw-r--r--llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp3
-rw-r--r--llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp2
-rw-r--r--llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp4
-rw-r--r--llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp4
-rw-r--r--llvm/lib/Target/M68k/M68kISelLowering.cpp1
-rw-r--r--llvm/lib/Target/M68k/M68kInstrInfo.td116
-rw-r--r--llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp2
-rw-r--r--llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp2
-rw-r--r--llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp5
-rw-r--r--llvm/lib/Target/Mips/MipsExpandPseudo.cpp9
-rw-r--r--llvm/lib/Target/Mips/MipsLegalizerInfo.cpp8
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp18
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXPassRegistry.def40
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp41
-rw-r--r--llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp26
-rw-r--r--llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp6
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp28
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelLowering.cpp23
-rw-r--r--llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp53
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp55
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp27
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp114
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h1
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp20
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp2
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h11
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp19
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h9
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp2
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp38
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h5
-rw-r--r--llvm/lib/Target/RISCV/RISCV.td2
-rw-r--r--llvm/lib/Target/RISCV/RISCVCallingConv.td13
-rw-r--r--llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp35
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td4
-rw-r--r--llvm/lib/Target/RISCV/RISCVFoldMasks.cpp48
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.cpp109
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp400
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.h64
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrFormats.td2
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrGISel.td8
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.cpp55
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.h2
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.td4
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoV.td22
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td35
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZb.td62
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZc.td53
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td229
-rw-r--r--llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp50
-rw-r--r--llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp2
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp29
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.td13
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp200
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h3
-rw-r--r--llvm/lib/Target/SPIRV/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp33
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVBuiltins.td2
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp48
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp101
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVCommandLine.h38
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp1
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.h9
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp307
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp15
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h88
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp108
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp104
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp2
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp6
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp90
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td1
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVUtils.cpp9
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVUtils.h22
-rw-r--r--llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp2
-rw-r--r--llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp2
-rw-r--r--llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp16
-rw-r--r--llvm/lib/Target/SystemZ/SystemZFrameLowering.h3
-rw-r--r--llvm/lib/Target/SystemZ/SystemZISelLowering.cpp105
-rw-r--r--llvm/lib/Target/SystemZ/SystemZISelLowering.h6
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp2
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.td6
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrSystem.td2
-rw-r--r--llvm/lib/Target/SystemZ/SystemZOperators.td4
-rw-r--r--llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp2
-rw-r--r--llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp2
-rw-r--r--llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp2
-rw-r--r--llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp314
-rw-r--r--llvm/lib/Target/X86/AsmParser/X86Operand.h2
-rw-r--r--llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp19
-rw-r--r--llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp3
-rw-r--r--llvm/lib/Target/X86/X86DomainReassignment.cpp38
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp143
-rw-r--r--llvm/lib/Target/X86/X86InstrArithmetic.td38
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp9
-rw-r--r--llvm/lib/Target/X86/X86InstrUtils.td2
-rw-r--r--llvm/lib/Target/X86/X86SchedBroadwell.td6
-rw-r--r--llvm/lib/Target/X86/X86SchedHaswell.td6
-rw-r--r--llvm/lib/Target/X86/X86SchedIceLake.td6
-rw-r--r--llvm/lib/Target/X86/X86TargetTransformInfo.cpp10
-rw-r--r--llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp2
-rw-r--r--llvm/lib/TargetParser/AArch64TargetParser.cpp5
-rw-r--r--llvm/lib/TextAPI/BinaryReader/CMakeLists.txt1
-rw-r--r--llvm/lib/TextAPI/BinaryReader/DylibReader.cpp118
-rw-r--r--llvm/lib/TextAPI/Utils.cpp46
-rw-r--r--llvm/lib/Transforms/IPO/AttributorAttributes.cpp6
-rw-r--r--llvm/lib/Transforms/IPO/CMakeLists.txt1
-rw-r--r--llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp84
-rw-r--r--llvm/lib/Transforms/IPO/SampleProfile.cpp733
-rw-r--r--llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp552
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp15
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp7
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp7
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp29
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp60
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp2
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp2
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp24
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp55
-rw-r--r--llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/Float2Int.cpp33
-rw-r--r--llvm/lib/Transforms/Scalar/GVNHoist.cpp8
-rw-r--r--llvm/lib/Transforms/Scalar/MergeICmps.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp6
-rw-r--r--llvm/lib/Transforms/Utils/CodeExtractor.cpp4
-rw-r--r--llvm/lib/Transforms/Utils/LoopRotationUtils.cpp10
-rw-r--r--llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp10
-rw-r--r--llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp8
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h3
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp102
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp242
-rw-r--r--llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h35
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.cpp17
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h89
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp3
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp4
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h5
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp82
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp106
-rw-r--r--llvm/lib/Transforms/Vectorize/VectorCombine.cpp37
-rw-r--r--llvm/runtimes/CMakeLists.txt4
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/cast.ll920
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll91
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll52
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll12
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll84
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll84
-rw-r--r--llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll2
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll70
-rw-r--r--llvm/test/Assembler/flags.ll48
-rw-r--r--llvm/test/Bindings/OCaml/core.ml2
-rw-r--r--llvm/test/Bindings/OCaml/debuginfo.ml10
-rw-r--r--llvm/test/Bitcode/compatibility-3.6.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-3.7.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-3.8.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-3.9.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-4.0.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-5.0.ll16
-rw-r--r--llvm/test/Bitcode/compatibility-6.0.ll16
-rw-r--r--llvm/test/Bitcode/compatibility.ll18
-rw-r--r--llvm/test/Bitcode/flags.ll29
-rw-r--r--llvm/test/Bitcode/thinlto-function-summary.ll6
-rw-r--r--llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll8
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll72
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir84
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll92
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir245
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll135
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll90
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir28
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir27
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir29
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir36
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir29
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir57
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir18
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir9
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir329
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/select.mir319
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir52
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-smull.ll53
-rw-r--r--llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/abs.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll36
-rw-r--r--llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll12
-rw-r--r--llvm/test/CodeGen/AArch64/allow-check.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/and-sink.ll60
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-anyregcc.ll225
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-patchpoint.ll139
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-xaluo.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/bitcast.ll145
-rw-r--r--llvm/test/CodeGen/AArch64/bswap.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/dllexport.ll39
-rw-r--r--llvm/test/CodeGen/AArch64/fptoi.ll45
-rw-r--r--llvm/test/CodeGen/AArch64/hadd-combine.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/insert-subvector.ll150
-rw-r--r--llvm/test/CodeGen/AArch64/itofp.ll278
-rw-r--r--llvm/test/CodeGen/AArch64/load.ll3
-rw-r--r--llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir1
-rw-r--r--llvm/test/CodeGen/AArch64/misched-bundle.mir195
-rw-r--r--llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll46
-rw-r--r--llvm/test/CodeGen/AArch64/neon-compare-instructions.ll101
-rw-r--r--llvm/test/CodeGen/AArch64/overflow.ll39
-rw-r--r--llvm/test/CodeGen/AArch64/peephole-movd.mir60
-rw-r--r--llvm/test/CodeGen/AArch64/pr86717.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll6
-rw-r--r--llvm/test/CodeGen/AArch64/setcc_knownbits.ll93
-rw-r--r--llvm/test/CodeGen/AArch64/shift.ll225
-rw-r--r--llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll120
-rw-r--r--llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll26
-rw-r--r--llvm/test/CodeGen/AArch64/sme-streaming-body.ll34
-rw-r--r--llvm/test/CodeGen/AArch64/srem-vec-crash.ll15
-rw-r--r--llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll274
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir15
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir15
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir45
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir15
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir45
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll41
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll147
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll622
-rw-r--r--llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir68
-rw-r--r--llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/allow-check.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll255
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll108
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll44
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll19
-rw-r--r--llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/convergence-tokens.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/div_i128.ll2298
-rw-r--r--llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll270
-rw-r--r--llvm/test/CodeGen/AMDGPU/fptoi.i128.ll408
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll564
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll564
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll564
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll424
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll5578
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll3960
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll3960
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll5576
-rw-r--r--llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll146
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll146
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll111
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll317
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll280
-rw-r--r--llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll47
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir1154
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-buffer.mir1130
-rw-r--r--llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir28
-rw-r--r--llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir504
-rw-r--r--llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll11
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll20
-rw-r--r--llvm/test/CodeGen/AMDGPU/preload-kernargs.ll8976
-rw-r--r--llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll44
-rw-r--r--llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll15
-rw-r--r--llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll78
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll26
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir1
-rw-r--r--llvm/test/CodeGen/AMDGPU/wave32.ll34
-rw-r--r--llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/wwm-reserved.ll4
-rw-r--r--llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll15
-rw-r--r--llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir2
-rw-r--r--llvm/test/CodeGen/ARM/select.ll399
-rw-r--r--llvm/test/CodeGen/BPF/cttz-ctlz.ll304
-rw-r--r--llvm/test/CodeGen/DirectX/abs-vec.ll34
-rw-r--r--llvm/test/CodeGen/DirectX/abs.ll38
-rw-r--r--llvm/test/CodeGen/DirectX/ceil.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/ceil_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/cos.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/cos_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/dot2_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/dot3_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/dot4_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/fabs.ll32
-rw-r--r--llvm/test/CodeGen/DirectX/fdot.ll94
-rw-r--r--llvm/test/CodeGen/DirectX/floor.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/floor_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/log-vec.ll30
-rw-r--r--llvm/test/CodeGen/DirectX/log.ll25
-rw-r--r--llvm/test/CodeGen/DirectX/log10.ll25
-rw-r--r--llvm/test/CodeGen/DirectX/log2.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/log2_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/pow-vec.ll15
-rw-r--r--llvm/test/CodeGen/DirectX/pow.ll29
-rw-r--r--llvm/test/CodeGen/DirectX/reversebits.ll31
-rw-r--r--llvm/test/CodeGen/DirectX/round.ll35
-rw-r--r--llvm/test/CodeGen/DirectX/round_error.ll4
-rw-r--r--llvm/test/CodeGen/DirectX/sqrt.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/sqrt_error.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/trunc.ll20
-rw-r--r--llvm/test/CodeGen/DirectX/trunc_error.ll10
-rw-r--r--llvm/test/CodeGen/Generic/allow-check.ll12
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll1
-rw-r--r--llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll2
-rw-r--r--llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir2
-rw-r--r--llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll42
-rw-r--r--llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll42
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir (renamed from llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir)9
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir21
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir126
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir4
-rw-r--r--llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir34
-rw-r--r--llvm/test/CodeGen/Mips/atomic-min-max.ll56
-rw-r--r--llvm/test/CodeGen/NVPTX/common-linkage.ll29
-rw-r--r--llvm/test/CodeGen/NVPTX/weak-global.ll9
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py59
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll632
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll1066
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll105
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll222
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll53
-rw-r--r--llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll142
-rw-r--r--llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir345
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir300
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir139
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir33
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir10
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir21
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir410
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir400
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir228
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir110
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir425
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir558
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir48
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir25
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll936
-rw-r--r--llvm/test/CodeGen/RISCV/allow-check.ll13
-rw-r--r--llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll496
-rw-r--r--llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll102
-rw-r--r--llvm/test/CodeGen/RISCV/double-convert.ll146
-rw-r--r--llvm/test/CodeGen/RISCV/double-round-conv-sat.ll708
-rw-r--r--llvm/test/CodeGen/RISCV/float-convert.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/float-round-conv-sat.ll168
-rw-r--r--llvm/test/CodeGen/RISCV/half-convert.ll57
-rw-r--r--llvm/test/CodeGen/RISCV/half-round-conv-sat.ll336
-rw-r--r--llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/make-compressible-zbc.mir585
-rw-r--r--llvm/test/CodeGen/RISCV/misched-postra-direction.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/rv32xtheadbb.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rv32zbb.ll447
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-typepromotion.ll27
-rw-r--r--llvm/test/CodeGen/RISCV/rv64xtheadbb.ll209
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zba.ll179
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zbb.ll438
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/abd.ll343
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll91
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/binop-zext.ll154
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll95
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/calling-conv.ll163
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll3
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll727
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll189
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll254
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll105
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll1064
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll13
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/pr63596.ll37
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll27
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll19
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll541
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll368
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll320
-rw-r--r--llvm/test/CodeGen/RISCV/spill-fill-fold.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/strip-w-suffix.ll74
-rw-r--r--llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/zdinx-large-spill.mir74
-rw-r--r--llvm/test/CodeGen/SPIRV/LinkOnceODR.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/assume.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/expect.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll7
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll9
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll7
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll6
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll6
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll68
-rw-r--r--llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll12
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll20
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll57
-rw-r--r--llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll9
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll2
-rw-r--r--llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/call-zos-vararg.ll4
-rw-r--r--llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir1
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-04.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-08.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir6
-rw-r--r--llvm/test/CodeGen/SystemZ/frame-28.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/frame-adjstack.ll16
-rw-r--r--llvm/test/CodeGen/SystemZ/int-cmp-56.mir4
-rw-r--r--llvm/test/CodeGen/SystemZ/readcyclecounter.ll27
-rw-r--r--llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir2
-rw-r--r--llvm/test/CodeGen/SystemZ/swifterror.ll8
-rw-r--r--llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll16
-rw-r--r--llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll3
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-gather-increment.ll16
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll1
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll4
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll3
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-vldst4.ll5
-rw-r--r--llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir25
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll4
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll5
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll2
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll35
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll37
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll6
-rw-r--r--llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll22
-rw-r--r--llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll8
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir25
-rw-r--r--llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir2
-rw-r--r--llvm/test/CodeGen/X86/addcarry.ll23
-rw-r--r--llvm/test/CodeGen/X86/allow-check.ll6
-rw-r--r--llvm/test/CodeGen/X86/apx/domain-reassignment.mir (renamed from llvm/test/CodeGen/X86/domain-reassignment-ndd.mir)62
-rw-r--r--llvm/test/CodeGen/X86/apx/foldimmediate.mir70
-rw-r--r--llvm/test/CodeGen/X86/avgceilu.ll92
-rw-r--r--llvm/test/CodeGen/X86/callbr-asm-kill.mir1
-rw-r--r--llvm/test/CodeGen/X86/combine-pavg.ll30
-rw-r--r--llvm/test/CodeGen/X86/dagcombine-shifts.ll127
-rw-r--r--llvm/test/CodeGen/X86/extractelement-load.ll364
-rw-r--r--llvm/test/CodeGen/X86/huge-stack-offset.ll4
-rw-r--r--llvm/test/CodeGen/X86/huge-stack-offset2.ll2
-rw-r--r--llvm/test/CodeGen/X86/insertelement-var-index.ll44
-rw-r--r--llvm/test/CodeGen/X86/isel-traps.ll73
-rw-r--r--llvm/test/CodeGen/X86/known-never-zero.ll1831
-rw-r--r--llvm/test/CodeGen/X86/late-remat-update.mir1
-rw-r--r--llvm/test/CodeGen/X86/limit-split-cost.mir1
-rw-r--r--llvm/test/CodeGen/X86/masked_store.ll793
-rw-r--r--llvm/test/CodeGen/X86/oddshuffles.ll25
-rw-r--r--llvm/test/CodeGen/X86/optimize-max-0.ll1
-rw-r--r--llvm/test/CodeGen/X86/pr45378.ll40
-rw-r--r--llvm/test/CodeGen/X86/pr86305.ll74
-rw-r--r--llvm/test/CodeGen/X86/pr86880.mir21
-rw-r--r--llvm/test/CodeGen/X86/regalloc-copy-hints.mir1
-rw-r--r--llvm/test/CodeGen/X86/sar_fold.ll41
-rw-r--r--llvm/test/CodeGen/X86/setcc-non-simple-type.ll36
-rw-r--r--llvm/test/CodeGen/X86/shrink_vmul.ll223
-rw-r--r--llvm/test/CodeGen/X86/stack-protector.ll9
-rw-r--r--llvm/test/CodeGen/X86/statepoint-fastregalloc.mir4
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-invoke-ra.mir2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-vreg-folding.mir2
-rw-r--r--llvm/test/CodeGen/X86/tls-loads-control3.ll5
-rw-r--r--llvm/test/CodeGen/X86/var-permute-128.ll32
-rw-r--r--llvm/test/CodeGen/X86/vec_int_to_fp.ll305
-rw-r--r--llvm/test/CodeGen/X86/vector-half-conversions.ll254
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll736
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll30
-rw-r--r--llvm/test/CodeGen/X86/widen_fadd.ll91
-rw-r--r--llvm/test/CodeGen/X86/widen_fmul.ll91
-rw-r--r--llvm/test/CodeGen/X86/widen_fsub.ll91
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir1
-rw-r--r--llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir2
-rw-r--r--llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir1
-rw-r--r--llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll65
-rw-r--r--llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s43
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s30
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll46
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll5
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll48
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll2
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll48
-rw-r--r--llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll2
-rw-r--r--llvm/test/MC/AArch64/coff-relocations.s12
-rw-r--r--llvm/test/MC/AArch64/constant-pool-sizes.s25
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s278
-rw-r--r--llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s27
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s281
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s190
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s186
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s184
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s168
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s171
-rw-r--r--llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s148
-rw-r--r--llvm/test/MC/AMDGPU/hsa-tg-split.s74
-rw-r--r--llvm/test/MC/AMDGPU/vinterp-fake16.s182
-rw-r--r--llvm/test/MC/ARM/basic-arm-instructions.s8
-rw-r--r--llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s8
-rw-r--r--llvm/test/MC/ARM/load-store-acquire-release-v8.s17
-rw-r--r--llvm/test/MC/COFF/dwarf5lineinfo.s13
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt251
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt9
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt251
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt252
-rw-r--r--llvm/test/MC/Disassembler/X86/apx/imulzu.txt50
-rw-r--r--llvm/test/MC/RISCV/rv32zcmp-invalid.s12
-rw-r--r--llvm/test/MC/RISCV/rv64zcmp-invalid.s12
-rw-r--r--llvm/test/MC/RISCV/rvv/zvkned-invalid.s23
-rw-r--r--llvm/test/MC/RISCV/rvv/zvknh-invalid.s26
-rw-r--r--llvm/test/MC/RISCV/rvv/zvksed-invalid.s6
-rw-r--r--llvm/test/MC/RISCV/rvv/zvksh-invalid.s10
-rw-r--r--llvm/test/MC/RISCV/rvv/zvksh.s7
-rw-r--r--llvm/test/MC/X86/apx/imulzu-att.s41
-rw-r--r--llvm/test/MC/X86/apx/imulzu-intel.s38
-rw-r--r--llvm/test/MachineVerifier/test_adjustsstack.mir26
-rw-r--r--llvm/test/MachineVerifier/test_g_ubsantrap.mir18
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml97
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml95
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml105
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml105
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml107
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml109
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml99
-rw-r--r--llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml97
-rw-r--r--llvm/test/TableGen/ConcatenatedSubregs.td9
-rw-r--r--llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td3
-rw-r--r--llvm/test/TableGen/HwModeSubRegs.td75
-rw-r--r--llvm/test/TableGen/x86-fold-tables.inc6
-rw-r--r--llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll24
-rw-r--r--llvm/test/Transforms/Attributor/align.ll163
-rw-r--r--llvm/test/Transforms/Attributor/nocapture-1.ll12
-rw-r--r--llvm/test/Transforms/Attributor/nofpclass.ll2
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/basic.ll56
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll20
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/libcalls.ll17
-rw-r--r--llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll16
-rw-r--r--llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll16
-rw-r--r--llvm/test/Transforms/Float2Int/basic.ll317
-rw-r--r--llvm/test/Transforms/Float2Int/pr79158.ll73
-rw-r--r--llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll63
-rw-r--r--llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll2
-rw-r--r--llvm/test/Transforms/IROutliner/illegal-vaarg.ll12
-rw-r--r--llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll8
-rw-r--r--llvm/test/Transforms/Inline/RISCV/inline-target-features.ll34
-rw-r--r--llvm/test/Transforms/Inline/RISCV/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/Inline/update_invoke_prof.ll64
-rw-r--r--llvm/test/Transforms/Inline/update_value_profile.ll81
-rw-r--r--llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll140
-rw-r--r--llvm/test/Transforms/InstCombine/X86/x86-avx512.ll140
-rw-r--r--llvm/test/Transforms/InstCombine/add.ll76
-rw-r--r--llvm/test/Transforms/InstCombine/apint-shl-trunc.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/binop-itofp.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/cast.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/catchswitch-phi.ll19
-rw-r--r--llvm/test/Transforms/InstCombine/div.ll22
-rw-r--r--llvm/test/Transforms/InstCombine/fmul.ll99
-rw-r--r--llvm/test/Transforms/InstCombine/fpcast.ll5
-rw-r--r--llvm/test/Transforms/InstCombine/freeze.ll19
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-mul-and.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-mul-zext.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/known-bits.ll51
-rw-r--r--llvm/test/Transforms/InstCombine/mul-masked-bits.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/mul.ll5
-rw-r--r--llvm/test/Transforms/InstCombine/phi.ll21
-rw-r--r--llvm/test/Transforms/InstCombine/powi.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/ptr-int-cast.ll3
-rw-r--r--llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll3
-rw-r--r--llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll18
-rw-r--r--llvm/test/Transforms/InstCombine/sadd-with-overflow.ll32
-rw-r--r--llvm/test/Transforms/InstCombine/scalarization.ll11
-rw-r--r--llvm/test/Transforms/InstCombine/shift-add.ll29
-rw-r--r--llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/shuffle_select.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/trunc.ll37
-rw-r--r--llvm/test/Transforms/InstCombine/uadd-with-overflow.ll23
-rw-r--r--llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll3
-rw-r--r--llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll4
-rw-r--r--llvm/test/Transforms/LoopRotate/update-branch-weights.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll100
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll15
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll63
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll148
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll74
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll42
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll49
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll13
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll154
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr81872.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/small-size.ll236
-rw-r--r--llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll261
-rw-r--r--llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll61
-rw-r--r--llvm/test/Transforms/LoopVectorize/pointer-induction.ll99
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll8
-rw-r--r--llvm/test/Transforms/NewGVN/pr31483.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll3
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll43
-rw-r--r--llvm/test/Transforms/Reassociate/vaarg_movable.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll29
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll49
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll33
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll263
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll82
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll24
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll9
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll95
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll70
-rw-r--r--llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll22
-rw-r--r--llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof16
-rw-r--r--llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll67
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll63
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll7
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll4
-rw-r--r--llvm/test/Transforms/SampleProfile/remarks-hotness.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/HoistCode.ll34
-rw-r--r--llvm/test/Transforms/TailCallElim/debugloc.ll4
-rw-r--r--llvm/test/Verifier/tbaa-struct.ll40
-rw-r--r--llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected6
-rw-r--r--llvm/test/tools/dxil-dis/debug-info.ll2
-rw-r--r--llvm/test/tools/llc/new-pm/machine-function-properties.mir12
-rw-r--r--llvm/test/tools/llvm-lib/arm64ec-implib.test106
-rw-r--r--llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s18
-rw-r--r--llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s34
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s38
-rw-r--r--llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s34
-rw-r--r--llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test100
-rw-r--r--llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test4
-rw-r--r--llvm/tools/CMakeLists.txt7
-rw-r--r--llvm/tools/llvm-c-test/debuginfo.c13
-rw-r--r--llvm/tools/llvm-debuginfo-analyzer/README.md170
-rw-r--r--llvm/tools/llvm-debuginfo-analyzer/README.txt221
-rw-r--r--llvm/tools/llvm-dis/llvm-dis.cpp6
-rw-r--r--llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp137
-rw-r--r--llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp37
-rw-r--r--llvm/tools/llvm-exegesis/lib/SubprocessMemory.h5
-rw-r--r--llvm/tools/llvm-link/llvm-link.cpp6
-rw-r--r--llvm/tools/llvm-lto/llvm-lto.cpp4
-rw-r--r--llvm/tools/llvm-lto2/llvm-lto2.cpp4
-rw-r--r--llvm/tools/llvm-mc/llvm-mc.cpp5
-rw-r--r--llvm/tools/llvm-objcopy/ObjcopyOptions.cpp9
-rw-r--r--llvm/tools/llvm-objcopy/ObjcopyOpts.td14
-rw-r--r--llvm/tools/llvm-objdump/llvm-objdump.cpp7
-rw-r--r--llvm/tools/llvm-profdata/llvm-profdata.cpp10
-rw-r--r--llvm/tools/llvm-readobj/DwarfCFIEHPrinter.h9
-rw-r--r--llvm/tools/llvm-readobj/ELFDumper.cpp37
-rw-r--r--llvm/tools/obj2yaml/dxcontainer2yaml.cpp3
-rw-r--r--llvm/tools/spirv-tools/CMakeLists.txt4
-rw-r--r--llvm/tools/verify-uselistorder/verify-uselistorder.cpp13
-rw-r--r--llvm/unittests/CodeGen/GlobalISel/CSETest.cpp212
-rw-r--r--llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp94
-rw-r--r--llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp2
-rw-r--r--llvm/unittests/CodeGen/MFCommon.inc5
-rw-r--r--llvm/unittests/IR/ConstantRangeTest.cpp18
-rw-r--r--llvm/unittests/IR/PatternMatch.cpp49
-rw-r--r--llvm/unittests/Linker/LinkModulesTest.cpp2
-rw-r--r--llvm/unittests/ProfileData/InstrProfTest.cpp3
-rw-r--r--llvm/unittests/ProfileData/MemProfTest.cpp7
-rw-r--r--llvm/unittests/Support/ThreadPool.cpp2
-rw-r--r--llvm/unittests/TableGen/CMakeLists.txt4
-rw-r--r--llvm/unittests/TableGen/CodeExpanderTest.cpp4
-rw-r--r--llvm/unittests/TargetParser/TargetParserTest.cpp15
-rw-r--r--llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp2
-rw-r--r--llvm/unittests/Transforms/Vectorize/VPlanTest.cpp4
-rw-r--r--llvm/unittests/tools/llvm-exegesis/X86/SubprocessMemoryTest.cpp5
-rw-r--r--llvm/utils/TableGen/AsmMatcherEmitter.cpp14
-rw-r--r--llvm/utils/TableGen/AsmWriterEmitter.cpp14
-rw-r--r--llvm/utils/TableGen/Basic/CMakeLists.txt21
-rw-r--r--llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp (renamed from llvm/utils/TableGen/CodeGenIntrinsics.cpp)0
-rw-r--r--llvm/utils/TableGen/Basic/CodeGenIntrinsics.h (renamed from llvm/utils/TableGen/CodeGenIntrinsics.h)0
-rw-r--r--llvm/utils/TableGen/Basic/SDNodeProperties.cpp (renamed from llvm/utils/TableGen/SDNodeProperties.cpp)0
-rw-r--r--llvm/utils/TableGen/Basic/SDNodeProperties.h (renamed from llvm/utils/TableGen/SDNodeProperties.h)0
-rw-r--r--llvm/utils/TableGen/Basic/SequenceToOffsetTable.h (renamed from llvm/utils/TableGen/SequenceToOffsetTable.h)0
-rw-r--r--llvm/utils/TableGen/CMakeLists.txt67
-rw-r--r--llvm/utils/TableGen/CallingConvEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/CodeEmitterGen.cpp10
-rw-r--r--llvm/utils/TableGen/CodeGenMapTable.cpp4
-rw-r--r--llvm/utils/TableGen/Common/AsmWriterInst.cpp (renamed from llvm/utils/TableGen/AsmWriterInst.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/AsmWriterInst.h (renamed from llvm/utils/TableGen/AsmWriterInst.h)0
-rw-r--r--llvm/utils/TableGen/Common/CMakeLists.txt51
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp (renamed from llvm/utils/TableGen/CodeGenDAGPatterns.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.h (renamed from llvm/utils/TableGen/CodeGenDAGPatterns.h)4
-rw-r--r--llvm/utils/TableGen/Common/CodeGenHwModes.cpp (renamed from llvm/utils/TableGen/CodeGenHwModes.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenHwModes.h (renamed from llvm/utils/TableGen/CodeGenHwModes.h)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstAlias.cpp (renamed from llvm/utils/TableGen/CodeGenInstAlias.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstAlias.h (renamed from llvm/utils/TableGen/CodeGenInstAlias.h)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstruction.cpp (renamed from llvm/utils/TableGen/CodeGenInstruction.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenInstruction.h (renamed from llvm/utils/TableGen/CodeGenInstruction.h)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenRegisters.cpp (renamed from llvm/utils/TableGen/CodeGenRegisters.cpp)81
-rw-r--r--llvm/utils/TableGen/Common/CodeGenRegisters.h (renamed from llvm/utils/TableGen/CodeGenRegisters.h)43
-rw-r--r--llvm/utils/TableGen/Common/CodeGenSchedule.cpp (renamed from llvm/utils/TableGen/CodeGenSchedule.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenSchedule.h (renamed from llvm/utils/TableGen/CodeGenSchedule.h)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenTarget.cpp (renamed from llvm/utils/TableGen/CodeGenTarget.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/CodeGenTarget.h (renamed from llvm/utils/TableGen/CodeGenTarget.h)2
-rw-r--r--llvm/utils/TableGen/Common/DAGISelMatcher.cpp (renamed from llvm/utils/TableGen/DAGISelMatcher.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/DAGISelMatcher.h (renamed from llvm/utils/TableGen/DAGISelMatcher.h)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CXXPredicates.cpp (renamed from llvm/utils/TableGen/GlobalISel/CXXPredicates.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CXXPredicates.h (renamed from llvm/utils/TableGen/GlobalISel/CXXPredicates.h)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CodeExpander.cpp (renamed from llvm/utils/TableGen/GlobalISel/CodeExpander.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CodeExpander.h (renamed from llvm/utils/TableGen/GlobalISel/CodeExpander.h)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CodeExpansions.h (renamed from llvm/utils/TableGen/GlobalISel/CodeExpansions.h)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.cpp23
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.h (renamed from llvm/utils/TableGen/GlobalISel/CombinerUtils.h)4
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp (renamed from llvm/utils/TableGen/GlobalISelMatchTable.cpp)31
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h (renamed from llvm/utils/TableGen/GlobalISelMatchTable.h)2
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp (renamed from llvm/utils/TableGen/GlobalISelMatchTableExecutorEmitter.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h (renamed from llvm/utils/TableGen/GlobalISelMatchTableExecutorEmitter.h)2
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/MatchDataInfo.cpp (renamed from llvm/utils/TableGen/GlobalISel/MatchDataInfo.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/MatchDataInfo.h (renamed from llvm/utils/TableGen/GlobalISel/MatchDataInfo.h)0
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp462
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/PatternParser.h118
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp (renamed from llvm/utils/TableGen/GlobalISel/Patterns.cpp)4
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/Patterns.h (renamed from llvm/utils/TableGen/GlobalISel/Patterns.h)0
-rw-r--r--llvm/utils/TableGen/Common/InfoByHwMode.cpp (renamed from llvm/utils/TableGen/InfoByHwMode.cpp)18
-rw-r--r--llvm/utils/TableGen/Common/InfoByHwMode.h (renamed from llvm/utils/TableGen/InfoByHwMode.h)23
-rw-r--r--llvm/utils/TableGen/Common/OptEmitter.cpp (renamed from llvm/utils/TableGen/OptEmitter.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/OptEmitter.h (renamed from llvm/utils/TableGen/OptEmitter.h)0
-rw-r--r--llvm/utils/TableGen/Common/PredicateExpander.cpp (renamed from llvm/utils/TableGen/PredicateExpander.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/PredicateExpander.h (renamed from llvm/utils/TableGen/PredicateExpander.h)0
-rw-r--r--llvm/utils/TableGen/Common/SubtargetFeatureInfo.cpp (renamed from llvm/utils/TableGen/SubtargetFeatureInfo.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/SubtargetFeatureInfo.h (renamed from llvm/utils/TableGen/SubtargetFeatureInfo.h)0
-rw-r--r--llvm/utils/TableGen/Common/Types.cpp (renamed from llvm/utils/TableGen/Types.cpp)0
-rw-r--r--llvm/utils/TableGen/Common/Types.h (renamed from llvm/utils/TableGen/Types.h)0
-rw-r--r--llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp (renamed from llvm/utils/TableGen/VarLenCodeEmitterGen.cpp)15
-rw-r--r--llvm/utils/TableGen/Common/VarLenCodeEmitterGen.h (renamed from llvm/utils/TableGen/VarLenCodeEmitterGen.h)0
-rw-r--r--llvm/utils/TableGen/CompressInstEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/DAGISelEmitter.cpp8
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherEmitter.cpp12
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherGen.cpp14
-rw-r--r--llvm/utils/TableGen/DAGISelMatcherOpt.cpp6
-rw-r--r--llvm/utils/TableGen/DFAEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/DFAPacketizerEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/DXILEmitter.cpp54
-rw-r--r--llvm/utils/TableGen/DecoderEmitter.cpp10
-rw-r--r--llvm/utils/TableGen/DisassemblerEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/FastISelEmitter.cpp10
-rw-r--r--llvm/utils/TableGen/GlobalISel/CMakeLists.txt20
-rw-r--r--llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp504
-rw-r--r--llvm/utils/TableGen/GlobalISelEmitter.cpp18
-rw-r--r--llvm/utils/TableGen/InstrDocsEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/InstrInfoEmitter.cpp16
-rw-r--r--llvm/utils/TableGen/IntrinsicEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/OptParserEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/OptRSTEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/PseudoLoweringEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/RegisterBankEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/RegisterInfoEmitter.cpp52
-rw-r--r--llvm/utils/TableGen/SearchableTableEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/SubtargetEmitter.cpp8
-rw-r--r--llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp2
-rw-r--r--llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/X86FoldTablesEmitter.cpp4
-rw-r--r--llvm/utils/TableGen/X86MnemonicTables.cpp4
-rw-r--r--llvm/utils/TableGen/X86RecognizableInstr.h2
-rwxr-xr-xllvm/utils/bisect-skip-count2
-rwxr-xr-xllvm/utils/git/code-format-helper.py27
-rwxr-xr-xllvm/utils/git/github-automation.py27
-rw-r--r--llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/compiler-rt/test/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn6
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/MCTargetDesc/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Transforms/IPO/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/unittests/TableGen/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn30
-rw-r--r--llvm/utils/gn/secondary/llvm/utils/TableGen/Basic/BUILD.gn10
-rw-r--r--llvm/utils/gn/secondary/llvm/utils/TableGen/Common/BUILD.gn34
-rw-r--r--llvm/utils/gn/secondary/llvm/utils/TableGen/GlobalISel/BUILD.gn13
-rw-r--r--mlir/docs/DataLayout.md6
-rw-r--r--mlir/examples/transform/Ch4/include/MyExtension.td2
-rw-r--r--mlir/include/mlir/Analysis/Presburger/Matrix.h3
-rw-r--r--mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitC.h21
-rw-r--r--mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitCPass.h20
-rw-r--r--mlir/include/mlir/Conversion/Passes.h1
-rw-r--r--mlir/include/mlir/Conversion/Passes.td13
-rw-r--r--mlir/include/mlir/Dialect/Arith/IR/Arith.h4
-rw-r--r--mlir/include/mlir/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.h20
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h27
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td73
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h4
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td1
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt1
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h42
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h12
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td18
-rw-r--r--mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td2
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td2
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h1
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td57
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td67
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td91
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td4
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td6
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td2
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h12
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.h2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td4
-rw-r--r--mlir/include/mlir/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.h20
-rw-r--r--mlir/include/mlir/Dialect/SCF/TransformOps/SCFTransformOps.td23
-rw-r--r--mlir/include/mlir/Dialect/SCF/Utils/Utils.h10
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td16
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h5
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.h12
-rw-r--r--mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.h2
-rw-r--r--mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td27
-rw-r--r--mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.h2
-rw-r--r--mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Transform/IR/CMakeLists.txt4
-rw-r--r--mlir/include/mlir/Dialect/Transform/IR/TransformDialect.h8
-rw-r--r--mlir/include/mlir/Dialect/Transform/IR/TransformOps.h2
-rw-r--r--mlir/include/mlir/Dialect/Transform/IR/TransformOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Transform/Interfaces/CMakeLists.txt6
-rw-r--r--mlir/include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.h (renamed from mlir/include/mlir/Dialect/Transform/IR/MatchInterfaces.h)2
-rw-r--r--mlir/include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.td (renamed from mlir/include/mlir/Dialect/Transform/IR/MatchInterfaces.td)0
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h104
-rw-r--r--mlir/include/mlir/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.h20
-rw-r--r--mlir/include/mlir/Dialect/Vector/Transforms/VectorTransforms.h6
-rw-r--r--mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h20
-rw-r--r--mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h35
-rw-r--r--mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h8
-rw-r--r--mlir/include/mlir/IR/Builders.h2
-rw-r--r--mlir/include/mlir/IR/Dialect.h4
-rw-r--r--mlir/include/mlir/IR/OpDefinition.h3
-rw-r--r--mlir/include/mlir/IR/OperationSupport.h5
-rw-r--r--mlir/include/mlir/IR/TensorEncoding.td4
-rw-r--r--mlir/include/mlir/InitAllDialects.h6
-rw-r--r--mlir/include/mlir/Interfaces/DataLayoutInterfaces.h13
-rw-r--r--mlir/include/mlir/Interfaces/DataLayoutInterfaces.td28
-rw-r--r--mlir/include/mlir/Interfaces/MemorySlotInterfaces.h3
-rw-r--r--mlir/include/mlir/Interfaces/ValueBoundsOpInterface.h20
-rw-r--r--mlir/include/mlir/Target/LLVMIR/ModuleImport.h8
-rw-r--r--mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h7
-rw-r--r--mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h1
-rw-r--r--mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp52
-rw-r--r--mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp8
-rw-r--r--mlir/lib/Conversion/CMakeLists.txt1
-rw-r--r--mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp50
-rw-r--r--mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp23
-rw-r--r--mlir/lib/Conversion/MemRefToEmitC/CMakeLists.txt18
-rw-r--r--mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp114
-rw-r--r--mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp55
-rw-r--r--mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp52
-rw-r--r--mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp304
-rw-r--r--mlir/lib/Dialect/Affine/Utils/Utils.cpp3
-rw-r--r--mlir/lib/Dialect/Arith/IR/ArithDialect.cpp6
-rw-r--r--mlir/lib/Dialect/Arith/IR/ArithOps.cpp36
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.cpp44
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp58
-rw-r--r--mlir/lib/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp18
-rw-r--r--mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp3
-rw-r--r--mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp80
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp230
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp5
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp116
-rw-r--r--mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp2
-rw-r--r--mlir/lib/Dialect/Complex/IR/ComplexOps.cpp26
-rw-r--r--mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp6
-rw-r--r--mlir/lib/Dialect/EmitC/Transforms/FormExpressions.cpp3
-rw-r--r--mlir/lib/Dialect/Func/IR/FuncOps.cpp4
-rw-r--r--mlir/lib/Dialect/GPU/IR/GPUDialect.cpp4
-rw-r--r--mlir/lib/Dialect/Index/IR/IndexDialect.cpp2
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp112
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp25
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp4
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp2
-rw-r--r--mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp101
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp12
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp2
-rw-r--r--mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp19
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp303
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp38
-rw-r--r--mlir/lib/Dialect/Math/IR/MathDialect.cpp2
-rw-r--r--mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp4
-rw-r--r--mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp7
-rw-r--r--mlir/lib/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.cpp48
-rw-r--r--mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt2
-rw-r--r--mlir/lib/Dialect/SCF/IR/SCF.cpp2
-rw-r--r--mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp66
-rw-r--r--mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp2
-rw-r--r--mlir/lib/Dialect/SCF/Utils/Utils.cpp99
-rw-r--r--mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp166
-rw-r--r--mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp2
-rw-r--r--mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp9
-rw-r--r--mlir/lib/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.cpp54
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp56
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp4
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp2
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp4
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp101
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp145
-rw-r--r--mlir/lib/Dialect/Tensor/Utils/Utils.cpp8
-rw-r--r--mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp5
-rw-r--r--mlir/lib/Dialect/Tosa/IR/TosaOps.cpp5
-rw-r--r--mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp73
-rw-r--r--mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp51
-rw-r--r--mlir/lib/Dialect/Transform/IR/CMakeLists.txt5
-rw-r--r--mlir/lib/Dialect/Transform/IR/TransformOps.cpp25
-rw-r--r--mlir/lib/Dialect/Transform/Interfaces/CMakeLists.txt2
-rw-r--r--mlir/lib/Dialect/Transform/Interfaces/MatchInterfaces.cpp (renamed from mlir/lib/Dialect/Transform/IR/MatchInterfaces.cpp)4
-rw-r--r--mlir/lib/Dialect/UB/IR/UBOps.cpp2
-rw-r--r--mlir/lib/Dialect/Vector/IR/CMakeLists.txt2
-rw-r--r--mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp103
-rw-r--r--mlir/lib/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.cpp51
-rw-r--r--mlir/lib/Dialect/Vector/IR/VectorOps.cpp4
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp8
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp50
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp16
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp10
-rw-r--r--mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp5
-rw-r--r--mlir/lib/ExecutionEngine/CMakeLists.txt23
-rw-r--r--mlir/lib/ExecutionEngine/CRunnerUtils.cpp16
-rw-r--r--mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp8
-rw-r--r--mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp7
-rw-r--r--mlir/lib/IR/MLIRContext.cpp29
-rw-r--r--mlir/lib/Interfaces/DataLayoutInterfaces.cpp30
-rw-r--r--mlir/lib/Interfaces/ValueBoundsOpInterface.cpp115
-rw-r--r--mlir/lib/Target/LLVMIR/DebugImporter.cpp8
-rw-r--r--mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp374
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleImport.cpp21
-rw-r--r--mlir/lib/Target/LLVMIR/ModuleTranslation.cpp21
-rw-r--r--mlir/lib/Transforms/InlinerPass.cpp9
-rw-r--r--mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp43
-rw-r--r--mlir/test/Conversion/ArithToEmitC/arith-to-emitc-failed.mlir15
-rw-r--r--mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir51
-rw-r--r--mlir/test/Conversion/ArithToSPIRV/fast-math.mlir14
-rw-r--r--mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir46
-rw-r--r--mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir36
-rw-r--r--mlir/test/Conversion/GPUToSPIRV/load-store.mlir8
-rw-r--r--mlir/test/Conversion/MemRefToEmitC/memref-to-emitc-failed.mlir40
-rw-r--r--mlir/test/Conversion/MemRefToEmitC/memref-to-emitc.mlir28
-rw-r--r--mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir158
-rw-r--r--mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir34
-rw-r--r--mlir/test/Conversion/SCFToSPIRV/for.mlir12
-rw-r--r--mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir10
-rw-r--r--mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir368
-rw-r--r--mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir20
-rw-r--r--mlir/test/Dialect/Arith/expand-ops.mlir84
-rw-r--r--mlir/test/Dialect/Arith/one-shot-bufferize.mlir6
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-loops.mlir86
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation-simplification.mlir14
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir6
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis-bottom-up-from-terminators.mlir36
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir6
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir6
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir6
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir11
-rw-r--r--mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir6
-rw-r--r--mlir/test/Dialect/Complex/canonicalize.mlir56
-rw-r--r--mlir/test/Dialect/EmitC/transforms.mlir17
-rw-r--r--mlir/test/Dialect/LLVMIR/debuginfo.mlir8
-rw-r--r--mlir/test/Dialect/LLVMIR/layout.mlir30
-rw-r--r--mlir/test/Dialect/LLVMIR/roundtrip.mlir15
-rw-r--r--mlir/test/Dialect/LLVMIR/sroa.mlir103
-rw-r--r--mlir/test/Dialect/LLVMIR/type-consistency.mlir140
-rw-r--r--mlir/test/Dialect/Linalg/data-layout-propagation.mlir160
-rw-r--r--mlir/test/Dialect/Linalg/flatten-elementwise.mlir21
-rw-r--r--mlir/test/Dialect/Linalg/flatten-unsupported.mlir33
-rw-r--r--mlir/test/Dialect/Linalg/one-shot-bufferize.mlir6
-rw-r--r--mlir/test/Dialect/Linalg/tile-to-forall.mlir141
-rw-r--r--mlir/test/Dialect/SCF/for-loop-peeling-front.mlir47
-rw-r--r--mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir6
-rw-r--r--mlir/test/Dialect/SCF/one-shot-bufferize.mlir6
-rw-r--r--mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir251
-rw-r--r--mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir352
-rw-r--r--mlir/test/Dialect/SPIRV/Transforms/webgpu-prepare.mlir32
-rw-r--r--mlir/test/Dialect/SparseTensor/no_fold_into_consumer.mlir47
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir23
-rw-r--r--mlir/test/Dialect/Tensor/drop-redundant-insert-slice-rank-expansion.mlir65
-rw-r--r--mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir139
-rw-r--r--mlir/test/Dialect/Tensor/one-shot-bufferize.mlir6
-rw-r--r--mlir/test/Dialect/Tosa/canonicalize.mlir8
-rw-r--r--mlir/test/Dialect/Tosa/constant-op-fold.mlir17
-rw-r--r--mlir/test/Dialect/Tosa/level_check.mlir16
-rw-r--r--mlir/test/Dialect/Transform/foreach-match.mlir80
-rw-r--r--mlir/test/Dialect/Transform/ops-invalid.mlir11
-rw-r--r--mlir/test/Dialect/Vector/linearize.mlir190
-rw-r--r--mlir/test/Dialect/Vector/test-scalable-bounds.mlir161
-rw-r--r--mlir/test/Dialect/Vector/vector-dropleadunitdim-transforms.mlir142
-rw-r--r--mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir18
-rw-r--r--mlir/test/IR/greedy-pattern-rewrite-driver-bottom-up.mlir (renamed from mlir/test/IR/greedy-pattern-rewriter-driver.mlir)0
-rw-r--r--mlir/test/IR/greedy-pattern-rewrite-driver-top-down.mlir58
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir60
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir2
-rw-r--r--mlir/test/Integration/Dialect/Linalg/CPU/pack-unpack-mmt4d.mlir173
-rw-r--r--mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir3
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir3
-rwxr-xr-xmlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir57
-rw-r--r--mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir30
-rw-r--r--mlir/test/Interfaces/DataLayoutInterfaces/module.mlir4
-rw-r--r--mlir/test/Interfaces/DataLayoutInterfaces/query.mlir51
-rw-r--r--mlir/test/Interfaces/DataLayoutInterfaces/types.mlir1
-rw-r--r--mlir/test/Target/LLVMIR/Import/basic.ll14
-rw-r--r--mlir/test/Target/LLVMIR/Import/debug-info.ll3
-rw-r--r--mlir/test/Target/LLVMIR/Import/intrinsic.ll31
-rw-r--r--mlir/test/Target/LLVMIR/data-layout.mlir2
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-debug.mlir4
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir31
-rw-r--r--mlir/test/Target/LLVMIR/llvmir.mlir8
-rw-r--r--mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir11
-rw-r--r--mlir/test/Target/LLVMIR/omptarget-llvm.mlir25
-rw-r--r--mlir/test/Transforms/canonicalize.mlir9
-rw-r--r--mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp38
-rw-r--r--mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp12
-rw-r--r--mlir/test/lib/Dialect/Test/TestTypeDefs.td3
-rw-r--r--mlir/test/lib/Dialect/Test/TestTypes.cpp8
-rw-r--r--mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.h2
-rw-r--r--mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td2
-rw-r--r--mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp4
-rw-r--r--mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir18
-rw-r--r--mlir/test/mlir-opt/split-markers.mlir32
-rw-r--r--mlir/test/mlir-pdll/split-markers.pdll12
-rw-r--r--mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp4
-rw-r--r--mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp2
-rw-r--r--mlir/unittests/IR/InterfaceAttachmentTest.cpp4
-rw-r--r--mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp7
-rw-r--r--openmp/libomptarget/CMakeLists.txt3
-rw-r--r--openmp/libomptarget/DeviceRTL/src/Debug.cpp4
-rw-r--r--openmp/libomptarget/include/PluginManager.h49
-rw-r--r--openmp/libomptarget/plugins-nextgen/CMakeLists.txt58
-rw-r--r--openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt80
-rw-r--r--openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp85
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt56
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/OMPT/CMakeLists.txt70
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/include/PluginInterface.h202
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/include/RPC.h7
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp476
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp44
-rw-r--r--openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt40
-rw-r--r--openmp/libomptarget/plugins-nextgen/cuda/src/rtl.cpp31
-rw-r--r--openmp/libomptarget/plugins-nextgen/host/CMakeLists.txt36
-rw-r--r--openmp/libomptarget/plugins-nextgen/host/dynamic_ffi/ffi.cpp16
-rw-r--r--openmp/libomptarget/plugins-nextgen/host/src/rtl.cpp36
-rw-r--r--openmp/libomptarget/src/PluginManager.cpp59
-rw-r--r--openmp/libomptarget/src/device.cpp61
-rw-r--r--openmp/libomptarget/src/interface.cpp6
-rw-r--r--openmp/libomptarget/src/omptarget.cpp14
-rw-r--r--openmp/libomptarget/test/offloading/d2d_memcpy_sync.c72
-rw-r--r--openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-2.f9039
-rw-r--r--openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-bounds.f9044
-rw-r--r--openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-scalar.f9033
-rw-r--r--openmp/runtime/src/kmp.h5
-rw-r--r--openmp/runtime/src/kmp_affinity.cpp130
-rw-r--r--openmp/runtime/src/kmp_affinity.h74
-rw-r--r--openmp/runtime/src/kmp_collapse.cpp15
-rw-r--r--openmp/runtime/src/kmp_csupport.cpp5
-rw-r--r--openmp/runtime/src/kmp_os.h2
-rw-r--r--openmp/runtime/src/kmp_runtime.cpp18
-rw-r--r--openmp/runtime/src/kmp_taskdeps.cpp6
-rw-r--r--openmp/runtime/src/z_Linux_util.cpp39
-rw-r--r--openmp/runtime/test/lit.cfg2
-rw-r--r--openmp/runtime/test/tasking/hidden_helper_task/issue-87117.c36
-rw-r--r--openmp/runtime/test/worksharing/for/collapse_test.inc201
-rw-r--r--openmp/runtime/test/worksharing/for/omp_collapse_many_GELTGT_int.c65
-rw-r--r--openmp/runtime/test/worksharing/for/omp_collapse_many_GTGEGT_int.c71
-rw-r--r--openmp/runtime/test/worksharing/for/omp_collapse_many_LTLEGE_int.c66
-rw-r--r--openmp/runtime/test/worksharing/for/omp_collapse_many_int.c73
-rw-r--r--openmp/runtime/test/worksharing/for/omp_collapse_one_int.c32
-rw-r--r--polly/include/polly/Support/PollyDebug.h38
-rw-r--r--polly/lib/Analysis/DependenceInfo.cpp39
-rw-r--r--polly/lib/Analysis/PolyhedralInfo.cpp15
-rw-r--r--polly/lib/Analysis/PruneUnprofitable.cpp5
-rw-r--r--polly/lib/Analysis/ScopBuilder.cpp36
-rw-r--r--polly/lib/Analysis/ScopDetection.cpp27
-rw-r--r--polly/lib/Analysis/ScopInfo.cpp3
-rw-r--r--polly/lib/CMakeLists.txt1
-rw-r--r--polly/lib/CodeGen/CodeGeneration.cpp5
-rw-r--r--polly/lib/CodeGen/IslAst.cpp7
-rw-r--r--polly/lib/Support/PollyDebug.cpp27
-rw-r--r--polly/lib/Support/SCEVValidator.cpp34
-rw-r--r--polly/lib/Transform/DeLICM.cpp93
-rw-r--r--polly/lib/Transform/FlattenAlgo.cpp37
-rw-r--r--polly/lib/Transform/FlattenSchedule.cpp17
-rw-r--r--polly/lib/Transform/ForwardOpTree.cpp56
-rw-r--r--polly/lib/Transform/ManualOptimizer.cpp9
-rw-r--r--polly/lib/Transform/MatmulOptimizer.cpp5
-rw-r--r--polly/lib/Transform/ScheduleOptimizer.cpp31
-rw-r--r--polly/lib/Transform/ScheduleTreeTransform.cpp11
-rw-r--r--polly/lib/Transform/ScopInliner.cpp13
-rw-r--r--polly/lib/Transform/Simplify.cpp47
-rw-r--r--polly/lib/Transform/ZoneAlgo.cpp7
-rw-r--r--polly/test/Support/pollyDebug.ll85
-rw-r--r--utils/bazel/.bazelrc4
-rw-r--r--utils/bazel/llvm-project-overlay/bolt/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/clang-tools-extra/clang-tidy/defs.bzl4
-rw-r--r--utils/bazel/llvm-project-overlay/clang/BUILD.bazel23
-rw-r--r--utils/bazel/llvm-project-overlay/compiler-rt/BUILD.bazel4
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel34
-rw-r--r--utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl9
-rw-r--r--utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/lld/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel492
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/driver.bzl182
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/lit_test.bzl1
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel5
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel865
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch4/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch5/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch6/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch7/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel38
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel43
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/unittests/BUILD.bazel39
-rw-r--r--utils/bazel/third_party_build/pfm.BUILD10
3004 files changed, 124091 insertions, 43837 deletions
diff --git a/.ci/monolithic-linux.sh b/.ci/monolithic-linux.sh
index 9e670c447fba..b347c443da67 100755
--- a/.ci/monolithic-linux.sh
+++ b/.ci/monolithic-linux.sh
@@ -18,7 +18,7 @@ set -o pipefail
MONOREPO_ROOT="${MONOREPO_ROOT:="$(git rev-parse --show-toplevel)"}"
BUILD_DIR="${BUILD_DIR:=${MONOREPO_ROOT}/build}"
-rm -rf ${BUILD_DIR}
+rm -rf "${BUILD_DIR}"
ccache --zero-stats
@@ -37,8 +37,8 @@ projects="${1}"
targets="${2}"
echo "--- cmake"
-pip install -q -r ${MONOREPO_ROOT}/mlir/python/requirements.txt
-cmake -S ${MONOREPO_ROOT}/llvm -B ${BUILD_DIR} \
+pip install -q -r "${MONOREPO_ROOT}"/mlir/python/requirements.txt
+cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
-D LLVM_ENABLE_PROJECTS="${projects}" \
-G Ninja \
-D CMAKE_BUILD_TYPE=Release \
diff --git a/.ci/monolithic-windows.sh b/.ci/monolithic-windows.sh
index 52ba13036f91..4fd88ea81c84 100755
--- a/.ci/monolithic-windows.sh
+++ b/.ci/monolithic-windows.sh
@@ -19,7 +19,7 @@ set -o pipefail
MONOREPO_ROOT="${MONOREPO_ROOT:="$(git rev-parse --show-toplevel)"}"
BUILD_DIR="${BUILD_DIR:=${MONOREPO_ROOT}/build}"
-rm -rf ${BUILD_DIR}
+rm -rf "${BUILD_DIR}"
if [[ -n "${CLEAR_CACHE:-}" ]]; then
echo "clearing sccache"
@@ -37,14 +37,14 @@ projects="${1}"
targets="${2}"
echo "--- cmake"
-pip install -q -r ${MONOREPO_ROOT}/mlir/python/requirements.txt
+pip install -q -r "${MONOREPO_ROOT}"/mlir/python/requirements.txt
# The CMAKE_*_LINKER_FLAGS to disable the manifest come from research
# on fixing a build reliability issue on the build server, please
# see https://github.com/llvm/llvm-project/pull/82393 and
# https://discourse.llvm.org/t/rfc-future-of-windows-pre-commit-ci/76840/40
# for further information.
-cmake -S ${MONOREPO_ROOT}/llvm -B ${BUILD_DIR} \
+cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
-D LLVM_ENABLE_PROJECTS="${projects}" \
-G Ninja \
-D CMAKE_BUILD_TYPE=Release \
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index c246c42b0904..77ba81c58c5d 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -59,8 +59,8 @@ clang/test/AST/Interp/ @tbaederr
/mlir/Dialect/*/Transforms/Bufferize.cpp @matthias-springer
# Linalg Dialect in MLIR.
-/mlir/include/mlir/Dialect/Linalg @dcaballe @nicolasvasilache
-/mlir/lib/Dialect/Linalg @dcaballe @nicolasvasilache
+/mlir/include/mlir/Dialect/Linalg/* @dcaballe @nicolasvasilache
+/mlir/lib/Dialect/Linalg/* @dcaballe @nicolasvasilache
/mlir/lib/Dialect/Linalg/Transforms/DecomposeLinalgOps.cpp @MaheshRavishankar @nicolasvasilache
/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp @MaheshRavishankar @nicolasvasilache
/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @MaheshRavishankar @nicolasvasilache
@@ -77,14 +77,14 @@ clang/test/AST/Interp/ @tbaederr
/mlir/**/*SME* @banach-space @dcaballe @nicolasvasilache
/mlir/**/*SVE* @banach-space @dcaballe @nicolasvasilache
/mlir/**/*VectorInterfaces* @dcaballe @nicolasvasilache
-/mlir/**/*VectorToSCF* @banach-space @dcaballe @nicolasvasilache @matthias-springer
+/mlir/**/*VectorToSCF* @banach-space @dcaballe @matthias-springer @nicolasvasilache
/mlir/**/*VectorToLLVM* @banach-space @dcaballe @nicolasvasilache
/mlir/**/*X86Vector* @aartbik @dcaballe @nicolasvasilache
-/mlir/include/mlir/Dialect/Vector @dcaballe @nicolasvasilache
-/mlir/lib/Dialect/Vector @dcaballe @nicolasvasilache
-/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp @MaheshRavishankar @nicolasvasilache
-/mlir/**/*EmulateNarrowType* @hanhanW
+/mlir/include/mlir/Dialect/Vector/* @dcaballe @nicolasvasilache
+/mlir/lib/Dialect/Vector/* @dcaballe @nicolasvasilache
/mlir/lib/Dialect/Vector/Transforms/* @hanhanW @nicolasvasilache
+/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp @MaheshRavishankar @nicolasvasilache
+/mlir/**/*EmulateNarrowType* @dcaballe @hanhanW
# Presburger library in MLIR
/mlir/**/*Presburger* @Groverkss @Superty
@@ -96,6 +96,7 @@ clang/test/AST/Interp/ @tbaederr
# Transform Dialect in MLIR.
/mlir/include/mlir/Dialect/Transform/* @ftynse @nicolasvasilache
/mlir/lib/Dialect/Transform/* @ftynse @nicolasvasilache
+/mlir/**/*TransformOps* @ftynse @nicolasvasilache
# SPIR-V Dialect in MLIR.
/mlir/**/SPIRV/ @antiagainst @kuhar
@@ -119,3 +120,8 @@ clang/test/AST/Interp/ @tbaederr
# Bazel build system.
/utils/bazel/ @rupprecht
+
+# InstallAPI and TextAPI
+/llvm/**/TextAPI/ @cyndyishida
+/clang/**/InstallAPI/ @cyndyishida
+/clang/tools/clang-installapi/ @cyndyishida
diff --git a/.github/workflows/issue-write.yml b/.github/workflows/issue-write.yml
new file mode 100644
index 000000000000..02a5f7c213e8
--- /dev/null
+++ b/.github/workflows/issue-write.yml
@@ -0,0 +1,128 @@
+name: Comment on an issue
+
+on:
+ workflow_run:
+ workflows: ["Check code formatting"]
+ types:
+ - completed
+
+permissions:
+ contents: read
+
+jobs:
+ pr-comment:
+ runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
+ if: >
+ github.event.workflow_run.event == 'pull_request'
+ steps:
+ - name: 'Download artifact'
+ uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1
+ with:
+ github-token: ${{ secrets.ISSUE_WRITE_DOWNLOAD_ARTIFACT }}
+ run-id: ${{ github.event.workflow_run.id }}
+ name: workflow-args
+
+ - name: 'Comment on PR'
+ uses: actions/github-script@v3
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ var fs = require('fs');
+ const comments = JSON.parse(fs.readFileSync('./comments'));
+ if (!comments) {
+ return;
+ }
+
+ let runInfo = await github.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: context.payload.workflow_run.id
+ });
+
+ console.log(runInfo);
+
+
+ // Query to find the number of the pull request that triggered this job.
+ // The associated pull requests are based off of the branch name, so if
+ // you create a pull request for a branch, close it, and then create
+ // another pull request with the same branch, then this query will return
+ // two associated pull requests. This is why we have to fetch all the
+ // associated pull requests and then iterate through them to find the
+ // one that is open.
+ const gql_query = `
+ query($repo_owner : String!, $repo_name : String!, $branch: String!) {
+ repository(owner: $repo_owner, name: $repo_name) {
+ ref (qualifiedName: $branch) {
+ associatedPullRequests(first: 100) {
+ nodes {
+ baseRepository {
+ owner {
+ login
+ }
+ }
+ number
+ state
+ }
+ }
+ }
+ }
+ }
+ `
+ const gql_variables = {
+ repo_owner: runInfo.data.head_repository.owner.login,
+ repo_name: runInfo.data.head_repository.name,
+ branch: runInfo.data.head_branch
+ }
+ const gql_result = await github.graphql(gql_query, gql_variables);
+ console.log(gql_result);
+ console.log(gql_result.repository.ref.associatedPullRequests.nodes);
+
+ var pr_number = 0;
+ gql_result.repository.ref.associatedPullRequests.nodes.forEach((pr) => {
+            if (pr.baseRepository.owner.login === context.repo.owner && pr.state == 'OPEN') {
+ pr_number = pr.number;
+ }
+ });
+ if (pr_number == 0) {
+ console.log("Error retrieving pull request number");
+ return;
+ }
+
+ await comments.forEach(function (comment) {
+ if (comment.id) {
+ // Security check: Ensure that this comment was created by
+ // the github-actions bot, so a malicious input won't overwrite
+ // a user's comment.
+ github.issues.getComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: comment.id
+ }).then((old_comment) => {
+ console.log(old_comment);
+ if (old_comment.data.user.login != "github-actions[bot]") {
+ console.log("Invalid comment id: " + comment.id);
+ return;
+ }
+ github.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: pr_number,
+ comment_id: comment.id,
+ body: comment.body
+ });
+ });
+ } else {
+ github.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: pr_number,
+ body: comment.body
+ });
+ }
+ });
+
+ - name: Dump comments file
+ if: always()
+ run: cat comments
diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml
index 1d1fa2483b65..54dfe3aadbb4 100644
--- a/.github/workflows/pr-code-format.yml
+++ b/.github/workflows/pr-code-format.yml
@@ -1,12 +1,9 @@
name: "Check code formatting"
on:
- pull_request_target:
+ pull_request:
branches:
- main
-permissions:
- pull-requests: write
-
jobs:
code_formatter:
runs-on: ubuntu-latest
@@ -31,12 +28,13 @@ jobs:
separator: ","
skip_initial_fetch: true
- # We need to make sure that we aren't executing/using any code from the
- # PR for security reasons as we're using pull_request_target. Checkout
- # the target branch with the necessary files.
+ # We need to pull the script from the main branch, so that we ensure
+ # we get the latest version of this script.
- name: Fetch code formatting utils
uses: actions/checkout@v4
with:
+        repository: ${{ github.repository }}
+ ref: ${{ github.base_ref }}
sparse-checkout: |
llvm/utils/git/requirements_formatting.txt
llvm/utils/git/code-format-helper.py
@@ -75,10 +73,20 @@ jobs:
# to take advantage of the new --diff_from_common_commit option
# explicitly in code-format-helper.py and not have to diff starting at
# the merge base.
+ # Create an empty comments file so the pr-write job doesn't fail.
run: |
+ echo "[]" > comments &&
python ./code-format-tools/llvm/utils/git/code-format-helper.py \
+ --write-comment-to-file \
--token ${{ secrets.GITHUB_TOKEN }} \
--issue-number $GITHUB_PR_NUMBER \
--start-rev $(git merge-base $START_REV $END_REV) \
--end-rev $END_REV \
--changed-files "$CHANGED_FILES"
+
+ - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0
+ if: always()
+ with:
+ name: workflow-args
+ path: |
+ comments
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index b8e8ab26c3ff..ff61cf83a6af 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -36,7 +36,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
+ uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
with:
results_file: results.sarif
results_format: sarif
diff --git a/bolt/docs/BAT.md b/bolt/docs/BAT.md
index 060fc632f686..f23ef1abf876 100644
--- a/bolt/docs/BAT.md
+++ b/bolt/docs/BAT.md
@@ -14,9 +14,8 @@ binary onto the original binary.
# Usage
`--enable-bat` flag controls the generation of BAT section. Sampled profile
needs to be passed along with the optimized binary containing BAT section to
-`perf2bolt` which reads BAT section and produces fdata profile for the original
-binary. Note that YAML profile generation is not supported since BAT doesn't
-contain the metadata for input functions.
+`perf2bolt` which reads BAT section and produces profile for the original
+binary.
# Internals
## Section contents
@@ -43,21 +42,21 @@ and [BoltAddressTranslation.cpp](/bolt/lib/Profile/BoltAddressTranslation.cpp).
### Layout
The general layout is as follows:
```
-Hot functions table header
-|------------------|
-| Function entry |
-| |--------------| |
-| | OutOff InOff | |
-| |--------------| |
-~~~~~~~~~~~~~~~~~~~~
+Hot functions table
+Cold functions table
-Cold functions table header
+Functions table:
|------------------|
| Function entry |
-| |--------------| |
-| | OutOff InOff | |
-| |--------------| |
-~~~~~~~~~~~~~~~~~~~~
+| |
+| Address |
+| translation |
+| table |
+| |
+| Secondary entry |
+| points |
+|------------------|
+
```
### Functions table
@@ -75,30 +74,43 @@ internal offsets, and between hot and cold fragments, to better spread deltas
and save space.
Hot indices are delta encoded, implicitly starting at zero.
-| Entry | Encoding | Description |
-| ------ | ------| ----------- |
-| `Address` | Continuous, Delta, ULEB128 | Function address in the output binary |
-| `HotIndex` | Delta, ULEB128 | Cold functions only: index of corresponding hot function in hot functions table |
-| `FuncHash` | 8b | Hot functions only: function hash for input function |
-| `NumEntries` | ULEB128 | Number of address translation entries for a function |
-| `EqualElems` | ULEB128 | Hot functions only: number of equal offsets in the beginning of a function |
-| `BranchEntries` | Bitmask, `alignTo(EqualElems, 8)` bits | Hot functions only: if `EqualElems` is non-zero, bitmask denoting entries with `BRANCHENTRY` bit |
-
-Function header is followed by `EqualElems` offsets (hot functions only) and
-`NumEntries-EqualElems` (`NumEntries` for cold functions) pairs of offsets for
-current function.
+| Entry | Encoding | Description | Hot/Cold |
+| ------ | ------| ----------- | ------ |
+| `Address` | Continuous, Delta, ULEB128 | Function address in the output binary | Both |
+| `HotIndex` | Delta, ULEB128 | Index of corresponding hot function in hot functions table | Cold |
+| `FuncHash` | 8b | Function hash for input function | Hot |
+| `NumBlocks` | ULEB128 | Number of basic blocks in the original function | Hot |
+| `NumSecEntryPoints` | ULEB128 | Number of secondary entry points in the original function | Hot |
+| `NumEntries` | ULEB128 | Number of address translation entries for a function | Both |
+| `EqualElems` | ULEB128 | Number of equal offsets in the beginning of a function | Hot |
+| `BranchEntries` | Bitmask, `alignTo(EqualElems, 8)` bits | If `EqualElems` is non-zero, bitmask denoting entries with `BRANCHENTRY` bit | Hot |
+
+Function header is followed by *Address Translation Table* with `NumEntries`
+total entries, and *Secondary Entry Points* table with `NumSecEntryPoints`
+entries (hot functions only).
### Address translation table
Delta encoding means that only the difference with the previous corresponding
entry is encoded. Input offsets implicitly start at zero.
-| Entry | Encoding | Description |
-| ------ | ------| ----------- |
-| `OutputOffset` | Continuous, Delta, ULEB128 | Function offset in output binary |
-| `InputOffset` | Optional, Delta, SLEB128 | Function offset in input binary with `BRANCHENTRY` LSB bit |
-| `BBHash` | Optional, 8b | Basic block entries only: basic block hash in input binary |
+| Entry | Encoding | Description | Branch/BB |
+| ------ | ------| ----------- | ------ |
+| `OutputOffset` | Continuous, Delta, ULEB128 | Function offset in output binary | Both |
+| `InputOffset` | Optional, Delta, SLEB128 | Function offset in input binary with `BRANCHENTRY` LSB bit | Both |
+| `BBHash` | Optional, 8b | Basic block hash in input binary | BB |
+| `BBIdx` | Optional, Delta, ULEB128 | Basic block index in input binary | BB |
+
+For hot fragments, the table omits the first `EqualElems` input offsets
+where the input offset equals output offset.
`BRANCHENTRY` bit denotes whether a given offset pair is a control flow source
(branch or call instruction). If not set, it signifies a control flow target
(basic block offset).
`InputAddr` is omitted for equal offsets in input and output function. In this
case, `BRANCHENTRY` bits are encoded separately in a `BranchEntries` bitvector.
+
+### Secondary Entry Points table
+The table is emitted for hot fragments only. It contains `NumSecEntryPoints`
+offsets denoting secondary entry points, delta encoded, implicitly starting at zero.
+| Entry | Encoding | Description |
+| ----- | -------- | ----------- |
+| `SecEntryPoint` | Delta, ULEB128 | Secondary entry point offset |
diff --git a/bolt/include/bolt/Core/AddressMap.h b/bolt/include/bolt/Core/AddressMap.h
index 85a9ab4473aa..31ed7d40ee7f 100644
--- a/bolt/include/bolt/Core/AddressMap.h
+++ b/bolt/include/bolt/Core/AddressMap.h
@@ -14,7 +14,6 @@
#ifndef BOLT_CORE_ADDRESS_MAP_H
#define BOLT_CORE_ADDRESS_MAP_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCSymbol.h"
#include <optional>
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 741b1a36af86..8b1af9e81539 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -265,7 +265,8 @@ class BinaryContext {
public:
static Expected<std::unique_ptr<BinaryContext>>
- createBinaryContext(const ObjectFile *File, bool IsPIC,
+ createBinaryContext(Triple TheTriple, StringRef InputFileName,
+ SubtargetFeatures *Features, bool IsPIC,
std::unique_ptr<DWARFContext> DwCtx,
JournalingStreams Logger);
diff --git a/bolt/include/bolt/Core/BinaryData.h b/bolt/include/bolt/Core/BinaryData.h
index 5f1efda78190..495163f1b61a 100644
--- a/bolt/include/bolt/Core/BinaryData.h
+++ b/bolt/include/bolt/Core/BinaryData.h
@@ -18,7 +18,6 @@
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <string>
#include <vector>
diff --git a/bolt/include/bolt/Core/BinaryDomTree.h b/bolt/include/bolt/Core/BinaryDomTree.h
index a9565795f946..de27aa78769d 100644
--- a/bolt/include/bolt/Core/BinaryDomTree.h
+++ b/bolt/include/bolt/Core/BinaryDomTree.h
@@ -16,7 +16,6 @@
#include "bolt/Core/BinaryBasicBlock.h"
#include "llvm/IR/Dominators.h"
-#include "llvm/Support/GenericDomTreeConstruction.h"
namespace llvm {
namespace bolt {
diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h
index c170fa6397cc..5089f8491280 100644
--- a/bolt/include/bolt/Core/BinaryFunction.h
+++ b/bolt/include/bolt/Core/BinaryFunction.h
@@ -27,6 +27,7 @@
#include "bolt/Core/BinaryBasicBlock.h"
#include "bolt/Core/BinaryContext.h"
+#include "bolt/Core/BinaryDomTree.h"
#include "bolt/Core/BinaryLoop.h"
#include "bolt/Core/BinarySection.h"
#include "bolt/Core/DebugData.h"
@@ -51,7 +52,6 @@
#include <iterator>
#include <limits>
#include <unordered_map>
-#include <unordered_set>
#include <utility>
#include <vector>
@@ -266,6 +266,7 @@ private:
BinaryContext &BC;
std::unique_ptr<BinaryLoopInfo> BLI;
+ std::unique_ptr<BinaryDominatorTree> BDT;
/// All labels in the function that are referenced via relocations from
/// data objects. Typically these are jump table destinations and computed
@@ -838,6 +839,14 @@ public:
/// stats.
void calculateMacroOpFusionStats();
+ /// Returns if BinaryDominatorTree has been constructed for this function.
+ bool hasDomTree() const { return BDT != nullptr; }
+
+ BinaryDominatorTree &getDomTree() { return *BDT.get(); }
+
+ /// Constructs DomTree for this function.
+ void constructDomTree();
+
/// Returns if loop detection has been run for this function.
bool hasLoopInfo() const { return BLI != nullptr; }
diff --git a/bolt/include/bolt/Core/BinaryLoop.h b/bolt/include/bolt/Core/BinaryLoop.h
index 72dce77df8c1..b425c75715d8 100644
--- a/bolt/include/bolt/Core/BinaryLoop.h
+++ b/bolt/include/bolt/Core/BinaryLoop.h
@@ -15,7 +15,7 @@
#ifndef BOLT_CORE_BINARY_LOOP_H
#define BOLT_CORE_BINARY_LOOP_H
-#include "llvm/Support/GenericLoopInfoImpl.h"
+#include "llvm/Support/GenericLoopInfo.h"
namespace llvm {
namespace bolt {
diff --git a/bolt/include/bolt/Core/BinarySection.h b/bolt/include/bolt/Core/BinarySection.h
index 0f179877bd3d..5b7a5b08820e 100644
--- a/bolt/include/bolt/Core/BinarySection.h
+++ b/bolt/include/bolt/Core/BinarySection.h
@@ -18,7 +18,6 @@
#include "bolt/Core/DebugData.h"
#include "bolt/Core/Relocation.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/MachO.h"
diff --git a/bolt/include/bolt/Core/DebugData.h b/bolt/include/bolt/Core/DebugData.h
index 7d10b208dc83..166bb3617e57 100644
--- a/bolt/include/bolt/Core/DebugData.h
+++ b/bolt/include/bolt/Core/DebugData.h
@@ -27,7 +27,6 @@
#include <mutex>
#include <string>
#include <unordered_map>
-#include <unordered_set>
#include <utility>
#include <vector>
diff --git a/bolt/include/bolt/Core/DebugNames.h b/bolt/include/bolt/Core/DebugNames.h
index 1f17f1ae4d13..a4fdde7c396a 100644
--- a/bolt/include/bolt/Core/DebugNames.h
+++ b/bolt/include/bolt/Core/DebugNames.h
@@ -14,7 +14,7 @@
#ifndef BOLT_CORE_DEBUG_NAMES_H
#define BOLT_CORE_DEBUG_NAMES_H
-#include "DebugData.h"
+#include "bolt/Core/DebugData.h"
#include "llvm/CodeGen/AccelTable.h"
namespace llvm {
@@ -68,6 +68,16 @@ public:
std::unique_ptr<DebugBufferVector> releaseBuffer() {
return std::move(FullTableBuffer);
}
+ /// Adds a DIE that is referenced across CUs.
+ void addCrossCUDie(const DIE *Die) {
+ CrossCUDies.insert({Die->getOffset(), Die});
+ }
+ /// Returns true if the DIE can generate an entry for a cross cu reference.
+ /// This only checks TAGs of a DIE because when this is invoked DIE might not
+ /// be fully constructed.
+ bool canGenerateEntryWithCrossCUReference(
+ const DWARFUnit &Unit, const DIE &Die,
+ const DWARFAbbreviationDeclaration::AttributeSpec &AttrSpec);
private:
BinaryContext &BC;
@@ -128,6 +138,7 @@ private:
llvm::DenseMap<uint64_t, uint32_t> CUOffsetsToPatch;
// Contains a map of Entry ID to Entry relative offset.
llvm::DenseMap<uint64_t, uint32_t> EntryRelativeOffsets;
+ llvm::DenseMap<uint64_t, const DIE *> CrossCUDies;
/// Adds Unit to either CUList, LocalTUList or ForeignTUList.
/// Input Unit being processed, and DWO ID if Unit is being processed comes
/// from a DWO section.
diff --git a/bolt/include/bolt/Core/FunctionLayout.h b/bolt/include/bolt/Core/FunctionLayout.h
index 2e4c184ba451..b685a99c79c1 100644
--- a/bolt/include/bolt/Core/FunctionLayout.h
+++ b/bolt/include/bolt/Core/FunctionLayout.h
@@ -25,7 +25,6 @@
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
-#include <utility>
namespace llvm {
namespace bolt {
diff --git a/bolt/include/bolt/Core/MCPlus.h b/bolt/include/bolt/Core/MCPlus.h
index b6a9e73f2347..601d70971286 100644
--- a/bolt/include/bolt/Core/MCPlus.h
+++ b/bolt/include/bolt/Core/MCPlus.h
@@ -14,10 +14,8 @@
#ifndef BOLT_CORE_MCPLUS_H
#define BOLT_CORE_MCPLUS_H
-#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Support/Casting.h"
#include <vector>
namespace llvm {
@@ -73,6 +71,7 @@ public:
kOffset, /// Offset in the function.
kLabel, /// MCSymbol pointing to this instruction.
kSize, /// Size of the instruction.
+ kDynamicBranch, /// Jit instruction patched at runtime.
kGeneric /// First generic annotation.
};
diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h
index 96b58f541623..f7614cf9ac97 100644
--- a/bolt/include/bolt/Core/MCPlusBuilder.h
+++ b/bolt/include/bolt/Core/MCPlusBuilder.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCDisassembler/MCSymbolizer.h"
#include "llvm/MC/MCExpr.h"
@@ -27,6 +28,7 @@
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/RWMutex.h"
@@ -533,9 +535,7 @@ public:
return Analysis->isReturn(Inst);
}
- virtual bool isTerminator(const MCInst &Inst) const {
- return Analysis->isTerminator(Inst);
- }
+ virtual bool isTerminator(const MCInst &Inst) const;
virtual bool isNoop(const MCInst &Inst) const {
llvm_unreachable("not implemented");
@@ -1199,6 +1199,16 @@ public:
/// Set instruction size.
void setSize(MCInst &Inst, uint32_t Size) const;
+ /// Check if the branch instruction could be modified at runtime.
+ bool isDynamicBranch(const MCInst &Inst) const;
+
+ /// Return ID for runtime-modifiable instruction.
+ std::optional<uint32_t> getDynamicBranchID(const MCInst &Inst) const;
+
+ /// Mark instruction as a dynamic branch, i.e. a branch that can be
+ /// overwritten at runtime.
+ void setDynamicBranch(MCInst &Inst, uint32_t ID) const;
+
/// Return MCSymbol that represents a target of this instruction at a given
/// operand number \p OpNum. If there's no symbol associated with
/// the operand - return nullptr.
@@ -1688,6 +1698,13 @@ public:
llvm_unreachable("not implemented");
}
+ /// Create long conditional branch with a target-specific conditional code
+ /// \p CC.
+ virtual void createLongCondBranch(MCInst &Inst, const MCSymbol *Target,
+ unsigned CC, MCContext *Ctx) const {
+ llvm_unreachable("not implemented");
+ }
+
/// Reverses the branch condition in Inst and update its taken target to TBB.
///
/// Returns true on success.
diff --git a/bolt/include/bolt/Passes/BinaryPasses.h b/bolt/include/bolt/Passes/BinaryPasses.h
index 046765b16f19..8d89ef8b5484 100644
--- a/bolt/include/bolt/Passes/BinaryPasses.h
+++ b/bolt/include/bolt/Passes/BinaryPasses.h
@@ -18,7 +18,6 @@
#include "bolt/Core/DynoStats.h"
#include "llvm/Support/CommandLine.h"
#include <atomic>
-#include <map>
#include <set>
#include <string>
#include <unordered_set>
diff --git a/bolt/include/bolt/Passes/CacheMetrics.h b/bolt/include/bolt/Passes/CacheMetrics.h
index 5c88d98c76c1..ea56d330446b 100644
--- a/bolt/include/bolt/Passes/CacheMetrics.h
+++ b/bolt/include/bolt/Passes/CacheMetrics.h
@@ -13,7 +13,6 @@
#ifndef BOLT_PASSES_CACHEMETRICS_H
#define BOLT_PASSES_CACHEMETRICS_H
-#include <cstdint>
#include <vector>
namespace llvm {
diff --git a/bolt/include/bolt/Passes/DominatorAnalysis.h b/bolt/include/bolt/Passes/DominatorAnalysis.h
index c2b5c3af0147..3f3afa943c06 100644
--- a/bolt/include/bolt/Passes/DominatorAnalysis.h
+++ b/bolt/include/bolt/Passes/DominatorAnalysis.h
@@ -11,7 +11,6 @@
#include "bolt/Passes/DataflowAnalysis.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Timer.h"
namespace opts {
extern llvm::cl::opt<bool> TimeOpts;
diff --git a/bolt/include/bolt/Passes/ReachingDefOrUse.h b/bolt/include/bolt/Passes/ReachingDefOrUse.h
index f38d1a373e18..585d673e3b84 100644
--- a/bolt/include/bolt/Passes/ReachingDefOrUse.h
+++ b/bolt/include/bolt/Passes/ReachingDefOrUse.h
@@ -11,9 +11,7 @@
#include "bolt/Passes/DataflowAnalysis.h"
#include "bolt/Passes/RegAnalysis.h"
-#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Timer.h"
#include <optional>
namespace opts {
diff --git a/bolt/include/bolt/Passes/ReachingInsns.h b/bolt/include/bolt/Passes/ReachingInsns.h
index 65782b12064b..ef878f5e452d 100644
--- a/bolt/include/bolt/Passes/ReachingInsns.h
+++ b/bolt/include/bolt/Passes/ReachingInsns.h
@@ -11,7 +11,6 @@
#include "bolt/Passes/DataflowAnalysis.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Timer.h"
namespace opts {
extern llvm::cl::opt<bool> TimeOpts;
diff --git a/bolt/include/bolt/Passes/ReorderUtils.h b/bolt/include/bolt/Passes/ReorderUtils.h
index bc82b4f436fa..8ceb8ba62690 100644
--- a/bolt/include/bolt/Passes/ReorderUtils.h
+++ b/bolt/include/bolt/Passes/ReorderUtils.h
@@ -14,7 +14,6 @@
#ifndef BOLT_PASSES_REORDER_UTILS_H
#define BOLT_PASSES_REORDER_UTILS_H
-#include <memory>
#include <vector>
#include "llvm/ADT/BitVector.h"
diff --git a/bolt/include/bolt/Profile/BoltAddressTranslation.h b/bolt/include/bolt/Profile/BoltAddressTranslation.h
index 5f2f0959d93f..caf907cc43da 100644
--- a/bolt/include/bolt/Profile/BoltAddressTranslation.h
+++ b/bolt/include/bolt/Profile/BoltAddressTranslation.h
@@ -115,12 +115,13 @@ public:
/// Save function and basic block hashes used for metadata dump.
void saveMetadata(BinaryContext &BC);
- /// Returns BB hash by function output address (after BOLT) and basic block
- /// input offset.
- size_t getBBHash(uint64_t FuncOutputAddress, uint32_t BBInputOffset) const;
+ /// True if a given \p Address is a function with translation table entry.
+ bool isBATFunction(uint64_t Address) const { return Maps.count(Address); }
- /// Returns BF hash by function output address (after BOLT).
- size_t getBFHash(uint64_t OutputAddress) const;
+ /// Returns branch offsets grouped by containing basic block in a given
+ /// function.
+ std::unordered_map<uint32_t, std::vector<uint32_t>>
+ getBFBranches(uint64_t FuncOutputAddress) const;
private:
/// Helper to update \p Map by inserting one or more BAT entries reflecting
@@ -128,7 +129,7 @@ private:
/// emitted for the start of the BB. More entries may be emitted to cover
/// the location of calls or any instruction that may change control flow.
void writeEntriesForBB(MapTy &Map, const BinaryBasicBlock &BB,
- uint64_t FuncAddress);
+ uint64_t FuncInputAddress, uint64_t FuncOutputAddress);
/// Write the serialized address translation table for a function.
template <bool Cold>
@@ -151,8 +152,11 @@ private:
std::map<uint64_t, MapTy> Maps;
- using BBHashMap = std::unordered_map<uint32_t, size_t>;
- std::unordered_map<uint64_t, std::pair<size_t, BBHashMap>> FuncHashes;
+ /// Map a function to its basic blocks count
+ std::unordered_map<uint64_t, size_t> NumBasicBlocksMap;
+
+ /// Map a function to its secondary entry points vector
+ std::unordered_map<uint64_t, std::vector<uint32_t>> SecondaryEntryPointsMap;
/// Links outlined cold bocks to their original function
std::map<uint64_t, uint64_t> ColdPartSource;
@@ -163,6 +167,111 @@ private:
/// Identifies the address of a control-flow changing instructions in a
/// translation map entry
const static uint32_t BRANCHENTRY = 0x1;
+
+public:
+ /// Map basic block input offset to a basic block index and hash pair.
+ class BBHashMapTy {
+ class EntryTy {
+ unsigned Index;
+ size_t Hash;
+
+ public:
+ unsigned getBBIndex() const { return Index; }
+ size_t getBBHash() const { return Hash; }
+ EntryTy(unsigned Index, size_t Hash) : Index(Index), Hash(Hash) {}
+ };
+
+ std::unordered_map<uint32_t, EntryTy> Map;
+ const EntryTy &getEntry(uint32_t BBInputOffset) const {
+ auto It = Map.find(BBInputOffset);
+ assert(It != Map.end());
+ return It->second;
+ }
+
+ public:
+ bool isInputBlock(uint32_t InputOffset) const {
+ return Map.count(InputOffset);
+ }
+
+ unsigned getBBIndex(uint32_t BBInputOffset) const {
+ return getEntry(BBInputOffset).getBBIndex();
+ }
+
+ size_t getBBHash(uint32_t BBInputOffset) const {
+ return getEntry(BBInputOffset).getBBHash();
+ }
+
+ void addEntry(uint32_t BBInputOffset, unsigned BBIndex, size_t BBHash) {
+ Map.emplace(BBInputOffset, EntryTy(BBIndex, BBHash));
+ }
+
+ size_t getNumBasicBlocks() const { return Map.size(); }
+ };
+
+ /// Map function output address to its hash and basic blocks hash map.
+ class FuncHashesTy {
+ class EntryTy {
+ size_t Hash;
+ BBHashMapTy BBHashMap;
+
+ public:
+ size_t getBFHash() const { return Hash; }
+ const BBHashMapTy &getBBHashMap() const { return BBHashMap; }
+ EntryTy(size_t Hash) : Hash(Hash) {}
+ };
+
+ std::unordered_map<uint64_t, EntryTy> Map;
+ const EntryTy &getEntry(uint64_t FuncOutputAddress) const {
+ auto It = Map.find(FuncOutputAddress);
+ assert(It != Map.end());
+ return It->second;
+ }
+
+ public:
+ size_t getBFHash(uint64_t FuncOutputAddress) const {
+ return getEntry(FuncOutputAddress).getBFHash();
+ }
+
+ const BBHashMapTy &getBBHashMap(uint64_t FuncOutputAddress) const {
+ return getEntry(FuncOutputAddress).getBBHashMap();
+ }
+
+ void addEntry(uint64_t FuncOutputAddress, size_t BFHash) {
+ Map.emplace(FuncOutputAddress, EntryTy(BFHash));
+ }
+
+ size_t getNumFunctions() const { return Map.size(); };
+
+ size_t getNumBasicBlocks() const {
+ size_t NumBasicBlocks{0};
+ for (auto &I : Map)
+ NumBasicBlocks += I.second.getBBHashMap().getNumBasicBlocks();
+ return NumBasicBlocks;
+ }
+ };
+
+ /// Returns BF hash by function output address (after BOLT).
+ size_t getBFHash(uint64_t FuncOutputAddress) const {
+ return FuncHashes.getBFHash(FuncOutputAddress);
+ }
+
+ /// Returns BBHashMap by function output address (after BOLT).
+ const BBHashMapTy &getBBHashMap(uint64_t FuncOutputAddress) const {
+ return FuncHashes.getBBHashMap(FuncOutputAddress);
+ }
+
+ BBHashMapTy &getBBHashMap(uint64_t FuncOutputAddress) {
+ return const_cast<BBHashMapTy &>(
+ std::as_const(*this).getBBHashMap(FuncOutputAddress));
+ }
+
+ /// Returns the number of basic blocks in a function.
+ size_t getNumBasicBlocks(uint64_t OutputAddress) const {
+ return NumBasicBlocksMap.at(OutputAddress);
+ }
+
+private:
+ FuncHashesTy FuncHashes;
};
} // namespace bolt
diff --git a/bolt/include/bolt/Profile/DataAggregator.h b/bolt/include/bolt/Profile/DataAggregator.h
index 5bb4d00024c5..4fbe524b1c38 100644
--- a/bolt/include/bolt/Profile/DataAggregator.h
+++ b/bolt/include/bolt/Profile/DataAggregator.h
@@ -463,6 +463,13 @@ private:
/// Dump data structures into a file readable by llvm-bolt
std::error_code writeAggregatedFile(StringRef OutputFilename) const;
+ /// Dump translated data structures into YAML
+ std::error_code writeBATYAML(BinaryContext &BC,
+ StringRef OutputFilename) const;
+
+ /// Fixup profile collected on BOLTed binary, namely handle split functions.
+ void fixupBATProfile(BinaryContext &BC);
+
/// Filter out binaries based on PID
void filterBinaryMMapInfo();
diff --git a/bolt/include/bolt/Profile/ProfileReaderBase.h b/bolt/include/bolt/Profile/ProfileReaderBase.h
index 511718f3c0ec..3e5cf2612893 100644
--- a/bolt/include/bolt/Profile/ProfileReaderBase.h
+++ b/bolt/include/bolt/Profile/ProfileReaderBase.h
@@ -65,7 +65,7 @@ public:
/// Return true if the function \p BF may have a profile available.
/// The result is based on the name(s) of the function alone and the profile
/// match is not guaranteed.
- virtual bool mayHaveProfileData(const BinaryFunction &BF);
+ virtual bool mayHaveProfileData(const BinaryFunction &BF) { return true; }
/// Return true if the profile contains an entry for a local object
/// that has an associated file name.
diff --git a/bolt/include/bolt/Profile/ProfileYAMLMapping.h b/bolt/include/bolt/Profile/ProfileYAMLMapping.h
index 548b528ae2d6..9dd3920dbf09 100644
--- a/bolt/include/bolt/Profile/ProfileYAMLMapping.h
+++ b/bolt/include/bolt/Profile/ProfileYAMLMapping.h
@@ -14,7 +14,6 @@
#define BOLT_PROFILE_PROFILEYAMLMAPPING_H
#include "bolt/Core/BinaryFunction.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/Support/YAMLTraits.h"
#include <vector>
diff --git a/bolt/include/bolt/Rewrite/DWARFRewriter.h b/bolt/include/bolt/Rewrite/DWARFRewriter.h
index 20972f3d0b85..2c482bd2b9ea 100644
--- a/bolt/include/bolt/Rewrite/DWARFRewriter.h
+++ b/bolt/include/bolt/Rewrite/DWARFRewriter.h
@@ -22,9 +22,7 @@
#include <memory>
#include <mutex>
#include <optional>
-#include <set>
#include <unordered_map>
-#include <unordered_set>
#include <vector>
namespace llvm {
diff --git a/bolt/include/bolt/Rewrite/MetadataManager.h b/bolt/include/bolt/Rewrite/MetadataManager.h
index efbc74b4daba..2ff70dbaab3d 100644
--- a/bolt/include/bolt/Rewrite/MetadataManager.h
+++ b/bolt/include/bolt/Rewrite/MetadataManager.h
@@ -11,7 +11,6 @@
#include "bolt/Rewrite/MetadataRewriter.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Error.h"
namespace llvm {
namespace bolt {
diff --git a/bolt/include/bolt/Rewrite/RewriteInstance.h b/bolt/include/bolt/Rewrite/RewriteInstance.h
index 97ab65cd5a4a..826677cd63b2 100644
--- a/bolt/include/bolt/Rewrite/RewriteInstance.h
+++ b/bolt/include/bolt/Rewrite/RewriteInstance.h
@@ -17,7 +17,6 @@
#include "bolt/Core/Linker.h"
#include "bolt/Rewrite/MetadataManager.h"
#include "bolt/Utils/NameResolver.h"
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
diff --git a/bolt/include/bolt/RuntimeLibs/RuntimeLibrary.h b/bolt/include/bolt/RuntimeLibs/RuntimeLibrary.h
index c845cb7f7b21..e392029156bc 100644
--- a/bolt/include/bolt/RuntimeLibs/RuntimeLibrary.h
+++ b/bolt/include/bolt/RuntimeLibs/RuntimeLibrary.h
@@ -17,7 +17,6 @@
#include "bolt/Core/Linker.h"
#include "llvm/ADT/StringRef.h"
-#include <functional>
#include <vector>
namespace llvm {
diff --git a/bolt/include/bolt/Utils/NameShortener.h b/bolt/include/bolt/Utils/NameShortener.h
index 9c7b7ec9ba65..fd61235f93c8 100644
--- a/bolt/include/bolt/Utils/NameShortener.h
+++ b/bolt/include/bolt/Utils/NameShortener.h
@@ -14,7 +14,6 @@
#define BOLT_UTILS_NAME_SHORTENER_H
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Twine.h"
namespace llvm {
namespace bolt {
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index b29ebbbfa18c..47eae964e816 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -14,7 +14,6 @@
#include "bolt/Core/BinaryEmitter.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Utils/CommandLineOpts.h"
-#include "bolt/Utils/NameResolver.h"
#include "bolt/Utils/Utils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
@@ -39,7 +38,6 @@
#include <algorithm>
#include <functional>
#include <iterator>
-#include <numeric>
#include <unordered_set>
using namespace llvm;
@@ -162,28 +160,30 @@ BinaryContext::~BinaryContext() {
/// Create BinaryContext for a given architecture \p ArchName and
/// triple \p TripleName.
-Expected<std::unique_ptr<BinaryContext>>
-BinaryContext::createBinaryContext(const ObjectFile *File, bool IsPIC,
- std::unique_ptr<DWARFContext> DwCtx,
- JournalingStreams Logger) {
+Expected<std::unique_ptr<BinaryContext>> BinaryContext::createBinaryContext(
+ Triple TheTriple, StringRef InputFileName, SubtargetFeatures *Features,
+ bool IsPIC, std::unique_ptr<DWARFContext> DwCtx, JournalingStreams Logger) {
StringRef ArchName = "";
std::string FeaturesStr = "";
- switch (File->getArch()) {
+ switch (TheTriple.getArch()) {
case llvm::Triple::x86_64:
+ if (Features)
+ return createFatalBOLTError(
+ "x86_64 target does not use SubtargetFeatures");
ArchName = "x86-64";
FeaturesStr = "+nopl";
break;
case llvm::Triple::aarch64:
+ if (Features)
+ return createFatalBOLTError(
+ "AArch64 target does not use SubtargetFeatures");
ArchName = "aarch64";
FeaturesStr = "+all";
break;
case llvm::Triple::riscv64: {
ArchName = "riscv64";
- Expected<SubtargetFeatures> Features = File->getFeatures();
-
- if (auto E = Features.takeError())
- return std::move(E);
-
+ if (!Features)
+ return createFatalBOLTError("RISCV target needs SubtargetFeatures");
// We rely on relaxation for some transformations (e.g., promoting all calls
// to PseudoCALL and then making JITLink relax them). Since the relax
// feature is not stored in the object file, we manually enable it.
@@ -196,12 +196,11 @@ BinaryContext::createBinaryContext(const ObjectFile *File, bool IsPIC,
"BOLT-ERROR: Unrecognized machine in ELF file");
}
- auto TheTriple = std::make_unique<Triple>(File->makeTriple());
- const std::string TripleName = TheTriple->str();
+ const std::string TripleName = TheTriple.str();
std::string Error;
const Target *TheTarget =
- TargetRegistry::lookupTarget(std::string(ArchName), *TheTriple, Error);
+ TargetRegistry::lookupTarget(std::string(ArchName), TheTriple, Error);
if (!TheTarget)
return createStringError(make_error_code(std::errc::not_supported),
Twine("BOLT-ERROR: ", Error));
@@ -240,13 +239,13 @@ BinaryContext::createBinaryContext(const ObjectFile *File, bool IsPIC,
Twine("BOLT-ERROR: no instruction info for target ", TripleName));
std::unique_ptr<MCContext> Ctx(
- new MCContext(*TheTriple, AsmInfo.get(), MRI.get(), STI.get()));
+ new MCContext(TheTriple, AsmInfo.get(), MRI.get(), STI.get()));
std::unique_ptr<MCObjectFileInfo> MOFI(
TheTarget->createMCObjectFileInfo(*Ctx, IsPIC));
Ctx->setObjectFileInfo(MOFI.get());
// We do not support X86 Large code model. Change this in the future.
bool Large = false;
- if (TheTriple->getArch() == llvm::Triple::aarch64)
+ if (TheTriple.getArch() == llvm::Triple::aarch64)
Large = true;
unsigned LSDAEncoding =
Large ? dwarf::DW_EH_PE_absptr : dwarf::DW_EH_PE_udata4;
@@ -273,7 +272,7 @@ BinaryContext::createBinaryContext(const ObjectFile *File, bool IsPIC,
int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
std::unique_ptr<MCInstPrinter> InstructionPrinter(
- TheTarget->createMCInstPrinter(*TheTriple, AsmPrinterVariant, *AsmInfo,
+ TheTarget->createMCInstPrinter(TheTriple, AsmPrinterVariant, *AsmInfo,
*MII, *MRI));
if (!InstructionPrinter)
return createStringError(
@@ -285,8 +284,8 @@ BinaryContext::createBinaryContext(const ObjectFile *File, bool IsPIC,
TheTarget->createMCCodeEmitter(*MII, *Ctx));
auto BC = std::make_unique<BinaryContext>(
- std::move(Ctx), std::move(DwCtx), std::move(TheTriple), TheTarget,
- std::string(TripleName), std::move(MCE), std::move(MOFI),
+ std::move(Ctx), std::move(DwCtx), std::make_unique<Triple>(TheTriple),
+ TheTarget, std::string(TripleName), std::move(MCE), std::move(MOFI),
std::move(AsmInfo), std::move(MII), std::move(STI),
std::move(InstructionPrinter), std::move(MIA), nullptr, std::move(MRI),
std::move(DisAsm), Logger);
@@ -296,7 +295,7 @@ BinaryContext::createBinaryContext(const ObjectFile *File, bool IsPIC,
BC->MAB = std::unique_ptr<MCAsmBackend>(
BC->TheTarget->createMCAsmBackend(*BC->STI, *BC->MRI, MCTargetOptions()));
- BC->setFilename(File->getFileName());
+ BC->setFilename(InputFileName);
BC->HasFixedLoadAddress = !IsPIC;
@@ -1939,7 +1938,13 @@ void BinaryContext::printInstruction(raw_ostream &OS, const MCInst &Instruction,
OS << Endl;
return;
}
- InstPrinter->printInst(&Instruction, 0, "", *STI, OS);
+ if (std::optional<uint32_t> DynamicID =
+ MIB->getDynamicBranchID(Instruction)) {
+ OS << "\tjit\t" << MIB->getTargetSymbol(Instruction)->getName()
+ << " # ID: " << DynamicID;
+ } else {
+ InstPrinter->printInst(&Instruction, 0, "", *STI, OS);
+ }
if (MIB->isCall(Instruction)) {
if (MIB->isTailCall(Instruction))
OS << " # TAILCALL ";
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index ce4dd29f542b..1fa96dfaabde 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -12,7 +12,6 @@
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Core/BinaryBasicBlock.h"
-#include "bolt/Core/BinaryDomTree.h"
#include "bolt/Core/DynoStats.h"
#include "bolt/Core/HashUtilities.h"
#include "bolt/Core/MCPlusBuilder.h"
@@ -35,6 +34,8 @@
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
+#include "llvm/Support/GenericLoopInfoImpl.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Regex.h"
@@ -3350,6 +3351,16 @@ void BinaryFunction::fixBranches() {
// Eliminate unnecessary conditional branch.
if (TSuccessor == FSuccessor) {
+ // FIXME: at the moment, we cannot safely remove static key branches.
+ if (MIB->isDynamicBranch(*CondBranch)) {
+ if (opts::Verbosity) {
+ BC.outs()
+ << "BOLT-INFO: unable to remove redundant dynamic branch in "
+ << *this << '\n';
+ }
+ continue;
+ }
+
BB->removeDuplicateConditionalSuccessor(CondBranch);
if (TSuccessor != NextBB)
BB->addBranchInstruction(TSuccessor);
@@ -3358,8 +3369,13 @@ void BinaryFunction::fixBranches() {
// Reverse branch condition and swap successors.
auto swapSuccessors = [&]() {
- if (MIB->isUnsupportedBranch(*CondBranch))
+ if (MIB->isUnsupportedBranch(*CondBranch)) {
+ if (opts::Verbosity) {
+ BC.outs() << "BOLT-INFO: unable to swap successors in " << *this
+ << '\n';
+ }
return false;
+ }
std::swap(TSuccessor, FSuccessor);
BB->swapConditionalSuccessors();
auto L = BC.scopeLock();
@@ -3532,7 +3548,7 @@ MCSymbol *BinaryFunction::getSymbolForEntryID(uint64_t EntryID) {
if (!isMultiEntry())
return nullptr;
- uint64_t NumEntries = 0;
+ uint64_t NumEntries = 1;
if (hasCFG()) {
for (BinaryBasicBlock *BB : BasicBlocks) {
MCSymbol *EntrySymbol = getSecondaryEntryPointSymbol(*BB);
@@ -3565,7 +3581,7 @@ uint64_t BinaryFunction::getEntryIDForSymbol(const MCSymbol *Symbol) const {
return 0;
// Check all secondary entries available as either basic blocks or lables.
- uint64_t NumEntries = 0;
+ uint64_t NumEntries = 1;
for (const BinaryBasicBlock *BB : BasicBlocks) {
MCSymbol *EntrySymbol = getSecondaryEntryPointSymbol(*BB);
if (!EntrySymbol)
@@ -3574,7 +3590,7 @@ uint64_t BinaryFunction::getEntryIDForSymbol(const MCSymbol *Symbol) const {
return NumEntries;
++NumEntries;
}
- NumEntries = 0;
+ NumEntries = 1;
for (const std::pair<const uint32_t, MCSymbol *> &KV : Labels) {
MCSymbol *EntrySymbol = getSecondaryEntryPointSymbol(KV.second);
if (!EntrySymbol)
@@ -4061,12 +4077,17 @@ BinaryFunction::~BinaryFunction() {
delete BB;
}
+void BinaryFunction::constructDomTree() {
+ BDT.reset(new BinaryDominatorTree);
+ BDT->recalculate(*this);
+}
+
void BinaryFunction::calculateLoopInfo() {
+ if (!hasDomTree())
+ constructDomTree();
// Discover loops.
- BinaryDominatorTree DomTree;
- DomTree.recalculate(*this);
BLI.reset(new BinaryLoopInfo());
- BLI->analyze(DomTree);
+ BLI->analyze(getDomTree());
// Traverse discovered loops and add depth and profile information.
std::stack<BinaryLoop *> St;
diff --git a/bolt/lib/Core/DIEBuilder.cpp b/bolt/lib/Core/DIEBuilder.cpp
index 0cf8a5e8c2c3..c4b0b251c120 100644
--- a/bolt/lib/Core/DIEBuilder.cpp
+++ b/bolt/lib/Core/DIEBuilder.cpp
@@ -22,7 +22,6 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
#include "llvm/Support/LEB128.h"
#include <algorithm>
@@ -545,6 +544,10 @@ void DIEBuilder::cloneDieReferenceAttribute(
NewRefDie = DieInfo.Die;
if (AttrSpec.Form == dwarf::DW_FORM_ref_addr) {
+ // Adding referenced DIE to DebugNames to be used when entries are created
+ // that contain cross-CU references.
+ if (DebugNamesTable.canGenerateEntryWithCrossCUReference(U, Die, AttrSpec))
+ DebugNamesTable.addCrossCUDie(DieInfo.Die);
// no matter forward reference or backward reference, we are supposed
// to calculate them in `finish` due to the possible modification of
// the DIE.
@@ -554,7 +557,7 @@ void DIEBuilder::cloneDieReferenceAttribute(
std::make_pair(CurDieInfo, AddrReferenceInfo(&DieInfo, AttrSpec)));
Die.addValue(getState().DIEAlloc, AttrSpec.Attr, dwarf::DW_FORM_ref_addr,
- DIEInteger(0xDEADBEEF));
+ DIEInteger(DieInfo.Die->getOffset()));
return;
}
diff --git a/bolt/lib/Core/DebugData.cpp b/bolt/lib/Core/DebugData.cpp
index a75016ede309..a987a103a08b 100644
--- a/bolt/lib/Core/DebugData.cpp
+++ b/bolt/lib/Core/DebugData.cpp
@@ -13,7 +13,6 @@
#include "bolt/Core/DebugData.h"
#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/DIEBuilder.h"
-#include "bolt/Rewrite/RewriteInstance.h"
#include "bolt/Utils/Utils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/DIE.h"
@@ -23,7 +22,6 @@
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCObjectStreamer.h"
-#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/LEB128.h"
@@ -32,7 +30,6 @@
#include <cassert>
#include <cstdint>
#include <functional>
-#include <limits>
#include <memory>
#include <unordered_map>
#include <vector>
diff --git a/bolt/lib/Core/DebugNames.cpp b/bolt/lib/Core/DebugNames.cpp
index 23a29f52513c..049244c4b515 100644
--- a/bolt/lib/Core/DebugNames.cpp
+++ b/bolt/lib/Core/DebugNames.cpp
@@ -146,6 +146,55 @@ static bool shouldIncludeVariable(const DWARFUnit &Unit, const DIE &Die) {
return false;
}
+bool static canProcess(const DWARFUnit &Unit, const DIE &Die,
+ std::string &NameToUse, const bool TagsOnly) {
+ switch (Die.getTag()) {
+ case dwarf::DW_TAG_base_type:
+ case dwarf::DW_TAG_class_type:
+ case dwarf::DW_TAG_enumeration_type:
+ case dwarf::DW_TAG_imported_declaration:
+ case dwarf::DW_TAG_pointer_type:
+ case dwarf::DW_TAG_structure_type:
+ case dwarf::DW_TAG_typedef:
+ case dwarf::DW_TAG_unspecified_type:
+ if (TagsOnly || Die.findAttribute(dwarf::Attribute::DW_AT_name))
+ return true;
+ return false;
+ case dwarf::DW_TAG_namespace:
+ // According to the DWARF5 spec, namespaces without DW_AT_name need to have
+ // "(anonymous namespace)"
+ if (!Die.findAttribute(dwarf::Attribute::DW_AT_name))
+ NameToUse = "(anonymous namespace)";
+ return true;
+ case dwarf::DW_TAG_inlined_subroutine:
+ case dwarf::DW_TAG_label:
+ case dwarf::DW_TAG_subprogram:
+ if (TagsOnly || Die.findAttribute(dwarf::Attribute::DW_AT_low_pc) ||
+ Die.findAttribute(dwarf::Attribute::DW_AT_high_pc) ||
+ Die.findAttribute(dwarf::Attribute::DW_AT_ranges) ||
+ Die.findAttribute(dwarf::Attribute::DW_AT_entry_pc))
+ return true;
+ return false;
+ case dwarf::DW_TAG_variable:
+ return TagsOnly || shouldIncludeVariable(Unit, Die);
+ default:
+ break;
+ }
+ return false;
+}
+
+bool DWARF5AcceleratorTable::canGenerateEntryWithCrossCUReference(
+ const DWARFUnit &Unit, const DIE &Die,
+ const DWARFAbbreviationDeclaration::AttributeSpec &AttrSpec) {
+ if (!isCreated())
+ return false;
+ std::string NameToUse = "";
+ if (!canProcess(Unit, Die, NameToUse, true))
+ return false;
+ return (AttrSpec.Attr == dwarf::Attribute::DW_AT_abstract_origin ||
+ AttrSpec.Attr == dwarf::Attribute::DW_AT_specification) &&
+ AttrSpec.Form == dwarf::DW_FORM_ref_addr;
+}
/// Returns name offset in String Offset section.
static uint64_t getNameOffset(BinaryContext &BC, DWARFUnit &Unit,
const uint64_t Index) {
@@ -175,41 +224,6 @@ DWARF5AcceleratorTable::addAccelTableEntry(
if (Unit.getVersion() < 5 || !NeedToCreate)
return std::nullopt;
std::string NameToUse = "";
- auto canProcess = [&](const DIE &Die) -> bool {
- switch (Die.getTag()) {
- case dwarf::DW_TAG_base_type:
- case dwarf::DW_TAG_class_type:
- case dwarf::DW_TAG_enumeration_type:
- case dwarf::DW_TAG_imported_declaration:
- case dwarf::DW_TAG_pointer_type:
- case dwarf::DW_TAG_structure_type:
- case dwarf::DW_TAG_typedef:
- case dwarf::DW_TAG_unspecified_type:
- if (Die.findAttribute(dwarf::Attribute::DW_AT_name))
- return true;
- return false;
- case dwarf::DW_TAG_namespace:
- // According to DWARF5 spec namespaces without DW_AT_name needs to have
- // "(anonymous namespace)"
- if (!Die.findAttribute(dwarf::Attribute::DW_AT_name))
- NameToUse = "(anonymous namespace)";
- return true;
- case dwarf::DW_TAG_inlined_subroutine:
- case dwarf::DW_TAG_label:
- case dwarf::DW_TAG_subprogram:
- if (Die.findAttribute(dwarf::Attribute::DW_AT_low_pc) ||
- Die.findAttribute(dwarf::Attribute::DW_AT_high_pc) ||
- Die.findAttribute(dwarf::Attribute::DW_AT_ranges) ||
- Die.findAttribute(dwarf::Attribute::DW_AT_entry_pc))
- return true;
- return false;
- case dwarf::DW_TAG_variable:
- return shouldIncludeVariable(Unit, Die);
- default:
- break;
- }
- return false;
- };
auto getUnitID = [&](const DWARFUnit &Unit, bool &IsTU,
uint32_t &DieTag) -> uint32_t {
@@ -223,7 +237,7 @@ DWARF5AcceleratorTable::addAccelTableEntry(
return CUList.size() - 1;
};
- if (!canProcess(Die))
+ if (!canProcess(Unit, Die, NameToUse, false))
return std::nullopt;
// Addes a Unit to either CU, LocalTU or ForeignTU list the first time we
@@ -318,10 +332,24 @@ DWARF5AcceleratorTable::addAccelTableEntry(
const DIEValue Value = Die.findAttribute(Attr);
if (!Value)
return std::nullopt;
- const DIEEntry &DIEENtry = Value.getDIEEntry();
- DIE &EntryDie = DIEENtry.getEntry();
- addEntry(EntryDie.findAttribute(dwarf::Attribute::DW_AT_linkage_name));
- return addEntry(EntryDie.findAttribute(dwarf::Attribute::DW_AT_name));
+ const DIE *EntryDie = nullptr;
+ if (Value.getForm() == dwarf::DW_FORM_ref_addr) {
+ auto Iter = CrossCUDies.find(Value.getDIEInteger().getValue());
+ if (Iter == CrossCUDies.end()) {
+ BC.errs() << "BOLT-WARNING: [internal-dwarf-warning]: Could not find "
+ "referenced DIE in CrossCUDies for "
+ << Twine::utohexstr(Value.getDIEInteger().getValue())
+ << ".\n";
+ return std::nullopt;
+ }
+ EntryDie = Iter->second;
+ } else {
+ const DIEEntry &DIEENtry = Value.getDIEEntry();
+ EntryDie = &DIEENtry.getEntry();
+ }
+
+ addEntry(EntryDie->findAttribute(dwarf::Attribute::DW_AT_linkage_name));
+ return addEntry(EntryDie->findAttribute(dwarf::Attribute::DW_AT_name));
};
if (std::optional<BOLTDWARF5AccelTableData *> Entry =
@@ -332,7 +360,6 @@ DWARF5AcceleratorTable::addAccelTableEntry(
return *Entry;
return addEntry(Die.findAttribute(dwarf::Attribute::DW_AT_name));
- ;
}
/// Algorithm from llvm implementation.
diff --git a/bolt/lib/Core/FunctionLayout.cpp b/bolt/lib/Core/FunctionLayout.cpp
index 27e40de94be6..73f4d5247d9a 100644
--- a/bolt/lib/Core/FunctionLayout.cpp
+++ b/bolt/lib/Core/FunctionLayout.cpp
@@ -1,12 +1,17 @@
+//===- bolt/Core/FunctionLayout.cpp - Fragmented Function Layout -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "bolt/Core/FunctionLayout.h"
-#include "bolt/Core/BinaryFunction.h"
+#include "bolt/Core/BinaryBasicBlock.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/edit_distance.h"
#include <algorithm>
-#include <cstddef>
-#include <functional>
#include <iterator>
-#include <memory>
using namespace llvm;
using namespace bolt;
diff --git a/bolt/lib/Core/HashUtilities.cpp b/bolt/lib/Core/HashUtilities.cpp
index d40159b2e216..c4c67bd68198 100644
--- a/bolt/lib/Core/HashUtilities.cpp
+++ b/bolt/lib/Core/HashUtilities.cpp
@@ -12,7 +12,6 @@
#include "bolt/Core/HashUtilities.h"
#include "bolt/Core/BinaryContext.h"
-#include "bolt/Core/BinaryFunction.h"
#include "llvm/MC/MCInstPrinter.h"
namespace llvm {
diff --git a/bolt/lib/Core/MCPlusBuilder.cpp b/bolt/lib/Core/MCPlusBuilder.cpp
index bd9bd0c45922..7ff7a2288451 100644
--- a/bolt/lib/Core/MCPlusBuilder.cpp
+++ b/bolt/lib/Core/MCPlusBuilder.cpp
@@ -12,15 +12,16 @@
#include "bolt/Core/MCPlusBuilder.h"
#include "bolt/Core/MCPlus.h"
+#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <cstdint>
-#include <queue>
#define DEBUG_TYPE "mcplus"
@@ -28,6 +29,13 @@ using namespace llvm;
using namespace bolt;
using namespace MCPlus;
+namespace opts {
+cl::opt<bool>
+ TerminalTrap("terminal-trap",
+ cl::desc("Assume that execution stops at trap instruction"),
+ cl::init(true), cl::Hidden, cl::cat(BoltCategory));
+}
+
bool MCPlusBuilder::equals(const MCInst &A, const MCInst &B,
CompFuncTy Comp) const {
if (A.getOpcode() != B.getOpcode())
@@ -121,6 +129,11 @@ bool MCPlusBuilder::equals(const MCTargetExpr &A, const MCTargetExpr &B,
llvm_unreachable("target-specific expressions are unsupported");
}
+bool MCPlusBuilder::isTerminator(const MCInst &Inst) const {
+ return Analysis->isTerminator(Inst) ||
+ (opts::TerminalTrap && Info->get(Inst.getOpcode()).isTrap());
+}
+
void MCPlusBuilder::setTailCall(MCInst &Inst) const {
assert(!hasAnnotation(Inst, MCAnnotation::kTailCall));
setAnnotationOpValue(Inst, MCAnnotation::kTailCall, true);
@@ -303,6 +316,28 @@ void MCPlusBuilder::setSize(MCInst &Inst, uint32_t Size) const {
setAnnotationOpValue(Inst, MCAnnotation::kSize, Size);
}
+bool MCPlusBuilder::isDynamicBranch(const MCInst &Inst) const {
+ if (!hasAnnotation(Inst, MCAnnotation::kDynamicBranch))
+ return false;
+ assert(isBranch(Inst) && "Branch expected.");
+ return true;
+}
+
+std::optional<uint32_t>
+MCPlusBuilder::getDynamicBranchID(const MCInst &Inst) const {
+ if (std::optional<int64_t> Value =
+ getAnnotationOpValue(Inst, MCAnnotation::kDynamicBranch)) {
+ assert(isBranch(Inst) && "Branch expected.");
+ return static_cast<uint32_t>(*Value);
+ }
+ return std::nullopt;
+}
+
+void MCPlusBuilder::setDynamicBranch(MCInst &Inst, uint32_t ID) const {
+ assert(isBranch(Inst) && "Branch expected.");
+ setAnnotationOpValue(Inst, MCAnnotation::kDynamicBranch, ID);
+}
+
bool MCPlusBuilder::hasAnnotation(const MCInst &Inst, unsigned Index) const {
return (bool)getAnnotationOpValue(Inst, Index);
}
diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp
index bf1c2ddd37dd..c0ba73108f57 100644
--- a/bolt/lib/Passes/BinaryPasses.cpp
+++ b/bolt/lib/Passes/BinaryPasses.cpp
@@ -107,6 +107,12 @@ static cl::opt<unsigned>
cl::desc("print statistics about basic block ordering"),
cl::init(0), cl::cat(BoltOptCategory));
+static cl::opt<bool> PrintLargeFunctions(
+ "print-large-functions",
+ cl::desc("print functions that could not be overwritten due to excessive "
+ "size"),
+ cl::init(false), cl::cat(BoltOptCategory));
+
static cl::list<bolt::DynoStats::Category>
PrintSortedBy("print-sorted-by", cl::CommaSeparated,
cl::desc("print functions sorted by order of dyno stats"),
@@ -570,8 +576,12 @@ Error CheckLargeFunctions::runOnFunctions(BinaryContext &BC) {
uint64_t HotSize, ColdSize;
std::tie(HotSize, ColdSize) =
BC.calculateEmittedSize(BF, /*FixBranches=*/false);
- if (HotSize > BF.getMaxSize())
+ if (HotSize > BF.getMaxSize()) {
+ if (opts::PrintLargeFunctions)
+ BC.outs() << "BOLT-INFO: " << BF << " size exceeds allocated space by "
+ << (HotSize - BF.getMaxSize()) << " bytes\n";
BF.setSimple(false);
+ }
};
ParallelUtilities::PredicateTy SkipFunc = [&](const BinaryFunction &BF) {
@@ -852,6 +862,10 @@ uint64_t SimplifyConditionalTailCalls::fixTailCalls(BinaryFunction &BF) {
assert(Result && "internal error analyzing conditional branch");
assert(CondBranch && "conditional branch expected");
+ // Skip dynamic branches for now.
+ if (BF.getBinaryContext().MIB->isDynamicBranch(*CondBranch))
+ continue;
+
// It's possible that PredBB is also a successor to BB that may have
// been processed by a previous iteration of the SCTC loop, in which
// case it may have been marked invalid. We should skip rewriting in
@@ -1012,6 +1026,10 @@ uint64_t ShortenInstructions::shortenInstructions(BinaryFunction &Function) {
const BinaryContext &BC = Function.getBinaryContext();
for (BinaryBasicBlock &BB : Function) {
for (MCInst &Inst : BB) {
+ // Skip shortening instructions with Size annotation.
+ if (BC.MIB->getSize(Inst))
+ continue;
+
MCInst OriginalInst;
if (opts::Verbosity > 2)
OriginalInst = Inst;
diff --git a/bolt/lib/Passes/CMOVConversion.cpp b/bolt/lib/Passes/CMOVConversion.cpp
index 2492ff217946..cdd99b55207e 100644
--- a/bolt/lib/Passes/CMOVConversion.cpp
+++ b/bolt/lib/Passes/CMOVConversion.cpp
@@ -17,7 +17,6 @@
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
-#include <numeric>
#define DEBUG_TYPE "cmov"
diff --git a/bolt/lib/Passes/FixRISCVCallsPass.cpp b/bolt/lib/Passes/FixRISCVCallsPass.cpp
index 83c745facb29..9011ef303a80 100644
--- a/bolt/lib/Passes/FixRISCVCallsPass.cpp
+++ b/bolt/lib/Passes/FixRISCVCallsPass.cpp
@@ -1,3 +1,11 @@
+//===- bolt/Passes/FixRISCVCallsPass.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "bolt/Passes/FixRISCVCallsPass.h"
#include "bolt/Core/ParallelUtilities.h"
diff --git a/bolt/lib/Passes/FixRelaxationPass.cpp b/bolt/lib/Passes/FixRelaxationPass.cpp
index a49fb9894e80..7c970e464a94 100644
--- a/bolt/lib/Passes/FixRelaxationPass.cpp
+++ b/bolt/lib/Passes/FixRelaxationPass.cpp
@@ -1,3 +1,11 @@
+//===- bolt/Passes/FixRelaxationPass.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "bolt/Passes/FixRelaxationPass.h"
#include "bolt/Core/ParallelUtilities.h"
diff --git a/bolt/lib/Passes/FrameOptimizer.cpp b/bolt/lib/Passes/FrameOptimizer.cpp
index fb5f8eafa5cf..8461225e1819 100644
--- a/bolt/lib/Passes/FrameOptimizer.cpp
+++ b/bolt/lib/Passes/FrameOptimizer.cpp
@@ -20,7 +20,6 @@
#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/Support/Timer.h"
#include <deque>
-#include <unordered_map>
#define DEBUG_TYPE "fop"
diff --git a/bolt/lib/Passes/Hugify.cpp b/bolt/lib/Passes/Hugify.cpp
index b77356153bfd..1ac1b08573b8 100644
--- a/bolt/lib/Passes/Hugify.cpp
+++ b/bolt/lib/Passes/Hugify.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "bolt/Passes/Hugify.h"
-#include "llvm/Support/CommandLine.h"
#define DEBUG_TYPE "bolt-hugify"
diff --git a/bolt/lib/Passes/Inliner.cpp b/bolt/lib/Passes/Inliner.cpp
index a3b2017aa32a..84e7d97067b0 100644
--- a/bolt/lib/Passes/Inliner.cpp
+++ b/bolt/lib/Passes/Inliner.cpp
@@ -27,7 +27,6 @@
#include "bolt/Passes/Inliner.h"
#include "bolt/Core/MCPlus.h"
#include "llvm/Support/CommandLine.h"
-#include <map>
#define DEBUG_TYPE "bolt-inliner"
diff --git a/bolt/lib/Passes/ShrinkWrapping.cpp b/bolt/lib/Passes/ShrinkWrapping.cpp
index c9706500758d..176321c58dc9 100644
--- a/bolt/lib/Passes/ShrinkWrapping.cpp
+++ b/bolt/lib/Passes/ShrinkWrapping.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "bolt/Passes/ShrinkWrapping.h"
-#include "bolt/Core/MCPlus.h"
#include "bolt/Passes/DataflowInfoManager.h"
#include "bolt/Passes/MCF.h"
#include "bolt/Utils/CommandLineOpts.h"
diff --git a/bolt/lib/Passes/SplitFunctions.cpp b/bolt/lib/Passes/SplitFunctions.cpp
index cdbb2a15f667..f9e634d15a97 100644
--- a/bolt/lib/Passes/SplitFunctions.cpp
+++ b/bolt/lib/Passes/SplitFunctions.cpp
@@ -17,7 +17,6 @@
#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/CommandLine.h"
diff --git a/bolt/lib/Passes/TailDuplication.cpp b/bolt/lib/Passes/TailDuplication.cpp
index 2163e3a6a008..463ea49527fa 100644
--- a/bolt/lib/Passes/TailDuplication.cpp
+++ b/bolt/lib/Passes/TailDuplication.cpp
@@ -13,9 +13,9 @@
#include "bolt/Passes/TailDuplication.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include <queue>
#include <numeric>
+#include <queue>
#define DEBUG_TYPE "taildup"
diff --git a/bolt/lib/Passes/ValidateInternalCalls.cpp b/bolt/lib/Passes/ValidateInternalCalls.cpp
index 54ae621159cf..88df2e5b59f3 100644
--- a/bolt/lib/Passes/ValidateInternalCalls.cpp
+++ b/bolt/lib/Passes/ValidateInternalCalls.cpp
@@ -14,7 +14,6 @@
#include "bolt/Core/BinaryBasicBlock.h"
#include "bolt/Passes/DataflowInfoManager.h"
#include "bolt/Passes/FrameAnalysis.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInstPrinter.h"
#include <optional>
#include <queue>
diff --git a/bolt/lib/Profile/BoltAddressTranslation.cpp b/bolt/lib/Profile/BoltAddressTranslation.cpp
index 1d61a1b735b4..bcd4a457ce3b 100644
--- a/bolt/lib/Profile/BoltAddressTranslation.cpp
+++ b/bolt/lib/Profile/BoltAddressTranslation.cpp
@@ -22,12 +22,10 @@ const char *BoltAddressTranslation::SECTION_NAME = ".note.bolt_bat";
void BoltAddressTranslation::writeEntriesForBB(MapTy &Map,
const BinaryBasicBlock &BB,
- uint64_t FuncAddress) {
- uint64_t HotFuncAddress = ColdPartSource.count(FuncAddress)
- ? ColdPartSource[FuncAddress]
- : FuncAddress;
+ uint64_t FuncInputAddress,
+ uint64_t FuncOutputAddress) {
const uint64_t BBOutputOffset =
- BB.getOutputAddressRange().first - FuncAddress;
+ BB.getOutputAddressRange().first - FuncOutputAddress;
const uint32_t BBInputOffset = BB.getInputOffset();
// Every output BB must track back to an input BB for profile collection
@@ -42,9 +40,14 @@ void BoltAddressTranslation::writeEntriesForBB(MapTy &Map,
LLVM_DEBUG(dbgs() << "BB " << BB.getName() << "\n");
LLVM_DEBUG(dbgs() << " Key: " << Twine::utohexstr(BBOutputOffset)
<< " Val: " << Twine::utohexstr(BBInputOffset) << "\n");
- LLVM_DEBUG(dbgs() << formatv(" Hash: {0:x}\n",
- getBBHash(HotFuncAddress, BBInputOffset)));
- (void)HotFuncAddress;
+ // NB: in `writeEntriesForBB` we use the input address because hashes are
+ // saved early in `saveMetadata` before output addresses are assigned.
+ const BBHashMapTy &BBHashMap = getBBHashMap(FuncInputAddress);
+ (void)BBHashMap;
+ LLVM_DEBUG(
+ dbgs() << formatv(" Hash: {0:x}\n", BBHashMap.getBBHash(BBInputOffset)));
+ LLVM_DEBUG(
+ dbgs() << formatv(" Index: {0}\n", BBHashMap.getBBIndex(BBInputOffset)));
// In case of conflicts (same Key mapping to different Vals), the last
// update takes precedence. Of course it is not ideal to have conflicts and
// those happen when we have an empty BB that either contained only
@@ -61,7 +64,7 @@ void BoltAddressTranslation::writeEntriesForBB(MapTy &Map,
const auto InputAddress = BB.getFunction()->getAddress() + InputOffset;
const auto OutputAddress = IOAddressMap.lookup(InputAddress);
assert(OutputAddress && "Unknown instruction address");
- const auto OutputOffset = *OutputAddress - FuncAddress;
+ const auto OutputOffset = *OutputAddress - FuncOutputAddress;
// Is this the first instruction in the BB? No need to duplicate the entry.
if (OutputOffset == BBOutputOffset)
@@ -85,19 +88,26 @@ void BoltAddressTranslation::write(const BinaryContext &BC, raw_ostream &OS) {
if (Function.isIgnored() || (!BC.HasRelocations && !Function.isSimple()))
continue;
- // TBD: handle BAT functions w/multiple entry points.
- if (Function.isMultiEntry())
- continue;
+ uint32_t NumSecondaryEntryPoints = 0;
+ Function.forEachEntryPoint([&](uint64_t Offset, const MCSymbol *) {
+ if (!Offset)
+ return true;
+ ++NumSecondaryEntryPoints;
+ SecondaryEntryPointsMap[OutputAddress].push_back(Offset);
+ return true;
+ });
LLVM_DEBUG(dbgs() << "Function name: " << Function.getPrintName() << "\n");
LLVM_DEBUG(dbgs() << " Address reference: 0x"
<< Twine::utohexstr(Function.getOutputAddress()) << "\n");
LLVM_DEBUG(dbgs() << formatv(" Hash: {0:x}\n", getBFHash(OutputAddress)));
+ LLVM_DEBUG(dbgs() << " Secondary Entry Points: " << NumSecondaryEntryPoints
+ << '\n');
MapTy Map;
for (const BinaryBasicBlock *const BB :
Function.getLayout().getMainFragment())
- writeEntriesForBB(Map, *BB, Function.getOutputAddress());
+ writeEntriesForBB(Map, *BB, InputAddress, OutputAddress);
Maps.emplace(Function.getOutputAddress(), std::move(Map));
ReverseMap.emplace(OutputAddress, InputAddress);
@@ -111,7 +121,7 @@ void BoltAddressTranslation::write(const BinaryContext &BC, raw_ostream &OS) {
ColdPartSource.emplace(FF.getAddress(), Function.getOutputAddress());
Map.clear();
for (const BinaryBasicBlock *const BB : FF)
- writeEntriesForBB(Map, *BB, FF.getAddress());
+ writeEntriesForBB(Map, *BB, InputAddress, FF.getAddress());
Maps.emplace(FF.getAddress(), std::move(Map));
}
@@ -123,11 +133,9 @@ void BoltAddressTranslation::write(const BinaryContext &BC, raw_ostream &OS) {
writeMaps</*Cold=*/true>(Maps, PrevAddress, OS);
BC.outs() << "BOLT-INFO: Wrote " << Maps.size() << " BAT maps\n";
- const uint64_t NumBBHashes = std::accumulate(
- FuncHashes.begin(), FuncHashes.end(), 0ull,
- [](size_t Acc, const auto &B) { return Acc + B.second.second.size(); });
- BC.outs() << "BOLT-INFO: Wrote " << FuncHashes.size() << " function and "
- << NumBBHashes << " basic block hashes\n";
+ BC.outs() << "BOLT-INFO: Wrote " << FuncHashes.getNumFunctions()
+ << " function and " << FuncHashes.getNumBasicBlocks()
+ << " basic block hashes\n";
}
APInt BoltAddressTranslation::calculateBranchEntriesBitMask(MapTy &Map,
@@ -174,17 +182,20 @@ void BoltAddressTranslation::writeMaps(std::map<uint64_t, MapTy> &Maps,
// Only process cold fragments in cold mode, and vice versa.
if (Cold != ColdPartSource.count(Address))
continue;
- // NB: here we use the input address because hashes are saved early (in
- // `saveMetadata`) before output addresses are assigned.
+ // NB: in `writeMaps` we use the input address because hashes are saved
+ // early in `saveMetadata` before output addresses are assigned.
const uint64_t HotInputAddress =
ReverseMap[Cold ? ColdPartSource[Address] : Address];
- std::pair<size_t, BBHashMap> &FuncHashPair = FuncHashes[HotInputAddress];
MapTy &Map = MapEntry.second;
const uint32_t NumEntries = Map.size();
LLVM_DEBUG(dbgs() << "Writing " << NumEntries << " entries for 0x"
<< Twine::utohexstr(Address) << ".\n");
encodeULEB128(Address - PrevAddress, OS);
PrevAddress = Address;
+ const uint32_t NumSecondaryEntryPoints =
+ SecondaryEntryPointsMap.count(Address)
+ ? SecondaryEntryPointsMap[Address].size()
+ : 0;
if (Cold) {
size_t HotIndex =
std::distance(ColdPartSource.begin(), ColdPartSource.find(Address));
@@ -192,8 +203,17 @@ void BoltAddressTranslation::writeMaps(std::map<uint64_t, MapTy> &Maps,
PrevIndex = HotIndex;
} else {
// Function hash
- LLVM_DEBUG(dbgs() << "Hash: " << formatv("{0:x}\n", FuncHashPair.first));
- OS.write(reinterpret_cast<char *>(&FuncHashPair.first), 8);
+ size_t BFHash = getBFHash(HotInputAddress);
+ LLVM_DEBUG(dbgs() << "Hash: " << formatv("{0:x}\n", BFHash));
+ OS.write(reinterpret_cast<char *>(&BFHash), 8);
+ // Number of basic blocks
+ size_t NumBasicBlocks = getBBHashMap(HotInputAddress).getNumBasicBlocks();
+ LLVM_DEBUG(dbgs() << "Basic blocks: " << NumBasicBlocks << '\n');
+ encodeULEB128(NumBasicBlocks, OS);
+ // Secondary entry points
+ encodeULEB128(NumSecondaryEntryPoints, OS);
+ LLVM_DEBUG(dbgs() << "Secondary Entry Points: " << NumSecondaryEntryPoints
+ << '\n');
}
encodeULEB128(NumEntries, OS);
// For hot fragments only: encode the number of equal offsets
@@ -215,8 +235,10 @@ void BoltAddressTranslation::writeMaps(std::map<uint64_t, MapTy> &Maps,
});
}
}
+ const BBHashMapTy &BBHashMap = getBBHashMap(HotInputAddress);
size_t Index = 0;
uint64_t InOffset = 0;
+ size_t PrevBBIndex = 0;
// Output and Input addresses and delta-encoded
for (std::pair<const uint32_t, uint32_t> &KeyVal : Map) {
const uint64_t OutputAddress = KeyVal.first + Address;
@@ -226,12 +248,27 @@ void BoltAddressTranslation::writeMaps(std::map<uint64_t, MapTy> &Maps,
encodeSLEB128(KeyVal.second - InOffset, OS);
InOffset = KeyVal.second; // Keeping InOffset as if BRANCHENTRY is encoded
if ((InOffset & BRANCHENTRY) == 0) {
- // Basic block hash
- size_t BBHash = FuncHashPair.second[InOffset >> 1];
+ const bool IsBlock = BBHashMap.isInputBlock(InOffset >> 1);
+ unsigned BBIndex = IsBlock ? BBHashMap.getBBIndex(InOffset >> 1) : 0;
+ size_t BBHash = IsBlock ? BBHashMap.getBBHash(InOffset >> 1) : 0;
OS.write(reinterpret_cast<char *>(&BBHash), 8);
- LLVM_DEBUG(dbgs() << formatv("{0:x} -> {1:x} {2:x}\n", KeyVal.first,
- InOffset >> 1, BBHash));
+ // Basic block index in the input binary
+ encodeULEB128(BBIndex - PrevBBIndex, OS);
+ PrevBBIndex = BBIndex;
+ LLVM_DEBUG(dbgs() << formatv("{0:x} -> {1:x} {2:x} {3}\n", KeyVal.first,
+ InOffset >> 1, BBHash, BBIndex));
+ }
+ }
+ uint32_t PrevOffset = 0;
+ if (!Cold && NumSecondaryEntryPoints) {
+ LLVM_DEBUG(dbgs() << "Secondary entry points: ");
+ // Secondary entry point offsets, delta-encoded
+ for (uint32_t Offset : SecondaryEntryPointsMap[Address]) {
+ encodeULEB128(Offset - PrevOffset, OS);
+ LLVM_DEBUG(dbgs() << formatv("{0:x} ", Offset));
+ PrevOffset = Offset;
}
+ LLVM_DEBUG(dbgs() << '\n');
}
}
}
@@ -276,6 +313,7 @@ void BoltAddressTranslation::parseMaps(std::vector<uint64_t> &HotFuncs,
const uint64_t Address = PrevAddress + DE.getULEB128(&Offset, &Err);
uint64_t HotAddress = Cold ? 0 : Address;
PrevAddress = Address;
+ uint32_t SecondaryEntryPoints = 0;
if (Cold) {
HotIndex += DE.getULEB128(&Offset, &Err);
HotAddress = HotFuncs[HotIndex];
@@ -284,8 +322,20 @@ void BoltAddressTranslation::parseMaps(std::vector<uint64_t> &HotFuncs,
HotFuncs.push_back(Address);
// Function hash
const size_t FuncHash = DE.getU64(&Offset, &Err);
- FuncHashes[Address].first = FuncHash;
+ FuncHashes.addEntry(Address, FuncHash);
LLVM_DEBUG(dbgs() << formatv("{0:x}: hash {1:x}\n", Address, FuncHash));
+ // Number of basic blocks
+ const size_t NumBasicBlocks = DE.getULEB128(&Offset, &Err);
+ NumBasicBlocksMap.emplace(Address, NumBasicBlocks);
+ LLVM_DEBUG(dbgs() << formatv("{0:x}: #bbs {1}, {2} bytes\n", Address,
+ NumBasicBlocks,
+ getULEB128Size(NumBasicBlocks)));
+ // Secondary entry points
+ SecondaryEntryPoints = DE.getULEB128(&Offset, &Err);
+ LLVM_DEBUG(
+ dbgs() << formatv("{0:x}: secondary entry points {1}, {2} bytes\n",
+ Address, SecondaryEntryPoints,
+ getULEB128Size(SecondaryEntryPoints)));
}
const uint32_t NumEntries = DE.getULEB128(&Offset, &Err);
// Equal offsets, hot fragments only.
@@ -316,6 +366,7 @@ void BoltAddressTranslation::parseMaps(std::vector<uint64_t> &HotFuncs,
LLVM_DEBUG(dbgs() << "Parsing " << NumEntries << " entries for 0x"
<< Twine::utohexstr(Address) << "\n");
uint64_t InputOffset = 0;
+ size_t BBIndex = 0;
for (uint32_t J = 0; J < NumEntries; ++J) {
const uint64_t OutputDelta = DE.getULEB128(&Offset, &Err);
const uint64_t OutputAddress = PrevAddress + OutputDelta;
@@ -330,23 +381,41 @@ void BoltAddressTranslation::parseMaps(std::vector<uint64_t> &HotFuncs,
}
Map.insert(std::pair<uint32_t, uint32_t>(OutputOffset, InputOffset));
size_t BBHash = 0;
+ size_t BBIndexDelta = 0;
const bool IsBranchEntry = InputOffset & BRANCHENTRY;
if (!IsBranchEntry) {
BBHash = DE.getU64(&Offset, &Err);
+ BBIndexDelta = DE.getULEB128(&Offset, &Err);
+ BBIndex += BBIndexDelta;
// Map basic block hash to hot fragment by input offset
- FuncHashes[HotAddress].second.emplace(InputOffset >> 1, BBHash);
+ getBBHashMap(HotAddress).addEntry(InputOffset >> 1, BBIndex, BBHash);
}
LLVM_DEBUG({
dbgs() << formatv(
"{0:x} -> {1:x} ({2}/{3}b -> {4}/{5}b), {6:x}", OutputOffset,
InputOffset, OutputDelta, getULEB128Size(OutputDelta), InputDelta,
(J < EqualElems) ? 0 : getSLEB128Size(InputDelta), OutputAddress);
- if (BBHash)
- dbgs() << formatv(" {0:x}", BBHash);
+ if (!IsBranchEntry) {
+ dbgs() << formatv(" {0:x} {1}/{2}b", BBHash, BBIndex,
+ getULEB128Size(BBIndexDelta));
+ }
dbgs() << '\n';
});
}
Maps.insert(std::pair<uint64_t, MapTy>(Address, Map));
+ if (!Cold && SecondaryEntryPoints) {
+ uint32_t EntryPointOffset = 0;
+ LLVM_DEBUG(dbgs() << "Secondary entry points: ");
+ for (uint32_t EntryPointId = 0; EntryPointId != SecondaryEntryPoints;
+ ++EntryPointId) {
+ uint32_t OffsetDelta = DE.getULEB128(&Offset, &Err);
+ EntryPointOffset += OffsetDelta;
+ SecondaryEntryPointsMap[Address].push_back(EntryPointOffset);
+ LLVM_DEBUG(dbgs() << formatv("{0:x}/{1}b ", EntryPointOffset,
+ getULEB128Size(OffsetDelta)));
+ }
+ LLVM_DEBUG(dbgs() << '\n');
+ }
}
}
@@ -361,6 +430,8 @@ void BoltAddressTranslation::dump(raw_ostream &OS) {
OS << formatv(", hash: {0:x}", getBFHash(Address));
OS << "\n";
OS << "BB mappings:\n";
+ const BBHashMapTy &BBHashMap =
+ getBBHashMap(HotAddress ? HotAddress : Address);
for (const auto &Entry : MapEntry.second) {
const bool IsBranch = Entry.second & BRANCHENTRY;
const uint32_t Val = Entry.second >> 1; // dropping BRANCHENTRY bit
@@ -369,10 +440,16 @@ void BoltAddressTranslation::dump(raw_ostream &OS) {
if (IsBranch)
OS << " (branch)";
else
- OS << formatv(" hash: {0:x}",
- getBBHash(HotAddress ? HotAddress : Address, Val));
+ OS << formatv(" hash: {0:x}", BBHashMap.getBBHash(Val));
OS << "\n";
}
+ if (SecondaryEntryPointsMap.count(Address)) {
+ const std::vector<uint32_t> &SecondaryEntryPoints =
+ SecondaryEntryPointsMap[Address];
+ OS << SecondaryEntryPoints.size() << " secondary entry points:\n";
+ for (uint32_t EntryPointOffset : SecondaryEntryPoints)
+ OS << formatv("{0:x}\n", EntryPointOffset);
+ }
OS << "\n";
}
const size_t NumColdParts = ColdPartSource.size();
@@ -491,21 +568,33 @@ void BoltAddressTranslation::saveMetadata(BinaryContext &BC) {
if (BF.isIgnored() || (!BC.HasRelocations && !BF.isSimple()))
continue;
// Prepare function and block hashes
- FuncHashes[BF.getAddress()].first = BF.computeHash();
+ FuncHashes.addEntry(BF.getAddress(), BF.computeHash());
BF.computeBlockHashes();
+ BBHashMapTy &BBHashMap = getBBHashMap(BF.getAddress());
+ // Set BF/BB metadata
for (const BinaryBasicBlock &BB : BF)
- FuncHashes[BF.getAddress()].second.emplace(BB.getInputOffset(),
- BB.getHash());
+ BBHashMap.addEntry(BB.getInputOffset(), BB.getIndex(), BB.getHash());
}
}
-size_t BoltAddressTranslation::getBBHash(uint64_t FuncOutputAddress,
- uint32_t BBInputOffset) const {
- return FuncHashes.at(FuncOutputAddress).second.at(BBInputOffset);
-}
-
-size_t BoltAddressTranslation::getBFHash(uint64_t OutputAddress) const {
- return FuncHashes.at(OutputAddress).first;
+std::unordered_map<uint32_t, std::vector<uint32_t>>
+BoltAddressTranslation::getBFBranches(uint64_t OutputAddress) const {
+ std::unordered_map<uint32_t, std::vector<uint32_t>> Branches;
+ auto FuncIt = Maps.find(OutputAddress);
+ assert(FuncIt != Maps.end());
+ std::vector<uint32_t> InputOffsets;
+ for (const auto &KV : FuncIt->second)
+ InputOffsets.emplace_back(KV.second);
+ // Sort with LSB BRANCHENTRY bit.
+ llvm::sort(InputOffsets);
+ uint32_t BBOffset{0};
+ for (uint32_t InOffset : InputOffsets) {
+ if (InOffset & BRANCHENTRY)
+ Branches[BBOffset].push_back(InOffset >> 1);
+ else
+ BBOffset = InOffset >> 1;
+ }
+ return Branches;
}
} // namespace bolt
diff --git a/bolt/lib/Profile/CMakeLists.txt b/bolt/lib/Profile/CMakeLists.txt
index 3a31a9cc1919..045ac47edb95 100644
--- a/bolt/lib/Profile/CMakeLists.txt
+++ b/bolt/lib/Profile/CMakeLists.txt
@@ -3,7 +3,6 @@ add_llvm_library(LLVMBOLTProfile
DataAggregator.cpp
DataReader.cpp
Heatmap.cpp
- ProfileReaderBase.cpp
StaleProfileMatching.cpp
YAMLProfileReader.cpp
YAMLProfileWriter.cpp
diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp
index 6a64bcde911e..05099aa25ce2 100644
--- a/bolt/lib/Profile/DataAggregator.cpp
+++ b/bolt/lib/Profile/DataAggregator.cpp
@@ -16,6 +16,7 @@
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Profile/BoltAddressTranslation.h"
#include "bolt/Profile/Heatmap.h"
+#include "bolt/Profile/YAMLProfileWriter.h"
#include "bolt/Utils/CommandLineOpts.h"
#include "bolt/Utils/Utils.h"
#include "llvm/ADT/STLExtras.h"
@@ -85,6 +86,7 @@ MaxSamples("max-samples",
cl::cat(AggregatorCategory));
extern cl::opt<opts::ProfileFormatKind> ProfileFormat;
+extern cl::opt<std::string> SaveProfile;
cl::opt<bool> ReadPreAggregated(
"pa", cl::desc("skip perf and read data from a pre-aggregated file format"),
@@ -594,10 +596,23 @@ Error DataAggregator::readProfile(BinaryContext &BC) {
convertBranchData(Function);
}
- if (opts::AggregateOnly &&
- opts::ProfileFormat == opts::ProfileFormatKind::PF_Fdata) {
- if (std::error_code EC = writeAggregatedFile(opts::OutputFilename))
- report_error("cannot create output data file", EC);
+ if (opts::AggregateOnly) {
+ if (opts::ProfileFormat == opts::ProfileFormatKind::PF_Fdata)
+ if (std::error_code EC = writeAggregatedFile(opts::OutputFilename))
+ report_error("cannot create output data file", EC);
+
+ // BAT YAML is handled by DataAggregator since normal YAML output requires
+ // CFG which is not available in BAT mode.
+ if (usesBAT()) {
+ // Postprocess split function profile for BAT
+ fixupBATProfile(BC);
+ if (opts::ProfileFormat == opts::ProfileFormatKind::PF_YAML)
+ if (std::error_code EC = writeBATYAML(BC, opts::OutputFilename))
+ report_error("cannot create output data file", EC);
+ if (!opts::SaveProfile.empty())
+ if (std::error_code EC = writeBATYAML(BC, opts::SaveProfile))
+ report_error("cannot create output data file", EC);
+ }
}
return Error::success();
@@ -2258,6 +2273,181 @@ DataAggregator::writeAggregatedFile(StringRef OutputFilename) const {
return std::error_code();
}
+void DataAggregator::fixupBATProfile(BinaryContext &BC) {
+ for (auto &[FuncName, Branches] : NamesToBranches) {
+ BinaryData *BD = BC.getBinaryDataByName(FuncName);
+ assert(BD);
+ uint64_t FuncAddress = BD->getAddress();
+ if (!BAT->isBATFunction(FuncAddress))
+ continue;
+ // Filter out cold fragments
+ if (!BD->getSectionName().equals(BC.getMainCodeSectionName()))
+ continue;
+ // Convert inter-branches between hot and cold fragments into
+ // intra-branches.
+ for (auto &[OffsetFrom, CallToMap] : Branches.InterIndex) {
+ for (auto &[CallToLoc, CallToIdx] : CallToMap) {
+ if (CallToLoc.Name != FuncName)
+ continue;
+ Branches.IntraIndex[OffsetFrom][CallToLoc.Offset] = CallToIdx;
+ Branches.InterIndex[OffsetFrom].erase(CallToLoc);
+ }
+ }
+ }
+}
+
+std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
+ StringRef OutputFilename) const {
+ std::error_code EC;
+ raw_fd_ostream OutFile(OutputFilename, EC, sys::fs::OpenFlags::OF_None);
+ if (EC)
+ return EC;
+
+ yaml::bolt::BinaryProfile BP;
+
+ // Fill out the header info.
+ BP.Header.Version = 1;
+ BP.Header.FileName = std::string(BC.getFilename());
+ std::optional<StringRef> BuildID = BC.getFileBuildID();
+ BP.Header.Id = BuildID ? std::string(*BuildID) : "<unknown>";
+ BP.Header.Origin = std::string(getReaderName());
+ // Only the input binary layout order is supported.
+ BP.Header.IsDFSOrder = false;
+ // FIXME: Need to match hash function used to produce BAT hashes.
+ BP.Header.HashFunction = HashFunction::Default;
+
+ ListSeparator LS(",");
+ raw_string_ostream EventNamesOS(BP.Header.EventNames);
+ for (const StringMapEntry<std::nullopt_t> &EventEntry : EventNames)
+ EventNamesOS << LS << EventEntry.first().str();
+
+ BP.Header.Flags = opts::BasicAggregation ? BinaryFunction::PF_SAMPLE
+ : BinaryFunction::PF_LBR;
+
+ if (!opts::BasicAggregation) {
+ // Convert profile for functions not covered by BAT
+ for (auto &BFI : BC.getBinaryFunctions()) {
+ BinaryFunction &Function = BFI.second;
+ if (!Function.hasProfile())
+ continue;
+ if (BAT->isBATFunction(Function.getAddress()))
+ continue;
+ BP.Functions.emplace_back(
+ YAMLProfileWriter::convert(Function, /*UseDFS=*/false));
+ }
+
+ for (const auto &KV : NamesToBranches) {
+ const StringRef FuncName = KV.first;
+ const FuncBranchData &Branches = KV.second;
+ yaml::bolt::BinaryFunctionProfile YamlBF;
+ BinaryData *BD = BC.getBinaryDataByName(FuncName);
+ assert(BD);
+ uint64_t FuncAddress = BD->getAddress();
+ if (!BAT->isBATFunction(FuncAddress))
+ continue;
+ // Filter out cold fragments
+ if (!BD->getSectionName().equals(BC.getMainCodeSectionName()))
+ continue;
+ BinaryFunction *BF = BC.getBinaryFunctionAtAddress(FuncAddress);
+ assert(BF);
+ YamlBF.Name = FuncName.str();
+ YamlBF.Id = BF->getFunctionNumber();
+ YamlBF.Hash = BAT->getBFHash(FuncAddress);
+ YamlBF.ExecCount = BF->getKnownExecutionCount();
+ YamlBF.NumBasicBlocks = BAT->getNumBasicBlocks(FuncAddress);
+ const BoltAddressTranslation::BBHashMapTy &BlockMap =
+ BAT->getBBHashMap(FuncAddress);
+
+ auto addSuccProfile = [&](yaml::bolt::BinaryBasicBlockProfile &YamlBB,
+ uint64_t SuccOffset, unsigned SuccDataIdx) {
+ const llvm::bolt::BranchInfo &BI = Branches.Data.at(SuccDataIdx);
+ yaml::bolt::SuccessorInfo SI;
+ SI.Index = BlockMap.getBBIndex(SuccOffset);
+ SI.Count = BI.Branches;
+ SI.Mispreds = BI.Mispreds;
+ YamlBB.Successors.emplace_back(SI);
+ };
+
+ std::unordered_map<uint32_t, std::vector<uint32_t>> BFBranches =
+ BAT->getBFBranches(FuncAddress);
+
+ auto addCallsProfile = [&](yaml::bolt::BinaryBasicBlockProfile &YamlBB,
+ uint64_t Offset) {
+ // Iterate over BRANCHENTRY records in the current block
+ for (uint32_t BranchOffset : BFBranches[Offset]) {
+ if (!Branches.InterIndex.contains(BranchOffset))
+ continue;
+ for (const auto &[CallToLoc, CallToIdx] :
+ Branches.InterIndex.at(BranchOffset)) {
+ const llvm::bolt::BranchInfo &BI = Branches.Data.at(CallToIdx);
+ yaml::bolt::CallSiteInfo YamlCSI;
+ YamlCSI.DestId = 0; // designated for unknown functions
+ YamlCSI.EntryDiscriminator = 0;
+ YamlCSI.Count = BI.Branches;
+ YamlCSI.Mispreds = BI.Mispreds;
+ YamlCSI.Offset = BranchOffset - Offset;
+ BinaryData *CallTargetBD = BC.getBinaryDataByName(CallToLoc.Name);
+ if (!CallTargetBD) {
+ YamlBB.CallSites.emplace_back(YamlCSI);
+ continue;
+ }
+ uint64_t CallTargetAddress = CallTargetBD->getAddress();
+ BinaryFunction *CallTargetBF =
+ BC.getBinaryFunctionAtAddress(CallTargetAddress);
+ if (!CallTargetBF) {
+ YamlBB.CallSites.emplace_back(YamlCSI);
+ continue;
+ }
+ // Calls between hot and cold fragments must be handled in
+ // fixupBATProfile.
+ assert(CallTargetBF != BF && "invalid CallTargetBF");
+ YamlCSI.DestId = CallTargetBF->getFunctionNumber();
+ if (CallToLoc.Offset) {
+ if (BAT->isBATFunction(CallTargetAddress)) {
+ LLVM_DEBUG(dbgs() << "BOLT-DEBUG: Unsupported secondary "
+ "entry point in BAT function "
+ << CallToLoc.Name << '\n');
+ } else if (const BinaryBasicBlock *CallTargetBB =
+ CallTargetBF->getBasicBlockAtOffset(
+ CallToLoc.Offset)) {
+ // Only record true call information, ignoring returns (normally
+ // won't have a target basic block) and jumps to the landing
+ // pads (not an entry point).
+ if (CallTargetBB->isEntryPoint()) {
+ YamlCSI.EntryDiscriminator =
+ CallTargetBF->getEntryIDForSymbol(
+ CallTargetBB->getLabel());
+ }
+ }
+ }
+ YamlBB.CallSites.emplace_back(YamlCSI);
+ }
+ }
+ };
+
+ for (const auto &[FromOffset, SuccKV] : Branches.IntraIndex) {
+ yaml::bolt::BinaryBasicBlockProfile YamlBB;
+ if (!BlockMap.isInputBlock(FromOffset))
+ continue;
+ YamlBB.Index = BlockMap.getBBIndex(FromOffset);
+ YamlBB.Hash = BlockMap.getBBHash(FromOffset);
+ for (const auto &[SuccOffset, SuccDataIdx] : SuccKV)
+ addSuccProfile(YamlBB, SuccOffset, SuccDataIdx);
+ addCallsProfile(YamlBB, FromOffset);
+ if (YamlBB.ExecCount || !YamlBB.Successors.empty() ||
+ !YamlBB.CallSites.empty())
+ YamlBF.Blocks.emplace_back(YamlBB);
+ }
+ BP.Functions.emplace_back(YamlBF);
+ }
+ }
+
+ // Write the profile.
+ yaml::Output Out(OutFile, nullptr, 0);
+ Out << BP;
+ return std::error_code();
+}
+
void DataAggregator::dump() const { DataReader::dump(); }
void DataAggregator::dump(const LBREntry &LBR) const {
diff --git a/bolt/lib/Profile/DataReader.cpp b/bolt/lib/Profile/DataReader.cpp
index aa21eb121ad6..67f357fe4d3f 100644
--- a/bolt/lib/Profile/DataReader.cpp
+++ b/bolt/lib/Profile/DataReader.cpp
@@ -18,7 +18,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
-#include <map>
#undef DEBUG_TYPE
#define DEBUG_TYPE "bolt-prof"
diff --git a/bolt/lib/Profile/Heatmap.cpp b/bolt/lib/Profile/Heatmap.cpp
index 13541f6f6a4b..210a5cc98c10 100644
--- a/bolt/lib/Profile/Heatmap.cpp
+++ b/bolt/lib/Profile/Heatmap.cpp
@@ -10,7 +10,6 @@
#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
diff --git a/bolt/lib/Profile/YAMLProfileWriter.cpp b/bolt/lib/Profile/YAMLProfileWriter.cpp
index 6fcc4a956fa1..0f082086c1fc 100644
--- a/bolt/lib/Profile/YAMLProfileWriter.cpp
+++ b/bolt/lib/Profile/YAMLProfileWriter.cpp
@@ -25,6 +25,25 @@ extern llvm::cl::opt<bool> ProfileUseDFS;
namespace llvm {
namespace bolt {
+/// Set CallSiteInfo destination fields from \p Symbol and return a target
+/// BinaryFunction for that symbol.
+static const BinaryFunction *setCSIDestination(const BinaryContext &BC,
+ yaml::bolt::CallSiteInfo &CSI,
+ const MCSymbol *Symbol) {
+ CSI.DestId = 0; // designated for unknown functions
+ CSI.EntryDiscriminator = 0;
+ if (Symbol) {
+ uint64_t EntryID = 0;
+ if (const BinaryFunction *const Callee =
+ BC.getFunctionForSymbol(Symbol, &EntryID)) {
+ CSI.DestId = Callee->getFunctionNumber();
+ CSI.EntryDiscriminator = EntryID;
+ return Callee;
+ }
+ }
+ return nullptr;
+}
+
yaml::bolt::BinaryFunctionProfile
YAMLProfileWriter::convert(const BinaryFunction &BF, bool UseDFS) {
yaml::bolt::BinaryFunctionProfile YamlBF;
@@ -79,31 +98,20 @@ YAMLProfileWriter::convert(const BinaryFunction &BF, bool UseDFS) {
continue;
for (const IndirectCallProfile &CSP : ICSP.get()) {
StringRef TargetName = "";
- CSI.DestId = 0; // designated for unknown functions
- CSI.EntryDiscriminator = 0;
- if (CSP.Symbol) {
- const BinaryFunction *Callee = BC.getFunctionForSymbol(CSP.Symbol);
- if (Callee) {
- CSI.DestId = Callee->getFunctionNumber();
- TargetName = Callee->getOneName();
- }
- }
+ const BinaryFunction *Callee = setCSIDestination(BC, CSI, CSP.Symbol);
+ if (Callee)
+ TargetName = Callee->getOneName();
CSI.Count = CSP.Count;
CSI.Mispreds = CSP.Mispreds;
CSTargets.emplace_back(TargetName, CSI);
}
} else { // direct call or a tail call
- uint64_t EntryID = 0;
- CSI.DestId = 0;
StringRef TargetName = "";
const MCSymbol *CalleeSymbol = BC.MIB->getTargetSymbol(Instr);
const BinaryFunction *const Callee =
- BC.getFunctionForSymbol(CalleeSymbol, &EntryID);
- if (Callee) {
- CSI.DestId = Callee->getFunctionNumber();
- CSI.EntryDiscriminator = EntryID;
+ setCSIDestination(BC, CSI, CalleeSymbol);
+ if (Callee)
TargetName = Callee->getOneName();
- }
auto getAnnotationWithDefault = [&](const MCInst &Inst, StringRef Ann) {
return BC.MIB->getAnnotationWithDefault(Instr, Ann, 0ull);
diff --git a/bolt/lib/Rewrite/BinaryPassManager.cpp b/bolt/lib/Rewrite/BinaryPassManager.cpp
index 489b33fe1c7c..6c26bb795726 100644
--- a/bolt/lib/Rewrite/BinaryPassManager.cpp
+++ b/bolt/lib/Rewrite/BinaryPassManager.cpp
@@ -72,7 +72,7 @@ static cl::opt<bool> JTFootprintReductionFlag(
"instructions at jump sites"),
cl::cat(BoltOptCategory));
-static cl::opt<bool>
+cl::opt<bool>
KeepNops("keep-nops",
cl::desc("keep no-op instructions. By default they are removed."),
cl::Hidden, cl::cat(BoltOptCategory));
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index 8dc7b90a6e30..feeba89a40dc 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -14,7 +14,6 @@
#include "bolt/Core/DynoStats.h"
#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Rewrite/RewriteInstance.h"
-#include "bolt/Utils/Utils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -375,12 +374,11 @@ static cl::opt<bool> AlwaysConvertToRanges(
extern cl::opt<std::string> CompDirOverride;
} // namespace opts
-static bool getLowAndHighPC(const DIE &Die, const DWARFUnit &DU,
- uint64_t &LowPC, uint64_t &HighPC,
- uint64_t &SectionIndex) {
+/// If DW_AT_low_pc exists sets LowPC and returns true.
+static bool getLowPC(const DIE &Die, const DWARFUnit &DU, uint64_t &LowPC,
+ uint64_t &SectionIndex) {
DIEValue DvalLowPc = Die.findAttribute(dwarf::DW_AT_low_pc);
- DIEValue DvalHighPc = Die.findAttribute(dwarf::DW_AT_high_pc);
- if (!DvalLowPc || !DvalHighPc)
+ if (!DvalLowPc)
return false;
dwarf::Form Form = DvalLowPc.getForm();
@@ -403,14 +401,39 @@ static bool getLowAndHighPC(const DIE &Die, const DWARFUnit &DU,
LowPC = LowPcValue;
SectionIndex = 0;
}
+ return true;
+}
+
+/// If DW_AT_high_pc exists sets HighPC and returns true.
+static bool getHighPC(const DIE &Die, const uint64_t LowPC, uint64_t &HighPC) {
+ DIEValue DvalHighPc = Die.findAttribute(dwarf::DW_AT_high_pc);
+ if (!DvalHighPc)
+ return false;
if (DvalHighPc.getForm() == dwarf::DW_FORM_addr)
HighPC = DvalHighPc.getDIEInteger().getValue();
else
HighPC = LowPC + DvalHighPc.getDIEInteger().getValue();
-
return true;
}
+/// If DW_AT_low_pc and DW_AT_high_pc exist sets LowPC and HighPC and returns
+/// true.
+static bool getLowAndHighPC(const DIE &Die, const DWARFUnit &DU,
+ uint64_t &LowPC, uint64_t &HighPC,
+ uint64_t &SectionIndex) {
+ uint64_t TempLowPC = LowPC;
+ uint64_t TempHighPC = HighPC;
+ uint64_t TempSectionIndex = SectionIndex;
+ if (getLowPC(Die, DU, TempLowPC, TempSectionIndex) &&
+ getHighPC(Die, TempLowPC, TempHighPC)) {
+ LowPC = TempLowPC;
+ HighPC = TempHighPC;
+ SectionIndex = TempSectionIndex;
+ return true;
+ }
+ return false;
+}
+
static Expected<llvm::DWARFAddressRangesVector>
getDIEAddressRanges(const DIE &Die, DWARFUnit &DU) {
uint64_t LowPC, HighPC, Index;
@@ -1248,10 +1271,9 @@ void DWARFRewriter::updateUnitDebugInfo(
}
}
} else if (LowPCAttrInfo) {
- const std::optional<uint64_t> Result =
- LowPCAttrInfo.getDIEInteger().getValue();
- if (Result.has_value()) {
- const uint64_t Address = Result.value();
+ uint64_t Address = 0;
+ uint64_t SectionIndex = 0;
+ if (getLowPC(*Die, Unit, Address, SectionIndex)) {
uint64_t NewAddress = 0;
if (const BinaryFunction *Function =
BC.getBinaryFunctionContainingAddress(Address)) {
@@ -1662,7 +1684,7 @@ namespace {
std::unique_ptr<BinaryContext>
createDwarfOnlyBC(const object::ObjectFile &File) {
return cantFail(BinaryContext::createBinaryContext(
- &File, false,
+ File.makeTriple(), File.getFileName(), nullptr, false,
DWARFContext::create(File, DWARFContext::ProcessDebugRelocations::Ignore,
nullptr, "", WithColor::defaultErrorHandler,
WithColor::defaultWarningHandler),
diff --git a/bolt/lib/Rewrite/JITLinkLinker.cpp b/bolt/lib/Rewrite/JITLinkLinker.cpp
index 66e129bf1d05..be8f9dd03467 100644
--- a/bolt/lib/Rewrite/JITLinkLinker.cpp
+++ b/bolt/lib/Rewrite/JITLinkLinker.cpp
@@ -5,9 +5,11 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+
#include "bolt/Rewrite/JITLinkLinker.h"
+#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/BinaryData.h"
-#include "bolt/Rewrite/RewriteInstance.h"
+#include "bolt/Core/BinarySection.h"
#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
diff --git a/bolt/lib/Rewrite/LinuxKernelRewriter.cpp b/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
index a2bfd45a64e3..d96199e020d3 100644
--- a/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
+++ b/bolt/lib/Rewrite/LinuxKernelRewriter.cpp
@@ -14,7 +14,9 @@
#include "bolt/Rewrite/MetadataRewriter.h"
#include "bolt/Rewrite/MetadataRewriters.h"
#include "bolt/Utils/CommandLineOpts.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -66,6 +68,16 @@ static cl::opt<bool> DumpStaticCalls("dump-static-calls",
cl::cat(BoltCategory));
static cl::opt<bool>
+ DumpStaticKeys("dump-static-keys",
+ cl::desc("dump Linux kernel static keys jump table"),
+ cl::init(false), cl::Hidden, cl::cat(BoltCategory));
+
+static cl::opt<bool> LongJumpLabels(
+ "long-jump-labels",
+ cl::desc("always use long jumps/nops for Linux kernel static keys"),
+ cl::init(false), cl::Hidden, cl::cat(BoltCategory));
+
+static cl::opt<bool>
PrintORC("print-orc",
cl::desc("print ORC unwind information for instructions"),
cl::init(true), cl::Hidden, cl::cat(BoltCategory));
@@ -151,6 +163,20 @@ class LinuxKernelRewriter final : public MetadataRewriter {
/// Number of entries in the input file ORC sections.
uint64_t NumORCEntries = 0;
+ /// Section containing static keys jump table.
+ ErrorOr<BinarySection &> StaticKeysJumpSection = std::errc::bad_address;
+ uint64_t StaticKeysJumpTableAddress = 0;
+ static constexpr size_t STATIC_KEYS_JUMP_ENTRY_SIZE = 8;
+
+ struct JumpInfoEntry {
+ bool Likely;
+ bool InitValue;
+ };
+ SmallVector<JumpInfoEntry, 16> JumpInfo;
+
+ /// Static key entries that need nop conversion.
+ DenseSet<uint32_t> NopIDs;
+
/// Section containing static call table.
ErrorOr<BinarySection &> StaticCallSection = std::errc::bad_address;
uint64_t StaticCallTableAddress = 0;
@@ -186,6 +212,11 @@ class LinuxKernelRewriter final : public MetadataRewriter {
/// Size of bug_entry struct.
static constexpr size_t BUG_TABLE_ENTRY_SIZE = 12;
+ /// List of bug entries per function.
+ using FunctionBugListType =
+ DenseMap<BinaryFunction *, SmallVector<uint32_t, 2>>;
+ FunctionBugListType FunctionBugList;
+
/// .pci_fixup section.
ErrorOr<BinarySection &> PCIFixupSection = std::errc::bad_address;
static constexpr size_t PCI_FIXUP_ENTRY_SIZE = 16;
@@ -226,15 +257,28 @@ class LinuxKernelRewriter final : public MetadataRewriter {
/// Paravirtual instruction patch sites.
Error readParaInstructions();
+ Error rewriteParaInstructions();
+ /// __bug_table section handling.
Error readBugTable();
+ Error rewriteBugTable();
+
+ /// Do not process functions containing instructions annotated with
+ /// \p Annotation.
+ void skipFunctionsWithAnnotation(StringRef Annotation) const;
- /// Read alternative instruction info from .altinstructions.
+ /// Handle alternative instruction info from .altinstructions.
Error readAltInstructions();
+ Error rewriteAltInstructions();
/// Read .pci_fixup
Error readPCIFixupTable();
+ /// Handle static keys jump table.
+ Error readStaticKeysJumpTable();
+ Error rewriteStaticKeysJumpTable();
+ Error updateStaticKeysJumpTablePostEmit();
+
/// Mark instructions referenced by kernel metadata.
Error markInstructions();
@@ -268,6 +312,9 @@ public:
if (Error E = readPCIFixupTable())
return E;
+ if (Error E = readStaticKeysJumpTable())
+ return E;
+
return Error::success();
}
@@ -284,18 +331,33 @@ public:
if (Error E = rewriteExceptionTable())
return E;
+ if (Error E = rewriteAltInstructions())
+ return E;
+
+ if (Error E = rewriteParaInstructions())
+ return E;
+
if (Error E = rewriteORCTables())
return E;
if (Error E = rewriteStaticCalls())
return E;
+ if (Error E = rewriteStaticKeysJumpTable())
+ return E;
+
+ if (Error E = rewriteBugTable())
+ return E;
+
return Error::success();
}
Error postEmitFinalizer() override {
updateLKMarkers();
+ if (Error E = updateStaticKeysJumpTablePostEmit())
+ return E;
+
return Error::success();
}
};
@@ -1086,16 +1148,43 @@ Error LinuxKernelRewriter::readParaInstructions() {
return Error::success();
}
+void LinuxKernelRewriter::skipFunctionsWithAnnotation(
+ StringRef Annotation) const {
+ for (BinaryFunction &BF : llvm::make_second_range(BC.getBinaryFunctions())) {
+ if (!BC.shouldEmit(BF))
+ continue;
+ for (const BinaryBasicBlock &BB : BF) {
+ const bool HasAnnotation = llvm::any_of(BB, [&](const MCInst &Inst) {
+ return BC.MIB->hasAnnotation(Inst, Annotation);
+ });
+ if (HasAnnotation) {
+ BF.setSimple(false);
+ break;
+ }
+ }
+ }
+}
+
+Error LinuxKernelRewriter::rewriteParaInstructions() {
+ // Disable output of functions with paravirtual instructions before the
+ // rewrite support is complete.
+ skipFunctionsWithAnnotation("ParaSite");
+
+ return Error::success();
+}
+
/// Process __bug_table section.
-/// This section contains information useful for kernel debugging.
+/// This section contains information useful for kernel debugging, mostly
+/// utilized by WARN()/WARN_ON() macros and deprecated BUG()/BUG_ON().
+///
/// Each entry in the section is a struct bug_entry that contains a pointer to
/// the ud2 instruction corresponding to the bug, corresponding file name (both
/// pointers use PC relative offset addressing), line number, and flags.
/// The definition of the struct bug_entry can be found in
-/// `include/asm-generic/bug.h`
-///
-/// NB: find_bug() uses linear search to match an address to an entry in the bug
-/// table. Hence there is no need to sort entries when rewriting the table.
+/// `include/asm-generic/bug.h`. The first entry in the struct is an instruction
+/// address encoded as a PC-relative offset. In theory, it could be an absolute
+/// address if CONFIG_GENERIC_BUG_RELATIVE_POINTERS is not set, but in practice
+/// the kernel code relies on it being a relative offset on x86-64.
Error LinuxKernelRewriter::readBugTable() {
BugTableSection = BC.getUniqueSectionByName("__bug_table");
if (!BugTableSection)
@@ -1138,6 +1227,8 @@ Error LinuxKernelRewriter::readBugTable() {
" referenced by bug table entry %d",
InstAddress, EntryID);
BC.MIB->addAnnotation(*Inst, "BugEntry", EntryID);
+
+ FunctionBugList[BF].push_back(EntryID);
}
}
@@ -1146,6 +1237,52 @@ Error LinuxKernelRewriter::readBugTable() {
return Error::success();
}
+/// find_bug() uses linear search to match an address to an entry in the bug
+/// table. Hence, there is no need to sort entries when rewriting the table.
+/// When we need to erase an entry, we set its instruction address to zero.
+Error LinuxKernelRewriter::rewriteBugTable() {
+ if (!BugTableSection)
+ return Error::success();
+
+ for (BinaryFunction &BF : llvm::make_second_range(BC.getBinaryFunctions())) {
+ if (!BC.shouldEmit(BF))
+ continue;
+
+ if (!FunctionBugList.count(&BF))
+ continue;
+
+ // Bugs that will be emitted for this function.
+ DenseSet<uint32_t> EmittedIDs;
+ for (BinaryBasicBlock &BB : BF) {
+ for (MCInst &Inst : BB) {
+ if (!BC.MIB->hasAnnotation(Inst, "BugEntry"))
+ continue;
+ const uint32_t ID = BC.MIB->getAnnotationAs<uint32_t>(Inst, "BugEntry");
+ EmittedIDs.insert(ID);
+
+ // Create a relocation entry for this bug entry.
+ MCSymbol *Label =
+ BC.MIB->getOrCreateInstLabel(Inst, "__BUG_", BC.Ctx.get());
+ const uint64_t EntryOffset = (ID - 1) * BUG_TABLE_ENTRY_SIZE;
+ BugTableSection->addRelocation(EntryOffset, Label, ELF::R_X86_64_PC32,
+ /*Addend*/ 0);
+ }
+ }
+
+ // Clear bug entries that were not emitted for this function, e.g. as a
+ // result of DCE, by setting their instruction address to zero.
+ for (const uint32_t ID : FunctionBugList[&BF]) {
+ if (!EmittedIDs.count(ID)) {
+ const uint64_t EntryOffset = (ID - 1) * BUG_TABLE_ENTRY_SIZE;
+ BugTableSection->addRelocation(EntryOffset, nullptr, ELF::R_X86_64_PC32,
+ /*Addend*/ 0);
+ }
+ }
+ }
+
+ return Error::success();
+}
+
/// The kernel can replace certain instruction sequences depending on hardware
/// it is running on and features specified during boot time. The information
/// about alternative instruction sequences is stored in .altinstructions
@@ -1265,6 +1402,14 @@ Error LinuxKernelRewriter::readAltInstructions() {
return Error::success();
}
+Error LinuxKernelRewriter::rewriteAltInstructions() {
+ // Disable output of functions with alt instructions before the rewrite
+ // support is complete.
+ skipFunctionsWithAnnotation("AltInst");
+
+ return Error::success();
+}
+
/// When the Linux kernel needs to handle an error associated with a given PCI
/// device, it uses a table stored in .pci_fixup section to locate a fixup code
/// specific to the vendor and the problematic device. The section contains a
@@ -1343,6 +1488,353 @@ Error LinuxKernelRewriter::readPCIFixupTable() {
return Error::success();
}
+/// Runtime code modification used by static keys is the most ubiquitous
+/// self-modifying feature of the Linux kernel. The idea is to eliminate the
+/// condition check and associated conditional jump on a hot path if that
+/// condition (based on a boolean value of a static key) does not change often.
+/// Whenever the condition changes, the kernel runtime modifies all code paths
+/// associated with that key flipping the code between nop and (unconditional)
+/// jump. The information about the code is stored in a static key jump table
+/// and contains the list of entries of the following type from
+/// include/linux/jump_label.h:
+///
+/// struct jump_entry {
+/// s32 code;
+/// s32 target;
+/// long key; // key may be far away from the core kernel under KASLR
+/// };
+///
+/// The list does not have to be stored in any sorted way, but it is sorted at
+/// boot time (or module initialization time) first by "key" and then by "code".
+/// jump_label_sort_entries() is responsible for sorting the table.
+///
+/// The key in jump_entry structure uses lower two bits of the key address
+/// (which itself is aligned) to store extra information. We are interested in
+/// the lower bit which indicates if the key is likely to be set on the code
+/// path associated with this jump_entry.
+///
+/// static_key_{enable,disable}() functions modify the code based on key and
+/// jump table entries.
+///
+/// jump_label_update() updates all code entries for a given key. Batch mode is
+/// used for x86.
+///
+/// The actual patching happens in text_poke_bp_batch() that overrides the first
+/// byte of the sequence with int3 before proceeding with actual code
+/// replacement.
+Error LinuxKernelRewriter::readStaticKeysJumpTable() {
+ const BinaryData *StaticKeysJumpTable =
+ BC.getBinaryDataByName("__start___jump_table");
+ if (!StaticKeysJumpTable)
+ return Error::success();
+
+ StaticKeysJumpTableAddress = StaticKeysJumpTable->getAddress();
+
+ const BinaryData *Stop = BC.getBinaryDataByName("__stop___jump_table");
+ if (!Stop)
+ return createStringError(errc::executable_format_error,
+ "missing __stop___jump_table symbol");
+
+ ErrorOr<BinarySection &> ErrorOrSection =
+ BC.getSectionForAddress(StaticKeysJumpTableAddress);
+ if (!ErrorOrSection)
+ return createStringError(errc::executable_format_error,
+ "no section matching __start___jump_table");
+
+ StaticKeysJumpSection = *ErrorOrSection;
+ if (!StaticKeysJumpSection->containsAddress(Stop->getAddress() - 1))
+ return createStringError(errc::executable_format_error,
+ "__stop___jump_table not in the same section "
+ "as __start___jump_table");
+
+ if ((Stop->getAddress() - StaticKeysJumpTableAddress) %
+ STATIC_KEYS_JUMP_ENTRY_SIZE)
+ return createStringError(errc::executable_format_error,
+ "static keys jump table size error");
+
+ const uint64_t SectionAddress = StaticKeysJumpSection->getAddress();
+ DataExtractor DE(StaticKeysJumpSection->getContents(),
+ BC.AsmInfo->isLittleEndian(),
+ BC.AsmInfo->getCodePointerSize());
+ DataExtractor::Cursor Cursor(StaticKeysJumpTableAddress - SectionAddress);
+ uint32_t EntryID = 0;
+ while (Cursor && Cursor.tell() < Stop->getAddress() - SectionAddress) {
+ const uint64_t JumpAddress =
+ SectionAddress + Cursor.tell() + (int32_t)DE.getU32(Cursor);
+ const uint64_t TargetAddress =
+ SectionAddress + Cursor.tell() + (int32_t)DE.getU32(Cursor);
+ const uint64_t KeyAddress =
+ SectionAddress + Cursor.tell() + (int64_t)DE.getU64(Cursor);
+
+ // Consume the status of the cursor.
+ if (!Cursor)
+ return createStringError(
+ errc::executable_format_error,
+ "out of bounds while reading static keys jump table: %s",
+ toString(Cursor.takeError()).c_str());
+
+ ++EntryID;
+
+ JumpInfo.push_back(JumpInfoEntry());
+ JumpInfoEntry &Info = JumpInfo.back();
+ Info.Likely = KeyAddress & 1;
+
+ if (opts::DumpStaticKeys) {
+ BC.outs() << "Static key jump entry: " << EntryID
+ << "\n\tJumpAddress: 0x" << Twine::utohexstr(JumpAddress)
+ << "\n\tTargetAddress: 0x" << Twine::utohexstr(TargetAddress)
+ << "\n\tKeyAddress: 0x" << Twine::utohexstr(KeyAddress)
+ << "\n\tIsLikely: " << Info.Likely << '\n';
+ }
+
+ BinaryFunction *BF = BC.getBinaryFunctionContainingAddress(JumpAddress);
+ if (!BF && opts::Verbosity) {
+ BC.outs()
+ << "BOLT-INFO: no function matches address 0x"
+ << Twine::utohexstr(JumpAddress)
+ << " of jump instruction referenced from static keys jump table\n";
+ }
+
+ if (!BF || !BC.shouldEmit(*BF))
+ continue;
+
+ MCInst *Inst = BF->getInstructionAtOffset(JumpAddress - BF->getAddress());
+ if (!Inst)
+ return createStringError(
+ errc::executable_format_error,
+ "no instruction at static keys jump site address 0x%" PRIx64,
+ JumpAddress);
+
+ if (!BF->containsAddress(TargetAddress))
+ return createStringError(
+ errc::executable_format_error,
+ "invalid target of static keys jump at 0x%" PRIx64 " : 0x%" PRIx64,
+ JumpAddress, TargetAddress);
+
+ const bool IsBranch = BC.MIB->isBranch(*Inst);
+ if (!IsBranch && !BC.MIB->isNoop(*Inst))
+ return createStringError(errc::executable_format_error,
+ "jump or nop expected at address 0x%" PRIx64,
+ JumpAddress);
+
+ const uint64_t Size = BC.computeInstructionSize(*Inst);
+ if (Size != 2 && Size != 5) {
+ return createStringError(
+ errc::executable_format_error,
+ "unexpected static keys jump size at address 0x%" PRIx64,
+ JumpAddress);
+ }
+
+ MCSymbol *Target = BF->registerBranch(JumpAddress, TargetAddress);
+ MCInst StaticKeyBranch;
+
+ // Create a conditional branch instruction. The actual conditional code type
+ // should not matter as long as it's a valid code. The instruction should be
+ // treated as a conditional branch for control-flow purposes. Before we emit
+ // the code, it will be converted to a different instruction in
+ // rewriteStaticKeysJumpTable().
+ //
+ // NB: for older kernels, under LongJumpLabels option, we create long
+ // conditional branch to guarantee that code size estimation takes
+ // into account the extra bytes needed for long branch that will be used
+ // by the kernel patching code. Newer kernels can work with both short
+ // and long branches. The code for long conditional branch is larger
+ // than unconditional one, so we are pessimistic in our estimations.
+ if (opts::LongJumpLabels)
+ BC.MIB->createLongCondBranch(StaticKeyBranch, Target, 0, BC.Ctx.get());
+ else
+ BC.MIB->createCondBranch(StaticKeyBranch, Target, 0, BC.Ctx.get());
+ BC.MIB->moveAnnotations(std::move(*Inst), StaticKeyBranch);
+ BC.MIB->setDynamicBranch(StaticKeyBranch, EntryID);
+ *Inst = StaticKeyBranch;
+
+ // IsBranch = InitialValue ^ LIKELY
+ //
+ // 0 0 0
+ // 1 0 1
+ // 1 1 0
+ // 0 1 1
+ //
+ // => InitialValue = IsBranch ^ LIKELY
+ Info.InitValue = IsBranch ^ Info.Likely;
+
+ // Add annotations to facilitate manual code analysis.
+ BC.MIB->addAnnotation(*Inst, "Likely", Info.Likely);
+ BC.MIB->addAnnotation(*Inst, "InitValue", Info.InitValue);
+ if (!BC.MIB->getSize(*Inst))
+ BC.MIB->setSize(*Inst, Size);
+
+ if (opts::LongJumpLabels)
+ BC.MIB->setSize(*Inst, 5);
+ }
+
+ BC.outs() << "BOLT-INFO: parsed " << EntryID << " static keys jump entries\n";
+
+ return Error::success();
+}
+
+// Pre-emit pass. Convert dynamic branch instructions into jumps that could be
+// relaxed. In the post-emit pass we will convert those jumps into nops when
+// necessary. We do the unconditional conversion into jumps so that the jumps
+// can be relaxed and the optimal size of jump/nop instruction is selected.
+Error LinuxKernelRewriter::rewriteStaticKeysJumpTable() {
+ if (!StaticKeysJumpSection)
+ return Error::success();
+
+ uint64_t NumShort = 0;
+ uint64_t NumLong = 0;
+ for (BinaryFunction &BF : llvm::make_second_range(BC.getBinaryFunctions())) {
+ if (!BC.shouldEmit(BF))
+ continue;
+
+ for (BinaryBasicBlock &BB : BF) {
+ for (MCInst &Inst : BB) {
+ if (!BC.MIB->isDynamicBranch(Inst))
+ continue;
+
+ const uint32_t EntryID = *BC.MIB->getDynamicBranchID(Inst);
+ MCSymbol *Target =
+ const_cast<MCSymbol *>(BC.MIB->getTargetSymbol(Inst));
+ assert(Target && "Target symbol should be set.");
+
+ const JumpInfoEntry &Info = JumpInfo[EntryID - 1];
+ const bool IsBranch = Info.Likely ^ Info.InitValue;
+
+ uint32_t Size = *BC.MIB->getSize(Inst);
+ if (Size == 2)
+ ++NumShort;
+ else if (Size == 5)
+ ++NumLong;
+ else
+ llvm_unreachable("Wrong size for static keys jump instruction.");
+
+ MCInst NewInst;
+ // Replace the instruction with an unconditional jump even if it needs to
+ // be a nop in the binary.
+ if (opts::LongJumpLabels) {
+ BC.MIB->createLongUncondBranch(NewInst, Target, BC.Ctx.get());
+ } else {
+ // Newer kernels can handle short and long jumps for static keys.
+ // Optimistically, emit short jump and check if it gets relaxed into
+ // a long one during post-emit. Only then convert the jump to a nop.
+ BC.MIB->createUncondBranch(NewInst, Target, BC.Ctx.get());
+ }
+
+ BC.MIB->moveAnnotations(std::move(Inst), NewInst);
+ Inst = NewInst;
+
+ // Mark the instruction for nop conversion.
+ if (!IsBranch)
+ NopIDs.insert(EntryID);
+
+ MCSymbol *Label =
+ BC.MIB->getOrCreateInstLabel(Inst, "__SK_", BC.Ctx.get());
+
+ // Create a relocation against the label.
+ const uint64_t EntryOffset = StaticKeysJumpTableAddress -
+ StaticKeysJumpSection->getAddress() +
+ (EntryID - 1) * 16;
+ StaticKeysJumpSection->addRelocation(EntryOffset, Label,
+ ELF::R_X86_64_PC32,
+ /*Addend*/ 0);
+ StaticKeysJumpSection->addRelocation(EntryOffset + 4, Target,
+ ELF::R_X86_64_PC32, /*Addend*/ 0);
+ }
+ }
+ }
+
+ BC.outs() << "BOLT-INFO: the input contains " << NumShort << " short and "
+ << NumLong << " long static keys jumps in optimized functions\n";
+
+ return Error::success();
+}
+
+// Post-emit pass of static keys jump section. Convert jumps to nops.
+Error LinuxKernelRewriter::updateStaticKeysJumpTablePostEmit() {
+ if (!StaticKeysJumpSection || !StaticKeysJumpSection->isFinalized())
+ return Error::success();
+
+ const uint64_t SectionAddress = StaticKeysJumpSection->getAddress();
+ DataExtractor DE(StaticKeysJumpSection->getOutputContents(),
+ BC.AsmInfo->isLittleEndian(),
+ BC.AsmInfo->getCodePointerSize());
+ DataExtractor::Cursor Cursor(StaticKeysJumpTableAddress - SectionAddress);
+ const BinaryData *Stop = BC.getBinaryDataByName("__stop___jump_table");
+ uint32_t EntryID = 0;
+ uint64_t NumShort = 0;
+ uint64_t NumLong = 0;
+ while (Cursor && Cursor.tell() < Stop->getAddress() - SectionAddress) {
+ const uint64_t JumpAddress =
+ SectionAddress + Cursor.tell() + (int32_t)DE.getU32(Cursor);
+ const uint64_t TargetAddress =
+ SectionAddress + Cursor.tell() + (int32_t)DE.getU32(Cursor);
+ const uint64_t KeyAddress =
+ SectionAddress + Cursor.tell() + (int64_t)DE.getU64(Cursor);
+
+ // Consume the status of the cursor.
+ if (!Cursor)
+ return createStringError(errc::executable_format_error,
+ "out of bounds while updating static keys: %s",
+ toString(Cursor.takeError()).c_str());
+
+ ++EntryID;
+
+ LLVM_DEBUG({
+ dbgs() << "\n\tJumpAddress: 0x" << Twine::utohexstr(JumpAddress)
+ << "\n\tTargetAddress: 0x" << Twine::utohexstr(TargetAddress)
+ << "\n\tKeyAddress: 0x" << Twine::utohexstr(KeyAddress) << '\n';
+ });
+ (void)TargetAddress;
+ (void)KeyAddress;
+
+ BinaryFunction *BF =
+ BC.getBinaryFunctionContainingAddress(JumpAddress,
+ /*CheckPastEnd*/ false,
+ /*UseMaxSize*/ true);
+ assert(BF && "Cannot get function for modified static key.");
+
+ if (!BF->isEmitted())
+ continue;
+
+ // Disassemble instruction to collect stats even if nop-conversion is
+ // unnecessary.
+ MutableArrayRef<uint8_t> Contents = MutableArrayRef<uint8_t>(
+ reinterpret_cast<uint8_t *>(BF->getImageAddress()), BF->getImageSize());
+ assert(Contents.size() && "Non-empty function image expected.");
+
+ MCInst Inst;
+ uint64_t Size;
+ const uint64_t JumpOffset = JumpAddress - BF->getAddress();
+ if (!BC.DisAsm->getInstruction(Inst, Size, Contents.slice(JumpOffset), 0,
+ nulls())) {
+ llvm_unreachable("Unable to disassemble jump instruction.");
+ }
+ assert(BC.MIB->isBranch(Inst) && "Branch instruction expected.");
+
+ if (Size == 2)
+ ++NumShort;
+ else if (Size == 5)
+ ++NumLong;
+ else
+ llvm_unreachable("Unexpected size for static keys jump instruction.");
+
+ // Check if we need to convert jump instruction into a nop.
+ if (!NopIDs.contains(EntryID))
+ continue;
+
+ SmallString<15> NopCode;
+ raw_svector_ostream VecOS(NopCode);
+ BC.MAB->writeNopData(VecOS, Size, BC.STI.get());
+ for (uint64_t I = 0; I < Size; ++I)
+ Contents[JumpOffset + I] = NopCode[I];
+ }
+
+ BC.outs() << "BOLT-INFO: written " << NumShort << " short and " << NumLong
+ << " long static keys jumps in optimized functions\n";
+
+ return Error::success();
+}
+
} // namespace
std::unique_ptr<MetadataRewriter>
diff --git a/bolt/lib/Rewrite/MachORewriteInstance.cpp b/bolt/lib/Rewrite/MachORewriteInstance.cpp
index 0970a0507ebe..172cb640bf91 100644
--- a/bolt/lib/Rewrite/MachORewriteInstance.cpp
+++ b/bolt/lib/Rewrite/MachORewriteInstance.cpp
@@ -18,6 +18,7 @@
#include "bolt/Rewrite/BinaryPassManager.h"
#include "bolt/Rewrite/ExecutableFileMemoryManager.h"
#include "bolt/Rewrite/JITLinkLinker.h"
+#include "bolt/Rewrite/RewriteInstance.h"
#include "bolt/RuntimeLibs/InstrumentationRuntimeLibrary.h"
#include "bolt/Utils/Utils.h"
#include "llvm/MC/MCObjectStreamer.h"
@@ -54,37 +55,6 @@ extern cl::opt<unsigned> Verbosity;
namespace llvm {
namespace bolt {
-extern MCPlusBuilder *createX86MCPlusBuilder(const MCInstrAnalysis *,
- const MCInstrInfo *,
- const MCRegisterInfo *,
- const MCSubtargetInfo *);
-extern MCPlusBuilder *createAArch64MCPlusBuilder(const MCInstrAnalysis *,
- const MCInstrInfo *,
- const MCRegisterInfo *,
- const MCSubtargetInfo *);
-
-namespace {
-
-MCPlusBuilder *createMCPlusBuilder(const Triple::ArchType Arch,
- const MCInstrAnalysis *Analysis,
- const MCInstrInfo *Info,
- const MCRegisterInfo *RegInfo,
- const MCSubtargetInfo *STI) {
-#ifdef X86_AVAILABLE
- if (Arch == Triple::x86_64)
- return createX86MCPlusBuilder(Analysis, Info, RegInfo, STI);
-#endif
-
-#ifdef AARCH64_AVAILABLE
- if (Arch == Triple::aarch64)
- return createAArch64MCPlusBuilder(Analysis, Info, RegInfo, STI);
-#endif
-
- llvm_unreachable("architecture unsupported by MCPlusBuilder");
-}
-
-} // anonymous namespace
-
#define DEBUG_TYPE "bolt"
Expected<std::unique_ptr<MachORewriteInstance>>
@@ -103,7 +73,8 @@ MachORewriteInstance::MachORewriteInstance(object::MachOObjectFile *InputFile,
: InputFile(InputFile), ToolPath(ToolPath) {
ErrorAsOutParameter EAO(&Err);
auto BCOrErr = BinaryContext::createBinaryContext(
- InputFile, /* IsPIC */ true, DWARFContext::create(*InputFile),
+ InputFile->makeTriple(), InputFile->getFileName(), nullptr,
+ /* IsPIC */ true, DWARFContext::create(*InputFile),
{llvm::outs(), llvm::errs()});
if (Error E = BCOrErr.takeError()) {
Err = std::move(E);
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index cde195c17390..0c8ee0d41723 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -81,8 +81,10 @@ extern cl::list<std::string> HotTextMoveSections;
extern cl::opt<bool> Hugify;
extern cl::opt<bool> Instrument;
extern cl::opt<JumpTableSupportLevel> JumpTables;
+extern cl::opt<bool> KeepNops;
extern cl::list<std::string> ReorderData;
extern cl::opt<bolt::ReorderFunctions::ReorderType> ReorderFunctions;
+extern cl::opt<bool> TerminalTrap;
extern cl::opt<bool> TimeBuild;
cl::opt<bool> AllowStripped("allow-stripped",
@@ -199,10 +201,7 @@ static cl::opt<cl::boolOrDefault> RelocationMode(
"relocs", cl::desc("use relocations in the binary (default=autodetect)"),
cl::cat(BoltCategory));
-static cl::opt<std::string>
-SaveProfile("w",
- cl::desc("save recorded profile to a file"),
- cl::cat(BoltOutputCategory));
+extern cl::opt<std::string> SaveProfile;
static cl::list<std::string>
SkipFunctionNames("skip-funcs",
@@ -269,6 +268,10 @@ namespace bolt {
extern const char *BoltRevision;
+// Weird location for createMCPlusBuilder, but this is here to avoid a
+// cyclic dependency of libCore (its natural place) and libTarget. libRewrite
+// can depend on libTarget, but not libCore. Since libRewrite is the only
+// user of this function, we define it here.
MCPlusBuilder *createMCPlusBuilder(const Triple::ArchType Arch,
const MCInstrAnalysis *Analysis,
const MCInstrInfo *Info,
@@ -346,8 +349,21 @@ RewriteInstance::RewriteInstance(ELFObjectFileBase *File, const int Argc,
Stderr.SetUnbuffered();
LLVM_DEBUG(dbgs().SetUnbuffered());
+ // Read RISCV subtarget features from input file
+ std::unique_ptr<SubtargetFeatures> Features;
+ Triple TheTriple = File->makeTriple();
+ if (TheTriple.getArch() == llvm::Triple::riscv64) {
+ Expected<SubtargetFeatures> FeaturesOrErr = File->getFeatures();
+ if (auto E = FeaturesOrErr.takeError()) {
+ Err = std::move(E);
+ return;
+ } else {
+ Features.reset(new SubtargetFeatures(*FeaturesOrErr));
+ }
+ }
+
auto BCOrErr = BinaryContext::createBinaryContext(
- File, IsPIC,
+ TheTriple, File->getFileName(), Features.get(), IsPIC,
DWARFContext::create(*File, DWARFContext::ProcessDebugRelocations::Ignore,
nullptr, opts::DWPPathName,
WithColor::defaultErrorHandler,
@@ -732,6 +748,13 @@ Error RewriteInstance::run() {
// Skip disassembling if we have a translation table and we are running an
// aggregation job.
if (opts::AggregateOnly && BAT->enabledFor(InputFile)) {
+ // YAML profile in BAT mode requires CFG for .bolt.org.text functions
+ if (!opts::SaveProfile.empty() ||
+ opts::ProfileFormat == opts::ProfileFormatKind::PF_YAML) {
+ selectFunctionsToProcess();
+ disassembleFunctions();
+ buildFunctionsCFG();
+ }
processProfileData();
return Error::success();
}
@@ -2028,12 +2051,13 @@ void RewriteInstance::adjustCommandLineOptions() {
if (opts::Lite)
BC->outs() << "BOLT-INFO: enabling lite mode\n";
- if (!opts::SaveProfile.empty() && BAT->enabledFor(InputFile)) {
- BC->errs()
- << "BOLT-ERROR: unable to save profile in YAML format for input "
- "file processed by BOLT. Please remove -w option and use branch "
- "profile.\n";
- exit(1);
+ if (BC->IsLinuxKernel) {
+ if (!opts::KeepNops.getNumOccurrences())
+ opts::KeepNops = true;
+
+ // Linux kernel may resume execution after a trap instruction in some cases.
+ if (!opts::TerminalTrap.getNumOccurrences())
+ opts::TerminalTrap = false;
}
}
@@ -3126,12 +3150,13 @@ void RewriteInstance::processProfileData() {
}
}
- if (!opts::SaveProfile.empty()) {
+ if (!opts::SaveProfile.empty() && !BAT->enabledFor(InputFile)) {
YAMLProfileWriter PW(opts::SaveProfile);
PW.writeProfile(*this);
}
if (opts::AggregateOnly &&
- opts::ProfileFormat == opts::ProfileFormatKind::PF_YAML) {
+ opts::ProfileFormat == opts::ProfileFormatKind::PF_YAML &&
+ !BAT->enabledFor(InputFile)) {
YAMLProfileWriter PW(opts::OutputFilename);
PW.writeProfile(*this);
}
diff --git a/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp
index b6fc49d3f5d6..d114d70f2d37 100644
--- a/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp
+++ b/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp
@@ -11,10 +11,9 @@
//===----------------------------------------------------------------------===//
#include "bolt/RuntimeLibs/HugifyRuntimeLibrary.h"
-#include "bolt/Core/BinaryFunction.h"
+#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/Linker.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/Alignment.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
diff --git a/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp b/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp
index ab9623d5c51b..74f2f0aae91e 100644
--- a/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp
+++ b/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp
@@ -16,10 +16,7 @@
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "mcplus"
diff --git a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
index de55fbe51764..8b1894953f37 100644
--- a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
+++ b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp
@@ -211,13 +211,6 @@ public:
return false;
}
- // FIXME: For compatibility with old LLVM only!
- bool isTerminator(const MCInst &Inst) const override {
- unsigned Opcode = Inst.getOpcode();
- return Info->get(Opcode).isTerminator() || X86::isUD1(Opcode) ||
- X86::isUD2(Opcode);
- }
-
bool isIndirectCall(const MCInst &Inst) const override {
return isCall(Inst) &&
((getMemoryOperandNo(Inst) != -1) || Inst.getOperand(0).isReg());
@@ -336,6 +329,9 @@ public:
}
bool isUnsupportedBranch(const MCInst &Inst) const override {
+ if (isDynamicBranch(Inst))
+ return true;
+
switch (Inst.getOpcode()) {
default:
return false;
@@ -2728,6 +2724,7 @@ public:
void createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
MCContext *Ctx) const override {
+ Inst.clear();
Inst.setOpcode(X86::JMP_1);
Inst.clear();
Inst.addOperand(MCOperand::createExpr(
@@ -2776,6 +2773,15 @@ public:
Inst.addOperand(MCOperand::createImm(CC));
}
+ void createLongCondBranch(MCInst &Inst, const MCSymbol *Target, unsigned CC,
+ MCContext *Ctx) const override {
+ Inst.setOpcode(X86::JCC_4);
+ Inst.clear();
+ Inst.addOperand(MCOperand::createExpr(
+ MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx)));
+ Inst.addOperand(MCOperand::createImm(CC));
+ }
+
bool reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
MCContext *Ctx) const override {
unsigned InvCC = getInvertedCondCode(getCondCode(Inst));
diff --git a/bolt/lib/Utils/CommandLineOpts.cpp b/bolt/lib/Utils/CommandLineOpts.cpp
index e910fa4f8672..ba296c10c00a 100644
--- a/bolt/lib/Utils/CommandLineOpts.cpp
+++ b/bolt/lib/Utils/CommandLineOpts.cpp
@@ -162,6 +162,10 @@ cl::opt<ProfileFormatKind> ProfileFormat(
clEnumValN(PF_YAML, "yaml", "dense YAML representation")),
cl::ZeroOrMore, cl::Hidden, cl::cat(BoltCategory));
+cl::opt<std::string> SaveProfile("w",
+ cl::desc("save recorded profile to a file"),
+ cl::cat(BoltOutputCategory));
+
cl::opt<bool> SplitEH("split-eh", cl::desc("split C++ exception handling code"),
cl::Hidden, cl::cat(BoltOptCategory));
diff --git a/bolt/test/X86/Inputs/blarge_new.preagg.txt b/bolt/test/X86/Inputs/blarge_new.preagg.txt
new file mode 100644
index 000000000000..e92f356d9188
--- /dev/null
+++ b/bolt/test/X86/Inputs/blarge_new.preagg.txt
@@ -0,0 +1,81 @@
+B 40164b 401608 109 0
+B 401611 4017e0 115 0
+B 4017f0 401616 117 0
+B 401ba2 4015da 6 0
+B 4015d5 401b60 1 0
+B 40159a 401b60 5 0
+B 401b9d 401b70 615 2
+B 401b90 401b99 344 37
+B 401ba2 40159f 8 0
+B 4015b0 401070 9 0
+B 401544 4014a0 6 0
+B 40188a 401928 5 0
+B 40152a 4014b0 21 0
+B 40169e 40165b 2 0
+B 4014dd 401070 12 1
+B 401509 4014ec 2 2
+B 401510 401030 673 0
+B 4019de 401080 1 0
+B 401500 401070 22 0
+B 401921 4014d6 9 0
+B 4019b3 401080 3 0
+B 40162d 401070 113 0
+B 4014d1 401800 27 0
+B 401a3f 401080 1 0
+B 4018d2 401050 17 0
+B 401664 4017c0 2 0
+B 401680 401070 2 0
+B 4017d0 401669 2 0
+B 4018f7 40190d 9 0
+B 4015bc 401592 6 0
+B 401964 401090 5 0
+B 4015f8 4015cd 1 0
+B 4015ec 401070 6 0
+F 40165b 401664 2
+F 4017c0 4017d0 2
+F 401669 401680 2
+F 40190d 401921 9
+F 4014d6 4014dd 9
+F 401800 4018d2 17
+F 4018d7 4018f7 9
+F 40159f 4015b0 8
+F 401515 401544 6
+F 401070 401500 1
+F 401070 401070 157
+F 4014a0 4014d1 6
+F 401616 40162d 112
+F 4019e3 401a3f 1
+F 4014e2 401500 19
+F 401090 401090 5
+F 401030 401030 673
+F 401505 401510 668
+F 401616 4017f0 2
+F 401070 4015b0 1
+F 4015da 4015ec 6
+F 401b60 401b90 6
+F 4019b8 4019de 1
+F 401969 4019b3 3
+F 401505 401509 2
+F 401515 40152a 21
+F 401592 40159a 4
+F 401050 401050 17
+F 4015cd 4015d5 1
+F 401070 4014dd 1
+F 401b99 401ba2 8
+F 401b70 401b90 326
+F 401b99 401b9d 324
+F 401592 4015bc 1
+F 401608 401611 109
+F 401b70 401b9d 268
+F 4015b5 4015bc 5
+F 401b99 401b90 1
+F 401b70 401ba2 5
+F 401632 40164b 108
+F 401080 401080 5
+F 4014b0 4014d1 21
+F 4017e0 4017f0 115
+F 4015f1 4015f8 1
+F 401685 40169e 2
+F 401928 401964 5
+F 401800 40188a 5
+F 4014ec 401500 2
diff --git a/bolt/test/X86/Inputs/blarge_new.yaml b/bolt/test/X86/Inputs/blarge_new.yaml
new file mode 100644
index 000000000000..0380f5180e90
--- /dev/null
+++ b/bolt/test/X86/Inputs/blarge_new.yaml
@@ -0,0 +1,1648 @@
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_X86_64
+ Entry: 0x4016D0
+ProgramHeaders:
+ - Type: PT_PHDR
+ Flags: [ PF_R ]
+ VAddr: 0x400040
+ Align: 0x8
+ Offset: 0x40
+ - Type: PT_INTERP
+ Flags: [ PF_R ]
+ FirstSec: .interp
+ LastSec: .interp
+ VAddr: 0x4002A8
+ Offset: 0x2A8
+ - Type: PT_LOAD
+ Flags: [ PF_R ]
+ FirstSec: .interp
+ LastSec: .rela.plt
+ VAddr: 0x400000
+ Align: 0x1000
+ Offset: 0x0
+ - Type: PT_LOAD
+ Flags: [ PF_X, PF_R ]
+ FirstSec: .init
+ LastSec: .fini
+ VAddr: 0x401000
+ Align: 0x1000
+ Offset: 0x1000
+ - Type: PT_LOAD
+ Flags: [ PF_R ]
+ FirstSec: .rodata
+ LastSec: .eh_frame
+ VAddr: 0x402000
+ Align: 0x1000
+ Offset: 0x2000
+ - Type: PT_LOAD
+ Flags: [ PF_W, PF_R ]
+ FirstSec: .init_array
+ LastSec: .bss
+ VAddr: 0x403E00
+ Align: 0x1000
+ Offset: 0x2E00
+ - Type: PT_DYNAMIC
+ Flags: [ PF_W, PF_R ]
+ FirstSec: .dynamic
+ LastSec: .dynamic
+ VAddr: 0x403E10
+ Align: 0x8
+ Offset: 0x2E10
+ - Type: PT_NOTE
+ Flags: [ PF_R ]
+ FirstSec: .note.gnu.build-id
+ LastSec: .note.ABI-tag
+ VAddr: 0x4002C4
+ Align: 0x4
+ Offset: 0x2C4
+ - Type: PT_GNU_EH_FRAME
+ Flags: [ PF_R ]
+ FirstSec: .eh_frame_hdr
+ LastSec: .eh_frame_hdr
+ VAddr: 0x402270
+ Align: 0x4
+ Offset: 0x2270
+ - Type: PT_GNU_STACK
+ Flags: [ PF_W, PF_R ]
+ Align: 0x10
+ Offset: 0x0
+ - Type: PT_GNU_RELRO
+ Flags: [ PF_R ]
+ FirstSec: .init_array
+ LastSec: .got
+ VAddr: 0x403E00
+ Offset: 0x2E00
+Sections:
+ - Name: .interp
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4002A8
+ AddressAlign: 0x1
+ Content: 2F6C696236342F6C642D6C696E75782D7838362D36342E736F2E3200
+ - Name: .note.gnu.build-id
+ Type: SHT_NOTE
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4002C4
+ AddressAlign: 0x4
+ Notes:
+ - Name: GNU
+ Desc: 66CF856212C3B313EA98AD840984B20EA781118A
+ Type: NT_PRPSINFO
+ - Name: .note.ABI-tag
+ Type: SHT_NOTE
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4002E8
+ AddressAlign: 0x4
+ Notes:
+ - Name: GNU
+ Desc: '00000000030000000200000000000000'
+ Type: NT_VERSION
+ - Name: .gnu.hash
+ Type: SHT_GNU_HASH
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400308
+ Link: .dynsym
+ AddressAlign: 0x8
+ Header:
+ SymNdx: 0x1
+ Shift2: 0x0
+ BloomFilter: [ 0x0 ]
+ HashBuckets: [ 0x0 ]
+ HashValues: [ ]
+ - Name: .dynsym
+ Type: SHT_DYNSYM
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400328
+ Link: .dynstr
+ AddressAlign: 0x8
+ - Name: .dynstr
+ Type: SHT_STRTAB
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400430
+ AddressAlign: 0x1
+ - Name: .gnu.version
+ Type: SHT_GNU_versym
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4004BA
+ Link: .dynsym
+ AddressAlign: 0x2
+ Entries: [ 0, 2, 2, 3, 4, 2, 5, 5, 2, 0, 5 ]
+ - Name: .gnu.version_r
+ Type: SHT_GNU_verneed
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4004D0
+ Link: .dynstr
+ AddressAlign: 0x8
+ Dependencies:
+ - Version: 1
+ File: libm.so.6
+ Entries:
+ - Name: GLIBC_2.2.5
+ Hash: 157882997
+ Flags: 0
+ Other: 5
+ - Name: GLIBC_2.29
+ Hash: 110530953
+ Flags: 0
+ Other: 3
+ - Version: 1
+ File: libc.so.6
+ Entries:
+ - Name: GLIBC_2.4
+ Hash: 225011988
+ Flags: 0
+ Other: 4
+ - Name: GLIBC_2.2.5
+ Hash: 157882997
+ Flags: 0
+ Other: 2
+ - Name: .rela.dyn
+ Type: SHT_RELA
+ Flags: [ SHF_ALLOC ]
+ Address: 0x400530
+ Link: .dynsym
+ AddressAlign: 0x8
+ Relocations:
+ - Offset: 0x403FF0
+ Symbol: __libc_start_main
+ Type: R_X86_64_GLOB_DAT
+ - Offset: 0x403FF8
+ Symbol: __gmon_start__
+ Type: R_X86_64_GLOB_DAT
+ - Name: .rela.plt
+ Type: SHT_RELA
+ Flags: [ SHF_ALLOC, SHF_INFO_LINK ]
+ Address: 0x400560
+ Link: .dynsym
+ AddressAlign: 0x8
+ Info: .got.plt
+ Relocations:
+ - Offset: 0x404018
+ Symbol: putchar
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404020
+ Symbol: puts
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404028
+ Symbol: pow
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404030
+ Symbol: __stack_chk_fail
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404038
+ Symbol: printf
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404040
+ Symbol: cos
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404048
+ Symbol: acos
+ Type: R_X86_64_JUMP_SLOT
+ - Offset: 0x404050
+ Symbol: sqrt
+ Type: R_X86_64_JUMP_SLOT
+ - Name: .init
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x401000
+ AddressAlign: 0x4
+ Offset: 0x1000
+ Content: F30F1EFA4883EC08488B05E92F00004885C07402FFD04883C408C3
+ - Name: .plt
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x401020
+ AddressAlign: 0x10
+ EntSize: 0x10
+ Content: FF35E22F0000FF25E42F00000F1F4000FF25E22F00006800000000E9E0FFFFFFFF25DA2F00006801000000E9D0FFFFFFFF25D22F00006802000000E9C0FFFFFFFF25CA2F00006803000000E9B0FFFFFFFF25C22F00006804000000E9A0FFFFFFFF25BA2F00006805000000E990FFFFFFFF25B22F00006806000000E980FFFFFFFF25AA2F00006807000000E970FFFFFF
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x4010B0
+ AddressAlign: 0x10
+ Content: 4156BF082040004155415455534883EC5064488B042528000000488944244831C0E86AFFFFFF488D742430488D7C2424488B0529100000F20F101529100000F20F100D2910000066480F6ED8488B050510000066480F6EC0E8F3060000BFBF20400031C0E857FFFFFF448B5C24244585DB7E2131DBF20F1044DC30BFCA204000B8010000004883C301E832FFFFFF395C24247FE1BF0A000000E8E2FEFFFF488D742430488D7C2424488B05B10F0000F20F1015C10F0000F20F100DC10F000066480F6ED8488B058D0F000066480F6EC0E87B060000BFBF20400031C0E8DFFEFFFF448B5424244585D27E2131DBF20F1044DC30BFCA204000B8010000004883C301E8BAFEFFFF395C24247FE1BF0A000000E86AFEFFFF488B053B0F0000F20F101D630F0000488D742430F20F10155E0F0000F20F100D5E0F0000488D7C242466480F6EC0E807060000BFBF20400031C0E86BFEFFFF448B4C24244585C97E2131DBF20F1044DC30BFCA204000B8010000004883C301E846FEFFFF395C24247FE1BF0A000000E8F6FDFFFF488D742430488D7C2424488B05BD0E0000F20F101DFD0E0000F20F100DFD0E000066480F6ED066480F6EC0E896050000BFBF20400031C0E8FAFDFFFF448B4424244585C07E2131DBF20F1044DC30BFCA204000B8010000004883C301E8D5FDFFFF395C24247FE1BF0A000000E885FDFFFF488B05460E0000F20F101DA60E0000488D742430F20F100DA10E0000F20F1005A10E0000488D7C242466480F6ED0E822050000BFBF20400031C0E886FDFFFF8B7C242485FF7E2131DBF20F1044DC30BFCA204000B8010000004883C301E863FDFFFF395C24247FE1BF0A000000E813FDFFFFF20F101D530E0000F20F1015530E0000488D742430F20F100D4E0E0000F20F10054E0E0000488D7C2424E8B4040000BFBF20400031C0E818FDFFFF8B74242485F67E2131DBF20F1044DC30BFCA204000B8010000004883C301E8F5FCFFFF395C24247FE1BF0A000000E8A5FCFFFFF20F101D050E0000F20F1015050E0000488D742430F20F100D000E0000F20F1005000E0000488D7C2424E846040000BFBF20400031C0E8AAFCFFFF8B4C242485C97E2131DBF20F1044DC30BFCA204000B8010000004883C301E887FCFFFF395C24247FE1BF0A000000E837FCFFFFF20F101DB70D0000F20F1015B70D0000488D742430F20F100DB20D0000F20F1005B20D0000488D7C2424E8D8030000BFBF20400031C0E83CFCFFFF8B54242485D27E2131DBF20F1044DC30BFCA204000B8010000004883C301E819FCFFFF395C24247FE1BF0A00000041BD09000000E8C3FBFFFF488B05940C00004889442410488B05800C000041BE280000004889442418488B05660C00004
1BC1100000048894424080F1F00488B05490C0000BD0900000048890424F20F101C24488D742430488D7C2424F20F10542408F20F104C2418F20F10442410E82A030000BFBF20400031C0E88EFBFFFF8B44242485C07E2131DBF20F1044DC30BFCA204000B8010000004883C301E86BFBFFFF395C24247FE1BF0A000000E81BFBFFFFF20F102424F20F5C25B60C0000F20F11242483ED017584F20F102DAC0C0000F20F586C2408F20F116C24084183EC010F8556FFFFFFF20F107C2418F20F5C3D900C0000F20F117C24184183EE010F8523FFFFFFF20F103D980B0000F20F587C2410F20F117C24104183ED010F85F3FEFFFFBF3020400031DBE8AEFAFFFF4889DF488D742428E8C10500008B54242889DEBFCE20400031C04883C302E8BBFAFFFF4881FBA086010075D4BF0A000000BB6901ED3FE863FAFFFF4889DF488D742428E8860500008B5424284889DE31C0BFDF2040004883C301E87FFAFFFF4881FB6941ED3F75D3BF58204000E83CFAFFFF660FEFD2660F28C2F20F111424E8CA010000F20F101424BF80204000B802000000660F28C8660F28C2E83EFAFFFFF20F101424F20F5815B10B0000F20F103DB10B0000660F2FFA73BBBFEE204000E8E9F9FFFF660FEFD2660F28C2F20F111424E857010000F20F101424BFA0204000B802000000660F28C8660F28C2E8EBF9FFFFF20F101424F20F58156E0B0000F20F103D6E0B0000660F2FFA73BB488B442448644833042528000000750F4883C45031C05B5D415C415D415EC3E89CF9FFFF662E0F1F8400000000006690F30F1EFA31ED4989D15E4889E24883E4F0505449C7C0201C400048C7C1B01B400048C7C7B0104000FF15F2280000F490F30F1EFAC3662E0F1F84000000000090B868404000483D684040007413B8000000004885C07409BF68404000FFE06690C30F1F440000662E0F1F840000000000BE684040004881EE684040004889F048C1EE3F48C1F8034801C648D1FE7411B8000000004885C07407BF68404000FFE0C30F1F440000662E0F1F840000000000803DE1280000007517554889E5E87EFFFFFFC605CF280000015DC30F1F440000C30F1F440000662E0F1F840000000000EB8E662E0F1F8400000000000F1F4000F20F5905480A0000F20F5E05480A0000C366662E0F1F8400000000000F1F4000F20F5905300A0000F20F5E05200A0000C3662E0F1F8400000000000F1F440000F20F5EC8534889F34883EC50F20F5ED0F20F110C24DD0424660FEFC9DB3C24DB2C24F20F5ED8F20F11542418DD442418D9C1D8CAD905E6090000D8CADEE9D905E0090000DCF9F20F115C2418D9C3D8C4D8CCD8CCD9CCDEC9DECAD9CADEE1D905C4090000DC4C2418DEC1D835BC090000D9C1D8CAD8CAD9C1D8CAD
8E1DD5C2418F20F10442418660F2FC80F8398000000DDD8660F2EC8660F28D0C70701000000F20F51D20F87B6010000D9C9DB7C2430F20F100D90090000DD542418F20F10442418660F540586090000DB7C2420F20F58C2E879F7FFFFF20F11442418DD442418DB6C2430D8F1DEC1DD5C2418DB6C2420D9EEDFF1DDD87714F20F107C2418660F573D59090000F20F117C2418DB2C24D8350A090000DC6C2418DD1B4883C4505BC3660F1F440000DD5C2418F20F10442418C70703000000660F28F0660F2EC8F20F51F6F20F117424180F8736010000D9C9DB7C2420DC742418DD5C2418F20F10442418E827F7FFFFDB6C2420660FEFC9F20F11442418DD5C2420F20F10542420660F2ECA660F28DAF20F51DB0F870D010000F20F102DD5070000F20F591D8D080000F20F5EC5F20F116C2430F20F115C2420E8C8F6FFFFDB2C24F20F11442440F20F10442418D83553080000F20F580563080000F20F5E442430DB3C24E89DF6FFFFF20F104C2440F20F10642420DB2C24660F28D0F20F59CCF20F59D4D9C0F20F114C2440DC6C2440DD5C2440F20F10442440F20F11542440DC6C2440DD5C2440660F164424400F1103F20F10442418F20F580507080000F20F5E442430E83CF6FFFFF20F59442420DB2C24F20F11442418DC6C2418DD5B104883C4505BC3DB7C2430F20F11542418DB7C2420E82DF6FFFFF20F10542418DB6C2430DB6C2420E926FEFFFFDB7C2430DB7C2420E80DF6FFFFDB6C2430DB6C2420E9B2FEFFFF660F28C2F20F11542448F20F115C2420E8EBF5FFFFF20F103DB3060000F20F10442418F20F105C2420F20F591D5F070000F20F5EC7F20F117C2430F20F115C2420E89AF5FFFFDB2C24F20F10742420F20F10542448D83525070000F20F59F0660F28C2F20F11742440D9C0DB3C24DC6C2440DD1BE887F5FFFFF20F10442418F20F580511070000F20F5E442430E84EF5FFFFDB2C24F20F10542448F20F59442420F20F11442440DC6C2440660F28C2DD5B08E849F5FFFFE9CFFEFFFF0F1F400041B82000000031C031D2660F1F4400004889F948C1E70248C1E91E83E103488D1491488D0C85010000004801C04839CA72074829CA4883C0014183E80175D1488906C3662E0F1F8400000000000F1F00F30F1EFA41574C8D3D4322000041564989D641554989F541544189FC55488D2D34220000534C29FD4883EC08E81FF4FFFF48C1FD03741F31DB0F1F80000000004C89F24C89EE4489E741FF14DF4883C3014839DD75EA4883C4085B5D415C415D415E415FC366662E0F1F840000000000F30F1EFAC3
+ - Name: .fini
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ Address: 0x401C28
+ AddressAlign: 0x4
+ Content: F30F1EFA4883EC084883C408C3
+ - Name: .rodata
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0x402000
+ AddressAlign: 0x10
+ Offset: 0x2000
+ Content: 01000200000000002A2A2A2A2A2A2A2A2A2043554249432046554E4354494F4E53202A2A2A2A2A2A2A2A2A2A2A0000002A2A2A2A2A2A2A2A2A20494E54454745522053515220524F4F5453202A2A2A2A2A2A2A2A2A2A2A002A2A2A2A2A2A2A2A2A20414E474C4520434F4E56455253494F4E202A2A2A2A2A2A2A2A2A2A2A000025332E30662064656772656573203D20252E3132662072616469616E730A0000252E3132662072616469616E73203D2025332E306620646567726565730A00536F6C7574696F6E733A0020256600737172742825336429203D202532640A007371727428256C5829203D2025580A0000000000000000F0BF00000000000014400000000000002440000000000000F03F0000000000003EC0000000000000404000000000000025C0000000000000314000000000000012C00000000000003FC000000000000036400000000000000CC000000000008041C06666666666662BC00000000000002840AE47E17A14AE284000000000000008409A999999999937C00000000000001840295C8FC2F5F850C000000000000020C000000000000041400000000000001E40D7A3703D0A572140000000000080464000000000000030403333333333331540333333333333FBBF00000000000028C077BE9F1A2FDDDC3F85EB51B81E85E33F000000000000D03FFCA9F1D24D62503F0000000000807640399D52A246DF413F9B0B6097FB2119400000000000806640182D4454FB21094000004040000010410000D8410000584200000000000000C0182D4454FB211940182D4454FB212940555555555555D53FFFFFFFFFFFFFFF7F000000000000000000000000000000800000000000000000
+ - Name: .eh_frame_hdr
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0x402270
+ AddressAlign: 0x4
+ Content: 011B033B5C0000000A000000B0EDFFFFA000000040EEFFFFC800000060F4FFFF7800000090F4FFFF8C00000050F5FFFF1001000070F5FFFF2401000090F5FFFF38010000F0F8FFFF6801000040F9FFFF80010000B0F9FFFFC8010000
+ - Name: .eh_frame
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC ]
+ Address: 0x4022D0
+ AddressAlign: 0x8
+ Content: 1400000000000000017A5200017810011B0C070890010000100000001C000000E0F3FFFF2F000000004407101000000030000000FCF3FFFF0500000000000000240000004400000008EDFFFF90000000000E10460E184A0F0B770880003F1A3B2A33242200000000440000006C00000070EDFFFF1406000000420E108E02470E188D03420E208C04410E288605410E308306440E800103F3050A0E30430E28410E20420E18420E10420E08410B00000010000000B400000038F4FFFF110000000000000010000000C800000044F4FFFF11000000000000002C000000DC00000050F4FFFF5C03000000450E108302470E600314010A0E10410E08470B0336010A0E10410E08410B00140000000C01000080F7FFFF4300000000000000000000004400000024010000B8F7FFFF6500000000460E108F02490E188E03450E208D04450E288C05440E308606480E388307470E406E0E38410E30410E28420E20420E18420E10420E0800100000006C010000E0F7FFFF050000000000000000000000
+ - Name: .init_array
+ Type: SHT_INIT_ARRAY
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403E00
+ AddressAlign: 0x8
+ EntSize: 0x8
+ Offset: 0x2E00
+ Content: B017400000000000
+ - Name: .fini_array
+ Type: SHT_FINI_ARRAY
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403E08
+ AddressAlign: 0x8
+ EntSize: 0x8
+ Content: '8017400000000000'
+ - Name: .dynamic
+ Type: SHT_DYNAMIC
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403E10
+ Link: .dynstr
+ AddressAlign: 0x8
+ Entries:
+ - Tag: DT_NEEDED
+ Value: 0x1
+ - Tag: DT_NEEDED
+ Value: 0x28
+ - Tag: DT_INIT
+ Value: 0x401000
+ - Tag: DT_FINI
+ Value: 0x401C28
+ - Tag: DT_INIT_ARRAY
+ Value: 0x403E00
+ - Tag: DT_INIT_ARRAYSZ
+ Value: 0x8
+ - Tag: DT_FINI_ARRAY
+ Value: 0x403E08
+ - Tag: DT_FINI_ARRAYSZ
+ Value: 0x8
+ - Tag: DT_GNU_HASH
+ Value: 0x400308
+ - Tag: DT_STRTAB
+ Value: 0x400430
+ - Tag: DT_SYMTAB
+ Value: 0x400328
+ - Tag: DT_STRSZ
+ Value: 0x8A
+ - Tag: DT_SYMENT
+ Value: 0x18
+ - Tag: DT_DEBUG
+ Value: 0x0
+ - Tag: DT_PLTGOT
+ Value: 0x404000
+ - Tag: DT_PLTRELSZ
+ Value: 0xC0
+ - Tag: DT_PLTREL
+ Value: 0x7
+ - Tag: DT_JMPREL
+ Value: 0x400560
+ - Tag: DT_RELA
+ Value: 0x400530
+ - Tag: DT_RELASZ
+ Value: 0x30
+ - Tag: DT_RELAENT
+ Value: 0x18
+ - Tag: DT_VERNEED
+ Value: 0x4004D0
+ - Tag: DT_VERNEEDNUM
+ Value: 0x2
+ - Tag: DT_VERSYM
+ Value: 0x4004BA
+ - Tag: DT_NULL
+ Value: 0x0
+ - Tag: DT_NULL
+ Value: 0x0
+ - Tag: DT_NULL
+ Value: 0x0
+ - Tag: DT_NULL
+ Value: 0x0
+ - Tag: DT_NULL
+ Value: 0x0
+ - Tag: DT_NULL
+ Value: 0x0
+ - Name: .got
+ Type: SHT_PROGBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x403FF0
+ AddressAlign: 0x8
+ EntSize: 0x8
+ Content: '00000000000000000000000000000000'
+ - Name: .got.plt
+ Type: SHT_PROGBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404000
+ AddressAlign: 0x8
+ EntSize: 0x8
+ Content: 103E400000000000000000000000000000000000000000003610400000000000461040000000000056104000000000006610400000000000761040000000000086104000000000009610400000000000A610400000000000
+ - Name: .data
+ Type: SHT_PROGBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404058
+ AddressAlign: 0x8
+ Content: '00000000000000000000000000000000'
+ - Name: .tm_clone_table
+ Type: SHT_PROGBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404068
+ AddressAlign: 0x8
+ - Name: .bss
+ Type: SHT_NOBITS
+ Flags: [ SHF_WRITE, SHF_ALLOC ]
+ Address: 0x404068
+ AddressAlign: 0x1
+ Size: 0x8
+ - Name: .comment
+ Type: SHT_PROGBITS
+ Flags: [ SHF_MERGE, SHF_STRINGS ]
+ AddressAlign: 0x1
+ EntSize: 0x1
+ Content: 4743433A20285562756E747520392E342E302D317562756E7475317E31362E30342920392E342E3000
+ - Name: .rela.init
+ Type: SHT_RELA
+ Flags: [ SHF_INFO_LINK ]
+ Link: .symtab
+ AddressAlign: 0x8
+ Info: .init
+ Relocations:
+ - Offset: 0x40100B
+ Symbol: __gmon_start__
+ Type: R_X86_64_REX_GOTPCRELX
+ Addend: -4
+ - Name: .rela.text
+ Type: SHT_RELA
+ Flags: [ SHF_INFO_LINK ]
+ Link: .symtab
+ AddressAlign: 0x8
+ Info: .text
+ Relocations:
+ - Offset: 0x4010B3
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 8
+ - Offset: 0x4010D2
+ Symbol: 'puts@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4010E3
+ Symbol: .LC6
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4010EB
+ Symbol: .LC7
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4010F3
+ Symbol: .LC8
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4010FF
+ Symbol: .LC4
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401109
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40110E
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x401115
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40112C
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x40113A
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40114A
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40115B
+ Symbol: .LC6
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401163
+ Symbol: .LC11
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40116B
+ Symbol: .LC12
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401177
+ Symbol: .LC4
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401181
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401186
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x40118D
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4011A4
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x4011B2
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4011C2
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4011C9
+ Symbol: .LC4
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4011D1
+ Symbol: .LC13
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4011DE
+ Symbol: .LC14
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4011E6
+ Symbol: .LC15
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4011F5
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4011FA
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x401201
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401218
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x401226
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401236
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401247
+ Symbol: .LC4
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40124F
+ Symbol: .LC16
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401257
+ Symbol: .LC17
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401266
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40126B
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x401272
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401289
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x401297
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4012A7
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4012AE
+ Symbol: .LC2
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4012B6
+ Symbol: .LC18
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4012C3
+ Symbol: .LC19
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4012CB
+ Symbol: .LC20
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4012DA
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4012DF
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x4012E6
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4012FB
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x401309
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401319
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401321
+ Symbol: .LC21
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401329
+ Symbol: .LC22
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401336
+ Symbol: .LC23
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40133E
+ Symbol: .LC24
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401348
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40134D
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x401354
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401369
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x401377
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401387
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40138F
+ Symbol: .LC25
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401397
+ Symbol: .LC26
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4013A4
+ Symbol: .LC27
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4013AC
+ Symbol: .LC28
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4013B6
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4013BB
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x4013C2
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4013D7
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x4013E5
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4013F5
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4013FD
+ Symbol: .LC29
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401405
+ Symbol: .LC30
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401412
+ Symbol: .LC31
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40141A
+ Symbol: .LC32
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401424
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401429
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x401430
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401445
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x401453
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401469
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401470
+ Symbol: .LC4
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40147C
+ Symbol: .LC3
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40148E
+ Symbol: .LC2
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4014A3
+ Symbol: .LC0
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4014D2
+ Symbol: SolveCubic
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4014D7
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 191
+ - Offset: 0x4014DE
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4014F3
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 202
+ - Offset: 0x401501
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401511
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40151E
+ Symbol: .LC33
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401530
+ Symbol: .LC34
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401554
+ Symbol: .LC35
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40156C
+ Symbol: .LC4
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401587
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 48
+ - Offset: 0x40158E
+ Symbol: 'puts@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40159B
+ Symbol: usqrt
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4015A6
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 206
+ - Offset: 0x4015B1
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4015C9
+ Symbol: 'putchar@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4015D6
+ Symbol: usqrt
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4015E4
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 223
+ - Offset: 0x4015ED
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4015FB
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 88
+ - Offset: 0x401600
+ Symbol: 'puts@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401612
+ Symbol: deg2rad
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40161C
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 128
+ - Offset: 0x40162E
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40163B
+ Symbol: .LC41
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401643
+ Symbol: .LC42
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40164E
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 238
+ - Offset: 0x401653
+ Symbol: 'puts@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401665
+ Symbol: rad2deg
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40166F
+ Symbol: .rodata
+ Type: R_X86_64_32
+ Addend: 160
+ - Offset: 0x401681
+ Symbol: 'printf@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x40168E
+ Symbol: .LC45
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401696
+ Symbol: .LC46
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4016C0
+ Symbol: '__stack_chk_fail@@GLIBC_2.4'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4016E6
+ Symbol: __libc_csu_fini
+ Type: R_X86_64_32S
+ - Offset: 0x4016ED
+ Symbol: __libc_csu_init
+ Type: R_X86_64_32S
+ - Offset: 0x4016F4
+ Symbol: main
+ Type: R_X86_64_32S
+ - Offset: 0x4016FA
+ Symbol: '__libc_start_main@@GLIBC_2.2.5'
+ Type: R_X86_64_GOTPCRELX
+ Addend: -4
+ - Offset: 0x401711
+ Symbol: __TMC_END__
+ Type: R_X86_64_32
+ - Offset: 0x401717
+ Symbol: .tm_clone_table
+ Type: R_X86_64_32S
+ - Offset: 0x40171E
+ Symbol: _ITM_deregisterTMCloneTable
+ Type: R_X86_64_32
+ - Offset: 0x401728
+ Symbol: .tm_clone_table
+ Type: R_X86_64_32
+ - Offset: 0x401741
+ Symbol: __TMC_END__
+ Type: R_X86_64_32
+ - Offset: 0x401748
+ Symbol: .tm_clone_table
+ Type: R_X86_64_32S
+ - Offset: 0x401760
+ Symbol: _ITM_registerTMCloneTable
+ Type: R_X86_64_32
+ - Offset: 0x40176A
+ Symbol: .tm_clone_table
+ Type: R_X86_64_32
+ - Offset: 0x401782
+ Symbol: .bss
+ Type: R_X86_64_PC32
+ Addend: -5
+ - Offset: 0x401794
+ Symbol: .bss
+ Type: R_X86_64_PC32
+ Addend: -5
+ - Offset: 0x4017C4
+ Symbol: '.LC0 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4017CC
+ Symbol: .LC1
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4017E4
+ Symbol: .LC1
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4017EC
+ Symbol: '.LC0 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401836
+ Symbol: '.LC0 (2)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401840
+ Symbol: '.LC1 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401860
+ Symbol: '.LC2 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40186C
+ Symbol: '.LC3 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4018B4
+ Symbol: .LC9
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4018C6
+ Symbol: .LC10
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4018D3
+ Symbol: 'pow@@GLIBC_2.29'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401903
+ Symbol: '.LC12 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401912
+ Symbol: '.LC0 (2)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401965
+ Symbol: 'acos@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401997
+ Symbol: '.LC6 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x40199F
+ Symbol: .LC5
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4019B4
+ Symbol: 'cos@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x4019C9
+ Symbol: '.LC0 (2)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4019D1
+ Symbol: '.LC7 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x4019DF
+ Symbol: 'cos@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401A35
+ Symbol: '.LC8 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401A40
+ Symbol: 'cos@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401A6F
+ Symbol: 'sqrt@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401A8F
+ Symbol: 'sqrt@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401AB1
+ Symbol: 'sqrt@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401AB9
+ Symbol: '.LC6 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401ACD
+ Symbol: .LC5
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401AE2
+ Symbol: 'cos@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401AF7
+ Symbol: '.LC0 (2)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401B15
+ Symbol: 'sqrt@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401B23
+ Symbol: '.LC7 (1)'
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401B2E
+ Symbol: 'cos@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401B53
+ Symbol: 'sqrt@@GLIBC_2.2.5'
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Offset: 0x401BB9
+ Symbol: __init_array_start
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401BD0
+ Symbol: __init_array_end
+ Type: R_X86_64_PC32
+ Addend: -4
+ - Offset: 0x401BDD
+ Symbol: _init
+ Type: R_X86_64_PLT32
+ Addend: -4
+ - Name: .rela.eh_frame
+ Type: SHT_RELA
+ Flags: [ SHF_INFO_LINK ]
+ Link: .symtab
+ AddressAlign: 0x8
+ Info: .eh_frame
+ Relocations:
+ - Offset: 0x4022F0
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 1568
+ - Offset: 0x402304
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 1616
+ - Offset: 0x402340
+ Symbol: .text
+ Type: R_X86_64_PC32
+ - Offset: 0x402388
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 1808
+ - Offset: 0x40239C
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 1840
+ - Offset: 0x4023B0
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 1872
+ - Offset: 0x4023E0
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 2736
+ - Offset: 0x4023F8
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 2816
+ - Offset: 0x402440
+ Symbol: .text
+ Type: R_X86_64_PC32
+ Addend: 2928
+ - Name: .rela.init_array
+ Type: SHT_RELA
+ Flags: [ SHF_INFO_LINK ]
+ Link: .symtab
+ AddressAlign: 0x8
+ Info: .init_array
+ Relocations:
+ - Offset: 0x403E00
+ Symbol: .text
+ Type: R_X86_64_64
+ Addend: 1792
+ - Name: .rela.fini_array
+ Type: SHT_RELA
+ Flags: [ SHF_INFO_LINK ]
+ Link: .symtab
+ AddressAlign: 0x8
+ Info: .fini_array
+ Relocations:
+ - Offset: 0x403E08
+ Symbol: .text
+ Type: R_X86_64_64
+ Addend: 1744
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .interp
+ - Name: .note.gnu.build-id
+ - Name: .note.ABI-tag
+ - Name: .gnu.hash
+ - Name: .dynsym
+ - Name: .dynstr
+ - Name: .gnu.version
+ - Name: .gnu.version_r
+ - Name: .rela.dyn
+ - Name: .rela.plt
+ - Name: .init
+ - Name: .rela.init
+ - Name: .plt
+ - Name: .text
+ - Name: .rela.text
+ - Name: .fini
+ - Name: .rodata
+ - Name: .eh_frame_hdr
+ - Name: .eh_frame
+ - Name: .rela.eh_frame
+ - Name: .init_array
+ - Name: .rela.init_array
+ - Name: .fini_array
+ - Name: .rela.fini_array
+ - Name: .dynamic
+ - Name: .got
+ - Name: .got.plt
+ - Name: .data
+ - Name: .tm_clone_table
+ - Name: .bss
+ - Name: .comment
+ - Name: .symtab
+ - Name: .strtab
+ - Name: .shstrtab
+Symbols:
+ - Name: .interp
+ Type: STT_SECTION
+ Section: .interp
+ Value: 0x4002A8
+ - Name: .note.gnu.build-id
+ Type: STT_SECTION
+ Section: .note.gnu.build-id
+ Value: 0x4002C4
+ - Name: .note.ABI-tag
+ Type: STT_SECTION
+ Section: .note.ABI-tag
+ Value: 0x4002E8
+ - Name: .gnu.hash
+ Type: STT_SECTION
+ Section: .gnu.hash
+ Value: 0x400308
+ - Name: .dynsym
+ Type: STT_SECTION
+ Section: .dynsym
+ Value: 0x400328
+ - Name: .dynstr
+ Type: STT_SECTION
+ Section: .dynstr
+ Value: 0x400430
+ - Name: .gnu.version
+ Type: STT_SECTION
+ Section: .gnu.version
+ Value: 0x4004BA
+ - Name: .gnu.version_r
+ Type: STT_SECTION
+ Section: .gnu.version_r
+ Value: 0x4004D0
+ - Name: .rela.dyn
+ Type: STT_SECTION
+ Section: .rela.dyn
+ Value: 0x400530
+ - Name: .rela.plt
+ Type: STT_SECTION
+ Section: .rela.plt
+ Value: 0x400560
+ - Name: .init
+ Type: STT_SECTION
+ Section: .init
+ Value: 0x401000
+ - Name: .plt
+ Type: STT_SECTION
+ Section: .plt
+ Value: 0x401020
+ - Name: .text
+ Type: STT_SECTION
+ Section: .text
+ Value: 0x4010B0
+ - Name: .fini
+ Type: STT_SECTION
+ Section: .fini
+ Value: 0x401C28
+ - Name: .rodata
+ Type: STT_SECTION
+ Section: .rodata
+ Value: 0x402000
+ - Name: .eh_frame_hdr
+ Type: STT_SECTION
+ Section: .eh_frame_hdr
+ Value: 0x402270
+ - Name: .eh_frame
+ Type: STT_SECTION
+ Section: .eh_frame
+ Value: 0x4022D0
+ - Name: .init_array
+ Type: STT_SECTION
+ Section: .init_array
+ Value: 0x403E00
+ - Name: .fini_array
+ Type: STT_SECTION
+ Section: .fini_array
+ Value: 0x403E08
+ - Name: .dynamic
+ Type: STT_SECTION
+ Section: .dynamic
+ Value: 0x403E10
+ - Name: .got
+ Type: STT_SECTION
+ Section: .got
+ Value: 0x403FF0
+ - Name: .got.plt
+ Type: STT_SECTION
+ Section: .got.plt
+ Value: 0x404000
+ - Name: .data
+ Type: STT_SECTION
+ Section: .data
+ Value: 0x404058
+ - Name: .tm_clone_table
+ Type: STT_SECTION
+ Section: .tm_clone_table
+ Value: 0x404068
+ - Name: .bss
+ Type: STT_SECTION
+ Section: .bss
+ Value: 0x404068
+ - Name: .comment
+ Type: STT_SECTION
+ Section: .comment
+ - Name: basicmath_large.c
+ Type: STT_FILE
+ Index: SHN_ABS
+ - Name: .LC6
+ Section: .rodata
+ Value: 0x402110
+ - Name: .LC7
+ Section: .rodata
+ Value: 0x402118
+ - Name: .LC8
+ Section: .rodata
+ Value: 0x402120
+ - Name: .LC4
+ Section: .rodata
+ Value: 0x402108
+ - Name: .LC11
+ Section: .rodata
+ Value: 0x402128
+ - Name: .LC12
+ Section: .rodata
+ Value: 0x402130
+ - Name: .LC13
+ Section: .rodata
+ Value: 0x402138
+ - Name: .LC14
+ Section: .rodata
+ Value: 0x402140
+ - Name: .LC15
+ Section: .rodata
+ Value: 0x402148
+ - Name: .LC16
+ Section: .rodata
+ Value: 0x402150
+ - Name: .LC17
+ Section: .rodata
+ Value: 0x402158
+ - Name: .LC2
+ Section: .rodata
+ Value: 0x4020F8
+ - Name: .LC18
+ Section: .rodata
+ Value: 0x402160
+ - Name: .LC19
+ Section: .rodata
+ Value: 0x402168
+ - Name: .LC20
+ Section: .rodata
+ Value: 0x402170
+ - Name: .LC21
+ Section: .rodata
+ Value: 0x402178
+ - Name: .LC22
+ Section: .rodata
+ Value: 0x402180
+ - Name: .LC23
+ Section: .rodata
+ Value: 0x402188
+ - Name: .LC24
+ Section: .rodata
+ Value: 0x402190
+ - Name: .LC25
+ Section: .rodata
+ Value: 0x402198
+ - Name: .LC26
+ Section: .rodata
+ Value: 0x4021A0
+ - Name: .LC27
+ Section: .rodata
+ Value: 0x4021A8
+ - Name: .LC28
+ Section: .rodata
+ Value: 0x4021B0
+ - Name: .LC29
+ Section: .rodata
+ Value: 0x4021B8
+ - Name: .LC30
+ Section: .rodata
+ Value: 0x4021C0
+ - Name: .LC31
+ Section: .rodata
+ Value: 0x4021C8
+ - Name: .LC32
+ Section: .rodata
+ Value: 0x4021D0
+ - Name: .LC3
+ Section: .rodata
+ Value: 0x402100
+ - Name: .LC0
+ Section: .rodata
+ Value: 0x4020F0
+ - Name: .LC33
+ Section: .rodata
+ Value: 0x4021D8
+ - Name: .LC34
+ Section: .rodata
+ Value: 0x4021E0
+ - Name: .LC35
+ Section: .rodata
+ Value: 0x4021E8
+ - Name: .LC41
+ Section: .rodata
+ Value: 0x4021F0
+ - Name: .LC42
+ Section: .rodata
+ Value: 0x4021F8
+ - Name: .LC45
+ Section: .rodata
+ Value: 0x402200
+ - Name: .LC46
+ Section: .rodata
+ Value: 0x402208
+ - Name: crtstuff.c
+ Type: STT_FILE
+ Index: SHN_ABS
+ - Name: __TMC_LIST__
+ Type: STT_OBJECT
+ Section: .tm_clone_table
+ Value: 0x404068
+ - Name: deregister_tm_clones
+ Type: STT_FUNC
+ Section: .text
+ Value: 0x401710
+ - Name: register_tm_clones
+ Type: STT_FUNC
+ Section: .text
+ Value: 0x401740
+ - Name: __do_global_dtors_aux
+ Type: STT_FUNC
+ Section: .text
+ Value: 0x401780
+ - Name: completed.8023
+ Type: STT_OBJECT
+ Section: .bss
+ Value: 0x404068
+ Size: 0x1
+ - Name: __do_global_dtors_aux_fini_array_entry
+ Type: STT_OBJECT
+ Section: .fini_array
+ Value: 0x403E08
+ - Name: frame_dummy
+ Type: STT_FUNC
+ Section: .text
+ Value: 0x4017B0
+ - Name: __frame_dummy_init_array_entry
+ Type: STT_OBJECT
+ Section: .init_array
+ Value: 0x403E00
+ - Name: rad2deg.c
+ Type: STT_FILE
+ Index: SHN_ABS
+ - Name: '.LC0 (1)'
+ Section: .rodata
+ Value: 0x402210
+ - Name: .LC1
+ Section: .rodata
+ Value: 0x402218
+ - Name: cubic.c
+ Type: STT_FILE
+ Index: SHN_ABS
+ - Name: '.LC0 (2)'
+ Section: .rodata
+ Value: 0x402220
+ - Name: '.LC1 (1)'
+ Section: .rodata
+ Value: 0x402224
+ - Name: '.LC2 (1)'
+ Section: .rodata
+ Value: 0x402228
+ - Name: '.LC3 (1)'
+ Section: .rodata
+ Value: 0x40222C
+ - Name: .LC9
+ Section: .rodata
+ Value: 0x402248
+ - Name: .LC10
+ Section: .rodata
+ Value: 0x402250
+ - Name: '.LC12 (1)'
+ Section: .rodata
+ Value: 0x402260
+ - Name: '.LC6 (1)'
+ Section: .rodata
+ Value: 0x402170
+ - Name: .LC5
+ Section: .rodata
+ Value: 0x402230
+ - Name: '.LC7 (1)'
+ Section: .rodata
+ Value: 0x402238
+ - Name: '.LC8 (1)'
+ Section: .rodata
+ Value: 0x402240
+ - Name: isqrt.c
+ Type: STT_FILE
+ Index: SHN_ABS
+ - Name: 'crtstuff.c (1)'
+ Type: STT_FILE
+ Index: SHN_ABS
+ - Name: __FRAME_END__
+ Type: STT_OBJECT
+ Section: .eh_frame
+ Value: 0x40244C
+ - Type: STT_FILE
+ Index: SHN_ABS
+ - Name: __init_array_end
+ Section: .init_array
+ Value: 0x403E08
+ - Name: _DYNAMIC
+ Type: STT_OBJECT
+ Section: .dynamic
+ Value: 0x403E10
+ - Name: __init_array_start
+ Section: .init_array
+ Value: 0x403E00
+ - Name: __GNU_EH_FRAME_HDR
+ Section: .eh_frame_hdr
+ Value: 0x402270
+ - Name: _GLOBAL_OFFSET_TABLE_
+ Type: STT_OBJECT
+ Section: .got.plt
+ Value: 0x404000
+ - Name: __libc_csu_fini
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401C20
+ Size: 0x5
+ - Name: 'putchar@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: _ITM_deregisterTMCloneTable
+ Binding: STB_WEAK
+ - Name: data_start
+ Section: .data
+ Binding: STB_WEAK
+ Value: 0x404058
+ - Name: 'puts@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: usqrt
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401B60
+ Size: 0x43
+ - Name: _edata
+ Section: .tm_clone_table
+ Binding: STB_GLOBAL
+ Value: 0x404068
+ - Name: 'pow@@GLIBC_2.29'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: _fini
+ Type: STT_FUNC
+ Section: .fini
+ Binding: STB_GLOBAL
+ Value: 0x401C28
+ Other: [ STV_HIDDEN ]
+ - Name: '__stack_chk_fail@@GLIBC_2.4'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: 'printf@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: 'cos@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: 'acos@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: '__libc_start_main@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: deg2rad
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x4017E0
+ Size: 0x11
+ - Name: __data_start
+ Section: .data
+ Binding: STB_GLOBAL
+ Value: 0x404058
+ - Name: SolveCubic
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401800
+ Size: 0x35C
+ - Name: __gmon_start__
+ Binding: STB_WEAK
+ - Name: __dso_handle
+ Type: STT_OBJECT
+ Section: .data
+ Binding: STB_GLOBAL
+ Value: 0x404060
+ Other: [ STV_HIDDEN ]
+ - Name: _IO_stdin_used
+ Type: STT_OBJECT
+ Section: .rodata
+ Binding: STB_GLOBAL
+ Value: 0x402000
+ Size: 0x4
+ - Name: __libc_csu_init
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401BB0
+ Size: 0x65
+ - Name: _end
+ Section: .bss
+ Binding: STB_GLOBAL
+ Value: 0x404070
+ - Name: _dl_relocate_static_pie
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x401700
+ Size: 0x5
+ Other: [ STV_HIDDEN ]
+ - Name: _start
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x4016D0
+ Size: 0x2F
+ - Name: rad2deg
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x4017C0
+ Size: 0x11
+ - Name: __bss_start
+ Section: .bss
+ Binding: STB_GLOBAL
+ Value: 0x404068
+ - Name: main
+ Type: STT_FUNC
+ Section: .text
+ Binding: STB_GLOBAL
+ Value: 0x4010B0
+ Size: 0x614
+ - Name: __TMC_END__
+ Type: STT_OBJECT
+ Section: .tm_clone_table
+ Binding: STB_GLOBAL
+ Value: 0x404068
+ Other: [ STV_HIDDEN ]
+ - Name: _ITM_registerTMCloneTable
+ Binding: STB_WEAK
+ - Name: 'sqrt@@GLIBC_2.2.5'
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: _init
+ Type: STT_FUNC
+ Section: .init
+ Binding: STB_GLOBAL
+ Value: 0x401000
+ Other: [ STV_HIDDEN ]
+DynamicSymbols:
+ - Name: putchar
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: puts
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: pow
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: __stack_chk_fail
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: printf
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: cos
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: acos
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: __libc_start_main
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+ - Name: __gmon_start__
+ Binding: STB_WEAK
+ - Name: sqrt
+ Type: STT_FUNC
+ Binding: STB_GLOBAL
+...
diff --git a/bolt/test/X86/Inputs/blarge_new_bat.preagg.txt b/bolt/test/X86/Inputs/blarge_new_bat.preagg.txt
new file mode 100644
index 000000000000..e9e4553aad95
--- /dev/null
+++ b/bolt/test/X86/Inputs/blarge_new_bat.preagg.txt
@@ -0,0 +1,79 @@
+B 40169e 40165b 7 0
+B 401664 800012 7 0
+B 401680 401070 7 0
+B 800022 401669 7 0
+B 401611 800000 121 0
+B 40162d 401070 119 0
+B 4015d5 800040 2 0
+B 800080 4015da 6 0
+B 40164b 401608 115 0
+B 800080 40159f 24 0
+B 4015ec 401070 6 0
+B 8001d0 401090 1 0
+B 4014d1 800082 25 0
+B 401510 401030 616 0
+B 8002ab 401080 1 0
+B 80007b 80004c 483 1
+B 800072 80004c 597 77
+B 80010c 800194 1 0
+B 401509 4014ec 1 0
+B 800010 401616 119 0
+B 80024a 401080 1 0
+B 800154 401050 20 0
+B 4014dd 401070 9 0
+B 80021f 401080 1 0
+B 800193 4014d6 8 0
+B 40159a 800040 19 0
+B 4015f8 4015cd 2 0
+B 40152a 4014b0 24 0
+B 401500 401070 15 0
+B 4015bc 401592 21 0
+B 401544 4014a0 1 0
+B 80004a 800052 24 0
+B 4015b0 401070 20 0
+B 800050 80007d 29 29
+F 401685 40169e 7
+F 4014a0 4014d1 1
+F 401090 401090 1
+F 401050 401050 20
+F 40159f 4015b0 20
+F 80007d 800080 27
+F 401515 401544 1
+F 4014e2 401500 13
+F 401592 40159a 19
+F 401505 401509 1
+F 4014b0 4014d1 24
+F 800194 8001d0 1
+F 8001d5 80021f 1
+F 401616 40162d 114
+F 80024f 8002ab 1
+F 800159 800193 7
+F 80004c 800050 26
+F 800224 80024a 1
+F 800082 80010c 1
+F 401080 401080 3
+F 401070 401070 168
+F 80004c 800072 555
+F 401616 800010 2
+F 401070 40162d 4
+F 800082 800154 20
+F 401669 401680 7
+F 40159f 800080 1
+F 4014ec 401500 1
+F 800012 800022 7
+F 401030 401030 616
+F 80004c 80007b 473
+F 800052 800072 24
+F 800040 80004a 21
+F 4015b5 4015bc 18
+F 4015cd 4015d5 2
+F 401592 4015bc 1
+F 4015da 4015ec 6
+F 4015f1 4015f8 2
+F 800000 800010 116
+F 401608 401611 115
+F 401632 40164b 114
+F 401515 40152a 24
+F 40165b 401664 7
+F 401505 401510 612
+F 4014d6 4014dd 8
diff --git a/bolt/test/X86/bolt-address-translation-yaml.test b/bolt/test/X86/bolt-address-translation-yaml.test
new file mode 100644
index 000000000000..7fdf7709a8b9
--- /dev/null
+++ b/bolt/test/X86/bolt-address-translation-yaml.test
@@ -0,0 +1,64 @@
+# Check new BAT format containing hashes for YAML profile.
+
+RUN: yaml2obj %p/Inputs/blarge_new.yaml &> %t.exe
+RUN: llvm-bolt %t.exe -o %t.out --pa -p %p/Inputs/blarge_new.preagg.txt \
+RUN: --reorder-blocks=ext-tsp --split-functions --split-strategy=cdsplit \
+RUN: --reorder-functions=cdsort --enable-bat --dyno-stats --skip-funcs=main \
+RUN: 2>&1 | FileCheck --check-prefix WRITE-BAT-CHECK %s
+RUN: perf2bolt %t.out --pa -p %p/Inputs/blarge_new_bat.preagg.txt -w %t.yaml -o %t.fdata \
+RUN: 2>&1 | FileCheck --check-prefix READ-BAT-CHECK %s
+RUN: FileCheck --input-file %t.yaml --check-prefix YAML-BAT-CHECK %s
+# Check that YAML converted from fdata matches YAML created directly with BAT.
+RUN: llvm-bolt %t.exe -data %t.fdata -w %t.yaml-fdata -o /dev/null
+RUN: FileCheck --input-file %t.yaml-fdata --check-prefix YAML-BAT-CHECK %s
+
+# Test resulting YAML profile with the original binary (no-stale mode)
+RUN: llvm-bolt %t.exe -data %t.yaml -o %t.null -dyno-stats \
+RUN: | FileCheck --check-prefix CHECK-BOLT-YAML %s
+
+WRITE-BAT-CHECK: BOLT-INFO: Wrote 5 BAT maps
+WRITE-BAT-CHECK: BOLT-INFO: Wrote 4 function and 22 basic block hashes
+WRITE-BAT-CHECK: BOLT-INFO: BAT section size (bytes): 384
+
+READ-BAT-CHECK-NOT: BOLT-ERROR: unable to save profile in YAML format for input file processed by BOLT
+READ-BAT-CHECK: BOLT-INFO: Parsed 5 BAT entries
+READ-BAT-CHECK: PERF2BOLT: read 79 aggregated LBR entries
+
+YAML-BAT-CHECK: functions:
+# Function not covered by BAT - has insns in basic block
+YAML-BAT-CHECK: - name: main
+YAML-BAT-CHECK-NEXT: fid: 2
+YAML-BAT-CHECK-NEXT: hash: 0x9895746D48B2C876
+YAML-BAT-CHECK-NEXT: exec: 0
+YAML-BAT-CHECK-NEXT: nblocks: 46
+YAML-BAT-CHECK-NEXT: blocks:
+YAML-BAT-CHECK-NEXT: - bid: 0
+YAML-BAT-CHECK-NEXT: insns: 26
+YAML-BAT-CHECK-NEXT: hash: 0xA900AE79CFD40000
+YAML-BAT-CHECK-NEXT: succ: [ { bid: 3, cnt: 0 }, { bid: 1, cnt: 0 } ]
+# Function covered by BAT with calls
+YAML-BAT-CHECK: - name: SolveCubic
+YAML-BAT-CHECK-NEXT: fid: [[#]]
+YAML-BAT-CHECK-NEXT: hash: 0x6AF7E61EA3966722
+YAML-BAT-CHECK-NEXT: exec: 25
+YAML-BAT-CHECK-NEXT: nblocks: 15
+YAML-BAT-CHECK-NEXT: blocks:
+YAML-BAT-CHECK: - bid: 3
+YAML-BAT-CHECK-NEXT: insns: [[#]]
+YAML-BAT-CHECK-NEXT: hash: 0xDDA1DC5F69F900AC
+YAML-BAT-CHECK-NEXT: calls: [ { off: 0x26, fid: [[#]], cnt: [[#]] } ]
+YAML-BAT-CHECK-NEXT: succ: [ { bid: 5, cnt: [[#]] }
+# Function covered by BAT - doesn't have insns in basic block
+YAML-BAT-CHECK: - name: usqrt
+YAML-BAT-CHECK-NEXT: fid: [[#]]
+YAML-BAT-CHECK-NEXT: hash: 0x99E67ED32A203023
+YAML-BAT-CHECK-NEXT: exec: 21
+YAML-BAT-CHECK-NEXT: nblocks: 5
+YAML-BAT-CHECK-NEXT: blocks:
+YAML-BAT-CHECK: - bid: 1
+YAML-BAT-CHECK-NEXT: insns: [[#]]
+YAML-BAT-CHECK-NEXT: hash: 0xD70DC695320E0010
+YAML-BAT-CHECK-NEXT: succ: {{.*}} { bid: 2, cnt: [[#]] }
+
+CHECK-BOLT-YAML: pre-processing profile using YAML profile reader
+CHECK-BOLT-YAML-NEXT: 5 out of 16 functions in the binary (31.2%) have non-empty execution profile
diff --git a/bolt/test/X86/bolt-address-translation.test b/bolt/test/X86/bolt-address-translation.test
index 4277b4e0d0fe..63234b4c1d21 100644
--- a/bolt/test/X86/bolt-address-translation.test
+++ b/bolt/test/X86/bolt-address-translation.test
@@ -37,7 +37,7 @@
# CHECK: BOLT: 3 out of 7 functions were overwritten.
# CHECK: BOLT-INFO: Wrote 6 BAT maps
# CHECK: BOLT-INFO: Wrote 3 function and 58 basic block hashes
-# CHECK: BOLT-INFO: BAT section size (bytes): 816
+# CHECK: BOLT-INFO: BAT section size (bytes): 924
#
# usqrt mappings (hot part). We match against any key (left side containing
# the bolted binary offsets) because BOLT may change where it puts instructions
diff --git a/bolt/test/X86/dwarf4-label-low-pc.s b/bolt/test/X86/dwarf4-label-low-pc.s
new file mode 100644
index 000000000000..dfd5af18c09b
--- /dev/null
+++ b/bolt/test/X86/dwarf4-label-low-pc.s
@@ -0,0 +1,263 @@
+
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=4 -filetype=obj -triple x86_64-unknown-linux %s -o %tmain.o
+# RUN: %clang %cflags -dwarf-4 %tmain.o -o %t.exe -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.exe | FileCheck --check-prefix=PRECHECK %s
+# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt > %t.txt
+# RUN: llvm-objdump -d %t.bolt >> %t.txt
+# RUN: cat %t.txt | FileCheck --check-prefix=POSTCHECK %s
+
+## This test checks that we correctly handle DW_AT_low_pc [DW_FORM_addr] that is part of DW_TAG_label.
+
+# PRECHECK: version = 0x0004
+# PRECHECK: DW_TAG_label
+# PRECHECK-NEXT: DW_AT_name
+# PRECHECK-NEXT: DW_AT_decl_file
+# PRECHECK-NEXT: DW_AT_decl_line
+# PRECHECK-NEXT:DW_AT_low_pc [DW_FORM_addr]
+# PRECHECK: DW_TAG_label
+# PRECHECK-NEXT: DW_AT_name
+# PRECHECK-NEXT: DW_AT_decl_file
+# PRECHECK-NEXT: DW_AT_decl_line
+# PRECHECK-NEXT:DW_AT_low_pc [DW_FORM_addr]
+
+# POSTCHECK: version = 0x0004
+# POSTCHECK: DW_TAG_label
+# POSTCHECK-NEXT: DW_AT_name
+# POSTCHECK-NEXT: DW_AT_decl_file
+# POSTCHECK-NEXT: DW_AT_decl_line
+# POSTCHECK-NEXT:DW_AT_low_pc [DW_FORM_addr] (0x[[ADDR:[1-9a-f]*]]
+# POSTCHECK: DW_TAG_label
+# POSTCHECK-NEXT: DW_AT_name
+# POSTCHECK-NEXT: DW_AT_decl_file
+# POSTCHECK-NEXT: DW_AT_decl_line
+# POSTCHECK-NEXT:DW_AT_low_pc [DW_FORM_addr] (0x[[ADDR2:[1-9a-f]*]]
+
+# POSTCHECK: [[ADDR]]: 8b 45 f8
+# POSTCHECK: [[ADDR2]]: 8b 45 f8
+
+## clang++ main.cpp -g2 -gdwarf-4 -S
+## int main() {
+## int a = 4;
+## if (a == 5)
+## goto LABEL1;
+## else
+## goto LABEL2;
+## LABEL1:a++;
+## LABEL2:a--;
+## return 0;
+## }
+
+ .text
+ .file "main.cpp"
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin0:
+ .file 1 "/home" "main.cpp"
+ .loc 1 1 0 # main.cpp:1:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movl $0, -4(%rbp)
+.Ltmp0:
+ .loc 1 2 7 prologue_end # main.cpp:2:7
+ movl $4, -8(%rbp)
+.Ltmp1:
+ .loc 1 3 9 # main.cpp:3:9
+ cmpl $5, -8(%rbp)
+.Ltmp2:
+ .loc 1 3 7 is_stmt 0 # main.cpp:3:7
+ jne .LBB0_2
+# %bb.1: # %if.then
+.Ltmp3:
+ .loc 1 4 5 is_stmt 1 # main.cpp:4:5
+ jmp .LBB0_3
+.LBB0_2: # %if.else
+ .loc 1 6 5 # main.cpp:6:5
+ jmp .LBB0_4
+.Ltmp4:
+.LBB0_3: # %LABEL1
+ #DEBUG_LABEL: main:LABEL1
+ .loc 1 7 11 # main.cpp:7:11
+ movl -8(%rbp), %eax
+ addl $1, %eax
+ movl %eax, -8(%rbp)
+.LBB0_4: # %LABEL2
+.Ltmp5:
+ #DEBUG_LABEL: main:LABEL2
+ .loc 1 8 11 # main.cpp:8:11
+ movl -8(%rbp), %eax
+ addl $-1, %eax
+ movl %eax, -8(%rbp)
+ .loc 1 9 3 # main.cpp:9:3
+ xorl %eax, %eax
+ .loc 1 9 3 epilogue_begin is_stmt 0 # main.cpp:9:3
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp6:
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+ .cfi_endproc
+ # -- End function
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 14 # DW_FORM_strp
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 14 # DW_FORM_strp
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 14 # DW_FORM_strp
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 14 # DW_FORM_strp
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 14 # DW_FORM_strp
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 10 # DW_TAG_label
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 14 # DW_FORM_strp
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 17 # DW_AT_low_pc
+ .byte 1 # DW_FORM_addr
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 14 # DW_FORM_strp
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 4 # DWARF version number
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 8 # Address Size (in bytes)
+ .byte 1 # Abbrev [1] 0xb:0x6d DW_TAG_compile_unit
+ .long .Linfo_string0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .long .Linfo_string1 # DW_AT_name
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .long .Linfo_string2 # DW_AT_comp_dir
+ .quad .Lfunc_begin0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 2 # Abbrev [2] 0x2a:0x46 DW_TAG_subprogram
+ .quad .Lfunc_begin0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .long .Linfo_string3 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ .long 112 # DW_AT_type
+ # DW_AT_external
+ .byte 3 # Abbrev [3] 0x43:0xe DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .long .Linfo_string5 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 2 # DW_AT_decl_line
+ .long 112 # DW_AT_type
+ .byte 4 # Abbrev [4] 0x51:0xf DW_TAG_label
+ .long .Linfo_string6 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 7 # DW_AT_decl_line
+ .quad .Ltmp4 # DW_AT_low_pc
+ .byte 4 # Abbrev [4] 0x60:0xf DW_TAG_label
+ .long .Linfo_string7 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 8 # DW_AT_decl_line
+ .quad .Ltmp5 # DW_AT_low_pc
+ .byte 0 # End Of Children Mark
+ .byte 5 # Abbrev [5] 0x70:0x7 DW_TAG_base_type
+ .long .Linfo_string4 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=24
+.Linfo_string2:
+ .asciz "/home" # string offset=33
+.Linfo_string3:
+ .asciz "main" # string offset=71
+.Linfo_string4:
+ .asciz "int" # string offset=76
+.Linfo_string5:
+ .asciz "a" # string offset=80
+.Linfo_string6:
+ .asciz "LABEL1" # string offset=82
+.Linfo_string7:
+ .asciz "LABEL2" # string offset=89
+ .ident "clang version 19.0.0git"
+ .section ".note.GNU-stack","",@progbits
+ .addrsig
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
diff --git a/bolt/test/X86/dwarf5-debug-names-cross-cu.s b/bolt/test/X86/dwarf5-debug-names-cross-cu.s
new file mode 100644
index 000000000000..73c50d6d41db
--- /dev/null
+++ b/bolt/test/X86/dwarf5-debug-names-cross-cu.s
@@ -0,0 +1,712 @@
+
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -o %tmain.o
+# RUN: %clang %cflags -dwarf-5 %tmain.o -o %t.exe -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.bolt --update-debug-sections
+# RUN: llvm-dwarfdump --debug-info -r 0 --debug-names %t.bolt > %t.txt
+# RUN: cat %t.txt | FileCheck --check-prefix=CHECK %s
+
+## This test checks that BOLT generates Entries for DW_AT_abstract_origin when it has a cross-CU reference.
+
+# CHECK: [[OFFSET1:0x[0-9a-f]*]]: Compile Unit
+# CHECK: [[OFFSET2:0x[0-9a-f]*]]: Compile Unit
+# CHECK: Name Index @ 0x0 {
+# CHECK-NEXT: Header {
+# CHECK-NEXT: Length: 0xD2
+# CHECK-NEXT: Format: DWARF32
+# CHECK-NEXT: Version: 5
+# CHECK-NEXT: CU count: 2
+# CHECK-NEXT: Local TU count: 0
+# CHECK-NEXT: Foreign TU count: 0
+# CHECK-NEXT: Bucket count: 5
+# CHECK-NEXT: Name count: 5
+# CHECK-NEXT: Abbreviations table size: 0x1F
+# CHECK-NEXT: Augmentation: 'BOLT'
+# CHECK-NEXT: }
+# CHECK-NEXT: Compilation Unit offsets [
+# CHECK-NEXT: CU[0]: [[OFFSET1]]
+# CHECK-NEXT: CU[1]: [[OFFSET2]]
+# CHECK-NEXT: ]
+# CHECK-NEXT: Abbreviations [
+# CHECK-NEXT: Abbreviation [[ABBREV1:0x[0-9a-f]*]] {
+# CHECK-NEXT: Tag: DW_TAG_subprogram
+# CHECK-NEXT: DW_IDX_compile_unit: DW_FORM_data1
+# CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
+# CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
+# CHECK-NEXT: }
+# CHECK-NEXT: Abbreviation [[ABBREV2:0x[0-9a-f]*]] {
+# CHECK-NEXT: Tag: DW_TAG_inlined_subroutine
+# CHECK-NEXT: DW_IDX_compile_unit: DW_FORM_data1
+# CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
+# CHECK-NEXT: DW_IDX_parent: DW_FORM_ref4
+# CHECK-NEXT: }
+# CHECK-NEXT: Abbreviation [[ABBREV3:0x[0-9a-f]*]] {
+# CHECK-NEXT: Tag: DW_TAG_base_type
+# CHECK-NEXT: DW_IDX_compile_unit: DW_FORM_data1
+# CHECK-NEXT: DW_IDX_die_offset: DW_FORM_ref4
+# CHECK-NEXT: DW_IDX_parent: DW_FORM_flag_present
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: Bucket 0 [
+# CHECK-NEXT: EMPTY
+# CHECK-NEXT: ]
+# CHECK-NEXT: Bucket 1 [
+# CHECK-NEXT: Name 1 {
+# CHECK-NEXT: Hash: 0x7C9A7F6A
+# CHECK-NEXT: String: {{.+}} "main"
+# CHECK-NEXT: Entry @ [[ENTRY:0x[0-9a-f]*]] {
+# CHECK-NEXT: Abbrev: [[ABBREV1]]
+# CHECK-NEXT: Tag: DW_TAG_subprogram
+# CHECK-NEXT: DW_IDX_compile_unit: 0x00
+# CHECK-NEXT: DW_IDX_die_offset: 0x00000024
+# CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+# CHECK-NEXT: }
+# CHECK-NEXT: }
+# CHECK-NEXT: Name 2 {
+# CHECK-NEXT: Hash: 0xB5063CFE
+# CHECK-NEXT: String: {{.+}} "_Z3fooi"
+# CHECK-NEXT: Entry @ {{.+}} {
+# CHECK-NEXT: Abbrev: [[ABBREV1]]
+# CHECK-NEXT: Tag: DW_TAG_subprogram
+# CHECK-NEXT: DW_IDX_compile_unit: 0x01
+# CHECK-NEXT: DW_IDX_die_offset: 0x0000003a
+# CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+# CHECK-NEXT: }
+# CHECK-NEXT: Entry @ {{.+}} {
+# CHECK-NEXT: Abbrev: [[ABBREV2]]
+# CHECK-NEXT: Tag: DW_TAG_inlined_subroutine
+# CHECK-NEXT: DW_IDX_compile_unit: 0x00
+# CHECK-NEXT: DW_IDX_die_offset: 0x00000054
+# CHECK-NEXT: DW_IDX_parent: Entry @ [[ENTRY]]
+# CHECK-NEXT: }
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: Bucket 2 [
+# CHECK-NEXT: EMPTY
+# CHECK-NEXT: ]
+# CHECK-NEXT: Bucket 3 [
+# CHECK-NEXT: Name 3 {
+# CHECK-NEXT: Hash: 0xB888030
+# CHECK-NEXT: String: {{.+}} "int"
+# CHECK-NEXT: Entry @ {{.+}} {
+# CHECK-NEXT: Abbrev: [[ABBREV3]]
+# CHECK-NEXT: Tag: DW_TAG_base_type
+# CHECK-NEXT: DW_IDX_compile_unit: 0x01
+# CHECK-NEXT: DW_IDX_die_offset: 0x00000036
+# CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+# CHECK-NEXT: }
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: Bucket 4 [
+# CHECK-NEXT: Name 4 {
+# CHECK-NEXT: Hash: 0xB887389
+# CHECK-NEXT: String: {{.+}} "foo"
+# CHECK-NEXT: Entry @ {{.+}} {
+# CHECK-NEXT: Abbrev: [[ABBREV1]]
+# CHECK-NEXT: Tag: DW_TAG_subprogram
+# CHECK-NEXT: DW_IDX_compile_unit: 0x01
+# CHECK-NEXT: DW_IDX_die_offset: 0x0000003a
+# CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+# CHECK-NEXT: }
+# CHECK-NEXT: Entry @ 0xc4 {
+# CHECK-NEXT: Abbrev: [[ABBREV2]]
+# CHECK-NEXT: Tag: DW_TAG_inlined_subroutine
+# CHECK-NEXT: DW_IDX_compile_unit: 0x00
+# CHECK-NEXT: DW_IDX_die_offset: 0x00000054
+# CHECK-NEXT: DW_IDX_parent: Entry @ [[ENTRY]]
+# CHECK-NEXT: }
+# CHECK-NEXT: }
+# CHECK-NEXT: Name 5 {
+# CHECK-NEXT: Hash: 0x7C952063
+# CHECK-NEXT: String: {{.+}} "char"
+# CHECK-NEXT: Entry @ {{.+}} {
+# CHECK-NEXT: Abbrev: [[ABBREV3]]
+# CHECK-NEXT: Tag: DW_TAG_base_type
+# CHECK-NEXT: DW_IDX_compile_unit: 0x00
+# CHECK-NEXT: DW_IDX_die_offset: 0x00000075
+# CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+# CHECK-NEXT: }
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }
+
+## clang++ -g2 -gpubnames -S -emit-llvm main.cpp -o main.ll
+## clang++ -g2 -gpubnames -S -emit-llvm helper.cpp -o helper.ll
+## llvm-link main.ll helper.ll -o combined.ll
+## clang++ -g2 -gpubnames combined.ll -emit-llvm -S -o combined.opt.ll
+## llc -dwarf-version=5 -filetype=asm -mtriple x86_64-unknown-linux combined.opt.ll -o combined.s
+## main.cpp
+## extern int foo(int);
+## int main(int argc, char* argv[]) {
+## int i = 0;
+## [[clang::always_inline]] i = foo(argc);
+## return i;
+## }
+## helper.cpp
+## int foo(int i) {
+## return i ++;
+## }
+
+ .text
+ .file "llvm-link"
+ .globl main # -- Begin function main
+ .p2align 4, 0x90
+ .type main,@function
+main: # @main
+.Lfunc_begin0:
+ .file 1 "/home" "main.cpp" md5 0x24fb0b4c3900e91fece1ac87ed73ff3b
+ .loc 1 2 0 # main.cpp:2:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movl $0, -16(%rbp)
+ movl %edi, -12(%rbp)
+ movq %rsi, -24(%rbp)
+.Ltmp0:
+ .loc 1 3 7 prologue_end # main.cpp:3:7
+ movl $0, -4(%rbp)
+ .loc 1 4 36 # main.cpp:4:36
+ movl -12(%rbp), %eax
+ movl %eax, -8(%rbp)
+.Ltmp1:
+ .file 2 "/home" "helper.cpp" md5 0x7d4429e24d8c74d7ee22c1889ad46d6b
+ .loc 2 2 12 # helper.cpp:2:12
+ movl -8(%rbp), %eax
+ movl %eax, %ecx
+ addl $1, %ecx
+ movl %ecx, -8(%rbp)
+.Ltmp2:
+ .loc 1 4 30 # main.cpp:4:30
+ movl %eax, -4(%rbp)
+ .loc 1 5 10 # main.cpp:5:10
+ movl -4(%rbp), %eax
+ .loc 1 5 3 epilogue_begin is_stmt 0 # main.cpp:5:3
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp3:
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+ .cfi_endproc
+ # -- End function
+ .globl _Z3fooi # -- Begin function _Z3fooi
+ .p2align 4, 0x90
+ .type _Z3fooi,@function
+_Z3fooi: # @_Z3fooi
+.Lfunc_begin1:
+ .loc 2 1 0 is_stmt 1 # helper.cpp:1:0
+ .cfi_startproc
+# %bb.0: # %entry
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ movq %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+ movl %edi, -4(%rbp)
+.Ltmp4:
+ .loc 2 2 12 prologue_end # helper.cpp:2:12
+ movl -4(%rbp), %eax
+ movl %eax, %ecx
+ addl $1, %ecx
+ movl %ecx, -4(%rbp)
+ .loc 2 2 3 epilogue_begin is_stmt 0 # helper.cpp:2:3
+ popq %rbp
+ .cfi_def_cfa %rsp, 8
+ retq
+.Ltmp5:
+.Lfunc_end1:
+ .size _Z3fooi, .Lfunc_end1-_Z3fooi
+ .cfi_endproc
+ # -- End function
+ .section .debug_abbrev,"",@progbits
+ .byte 1 # Abbreviation Code
+ .byte 17 # DW_TAG_compile_unit
+ .byte 1 # DW_CHILDREN_yes
+ .byte 37 # DW_AT_producer
+ .byte 37 # DW_FORM_strx1
+ .byte 19 # DW_AT_language
+ .byte 5 # DW_FORM_data2
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 114 # DW_AT_str_offsets_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 16 # DW_AT_stmt_list
+ .byte 23 # DW_FORM_sec_offset
+ .byte 27 # DW_AT_comp_dir
+ .byte 37 # DW_FORM_strx1
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 115 # DW_AT_addr_base
+ .byte 23 # DW_FORM_sec_offset
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 2 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 16 # DW_FORM_ref_addr
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 3 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 16 # DW_FORM_ref_addr
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 4 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 5 # Abbreviation Code
+ .byte 52 # DW_TAG_variable
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 16 # DW_FORM_ref_addr
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 6 # Abbreviation Code
+ .byte 29 # DW_TAG_inlined_subroutine
+ .byte 1 # DW_CHILDREN_yes
+ .byte 49 # DW_AT_abstract_origin
+ .byte 16 # DW_FORM_ref_addr
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 88 # DW_AT_call_file
+ .byte 11 # DW_FORM_data1
+ .byte 89 # DW_AT_call_line
+ .byte 11 # DW_FORM_data1
+ .byte 87 # DW_AT_call_column
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 7 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 49 # DW_AT_abstract_origin
+ .byte 16 # DW_FORM_ref_addr
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 8 # Abbreviation Code
+ .byte 15 # DW_TAG_pointer_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 9 # Abbreviation Code
+ .byte 36 # DW_TAG_base_type
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 62 # DW_AT_encoding
+ .byte 11 # DW_FORM_data1
+ .byte 11 # DW_AT_byte_size
+ .byte 11 # DW_FORM_data1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 10 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 110 # DW_AT_linkage_name
+ .byte 37 # DW_FORM_strx1
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 63 # DW_AT_external
+ .byte 25 # DW_FORM_flag_present
+ .byte 32 # DW_AT_inline
+ .byte 33 # DW_FORM_implicit_const
+ .byte 1
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 11 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 3 # DW_AT_name
+ .byte 37 # DW_FORM_strx1
+ .byte 58 # DW_AT_decl_file
+ .byte 11 # DW_FORM_data1
+ .byte 59 # DW_AT_decl_line
+ .byte 11 # DW_FORM_data1
+ .byte 73 # DW_AT_type
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 12 # Abbreviation Code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_CHILDREN_yes
+ .byte 17 # DW_AT_low_pc
+ .byte 27 # DW_FORM_addrx
+ .byte 18 # DW_AT_high_pc
+ .byte 6 # DW_FORM_data4
+ .byte 64 # DW_AT_frame_base
+ .byte 24 # DW_FORM_exprloc
+ .byte 49 # DW_AT_abstract_origin
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 13 # Abbreviation Code
+ .byte 5 # DW_TAG_formal_parameter
+ .byte 0 # DW_CHILDREN_no
+ .byte 2 # DW_AT_location
+ .byte 24 # DW_FORM_exprloc
+ .byte 49 # DW_AT_abstract_origin
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # EOM(1)
+ .byte 0 # EOM(2)
+ .byte 0 # EOM(3)
+ .section .debug_info,"",@progbits
+.Lcu_begin0:
+ .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit
+.Ldebug_info_start0:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 1 # Abbrev [1] 0xc:0x6d DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 1 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .byte 2 # Abbrev [2] 0x23:0x47 DW_TAG_subprogram
+ .byte 0 # DW_AT_low_pc
+ .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .byte 8 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 2 # DW_AT_decl_line
+ .long .debug_info+174 # DW_AT_type
+ # DW_AT_external
+ .byte 3 # Abbrev [3] 0x32:0xb DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 116
+ .byte 9 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 2 # DW_AT_decl_line
+ .long .debug_info+174 # DW_AT_type
+ .byte 4 # Abbrev [4] 0x3d:0xb DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 104
+ .byte 10 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 2 # DW_AT_decl_line
+ .long 106 # DW_AT_type
+ .byte 5 # Abbrev [5] 0x48:0xb DW_TAG_variable
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 124
+ .byte 7 # DW_AT_name
+ .byte 1 # DW_AT_decl_file
+ .byte 3 # DW_AT_decl_line
+ .long .debug_info+174 # DW_AT_type
+ .byte 6 # Abbrev [6] 0x53:0x16 DW_TAG_inlined_subroutine
+ .long .debug_info+156 # DW_AT_abstract_origin
+ .byte 1 # DW_AT_low_pc
+ .long .Ltmp2-.Ltmp1 # DW_AT_high_pc
+ .byte 1 # DW_AT_call_file
+ .byte 4 # DW_AT_call_line
+ .byte 32 # DW_AT_call_column
+ .byte 7 # Abbrev [7] 0x60:0x8 DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 120
+ .long .debug_info+165 # DW_AT_abstract_origin
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+ .byte 8 # Abbrev [8] 0x6a:0x5 DW_TAG_pointer_type
+ .long 111 # DW_AT_type
+ .byte 8 # Abbrev [8] 0x6f:0x5 DW_TAG_pointer_type
+ .long 116 # DW_AT_type
+ .byte 9 # Abbrev [9] 0x74:0x4 DW_TAG_base_type
+ .byte 11 # DW_AT_name
+ .byte 6 # DW_AT_encoding
+ .byte 1 # DW_AT_byte_size
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end0:
+.Lcu_begin1:
+ .long .Ldebug_info_end1-.Ldebug_info_start1 # Length of Unit
+.Ldebug_info_start1:
+ .short 5 # DWARF version number
+ .byte 1 # DWARF Unit Type
+ .byte 8 # Address Size (in bytes)
+ .long .debug_abbrev # Offset Into Abbrev. Section
+ .byte 1 # Abbrev [1] 0xc:0x43 DW_TAG_compile_unit
+ .byte 0 # DW_AT_producer
+ .short 33 # DW_AT_language
+ .byte 3 # DW_AT_name
+ .long .Lstr_offsets_base0 # DW_AT_str_offsets_base
+ .long .Lline_table_start0 # DW_AT_stmt_list
+ .byte 2 # DW_AT_comp_dir
+ .byte 2 # DW_AT_low_pc
+ .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc
+ .long .Laddr_table_base0 # DW_AT_addr_base
+ .byte 10 # Abbrev [10] 0x23:0x12 DW_TAG_subprogram
+ .byte 4 # DW_AT_linkage_name
+ .byte 5 # DW_AT_name
+ .byte 2 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ .long 53 # DW_AT_type
+ # DW_AT_external
+ # DW_AT_inline
+ .byte 11 # Abbrev [11] 0x2c:0x8 DW_TAG_formal_parameter
+ .byte 7 # DW_AT_name
+ .byte 2 # DW_AT_decl_file
+ .byte 1 # DW_AT_decl_line
+ .long 53 # DW_AT_type
+ .byte 0 # End Of Children Mark
+ .byte 9 # Abbrev [9] 0x35:0x4 DW_TAG_base_type
+ .byte 6 # DW_AT_name
+ .byte 5 # DW_AT_encoding
+ .byte 4 # DW_AT_byte_size
+ .byte 12 # Abbrev [12] 0x39:0x15 DW_TAG_subprogram
+ .byte 2 # DW_AT_low_pc
+ .long .Lfunc_end1-.Lfunc_begin1 # DW_AT_high_pc
+ .byte 1 # DW_AT_frame_base
+ .byte 86
+ .long 35 # DW_AT_abstract_origin
+ .byte 13 # Abbrev [13] 0x45:0x8 DW_TAG_formal_parameter
+ .byte 2 # DW_AT_location
+ .byte 145
+ .byte 124
+ .long 44 # DW_AT_abstract_origin
+ .byte 0 # End Of Children Mark
+ .byte 0 # End Of Children Mark
+.Ldebug_info_end1:
+ .section .debug_str_offsets,"",@progbits
+ .long 52 # Length of String Offsets Set
+ .short 5
+ .short 0
+.Lstr_offsets_base0:
+ .section .debug_str,"MS",@progbits,1
+.Linfo_string0:
+ .asciz "clang version 19.0.0git" # string offset=0
+.Linfo_string1:
+ .asciz "main.cpp" # string offset=24
+.Linfo_string2:
+ .asciz "/home/ayermolo/local/tasks/T182867349" # string offset=33
+.Linfo_string3:
+ .asciz "helper.cpp" # string offset=71
+.Linfo_string4:
+ .asciz "_Z3fooi" # string offset=82
+.Linfo_string5:
+ .asciz "foo" # string offset=90
+.Linfo_string6:
+ .asciz "int" # string offset=94
+.Linfo_string7:
+ .asciz "i" # string offset=98
+.Linfo_string8:
+ .asciz "main" # string offset=100
+.Linfo_string9:
+ .asciz "argc" # string offset=105
+.Linfo_string10:
+ .asciz "argv" # string offset=110
+.Linfo_string11:
+ .asciz "char" # string offset=115
+ .section .debug_str_offsets,"",@progbits
+ .long .Linfo_string0
+ .long .Linfo_string1
+ .long .Linfo_string2
+ .long .Linfo_string3
+ .long .Linfo_string4
+ .long .Linfo_string5
+ .long .Linfo_string6
+ .long .Linfo_string7
+ .long .Linfo_string8
+ .long .Linfo_string9
+ .long .Linfo_string10
+ .long .Linfo_string11
+ .section .debug_addr,"",@progbits
+ .long .Ldebug_addr_end0-.Ldebug_addr_start0 # Length of contribution
+.Ldebug_addr_start0:
+ .short 5 # DWARF version number
+ .byte 8 # Address size
+ .byte 0 # Segment selector size
+.Laddr_table_base0:
+ .quad .Lfunc_begin0
+ .quad .Ltmp1
+ .quad .Lfunc_begin1
+.Ldebug_addr_end0:
+ .section .debug_names,"",@progbits
+ .long .Lnames_end0-.Lnames_start0 # Header: unit length
+.Lnames_start0:
+ .short 5 # Header: version
+ .short 0 # Header: padding
+ .long 2 # Header: compilation unit count
+ .long 0 # Header: local type unit count
+ .long 0 # Header: foreign type unit count
+ .long 5 # Header: bucket count
+ .long 5 # Header: name count
+ .long .Lnames_abbrev_end0-.Lnames_abbrev_start0 # Header: abbreviation table size
+ .long 8 # Header: augmentation string size
+ .ascii "LLVM0700" # Header: augmentation string
+ .long .Lcu_begin0 # Compilation unit 0
+ .long .Lcu_begin1 # Compilation unit 1
+ .long 0 # Bucket 0
+ .long 1 # Bucket 1
+ .long 0 # Bucket 2
+ .long 3 # Bucket 3
+ .long 4 # Bucket 4
+ .long 2090499946 # Hash in Bucket 1
+ .long -1257882370 # Hash in Bucket 1
+ .long 193495088 # Hash in Bucket 3
+ .long 193491849 # Hash in Bucket 4
+ .long 2090147939 # Hash in Bucket 4
+ .long .Linfo_string8 # String in Bucket 1: main
+ .long .Linfo_string4 # String in Bucket 1: _Z3fooi
+ .long .Linfo_string6 # String in Bucket 3: int
+ .long .Linfo_string5 # String in Bucket 4: foo
+ .long .Linfo_string11 # String in Bucket 4: char
+ .long .Lnames1-.Lnames_entries0 # Offset in Bucket 1
+ .long .Lnames3-.Lnames_entries0 # Offset in Bucket 1
+ .long .Lnames0-.Lnames_entries0 # Offset in Bucket 3
+ .long .Lnames2-.Lnames_entries0 # Offset in Bucket 4
+ .long .Lnames4-.Lnames_entries0 # Offset in Bucket 4
+.Lnames_abbrev_start0:
+ .byte 1 # Abbrev code
+ .byte 46 # DW_TAG_subprogram
+ .byte 1 # DW_IDX_compile_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 2 # Abbrev code
+ .byte 29 # DW_TAG_inlined_subroutine
+ .byte 1 # DW_IDX_compile_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 19 # DW_FORM_ref4
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 3 # Abbrev code
+ .byte 36 # DW_TAG_base_type
+ .byte 1 # DW_IDX_compile_unit
+ .byte 11 # DW_FORM_data1
+ .byte 3 # DW_IDX_die_offset
+ .byte 19 # DW_FORM_ref4
+ .byte 4 # DW_IDX_parent
+ .byte 25 # DW_FORM_flag_present
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev
+ .byte 0 # End of abbrev list
+.Lnames_abbrev_end0:
+.Lnames_entries0:
+.Lnames1:
+.L3:
+ .byte 1 # Abbreviation code
+ .byte 0 # DW_IDX_compile_unit
+ .long 35 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: main
+.Lnames3:
+.L0:
+ .byte 1 # Abbreviation code
+ .byte 1 # DW_IDX_compile_unit
+ .long 57 # DW_IDX_die_offset
+.L2: # DW_IDX_parent
+ .byte 2 # Abbreviation code
+ .byte 0 # DW_IDX_compile_unit
+ .long 83 # DW_IDX_die_offset
+ .long .L3-.Lnames_entries0 # DW_IDX_parent
+ .byte 0 # End of list: _Z3fooi
+.Lnames0:
+.L4:
+ .byte 3 # Abbreviation code
+ .byte 1 # DW_IDX_compile_unit
+ .long 53 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: int
+.Lnames2:
+ .byte 1 # Abbreviation code
+ .byte 1 # DW_IDX_compile_unit
+ .long 57 # DW_IDX_die_offset
+ .byte 2 # DW_IDX_parent
+ # Abbreviation code
+ .byte 0 # DW_IDX_compile_unit
+ .long 83 # DW_IDX_die_offset
+ .long .L3-.Lnames_entries0 # DW_IDX_parent
+ .byte 0 # End of list: foo
+.Lnames4:
+.L1:
+ .byte 3 # Abbreviation code
+ .byte 0 # DW_IDX_compile_unit
+ .long 116 # DW_IDX_die_offset
+ .byte 0 # DW_IDX_parent
+ # End of list: char
+ .p2align 2, 0x0
+.Lnames_end0:
+ .ident "clang version 19.0.0git"
+ .ident "clang version 19.0.0git"
+ .section ".note.GNU-stack","",@progbits
+ .section .debug_line,"",@progbits
+.Lline_table_start0:
diff --git a/bolt/test/X86/dwarf5-label-low-pc.s b/bolt/test/X86/dwarf5-label-low-pc.s
index 890d9e024d1a..1e3fc17ad516 100644
--- a/bolt/test/X86/dwarf5-label-low-pc.s
+++ b/bolt/test/X86/dwarf5-label-low-pc.s
@@ -8,9 +8,10 @@
# RUN: llvm-dwarfdump --show-form --verbose --debug-addr %t.bolt > %t.txt
# RUN: llvm-dwarfdump --show-form --verbose --debug-info %t.bolt >> %t.txt
+# RUN: llvm-objdump -d %t.bolt >> %t.txt
# RUN: cat %t.txt | FileCheck --check-prefix=POSTCHECK %s
-# This test checks that we correctly handle DW_AT_low_pc [DW_FORM_addrx] that is part of DW_TAG_label.
+## This test checks that we correctly handle DW_AT_low_pc [DW_FORM_addrx] that is part of DW_TAG_label.
# PRECHECK: version = 0x0005
# PRECHECK: DW_TAG_label
@@ -28,8 +29,8 @@
# POSTCHECK: Addrs: [
# POSTCHECK-NEXT: 0x
# POSTCHECK-NEXT: 0x
-# POSTCHECK-NEXT: 0x[[#%.16x,ADDR:]]
-# POSTCHECK-NEXT: 0x[[#%.16x,ADDR2:]]
+# POSTCHECK-NEXT: 0x[[ADDR:[1-9a-f]*]]
+# POSTCHECK-NEXT: 0x[[ADDR2:[1-9a-f]*]]
# POSTCHECK: version = 0x0005
# POSTCHECK: DW_TAG_label
@@ -37,25 +38,28 @@
# POSTCHECK-NEXT: DW_AT_decl_file
# POSTCHECK-NEXT: DW_AT_decl_line
# POSTCHECK-NEXT:DW_AT_low_pc [DW_FORM_addrx] (indexed (00000002)
-# POSTCHECK-SAME: 0x[[#ADDR]]
+# POSTCHECK-SAME: 0x[[ADDR]]
# POSTCHECK: DW_TAG_label
# POSTCHECK-NEXT: DW_AT_name
# POSTCHECK-NEXT: DW_AT_decl_file
# POSTCHECK-NEXT: DW_AT_decl_line
# POSTCHECK-NEXT:DW_AT_low_pc [DW_FORM_addrx] (indexed (00000003)
-# POSTCHECK-SAME: 0x[[#ADDR2]]
+# POSTCHECK-SAME: 0x[[ADDR2]]
-# clang++ main.cpp -g -S
-# int main() {
-# int a = 4;
-# if (a == 5)
-# goto LABEL1;
-# else
-# goto LABEL2;
-# LABEL1:a++;
-# LABEL2:a--;
-# return 0;
-# }
+# POSTCHECK: [[ADDR]]: 8b 45 f8
+# POSTCHECK: [[ADDR2]]: 8b 45 f8
+
+## clang++ main.cpp -g -S
+## int main() {
+## int a = 4;
+## if (a == 5)
+## goto LABEL1;
+## else
+## goto LABEL2;
+## LABEL1:a++;
+## LABEL2:a--;
+## return 0;
+## }
.text
.file "main.cpp"
diff --git a/bolt/test/X86/linux-alt-instruction.s b/bolt/test/X86/linux-alt-instruction.s
index 5dcc6fe3ab0c..2cdf31519682 100644
--- a/bolt/test/X86/linux-alt-instruction.s
+++ b/bolt/test/X86/linux-alt-instruction.s
@@ -6,8 +6,8 @@
# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o
# RUN: %clang %cflags -nostdlib %t.o -o %t.exe \
# RUN: -Wl,--image-base=0xffffffff80000000,--no-dynamic-linker,--no-eh-frame-hdr,--no-pie
-# RUN: llvm-bolt %t.exe --print-normalized --keep-nops -o %t.out \
-# RUN: --alt-inst-feature-size=2 | FileCheck %s
+# RUN: llvm-bolt %t.exe --print-normalized --alt-inst-feature-size=2 -o %t.out \
+# RUN: | FileCheck %s
## Older kernels used to have padlen field in alt_instr. Check compatibility.
@@ -15,8 +15,8 @@
# RUN: %s -o %t.o
# RUN: %clang %cflags -nostdlib %t.o -o %t.exe \
# RUN: -Wl,--image-base=0xffffffff80000000,--no-dynamic-linker,--no-eh-frame-hdr,--no-pie
-# RUN: llvm-bolt %t.exe --print-normalized --keep-nops --alt-inst-has-padlen \
-# RUN: -o %t.out | FileCheck %s
+# RUN: llvm-bolt %t.exe --print-normalized --alt-inst-has-padlen -o %t.out \
+# RUN: | FileCheck %s
## Check with a larger size of "feature" field in alt_instr.
@@ -24,13 +24,12 @@
# RUN: --defsym FEATURE_SIZE_4=1 %s -o %t.o
# RUN: %clang %cflags -nostdlib %t.o -o %t.exe \
# RUN: -Wl,--image-base=0xffffffff80000000,--no-dynamic-linker,--no-eh-frame-hdr,--no-pie
-# RUN: llvm-bolt %t.exe --print-normalized --keep-nops \
-# RUN: --alt-inst-feature-size=4 -o %t.out | FileCheck %s
+# RUN: llvm-bolt %t.exe --print-normalized --alt-inst-feature-size=4 -o %t.out \
+# RUN: | FileCheck %s
## Check that out-of-bounds read is handled properly.
-# RUN: not llvm-bolt %t.exe --print-normalized --keep-nops \
-# RUN: --alt-inst-feature-size=2 -o %t.out
+# RUN: not llvm-bolt %t.exe --print-normalized --alt-inst-feature-size=2 -o %t.out
# CHECK: BOLT-INFO: Linux kernel binary detected
# CHECK: BOLT-INFO: parsed 2 alternative instruction entries
diff --git a/bolt/test/X86/linux-bug-table.s b/bolt/test/X86/linux-bug-table.s
index e8de2fb6cba7..63f70a0b35d9 100644
--- a/bolt/test/X86/linux-bug-table.s
+++ b/bolt/test/X86/linux-bug-table.s
@@ -1,6 +1,7 @@
# REQUIRES: system-linux
-## Check that BOLT correctly parses the Linux kernel __bug_table section.
+## Check that BOLT correctly parses and updates the Linux kernel __bug_table
+## section.
# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o
# RUN: %clang %cflags -nostdlib %t.o -o %t.exe \
@@ -8,7 +9,13 @@
## Verify bug entry bindings to instructions.
-# RUN: llvm-bolt %t.exe --print-normalized -o %t.out | FileCheck %s
+# RUN: llvm-bolt %t.exe --print-normalized --print-only=_start -o %t.out \
+# RUN: --eliminate-unreachable=1 --bolt-info=0 | FileCheck %s
+
+## Verify bug entry bindings again after unreachable code elimination.
+
+# RUN: llvm-bolt %t.out -o %t.out.1 --print-only=_start --print-normalized \
+# RUN: |& FileCheck --check-prefix=CHECK-REOPT %s
# CHECK: BOLT-INFO: Linux kernel binary detected
# CHECK: BOLT-INFO: parsed 2 bug table entries
@@ -17,18 +24,26 @@
.globl _start
.type _start, %function
_start:
-# CHECK: Binary Function "_start"
- nop
+ jmp .L1
.L0:
ud2
# CHECK: ud2
# CHECK-SAME: BugEntry: 1
- nop
.L1:
ud2
# CHECK: ud2
# CHECK-SAME: BugEntry: 2
+
+## Only the second entry should remain after the first pass.
+
+# CHECK-REOPT: ud2
+# CHECK-REOPT-SAME: BugEntry: 2
+
ret
+## The return instruction is reachable only via preceding ud2. Test that it is
+## treated as a reachable instruction in the Linux kernel mode.
+
+# CHECK-REOPT-NEXT: ret
.size _start, .-_start
diff --git a/bolt/test/X86/linux-orc.s b/bolt/test/X86/linux-orc.s
index 4da19989408e..5f2096278e92 100644
--- a/bolt/test/X86/linux-orc.s
+++ b/bolt/test/X86/linux-orc.s
@@ -27,7 +27,7 @@
## Verify ORC bindings to instructions.
# RUN: llvm-bolt %t.exe --print-normalized --dump-orc --print-orc -o %t.out \
-# RUN: --bolt-info=0 |& FileCheck %s
+# RUN: --keep-nops=0 --bolt-info=0 |& FileCheck %s
## Verify ORC bindings after rewrite.
@@ -37,7 +37,7 @@
## Verify ORC binding after rewrite when some of the functions are skipped.
-# RUN: llvm-bolt %t.exe -o %t.out --skip-funcs=bar --bolt-info=0
+# RUN: llvm-bolt %t.exe -o %t.out --skip-funcs=bar --bolt-info=0 --keep-nops=0
# RUN: llvm-bolt %t.out -o %t.out.1 --print-normalized --print-orc \
# RUN: |& FileCheck %s
diff --git a/bolt/test/X86/linux-parainstructions.s b/bolt/test/X86/linux-parainstructions.s
index 4bdfde5fb7f2..07fca6bbedaf 100644
--- a/bolt/test/X86/linux-parainstructions.s
+++ b/bolt/test/X86/linux-parainstructions.s
@@ -8,7 +8,7 @@
## Verify paravirtual bindings to instructions.
-# RUN: llvm-bolt %t.exe --print-normalized -o %t.out | FileCheck %s
+# RUN: llvm-bolt %t.exe --print-normalized -o %t.out --keep-nops=0 | FileCheck %s
# CHECK: BOLT-INFO: Linux kernel binary detected
# CHECK: BOLT-INFO: parsed 2 paravirtual patch sites
diff --git a/bolt/test/X86/linux-static-keys.s b/bolt/test/X86/linux-static-keys.s
new file mode 100644
index 000000000000..08454bf97631
--- /dev/null
+++ b/bolt/test/X86/linux-static-keys.s
@@ -0,0 +1,67 @@
+# REQUIRES: system-linux
+
+## Check that BOLT correctly updates the Linux kernel static keys jump table.
+
+# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o
+# RUN: %clang %cflags -nostdlib %t.o -o %t.exe \
+# RUN: -Wl,--image-base=0xffffffff80000000,--no-dynamic-linker,--no-eh-frame-hdr
+
+## Verify static keys jump bindings to instructions.
+
+# RUN: llvm-bolt %t.exe --print-normalized -o %t.out --keep-nops=0 \
+# RUN: --bolt-info=0 |& FileCheck %s
+
+## Verify the bindings again on the rewritten binary with nops removed.
+
+# RUN: llvm-bolt %t.out -o %t.out.1 --print-normalized |& FileCheck %s
+
+# CHECK: BOLT-INFO: Linux kernel binary detected
+# CHECK: BOLT-INFO: parsed 2 static keys jump entries
+
+ .text
+ .globl _start
+ .type _start, %function
+_start:
+# CHECK: Binary Function "_start"
+ nop
+.L0:
+ jmp .L1
+# CHECK: jit
+# CHECK-SAME: # ID: 1 {{.*}} # Likely: 0 # InitValue: 1
+ nop
+.L1:
+ .nops 5
+# CHECK: jit
+# CHECK-SAME: # ID: 2 {{.*}} # Likely: 1 # InitValue: 1
+.L2:
+ nop
+ .size _start, .-_start
+
+ .globl foo
+ .type foo, %function
+foo:
+ ret
+ .size foo, .-foo
+
+
+## Static keys jump table.
+ .rodata
+ .globl __start___jump_table
+ .type __start___jump_table, %object
+__start___jump_table:
+
+ .long .L0 - . # Jump address
+ .long .L1 - . # Target address
+ .quad 1 # Key address
+
+ .long .L1 - . # Jump address
+ .long .L2 - . # Target address
+ .quad 0 # Key address
+
+ .globl __stop___jump_table
+ .type __stop___jump_table, %object
+__stop___jump_table:
+
+## Fake Linux Kernel sections.
+ .section __ksymtab,"a",@progbits
+ .section __ksymtab_gpl,"a",@progbits
diff --git a/bolt/test/X86/yaml-secondary-entry-discriminator.s b/bolt/test/X86/yaml-secondary-entry-discriminator.s
new file mode 100644
index 000000000000..43c2e2a7f055
--- /dev/null
+++ b/bolt/test/X86/yaml-secondary-entry-discriminator.s
@@ -0,0 +1,74 @@
+# This reproduces a bug with BOLT setting incorrect discriminator for
+# secondary entry points in YAML profile.
+
+# REQUIRES: system-linux
+# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o
+# RUN: link_fdata %s %t.o %t.fdata
+# RUN: llvm-strip --strip-unneeded %t.o
+# RUN: %clang %cflags %t.o -o %t.exe -Wl,-q -nostdlib
+# RUN: llvm-bolt %t.exe -o %t.out --data %t.fdata -w %t.yaml --print-profile \
+# RUN: --print-only=main | FileCheck %s --check-prefix=CHECK-CFG
+# RUN: FileCheck %s -input-file %t.yaml
+# CHECK: - name: main
+# CHECK-NEXT: fid: 2
+# CHECK-NEXT: hash: 0xADF270D550151185
+# CHECK-NEXT: exec: 0
+# CHECK-NEXT: nblocks: 4
+# CHECK-NEXT: blocks:
+# CHECK: - bid: 1
+# CHECK-NEXT: insns: 1
+# CHECK-NEXT: hash: 0x36A303CBA4360014
+# CHECK-NEXT: calls: [ { off: 0x0, fid: 1, disc: 1, cnt: 1 } ]
+# CHECK: - bid: 2
+# CHECK-NEXT: insns: 5
+# CHECK-NEXT: hash: 0x8B2F5747CD0019
+# CHECK-NEXT: calls: [ { off: 0x0, fid: 1, disc: 1, cnt: 1, mis: 1 } ]
+
+# Make sure that the profile is attached correctly
+# RUN: llvm-bolt %t.exe -o %t.out --data %t.yaml --print-profile \
+# RUN: --print-only=main | FileCheck %s --check-prefix=CHECK-CFG
+
+# CHECK-CFG: Binary Function "main" after attaching profile {
+# CHECK-CFG: callq secondary_entry # Offset: [[#]] # Count: 1
+# CHECK-CFG: callq *%rax # Offset: [[#]] # CallProfile: 1 (1 misses) :
+# CHECK-CFG-NEXT: { secondary_entry: 1 (1 misses) }
+
+.globl func
+.type func, @function
+func:
+# FDATA: 0 [unknown] 0 1 func 0 1 0
+ .cfi_startproc
+ pushq %rbp
+ movq %rsp, %rbp
+.globl secondary_entry
+secondary_entry:
+ popq %rbp
+ retq
+ nopl (%rax)
+ .cfi_endproc
+ .size func, .-func
+
+.globl main
+.type main, @function
+main:
+ .cfi_startproc
+ pushq %rbp
+ movq %rsp, %rbp
+ subq $16, %rsp
+ movl $0, -4(%rbp)
+ testq %rax, %rax
+ jne Lindcall
+Lcall:
+ call secondary_entry
+# FDATA: 1 main #Lcall# 1 secondary_entry 0 1 1
+Lindcall:
+ callq *%rax
+# FDATA: 1 main #Lindcall# 1 secondary_entry 0 1 1
+ xorl %eax, %eax
+ addq $16, %rsp
+ popq %rbp
+ retq
+# For relocations against .text
+ call exit
+ .cfi_endproc
+ .size main, .-main
diff --git a/bolt/tools/bat-dump/bat-dump.cpp b/bolt/tools/bat-dump/bat-dump.cpp
index 2e9b26cc137a..709eb076bca2 100644
--- a/bolt/tools/bat-dump/bat-dump.cpp
+++ b/bolt/tools/bat-dump/bat-dump.cpp
@@ -1,9 +1,16 @@
+//===- bolt/tools/bat-dump/bat-dump.cpp - BAT dumper utility --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "bolt/Profile/BoltAddressTranslation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/Error.h"
@@ -18,7 +25,6 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <assert.h>
#include <cstdint>
#include <map>
@@ -27,7 +33,6 @@
#include <system_error>
#include <type_traits>
#include <utility>
-#include <vector>
using namespace llvm;
using namespace bolt;
diff --git a/bolt/tools/heatmap/heatmap.cpp b/bolt/tools/heatmap/heatmap.cpp
index 9b190dd288b2..3bb9f2ce7491 100644
--- a/bolt/tools/heatmap/heatmap.cpp
+++ b/bolt/tools/heatmap/heatmap.cpp
@@ -1,4 +1,11 @@
-#include "bolt/Profile/DataAggregator.h"
+//===- bolt/tools/heatmap/heatmap.cpp - Profile heatmap visualization tool ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "bolt/Rewrite/RewriteInstance.h"
#include "bolt/Utils/CommandLineOpts.h"
#include "llvm/MC/TargetRegistry.h"
@@ -6,7 +13,8 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Program.h"
#include "llvm/Support/TargetSelect.h"
using namespace llvm;
diff --git a/bolt/unittests/Core/BinaryContext.cpp b/bolt/unittests/Core/BinaryContext.cpp
index 08619eccdd75..cfec72a34a59 100644
--- a/bolt/unittests/Core/BinaryContext.cpp
+++ b/bolt/unittests/Core/BinaryContext.cpp
@@ -1,7 +1,14 @@
+//===- bolt/unittest/Core/BinaryContext.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "bolt/Core/BinaryContext.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
-#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/TargetSelect.h"
#include "gtest/gtest.h"
@@ -40,8 +47,8 @@ protected:
void initializeBOLT() {
BC = cantFail(BinaryContext::createBinaryContext(
- ObjFile.get(), true, DWARFContext::create(*ObjFile.get()),
- {llvm::outs(), llvm::errs()}));
+ ObjFile->makeTriple(), ObjFile->getFileName(), nullptr, true,
+ DWARFContext::create(*ObjFile.get()), {llvm::outs(), llvm::errs()}));
ASSERT_FALSE(!BC);
}
diff --git a/bolt/unittests/Core/MCPlusBuilder.cpp b/bolt/unittests/Core/MCPlusBuilder.cpp
index daf9f392b822..62f3aaab4a72 100644
--- a/bolt/unittests/Core/MCPlusBuilder.cpp
+++ b/bolt/unittests/Core/MCPlusBuilder.cpp
@@ -1,3 +1,11 @@
+//===- bolt/unittest/Core/MCPlusBuilder.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#ifdef AARCH64_AVAILABLE
#include "AArch64Subtarget.h"
#endif // AARCH64_AVAILABLE
@@ -11,7 +19,6 @@
#include "bolt/Rewrite/RewriteInstance.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
-#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/TargetSelect.h"
#include "gtest/gtest.h"
@@ -50,8 +57,8 @@ protected:
void initializeBolt() {
BC = cantFail(BinaryContext::createBinaryContext(
- ObjFile.get(), true, DWARFContext::create(*ObjFile.get()),
- {llvm::outs(), llvm::errs()}));
+ ObjFile->makeTriple(), ObjFile->getFileName(), nullptr, true,
+ DWARFContext::create(*ObjFile.get()), {llvm::outs(), llvm::errs()}));
ASSERT_FALSE(!BC);
BC->initializeTarget(std::unique_ptr<MCPlusBuilder>(
createMCPlusBuilder(GetParam(), BC->MIA.get(), BC->MII.get(),
diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp
index 40ac6918faf4..b877ea06dc05 100644
--- a/clang-tools-extra/clang-tidy/ClangTidy.cpp
+++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp
@@ -233,7 +233,7 @@ public:
if (!tooling::applyAllReplacements(Replacements.get(), Rewrite)) {
llvm::errs() << "Can't apply replacements for file " << File << "\n";
}
- AnyNotWritten &= Rewrite.overwriteChangedFiles();
+ AnyNotWritten |= Rewrite.overwriteChangedFiles();
}
if (AnyNotWritten) {
diff --git a/clang-tools-extra/clang-tidy/bugprone/IncDecInConditionsCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/IncDecInConditionsCheck.cpp
index 16f43128d55e..9b3b01eb0268 100644
--- a/clang-tools-extra/clang-tidy/bugprone/IncDecInConditionsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/IncDecInConditionsCheck.cpp
@@ -31,6 +31,10 @@ void IncDecInConditionsCheck::registerMatchers(MatchFinder *Finder) {
anyOf(binaryOperator(anyOf(isComparisonOperator(), isLogicalOperator())),
cxxOperatorCallExpr(isComparisonOperator())));
+ auto IsInUnevaluatedContext =
+ expr(anyOf(hasAncestor(expr(matchers::hasUnevaluatedContext())),
+ hasAncestor(typeLoc())));
+
Finder->addMatcher(
expr(
OperatorMatcher, unless(isExpansionInSystemHeader()),
@@ -42,12 +46,14 @@ void IncDecInConditionsCheck::registerMatchers(MatchFinder *Finder) {
cxxOperatorCallExpr(
isPrePostOperator(),
hasUnaryOperand(expr().bind("operand")))),
+ unless(IsInUnevaluatedContext),
hasAncestor(
expr(equalsBoundNode("parent"),
hasDescendant(
expr(unless(equalsBoundNode("operand")),
matchers::isStatementIdenticalToBoundNode(
- "operand"))
+ "operand"),
+ unless(IsInUnevaluatedContext))
.bind("second")))))
.bind("operator"))),
this);
diff --git a/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.cpp b/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.cpp
index ef511e9108f2..359d8efd100b 100644
--- a/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.cpp
+++ b/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.cpp
@@ -40,6 +40,34 @@ namespace {
AST_MATCHER(FunctionDecl, isUserDefineLiteral) {
return Node.getLiteralIdentifier() != nullptr;
}
+
+AST_MATCHER(TypeLoc, isValidAndNotInMacro) {
+ const SourceLocation Loc = Node.getBeginLoc();
+ return Loc.isValid() && !Loc.isMacroID();
+}
+
+AST_MATCHER(TypeLoc, isBuiltinType) {
+ TypeLoc TL = Node;
+ if (auto QualLoc = Node.getAs<QualifiedTypeLoc>())
+ TL = QualLoc.getUnqualifiedLoc();
+
+ const auto BuiltinLoc = TL.getAs<BuiltinTypeLoc>();
+ if (!BuiltinLoc)
+ return false;
+
+ switch (BuiltinLoc.getTypePtr()->getKind()) {
+ case BuiltinType::Short:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong:
+ case BuiltinType::UShort:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ return true;
+ default:
+ return false;
+ }
+}
+
} // namespace
namespace tidy::google::runtime {
@@ -63,11 +91,11 @@ void IntegerTypesCheck::registerMatchers(MatchFinder *Finder) {
// "Where possible, avoid passing arguments of types specified by
// bitwidth typedefs to printf-based APIs."
Finder->addMatcher(
- typeLoc(loc(isInteger()),
- unless(anyOf(hasAncestor(callExpr(
- callee(functionDecl(hasAttr(attr::Format))))),
- hasParent(parmVarDecl(hasAncestor(
- functionDecl(isUserDefineLiteral())))))))
+ typeLoc(loc(isInteger()), isValidAndNotInMacro(), isBuiltinType(),
+ unless(hasAncestor(
+ callExpr(callee(functionDecl(hasAttr(attr::Format)))))),
+ unless(hasParent(parmVarDecl(
+ hasAncestor(functionDecl(isUserDefineLiteral()))))))
.bind("tl"),
this);
IdentTable = std::make_unique<IdentifierTable>(getLangOpts());
@@ -77,9 +105,6 @@ void IntegerTypesCheck::check(const MatchFinder::MatchResult &Result) {
auto TL = *Result.Nodes.getNodeAs<TypeLoc>("tl");
SourceLocation Loc = TL.getBeginLoc();
- if (Loc.isInvalid() || Loc.isMacroID())
- return;
-
// Look through qualification.
if (auto QualLoc = TL.getAs<QualifiedTypeLoc>())
TL = QualLoc.getUnqualifiedLoc();
diff --git a/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.h b/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.h
index 3be7d24021b9..c62bda67ae2d 100644
--- a/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.h
+++ b/clang-tools-extra/clang-tidy/google/IntegerTypesCheck.h
@@ -35,6 +35,9 @@ public:
void registerMatchers(ast_matchers::MatchFinder *Finder) override;
void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
void storeOptions(ClangTidyOptions::OptionMap &Opts) override;
+ std::optional<TraversalKind> getCheckTraversalKind() const override {
+ return TK_IgnoreUnlessSpelledInSource;
+ }
private:
const StringRef UnsignedTypePrefix;
diff --git a/clang-tools-extra/clang-tidy/modernize/UseUsingCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseUsingCheck.cpp
index bb05f206c717..24eefdb082eb 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseUsingCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseUsingCheck.cpp
@@ -8,6 +8,7 @@
#include "UseUsingCheck.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
#include "clang/Lex/Lexer.h"
using namespace clang::ast_matchers;
@@ -24,6 +25,7 @@ static constexpr llvm::StringLiteral ExternCDeclName = "extern-c-decl";
static constexpr llvm::StringLiteral ParentDeclName = "parent-decl";
static constexpr llvm::StringLiteral TagDeclName = "tag-decl";
static constexpr llvm::StringLiteral TypedefName = "typedef";
+static constexpr llvm::StringLiteral DeclStmtName = "decl-stmt";
UseUsingCheck::UseUsingCheck(StringRef Name, ClangTidyContext *Context)
: ClangTidyCheck(Name, Context),
@@ -41,7 +43,8 @@ void UseUsingCheck::registerMatchers(MatchFinder *Finder) {
unless(isInstantiated()),
optionally(hasAncestor(
linkageSpecDecl(isExternCLinkage()).bind(ExternCDeclName))),
- hasParent(decl().bind(ParentDeclName)))
+ anyOf(hasParent(decl().bind(ParentDeclName)),
+ hasParent(declStmt().bind(DeclStmtName))))
.bind(TypedefName),
this);
@@ -51,17 +54,32 @@ void UseUsingCheck::registerMatchers(MatchFinder *Finder) {
tagDecl(
anyOf(allOf(unless(anyOf(isImplicit(),
classTemplateSpecializationDecl())),
- hasParent(decl().bind(ParentDeclName))),
+ anyOf(hasParent(decl().bind(ParentDeclName)),
+ hasParent(declStmt().bind(DeclStmtName)))),
// We want the parent of the ClassTemplateDecl, not the parent
// of the specialization.
classTemplateSpecializationDecl(hasAncestor(classTemplateDecl(
- hasParent(decl().bind(ParentDeclName)))))))
+ anyOf(hasParent(decl().bind(ParentDeclName)),
+ hasParent(declStmt().bind(DeclStmtName))))))))
.bind(TagDeclName),
this);
}
void UseUsingCheck::check(const MatchFinder::MatchResult &Result) {
const auto *ParentDecl = Result.Nodes.getNodeAs<Decl>(ParentDeclName);
+
+ if (!ParentDecl) {
+ const auto *ParentDeclStmt = Result.Nodes.getNodeAs<DeclStmt>(DeclStmtName);
+ if (ParentDeclStmt) {
+ if (ParentDeclStmt->isSingleDecl())
+ ParentDecl = ParentDeclStmt->getSingleDecl();
+ else
+ ParentDecl =
+ ParentDeclStmt->getDeclGroup().getDeclGroup()
+ [ParentDeclStmt->getDeclGroup().getDeclGroup().size() - 1];
+ }
+ }
+
if (!ParentDecl)
return;
diff --git a/clang-tools-extra/clang-tidy/readability/StaticDefinitionInAnonymousNamespaceCheck.h b/clang-tools-extra/clang-tidy/readability/StaticDefinitionInAnonymousNamespaceCheck.h
index c28087e07e9b..620cd6e3f2f8 100644
--- a/clang-tools-extra/clang-tidy/readability/StaticDefinitionInAnonymousNamespaceCheck.h
+++ b/clang-tools-extra/clang-tidy/readability/StaticDefinitionInAnonymousNamespaceCheck.h
@@ -24,6 +24,12 @@ public:
: ClangTidyCheck(Name, Context) {}
void registerMatchers(ast_matchers::MatchFinder *Finder) override;
void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
+ bool isLanguageVersionSupported(const LangOptions &LangOpts) const override {
+ return LangOpts.CPlusPlus;
+ }
+ std::optional<TraversalKind> getCheckTraversalKind() const override {
+ return TK_IgnoreUnlessSpelledInSource;
+ }
};
} // namespace clang::tidy::readability
diff --git a/clang-tools-extra/clang-tidy/utils/IncludeSorter.cpp b/clang-tools-extra/clang-tidy/utils/IncludeSorter.cpp
index b6d9c50d0b10..a44720c47eca 100644
--- a/clang-tools-extra/clang-tidy/utils/IncludeSorter.cpp
+++ b/clang-tools-extra/clang-tidy/utils/IncludeSorter.cpp
@@ -108,7 +108,7 @@ int compareHeaders(StringRef LHS, StringRef RHS,
IncludeSorter::IncludeStyle Style) {
if (Style == IncludeSorter::IncludeStyle::IS_Google_ObjC) {
const std::pair<const char *, const char *> &Mismatch =
- std::mismatch(LHS.begin(), LHS.end(), RHS.begin());
+ std::mismatch(LHS.begin(), LHS.end(), RHS.begin(), RHS.end());
if ((Mismatch.first != LHS.end()) && (Mismatch.second != RHS.end())) {
if ((*Mismatch.first == '.') && (*Mismatch.second == '+')) {
return -1;
diff --git a/clang-tools-extra/clangd/ClangdLSPServer.cpp b/clang-tools-extra/clangd/ClangdLSPServer.cpp
index f29dadde2b86..7fd599d4e1a0 100644
--- a/clang-tools-extra/clangd/ClangdLSPServer.cpp
+++ b/clang-tools-extra/clangd/ClangdLSPServer.cpp
@@ -1390,7 +1390,7 @@ void ClangdLSPServer::onClangdInlayHints(const InlayHintsParams &Params,
// Extension doesn't have paddingLeft/Right so adjust the label
// accordingly.
{"label",
- ((Hint.paddingLeft ? " " : "") + llvm::StringRef(Hint.label) +
+ ((Hint.paddingLeft ? " " : "") + llvm::StringRef(Hint.joinLabels()) +
(Hint.paddingRight ? " " : ""))
.str()},
});
diff --git a/clang-tools-extra/clangd/InlayHints.cpp b/clang-tools-extra/clangd/InlayHints.cpp
index a0ebc631ef82..cd4f1931b3ce 100644
--- a/clang-tools-extra/clangd/InlayHints.cpp
+++ b/clang-tools-extra/clangd/InlayHints.cpp
@@ -977,8 +977,9 @@ private:
return;
bool PadLeft = Prefix.consume_front(" ");
bool PadRight = Suffix.consume_back(" ");
- Results.push_back(InlayHint{LSPPos, (Prefix + Label + Suffix).str(), Kind,
- PadLeft, PadRight, LSPRange});
+ Results.push_back(InlayHint{LSPPos,
+ /*label=*/{(Prefix + Label + Suffix).str()},
+ Kind, PadLeft, PadRight, LSPRange});
}
// Get the range of the main file that *exactly* corresponds to R.
diff --git a/clang-tools-extra/clangd/Protocol.cpp b/clang-tools-extra/clangd/Protocol.cpp
index c6553e00dcae..c08f80442eaa 100644
--- a/clang-tools-extra/clangd/Protocol.cpp
+++ b/clang-tools-extra/clangd/Protocol.cpp
@@ -1501,6 +1501,10 @@ bool operator<(const InlayHint &A, const InlayHint &B) {
return std::tie(A.position, A.range, A.kind, A.label) <
std::tie(B.position, B.range, B.kind, B.label);
}
+std::string InlayHint::joinLabels() const {
+ return llvm::join(llvm::map_range(label, [](auto &L) { return L.value; }),
+ "");
+}
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, InlayHintKind Kind) {
auto ToString = [](InlayHintKind K) {
@@ -1519,6 +1523,33 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, InlayHintKind Kind) {
return OS << ToString(Kind);
}
+llvm::json::Value toJSON(const InlayHintLabelPart &L) {
+ llvm::json::Object Result{{"value", L.value}};
+ if (L.tooltip)
+ Result["tooltip"] = *L.tooltip;
+ if (L.location)
+ Result["location"] = *L.location;
+ if (L.command)
+ Result["command"] = *L.command;
+ return Result;
+}
+
+bool operator==(const InlayHintLabelPart &LHS, const InlayHintLabelPart &RHS) {
+ return std::tie(LHS.value, LHS.location) == std::tie(RHS.value, RHS.location);
+}
+
+bool operator<(const InlayHintLabelPart &LHS, const InlayHintLabelPart &RHS) {
+ return std::tie(LHS.value, LHS.location) < std::tie(RHS.value, RHS.location);
+}
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const InlayHintLabelPart &L) {
+ OS << L.value;
+ if (L.location)
+ OS << " (" << L.location << ")";
+ return OS;
+}
+
static const char *toString(OffsetEncoding OE) {
switch (OE) {
case OffsetEncoding::UTF8:
diff --git a/clang-tools-extra/clangd/Protocol.h b/clang-tools-extra/clangd/Protocol.h
index 358d4c6feaf8..a0f8b04bc4ff 100644
--- a/clang-tools-extra/clangd/Protocol.h
+++ b/clang-tools-extra/clangd/Protocol.h
@@ -1689,6 +1689,48 @@ enum class InlayHintKind {
};
llvm::json::Value toJSON(const InlayHintKind &);
+/// An inlay hint label part allows for interactive and composite labels
+/// of inlay hints.
+struct InlayHintLabelPart {
+
+ InlayHintLabelPart() = default;
+
+ InlayHintLabelPart(std::string value,
+ std::optional<Location> location = std::nullopt)
+ : value(std::move(value)), location(std::move(location)) {}
+
+ /// The value of this label part.
+ std::string value;
+
+ /// The tooltip text when you hover over this label part. Depending on
+ /// the client capability `inlayHint.resolveSupport`, clients might resolve
+ /// this property late using the resolve request.
+ std::optional<MarkupContent> tooltip;
+
+ /// An optional source code location that represents this
+ /// label part.
+ ///
+ /// The editor will use this location for the hover and for code navigation
+ /// features: This part will become a clickable link that resolves to the
+ /// definition of the symbol at the given location (not necessarily the
+ /// location itself), it shows the hover that shows at the given location,
+ /// and it shows a context menu with further code navigation commands.
+ ///
+ /// Depending on the client capability `inlayHint.resolveSupport` clients
+ /// might resolve this property late using the resolve request.
+ std::optional<Location> location;
+
+ /// An optional command for this label part.
+ ///
+ /// Depending on the client capability `inlayHint.resolveSupport` clients
+ /// might resolve this property late using the resolve request.
+ std::optional<Command> command;
+};
+llvm::json::Value toJSON(const InlayHintLabelPart &);
+bool operator==(const InlayHintLabelPart &, const InlayHintLabelPart &);
+bool operator<(const InlayHintLabelPart &, const InlayHintLabelPart &);
+llvm::raw_ostream &operator<<(llvm::raw_ostream &, const InlayHintLabelPart &);
+
/// Inlay hint information.
struct InlayHint {
/// The position of this hint.
@@ -1698,7 +1740,7 @@ struct InlayHint {
/// InlayHintLabelPart label parts.
///
/// *Note* that neither the string nor the label part can be empty.
- std::string label;
+ std::vector<InlayHintLabelPart> label;
/// The kind of this hint. Can be omitted in which case the client should fall
/// back to a reasonable default.
@@ -1724,6 +1766,9 @@ struct InlayHint {
/// The range allows clients more flexibility of when/how to display the hint.
/// This is an (unserialized) clangd extension.
Range range;
+
+ /// Join the label[].value together.
+ std::string joinLabels() const;
};
llvm::json::Value toJSON(const InlayHint &);
bool operator==(const InlayHint &, const InlayHint &);
diff --git a/clang-tools-extra/clangd/support/Trace.h b/clang-tools-extra/clangd/support/Trace.h
index 1bfc75b874d8..36c3745a41e9 100644
--- a/clang-tools-extra/clangd/support/Trace.h
+++ b/clang-tools-extra/clangd/support/Trace.h
@@ -143,8 +143,8 @@ bool enabled();
class Span {
public:
Span(llvm::Twine Name);
- /// Records span's duration in seconds to \p LatencyMetric with \p Name as the
- /// label.
+ /// Records span's duration in milliseconds to \p LatencyMetric with \p Name
+ /// as the label.
Span(llvm::Twine Name, const Metric &LatencyMetric);
~Span();
diff --git a/clang-tools-extra/clangd/test/inlayHints.test b/clang-tools-extra/clangd/test/inlayHints.test
index 8f302dd17a54..e5b3c0fb0b96 100644
--- a/clang-tools-extra/clangd/test/inlayHints.test
+++ b/clang-tools-extra/clangd/test/inlayHints.test
@@ -51,7 +51,11 @@
# CHECK-NEXT: "result": [
# CHECK-NEXT: {
# CHECK-NEXT: "kind": 2,
-# CHECK-NEXT: "label": "bar:",
+# CHECK-NEXT: "label": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "value": "bar:"
+# CHECK-NEXT: }
+# CHECK-NEXT: ],
# CHECK-NEXT: "paddingLeft": false,
# CHECK-NEXT: "paddingRight": true,
# CHECK-NEXT: "position": {
diff --git a/clang-tools-extra/clangd/tool/Check.cpp b/clang-tools-extra/clangd/tool/Check.cpp
index 45e2e1e278de..25005ec1bd04 100644
--- a/clang-tools-extra/clangd/tool/Check.cpp
+++ b/clang-tools-extra/clangd/tool/Check.cpp
@@ -367,7 +367,13 @@ public:
auto Hints = inlayHints(*AST, LineRange);
for (const auto &Hint : Hints) {
- vlog(" {0} {1} {2}", Hint.kind, Hint.position, Hint.label);
+ vlog(" {0} {1} [{2}]", Hint.kind, Hint.position, [&] {
+ return llvm::join(llvm::map_range(Hint.label,
+ [&](auto &L) {
+ return llvm::formatv("{{{0}}", L);
+ }),
+ ", ");
+ }());
}
}
diff --git a/clang-tools-extra/clangd/unittests/InlayHintTests.cpp b/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
index 0fff0dfca6c9..5b1531eb2fa6 100644
--- a/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
+++ b/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
@@ -25,7 +25,7 @@ namespace clangd {
llvm::raw_ostream &operator<<(llvm::raw_ostream &Stream,
const InlayHint &Hint) {
- return Stream << Hint.label << "@" << Hint.range;
+ return Stream << Hint.joinLabels() << "@" << Hint.range;
}
namespace {
@@ -57,10 +57,11 @@ struct ExpectedHint {
MATCHER_P2(HintMatcher, Expected, Code, llvm::to_string(Expected)) {
llvm::StringRef ExpectedView(Expected.Label);
- if (arg.label != ExpectedView.trim(" ") ||
+ std::string ResultLabel = arg.joinLabels();
+ if (ResultLabel != ExpectedView.trim(" ") ||
arg.paddingLeft != ExpectedView.starts_with(" ") ||
arg.paddingRight != ExpectedView.ends_with(" ")) {
- *result_listener << "label is '" << arg.label << "'";
+ *result_listener << "label is '" << ResultLabel << "'";
return false;
}
if (arg.range != Code.range(Expected.RangeName)) {
@@ -72,7 +73,7 @@ MATCHER_P2(HintMatcher, Expected, Code, llvm::to_string(Expected)) {
return true;
}
-MATCHER_P(labelIs, Label, "") { return arg.label == Label; }
+MATCHER_P(labelIs, Label, "") { return arg.joinLabels() == Label; }
Config noHintsConfig() {
Config C;
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index a604e9276668..78b09d23d442 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -139,6 +139,10 @@ Changes in existing checks
<clang-tidy/checks/bugprone/assert-side-effect>` check by detecting side
effect from calling a method with non-const reference parameters.
+- Improved :doc:`bugprone-inc-dec-in-conditions
+ <clang-tidy/checks/bugprone/inc-dec-in-conditions>` check to ignore code
+ within unevaluated contexts, such as ``decltype``.
+
- Improved :doc:`bugprone-non-zero-enum-to-bool-conversion
<clang-tidy/checks/bugprone/non-zero-enum-to-bool-conversion>` check by
eliminating false positives resulting from direct usage of bitwise operators
@@ -176,13 +180,13 @@ Changes in existing checks
<clang-tidy/checks/cppcoreguidelines/owning-memory>` check to properly handle
return type in lambdas and in nested functions.
-- Cleaned up :doc:`cppcoreguidelines-prefer-member-initializer
- <clang-tidy/checks/cppcoreguidelines/prefer-member-initializer>`
+- Improved :doc:`cppcoreguidelines-prefer-member-initializer
+ <clang-tidy/checks/cppcoreguidelines/prefer-member-initializer>` check
by removing enforcement of rule `C.48
<https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#c48-prefer-in-class-initializers-to-member-initializers-in-constructors-for-constant-initializers>`_,
which was deprecated since :program:`clang-tidy` 17. This rule is now covered
by :doc:`cppcoreguidelines-use-default-member-init
- <clang-tidy/checks/cppcoreguidelines/use-default-member-init>` and fixes
+ <clang-tidy/checks/cppcoreguidelines/use-default-member-init>`. Fixed
incorrect hints when using list-initialization.
- Improved :doc:`google-build-namespaces
@@ -197,6 +201,9 @@ Changes in existing checks
<clang-tidy/checks/google/global-names-in-headers>` check by replacing the local
option `HeaderFileExtensions` by the global option of the same name.
+- Improved :doc:`google-runtime-int <clang-tidy/checks/google/runtime-int>`
+ check performance through optimizations.
+
- Improved :doc:`llvm-header-guard
<clang-tidy/checks/llvm/header-guard>` check by replacing the local
option `HeaderFileExtensions` by the global option of the same name.
@@ -229,12 +236,20 @@ Changes in existing checks
<clang-tidy/checks/modernize/use-override>` check to also remove any trailing
whitespace when deleting the ``virtual`` keyword.
+- Improved :doc:`modernize-use-using <clang-tidy/checks/modernize/use-using>`
+ check by adding support for detection of typedefs declared on function level.
+
- Improved :doc:`performance-unnecessary-copy-initialization
<clang-tidy/checks/performance/unnecessary-copy-initialization>` check by
detecting more cases of constant access. In particular, pointers can be
analyzed, se the check now handles the common patterns
`const auto e = (*vector_ptr)[i]` and `const auto e = vector_ptr->at(i);`.
+- Improved :doc:`readability-identifier-naming
+ <clang-tidy/checks/readability/identifier-naming>` check in `GetConfigPerFile`
+ mode by resolving symbolic links to header files. Fixed handling of Hungarian
+ Prefix when configured to `LowerCase`.
+
- Improved :doc:`readability-implicit-bool-conversion
<clang-tidy/checks/readability/implicit-bool-conversion>` check to provide
valid fix suggestions for ``static_cast`` without a preceding space and
@@ -244,10 +259,10 @@ Changes in existing checks
<clang-tidy/checks/readability/redundant-inline-specifier>` check to properly
emit warnings for static data member with an in-class initializer.
-- Improved :doc:`readability-identifier-naming
- <clang-tidy/checks/readability/identifier-naming>` check in `GetConfigPerFile`
- mode by resolving symbolic links to header files. Fixed handling of Hungarian
- Prefix when configured to `LowerCase`.
+- Improved :doc:`readability-static-definition-in-anonymous-namespace
+ <clang-tidy/checks/readability/static-definition-in-anonymous-namespace>`
+ check by resolving fix-it overlaps in template code by disregarding implicit
+ instances.
Removed checks
^^^^^^^^^^^^^^
@@ -258,9 +273,9 @@ Removed checks
Miscellaneous
^^^^^^^^^^^^^
-- Fixed incorrect formatting in ``clang-apply-replacements`` when no ``--format``
- option is specified. Now ``clang-apply-replacements`` applies formatting only with
- the option.
+- Fixed incorrect formatting in :program:`clang-apply-replacements` when no
+ ``--format`` option is specified. Now :program:`clang-apply-replacements`
+ applies formatting only with the option.
Improvements to include-fixer
-----------------------------
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/inc-dec-in-conditions.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/inc-dec-in-conditions.cpp
index 82af039973c3..91de013138f0 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/inc-dec-in-conditions.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/inc-dec-in-conditions.cpp
@@ -68,3 +68,13 @@ bool doubleCheck(Container<int> x) {
// CHECK-MESSAGES: :[[@LINE-1]]:11: warning: decrementing and referencing a variable in a complex condition can cause unintended side-effects due to C++'s order of evaluation, consider moving the modification outside of the condition to avoid misunderstandings [bugprone-inc-dec-in-conditions]
// CHECK-MESSAGES: :[[@LINE-2]]:31: warning: incrementing and referencing a variable in a complex condition can cause unintended side-effects due to C++'s order of evaluation, consider moving the modification outside of the condition to avoid misunderstandings [bugprone-inc-dec-in-conditions]
}
+
+namespace PR85838 {
+ void test()
+ {
+ auto foo = 0;
+ auto bar = 0;
+ if (++foo < static_cast<decltype(foo)>(bar)) {}
+ if (static_cast<decltype(++foo)>(bar) < foo) {}
+ }
+}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp
index 462bc984fd3a..925e5f9c1ca5 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-using.cpp
@@ -1,4 +1,4 @@
-// RUN: %check_clang_tidy %s modernize-use-using %t -- -- -I %S/Inputs/use-using/
+// RUN: %check_clang_tidy %s modernize-use-using %t -- -- -fno-delayed-template-parsing -I %S/Inputs/use-using/
typedef int Type;
// CHECK-MESSAGES: :[[@LINE-1]]:1: warning: use 'using' instead of 'typedef' [modernize-use-using]
@@ -342,3 +342,44 @@ typedef int InExternCPP;
// CHECK-FIXES: using InExternCPP = int;
}
+
+namespace ISSUE_72179
+{
+ void foo()
+ {
+ typedef int a;
+ // CHECK-MESSAGES: :[[@LINE-1]]:5: warning: use 'using' instead of 'typedef' [modernize-use-using]
+ // CHECK-FIXES: using a = int;
+
+ }
+
+ void foo2()
+ {
+ typedef struct { int a; union { int b; }; } c;
+ // CHECK-MESSAGES: :[[@LINE-1]]:5: warning: use 'using' instead of 'typedef' [modernize-use-using]
+ // CHECK-FIXES: using c = struct { int a; union { int b; }; };
+ }
+
+ template <typename T>
+ void foo3()
+ {
+ typedef T b;
+ // CHECK-MESSAGES: :[[@LINE-1]]:5: warning: use 'using' instead of 'typedef' [modernize-use-using]
+ // CHECK-FIXES: using b = T;
+ }
+
+ template <typename T>
+ class MyClass
+ {
+ void foo()
+ {
+ typedef MyClass c;
+ // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: use 'using' instead of 'typedef' [modernize-use-using]
+ // CHECK-FIXES: using c = MyClass;
+ }
+ };
+
+ const auto foo4 = [](int a){typedef int d;};
+ // CHECK-MESSAGES: :[[@LINE-1]]:31: warning: use 'using' instead of 'typedef' [modernize-use-using]
+ // CHECK-FIXES: const auto foo4 = [](int a){using d = int;};
+}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/static-definition-in-anonymous-namespace.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/static-definition-in-anonymous-namespace.cpp
index e9938db4f5b8..e204199393db 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/readability/static-definition-in-anonymous-namespace.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/static-definition-in-anonymous-namespace.cpp
@@ -51,6 +51,17 @@ static int c = 1;
} // namespace deep_inner
} // namespace inner
+template<typename T>
+static void printTemplate(T&&) {}
+// CHECK-MESSAGES: :[[@LINE-1]]:13: warning: 'printTemplate' is a static definition in anonymous namespace; static is redundant here [readability-static-definition-in-anonymous-namespace]
+// CHECK-FIXES: {{^}}void printTemplate(T&&) {}
+
+void testTemplate() {
+ printTemplate(5);
+ printTemplate(5U);
+ printTemplate("some string");
+}
+
} // namespace
namespace N {
diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt
index 47fc2e4886cf..284b2af24dda 100644
--- a/clang/CMakeLists.txt
+++ b/clang/CMakeLists.txt
@@ -13,8 +13,16 @@ if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
set(CLANG_BUILT_STANDALONE TRUE)
endif()
+# Make sure that our source directory is on the current cmake module path so that
+# we can include cmake files from this directory.
+list(INSERT CMAKE_MODULE_PATH 0
+ "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules"
+ "${LLVM_COMMON_CMAKE_UTILS}/Modules"
+ )
+
# Must go below project(..)
include(GNUInstallDirs)
+include(GetDarwinLinkerVersion)
if(CLANG_BUILT_STANDALONE)
set(CMAKE_CXX_STANDARD 17 CACHE STRING "C++ standard to conform to")
@@ -140,13 +148,6 @@ if(CLANG_BUILT_STANDALONE)
endif() # LLVM_INCLUDE_TESTS
endif() # standalone
-# Make sure that our source directory is on the current cmake module path so that
-# we can include cmake files from this directory.
-list(INSERT CMAKE_MODULE_PATH 0
- "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules"
- "${LLVM_COMMON_CMAKE_UTILS}/Modules"
- )
-
# This allows disabling clang's XML dependency even if LLVM finds libxml2.
# By default, clang depends on libxml2 if LLVM does.
option(CLANG_ENABLE_LIBXML2 "Whether libclang may depend on libxml2"
@@ -190,11 +191,12 @@ set(CLANG_RESOURCE_DIR "" CACHE STRING
set(C_INCLUDE_DIRS "" CACHE STRING
"Colon separated list of directories clang will search for headers.")
+set(USE_DEPRECATED_GCC_INSTALL_PREFIX OFF CACHE BOOL "Temporary workaround before GCC_INSTALL_PREFIX is completely removed")
set(GCC_INSTALL_PREFIX "" CACHE PATH "Directory where gcc is installed." )
set(DEFAULT_SYSROOT "" CACHE STRING
"Default <path> to all compiler invocations for --sysroot=<path>." )
-if(GCC_INSTALL_PREFIX)
- message(WARNING "GCC_INSTALL_PREFIX is deprecated and will be removed. Use "
+if(GCC_INSTALL_PREFIX AND NOT USE_DEPRECATED_GCC_INSTALL_PREFIX)
+ message(FATAL_ERROR "GCC_INSTALL_PREFIX is deprecated and will be removed. Use "
"configuration files (https://clang.llvm.org/docs/UsersManual.html#configuration-files)"
"to specify the default --gcc-install-dir= or --gcc-triple=. --gcc-toolchain= is discouraged. "
"See https://github.com/llvm/llvm-project/pull/77537 for detail.")
@@ -345,20 +347,7 @@ endif ()
# Determine HOST_LINK_VERSION on Darwin.
set(HOST_LINK_VERSION)
if (APPLE AND NOT CMAKE_LINKER MATCHES ".*lld.*")
- set(LD_V_OUTPUT)
- execute_process(
- COMMAND sh -c "${CMAKE_LINKER} -v 2>&1 | head -1"
- RESULT_VARIABLE HAD_ERROR
- OUTPUT_VARIABLE LD_V_OUTPUT
- )
- if (HAD_ERROR)
- message(FATAL_ERROR "${CMAKE_LINKER} failed with status ${HAD_ERROR}")
- endif()
- if ("${LD_V_OUTPUT}" MATCHES ".*ld64-([0-9.]+).*")
- string(REGEX REPLACE ".*ld64-([0-9.]+).*" "\\1" HOST_LINK_VERSION ${LD_V_OUTPUT})
- elseif ("${LD_V_OUTPUT}" MATCHES "[^0-9]*([0-9.]+).*")
- string(REGEX REPLACE "[^0-9]*([0-9.]+).*" "\\1" HOST_LINK_VERSION ${LD_V_OUTPUT})
- endif()
+ get_darwin_linker_version(HOST_LINK_VERSION)
message(STATUS "Host linker version: ${HOST_LINK_VERSION}")
endif()
diff --git a/clang/cmake/caches/CrossWinToARMLinux.cmake b/clang/cmake/caches/CrossWinToARMLinux.cmake
index 2a0953af53fa..736a54ece550 100644
--- a/clang/cmake/caches/CrossWinToARMLinux.cmake
+++ b/clang/cmake/caches/CrossWinToARMLinux.cmake
@@ -29,6 +29,11 @@
# cmake --build . --target check-cxxabi-<TOOLCHAIN_TARGET_TRIPLE>
# cmake --build . --target check-unwind-<TOOLCHAIN_TARGET_TRIPLE>
# cmake --build . --target check-cxx-<TOOLCHAIN_TARGET_TRIPLE>
+# (another way to execute the tests)
+# python bin/llvm-lit.py -v --threads=32 runtimes/runtimes-<TOOLCHAIN_TARGET_TRIPLE>-bins/libunwind/test 2>&1 | tee libunwind-tests.log
+# python bin/llvm-lit.py -v --threads=32 runtimes/runtimes-<TOOLCHAIN_TARGET_TRIPLE>-bins/libcxxabi/test 2>&1 | tee libcxxabi-tests.log
+# python bin/llvm-lit.py -v --threads=32 runtimes/runtimes-<TOOLCHAIN_TARGET_TRIPLE>-bins/libcxx/test 2>&1 | tee libcxx-tests.log
+
# LLVM_PROJECT_DIR is the path to the llvm-project directory.
# The right way to compute it would probably be to use "${CMAKE_SOURCE_DIR}/../",
@@ -42,9 +47,6 @@ if (NOT DEFINED DEFAULT_SYSROOT)
message(WARNING "DEFAULT_SYSROOT must be specified for the cross toolchain build.")
endif()
-if (NOT DEFINED LLVM_TARGETS_TO_BUILD)
- set(LLVM_TARGETS_TO_BUILD "ARM" CACHE STRING "")
-endif()
if (NOT DEFINED LLVM_ENABLE_ASSERTIONS)
set(LLVM_ENABLE_ASSERTIONS ON CACHE BOOL "")
endif()
@@ -56,7 +58,7 @@ if (NOT DEFINED LLVM_ENABLE_RUNTIMES)
endif()
if (NOT DEFINED TOOLCHAIN_TARGET_TRIPLE)
- set(TOOLCHAIN_TARGET_TRIPLE "armv7-unknown-linux-gnueabihf")
+ set(TOOLCHAIN_TARGET_TRIPLE "aarch64-unknown-linux-gnu")
else()
#NOTE: we must normalize specified target triple to a fully specified triple,
# including the vendor part. It is necessary to synchronize the runtime library
@@ -74,24 +76,38 @@ else()
string(REPLACE ";" "-" TOOLCHAIN_TARGET_TRIPLE "${TOOLCHAIN_TARGET_TRIPLE}")
endif()
+message(STATUS "Toolchain target triple: ${TOOLCHAIN_TARGET_TRIPLE}")
+
+if (NOT DEFINED LLVM_TARGETS_TO_BUILD)
+ if ("${TOOLCHAIN_TARGET_TRIPLE}" MATCHES "^(armv|arm32)+")
+ set(LLVM_TARGETS_TO_BUILD "ARM" CACHE STRING "")
+ endif()
+ if ("${TOOLCHAIN_TARGET_TRIPLE}" MATCHES "^(aarch64|arm64)+")
+ set(LLVM_TARGETS_TO_BUILD "AArch64" CACHE STRING "")
+ endif()
+endif()
+
+message(STATUS "Toolchain target to build: ${LLVM_TARGETS_TO_BUILD}")
+
if (NOT DEFINED CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release" CACHE STRING "")
endif()
-message(STATUS "Toolchain target triple: ${TOOLCHAIN_TARGET_TRIPLE}")
-
set(CMAKE_CROSSCOMPILING ON CACHE BOOL "")
set(CMAKE_CL_SHOWINCLUDES_PREFIX "Note: including file: " CACHE STRING "")
# Required if COMPILER_RT_DEFAULT_TARGET_ONLY is ON
set(CMAKE_C_COMPILER_TARGET "${TOOLCHAIN_TARGET_TRIPLE}" CACHE STRING "")
set(CMAKE_CXX_COMPILER_TARGET "${TOOLCHAIN_TARGET_TRIPLE}" CACHE STRING "")
-set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ON CACHE BOOL "")
set(LLVM_DEFAULT_TARGET_TRIPLE "${TOOLCHAIN_TARGET_TRIPLE}" CACHE STRING "")
set(LLVM_TARGET_ARCH "${TOOLCHAIN_TARGET_TRIPLE}" CACHE STRING "")
set(LLVM_LIT_ARGS "-vv ${LLVM_LIT_ARGS}" CACHE STRING "" FORCE)
+set(CLANG_DEFAULT_CXX_STDLIB "libc++" CACHE STRING "")
set(CLANG_DEFAULT_LINKER "lld" CACHE STRING "")
+set(CLANG_DEFAULT_OBJCOPY "llvm-objcopy" CACHE STRING "")
+set(CLANG_DEFAULT_RTLIB "compiler-rt" CACHE STRING "")
+set(CLANG_DEFAULT_UNWINDLIB "libunwind" CACHE STRING "")
if(WIN32)
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded" CACHE STRING "")
@@ -109,9 +125,10 @@ set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_SYSTEM_NAME
set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_SYSROOT "${DEFAULT_SYSROOT}" CACHE STRING "")
set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_INSTALL_RPATH "${RUNTIMES_INSTALL_RPATH}" CACHE STRING "")
set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_CMAKE_BUILD_WITH_INSTALL_RPATH ON CACHE BOOL "")
-
+set(BUILTINS_${TOOLCHAIN_TARGET_TRIPLE}_LLVM_CMAKE_DIR "${LLVM_PROJECT_DIR}/llvm/cmake/modules" CACHE PATH "")
set(LLVM_RUNTIME_TARGETS "${TOOLCHAIN_TARGET_TRIPLE}" CACHE STRING "")
+set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LLVM_ENABLE_RUNTIMES "${LLVM_ENABLE_RUNTIMES}" CACHE STRING "")
@@ -125,13 +142,16 @@ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_SANITIZERS
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_XRAY OFF CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_LIBFUZZER OFF CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_PROFILE OFF CACHE BOOL "")
-set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_CRT OFF CACHE BOOL "")
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_CRT ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_BUILD_ORC OFF CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_DEFAULT_TARGET_ONLY ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_INCLUDE_TESTS ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_CAN_EXECUTE_TESTS ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_USE_BUILTINS_LIBRARY ON CACHE BOOL "")
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_CXX_LIBRARY libcxx CACHE STRING "")
+# Tell Clang to search C++ headers alongside the just-built binaries for the C++ compiler-rt tests.
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_TEST_COMPILER_CFLAGS "--stdlib=libc++" CACHE STRING "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBUNWIND_USE_COMPILER_RT ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBUNWIND_ENABLE_SHARED OFF CACHE BOOL "")
@@ -148,8 +168,10 @@ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ABI_VERSION
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_CXX_ABI "libcxxabi" CACHE STRING "") #!!!
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ENABLE_NEW_DELETE_DEFINITIONS ON CACHE BOOL "")
-
+# Avoid searching for the python3 interpreter during the runtimes configuration for the cross builds.
+# It starts searching for the python3 package using the target's sysroot path, which is usually not compatible with the build host.
find_package(Python3 COMPONENTS Interpreter)
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_Python3_EXECUTABLE ${Python3_EXECUTABLE} CACHE PATH "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBUNWIND_TEST_PARAMS_default "${RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_TEST_PARAMS}")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_TEST_PARAMS_default "${RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_TEST_PARAMS}")
diff --git a/clang/cmake/caches/HLSL.cmake b/clang/cmake/caches/HLSL.cmake
index 71f81e53f6bd..84850c86f12c 100644
--- a/clang/cmake/caches/HLSL.cmake
+++ b/clang/cmake/caches/HLSL.cmake
@@ -4,7 +4,7 @@ set(LLVM_TARGETS_TO_BUILD Native CACHE STRING "")
# Include the DirectX target for DXIL code generation, eventually we'll include
# SPIR-V here too.
-set(LLVM_EXPERIMENTAL_TARGETS_TO_BUILD DirectX CACHE STRING "")
+set(LLVM_EXPERIMENTAL_TARGETS_TO_BUILD "DirectX;SPIRV" CACHE STRING "")
# HLSL support is currently limted to clang, eventually it will expand to
# clang-tools-extra too.
diff --git a/clang/docs/ClangFormat.rst b/clang/docs/ClangFormat.rst
index 819d9ee9f9cd..80dc38a075c8 100644
--- a/clang/docs/ClangFormat.rst
+++ b/clang/docs/ClangFormat.rst
@@ -61,6 +61,7 @@ to format C/C++/Java/JavaScript/JSON/Objective-C/Protobuf/C# code.
--dry-run - If set, do not actually make the formatting changes
--dump-config - Dump configuration options to stdout and exit.
Can be used with -style option.
+ --fail-on-incomplete-format - If set, fail with exit code 1 on incomplete format.
--fallback-style=<string> - The name of the predefined style used as a
fallback in case clang-format is invoked with
-style=file, but can not find the .clang-format
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index be021dfc5c08..2ee36f24d7ce 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -955,6 +955,151 @@ the configuration (without a prefix: ``Auto``).
}
+.. _AlignConsecutiveTableGenBreakingDAGArgColons:
+
+**AlignConsecutiveTableGenBreakingDAGArgColons** (``AlignConsecutiveStyle``) :versionbadge:`clang-format 19` :ref:`¶ <AlignConsecutiveTableGenBreakingDAGArgColons>`
+ Style of aligning consecutive TableGen DAGArg operator colons.
+ If enabled, align the colon inside DAGArg which has a line break inside.
+ This works only when TableGenBreakInsideDAGArg is BreakElements or
+ BreakAll and the DAGArg is not excepted by
+ TableGenBreakingDAGArgOperators's effect.
+
+ .. code-block:: c++
+
+ let dagarg = (ins
+ a :$src1,
+ aa :$src2,
+ aaa:$src3
+ )
+
+ Nested configuration flags:
+
+ Alignment options.
+
+ They can also be read as a whole for compatibility. The choices are:
+ - None
+ - Consecutive
+ - AcrossEmptyLines
+ - AcrossComments
+ - AcrossEmptyLinesAndComments
+
+ For example, to align across empty lines and not across comments, either
+ of these work.
+
+ .. code-block:: c++
+
+ AlignConsecutiveTableGenBreakingDAGArgColons: AcrossEmptyLines
+
+ AlignConsecutiveTableGenBreakingDAGArgColons:
+ Enabled: true
+ AcrossEmptyLines: true
+ AcrossComments: false
+
+ * ``bool Enabled`` Whether aligning is enabled.
+
+ .. code-block:: c++
+
+ #define SHORT_NAME 42
+ #define LONGER_NAME 0x007f
+ #define EVEN_LONGER_NAME (2)
+ #define foo(x) (x * x)
+ #define bar(y, z) (y + z)
+
+ int a = 1;
+ int somelongname = 2;
+ double c = 3;
+
+ int aaaa : 1;
+ int b : 12;
+ int ccc : 8;
+
+ int aaaa = 12;
+ float b = 23;
+ std::string ccc;
+
+ * ``bool AcrossEmptyLines`` Whether to align across empty lines.
+
+ .. code-block:: c++
+
+ true:
+ int a = 1;
+ int somelongname = 2;
+ double c = 3;
+
+ int d = 3;
+
+ false:
+ int a = 1;
+ int somelongname = 2;
+ double c = 3;
+
+ int d = 3;
+
+ * ``bool AcrossComments`` Whether to align across comments.
+
+ .. code-block:: c++
+
+ true:
+ int d = 3;
+ /* A comment. */
+ double e = 4;
+
+ false:
+ int d = 3;
+ /* A comment. */
+ double e = 4;
+
+ * ``bool AlignCompound`` Only for ``AlignConsecutiveAssignments``. Whether compound assignments
+ like ``+=`` are aligned along with ``=``.
+
+ .. code-block:: c++
+
+ true:
+ a &= 2;
+ bbb = 2;
+
+ false:
+ a &= 2;
+ bbb = 2;
+
+ * ``bool AlignFunctionPointers`` Only for ``AlignConsecutiveDeclarations``. Whether function pointers are
+ aligned.
+
+ .. code-block:: c++
+
+ true:
+ unsigned i;
+ int &r;
+ int *p;
+ int (*f)();
+
+ false:
+ unsigned i;
+ int &r;
+ int *p;
+ int (*f)();
+
+ * ``bool PadOperators`` Only for ``AlignConsecutiveAssignments``. Whether short assignment
+ operators are left-padded to the same length as long ones in order to
+ put all assignment operators to the right of the left hand side.
+
+ .. code-block:: c++
+
+ true:
+ a >>= 2;
+ bbb = 2;
+
+ a = 2;
+ bbb >>= 2;
+
+ false:
+ a >>= 2;
+ bbb = 2;
+
+ a = 2;
+ bbb >>= 2;
+
+
.. _AlignConsecutiveTableGenCondOperatorColons:
**AlignConsecutiveTableGenCondOperatorColons** (``AlignConsecutiveStyle``) :versionbadge:`clang-format 19` :ref:`¶ <AlignConsecutiveTableGenCondOperatorColons>`
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index 13d7261d83d7..7b23e4d1c2f3 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -1459,40 +1459,45 @@ More information could be found `here <https://clang.llvm.org/docs/Modules.html>
Language Extensions Back-ported to Previous Standards
=====================================================
-====================================== ================================ ============= =============
-Feature Feature Test Macro Introduced In Backported To
-====================================== ================================ ============= =============
-variadic templates __cpp_variadic_templates C++11 C++03
-Alias templates __cpp_alias_templates C++11 C++03
-Non-static data member initializers __cpp_nsdmi C++11 C++03
-Range-based ``for`` loop __cpp_range_based_for C++11 C++03
-RValue references __cpp_rvalue_references C++11 C++03
-Attributes __cpp_attributes C++11 C++03
-variable templates __cpp_variable_templates C++14 C++03
-Binary literals __cpp_binary_literals C++14 C++03
-Relaxed constexpr __cpp_constexpr C++14 C++11
-``if constexpr`` __cpp_if_constexpr C++17 C++11
-fold expressions __cpp_fold_expressions C++17 C++03
-Lambda capture of \*this by value __cpp_capture_star_this C++17 C++11
-Attributes on enums __cpp_enumerator_attributes C++17 C++03
-Guaranteed copy elision __cpp_guaranteed_copy_elision C++17 C++03
-Hexadecimal floating literals __cpp_hex_float C++17 C++03
-``inline`` variables __cpp_inline_variables C++17 C++03
-Attributes on namespaces __cpp_namespace_attributes C++17 C++11
-Structured bindings __cpp_structured_bindings C++17 C++03
-template template arguments __cpp_template_template_args C++17 C++03
-``static operator[]`` __cpp_multidimensional_subscript C++20 C++03
-Designated initializers __cpp_designated_initializers C++20 C++03
-Conditional ``explicit`` __cpp_conditional_explicit C++20 C++03
-``using enum`` __cpp_using_enum C++20 C++03
-``if consteval`` __cpp_if_consteval C++23 C++20
-``static operator()`` __cpp_static_call_operator C++23 C++03
-Attributes on Lambda-Expressions C++23 C++11
--------------------------------------- -------------------------------- ------------- -------------
-Designated initializers (N494) C99 C89
-Array & element qualification (N2607) C23 C89
-Attributes (N2335) C23 C89
-====================================== ================================ ============= =============
+============================================ ================================ ============= =============
+Feature Feature Test Macro Introduced In Backported To
+============================================ ================================ ============= =============
+variadic templates __cpp_variadic_templates C++11 C++03
+Alias templates __cpp_alias_templates C++11 C++03
+Non-static data member initializers __cpp_nsdmi C++11 C++03
+Range-based ``for`` loop __cpp_range_based_for C++11 C++03
+RValue references __cpp_rvalue_references C++11 C++03
+Attributes __cpp_attributes C++11 C++03
+Lambdas __cpp_lambdas C++11 C++03
+Generalized lambda captures __cpp_init_captures C++14 C++03
+Generic lambda expressions __cpp_generic_lambdas C++14 C++03
+variable templates __cpp_variable_templates C++14 C++03
+Binary literals __cpp_binary_literals C++14 C++03
+Relaxed constexpr __cpp_constexpr C++14 C++11
+Pack expansion in generalized lambda-capture __cpp_init_captures C++17 C++03
+``if constexpr`` __cpp_if_constexpr C++17 C++11
+fold expressions __cpp_fold_expressions C++17 C++03
+Lambda capture of \*this by value __cpp_capture_star_this C++17 C++03
+Attributes on enums __cpp_enumerator_attributes C++17 C++03
+Guaranteed copy elision __cpp_guaranteed_copy_elision C++17 C++03
+Hexadecimal floating literals __cpp_hex_float C++17 C++03
+``inline`` variables __cpp_inline_variables C++17 C++03
+Attributes on namespaces __cpp_namespace_attributes C++17 C++11
+Structured bindings __cpp_structured_bindings C++17 C++03
+template template arguments __cpp_template_template_args C++17 C++03
+Familiar template syntax for generic lambdas __cpp_generic_lambdas C++20 C++03
+``static operator[]`` __cpp_multidimensional_subscript C++20 C++03
+Designated initializers __cpp_designated_initializers C++20 C++03
+Conditional ``explicit`` __cpp_conditional_explicit C++20 C++03
+``using enum`` __cpp_using_enum C++20 C++03
+``if consteval`` __cpp_if_consteval C++23 C++20
+``static operator()`` __cpp_static_call_operator C++23 C++03
+Attributes on Lambda-Expressions C++23 C++11
+-------------------------------------------- -------------------------------- ------------- -------------
+Designated initializers (N494) C99 C89
+Array & element qualification (N2607) C23 C89
+Attributes (N2335) C23 C89
+============================================ ================================ ============= =============
Type Trait Primitives
=====================
@@ -3548,6 +3553,47 @@ argument can be of any unsigned integer type.
``__builtin_popcount{,l,ll}`` builtins, with support for other integer types,
such as ``unsigned __int128`` and C23 ``unsigned _BitInt(N)``.
+``__builtin_clzg`` and ``__builtin_ctzg``
+-----------------------------------------
+
+``__builtin_clzg`` (respectively ``__builtin_ctzg``) returns the number of
+leading (respectively trailing) 0 bits in the first argument. The first argument
+can be of any unsigned integer type.
+
+If the first argument is 0 and an optional second argument of ``int`` type is
+provided, then the second argument is returned. If the first argument is 0, but
+only one argument is provided, then the behavior is undefined.
+
+**Syntax**:
+
+.. code-block:: c++
+
+ int __builtin_clzg(type x[, int fallback])
+ int __builtin_ctzg(type x[, int fallback])
+
+**Examples**:
+
+.. code-block:: c++
+
+ unsigned int x = 1;
+ int x_lz = __builtin_clzg(x);
+ int x_tz = __builtin_ctzg(x);
+
+ unsigned long y = 2;
+ int y_lz = __builtin_clzg(y);
+ int y_tz = __builtin_ctzg(y);
+
+ unsigned _BitInt(128) z = 4;
+ int z_lz = __builtin_clzg(z);
+ int z_tz = __builtin_ctzg(z);
+
+**Description**:
+
+``__builtin_clzg`` (respectively ``__builtin_ctzg``) is meant to be a
+type-generic alternative to the ``__builtin_clz{,l,ll}`` (respectively
+``__builtin_ctz{,l,ll}``) builtins, with support for other integer types, such
+as ``unsigned __int128`` and C23 ``unsigned _BitInt(N)``.
+
Multiprecision Arithmetic Builtins
----------------------------------
@@ -5374,10 +5420,12 @@ The following builtin intrinsics can be used in constant expressions:
* ``__builtin_clzl``
* ``__builtin_clzll``
* ``__builtin_clzs``
+* ``__builtin_clzg``
* ``__builtin_ctz``
* ``__builtin_ctzl``
* ``__builtin_ctzll``
* ``__builtin_ctzs``
+* ``__builtin_ctzg``
* ``__builtin_ffs``
* ``__builtin_ffsl``
* ``__builtin_ffsll``
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index c0b0c8a8a3ea..76eaf0bf11c3 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -37,6 +37,9 @@ These changes are ones which we think may surprise users when upgrading to
Clang |release| because of the opportunity they pose for disruption to existing
code bases.
+- Setting the deprecated CMake variable ``GCC_INSTALL_PREFIX`` (which sets the
+ default ``--gcc-toolchain=``) now leads to a fatal error.
+
C/C++ Language Potentially Breaking Changes
-------------------------------------------
@@ -54,6 +57,12 @@ ABI Changes in This Version
inline member function that contains a static local variable with a dynamic
initializer is declared with ``__declspec(dllimport)``. (#GH83616).
+- Fixed Microsoft name mangling of lifetime extended temporary objects. This
+ change corrects missing back reference registrations that could result in
+ incorrect back reference indexes and surprising demangled name results. Since
+ MSVC uses a different mangling for these objects, compatibility is not affected.
+ (#GH85423).
+
AST Dumping Potentially Breaking Changes
----------------------------------------
@@ -177,6 +186,13 @@ Non-comprehensive list of changes in this release
the previous builtins, this new builtin is constexpr and may be used in
constant expressions.
+- Lambda expressions are now accepted in C++03 mode as an extension.
+
+- Added ``__builtin_clzg`` and ``__builtin_ctzg`` as type-generic alternatives
+ to ``__builtin_clz{,s,l,ll}`` and ``__builtin_ctz{,s,l,ll}`` respectively,
+ with support for any unsigned integer type. Like the previous builtins, these
+ new builtins are constexpr and may be used in constant expressions.
+
New Compiler Flags
------------------
@@ -193,7 +209,25 @@ Modified Compiler Flags
``-Wreturn-type``, and moved some of the diagnostics previously controlled by
``-Wreturn-type`` under this new flag. Fixes #GH72116.
-- Added ``-Wcast-function-type`` as a warning enabled by ``-Wextra``. #GH76872
+- Added ``-Wcast-function-type-mismatch`` under the ``-Wcast-function-type``
+ warning group. Moved the diagnostic previously controlled by
+ ``-Wcast-function-type`` to the new warning group and added
+ ``-Wcast-function-type-mismatch`` to ``-Wextra``. #GH76872
+
+ .. code-block:: c
+
+ int x(long);
+ typedef int (f2)(void*);
+ typedef int (f3)();
+
+ void func(void) {
+ // Diagnoses under -Wcast-function-type, -Wcast-function-type-mismatch,
+ // -Wcast-function-type-strict, -Wextra
+ f2 *b = (f2 *)x;
+ // Diagnoses under -Wcast-function-type, -Wcast-function-type-strict
+ f3 *c = (f3 *)x;
+ }
+
Removed Compiler Flags
-------------------------
@@ -266,6 +300,19 @@ Improvements to Clang's diagnostics
- Clang now correctly diagnoses no arguments to a variadic macro parameter as a C23/C++20 extension.
Fixes #GH84495.
+- Clang no longer emits a ``-Wexit-time-destructors`` warning on static variables explicitly
+  annotated with the ``clang::always_destroy`` attribute.
+  Fixes #GH68686, #GH86486
+
+- ``-Wmicrosoft``, ``-Wgnu``, or ``-pedantic`` is now required to diagnose C99
+  flexible array members in a union or alone in a struct. Fixes #GH84565.
+
+- Clang now no longer diagnoses type definitions in ``offsetof`` in C23 mode.
+ Fixes #GH83658.
+
+- New ``-Wformat-signedness`` diagnostic that warns if the format string requires an
+  unsigned argument and the argument is signed and vice versa.
+
Improvements to Clang's time-trace
----------------------------------
@@ -316,6 +363,9 @@ Bug Fixes in This Version
- Fixes an assertion failure on invalid code when trying to define member
functions in lambdas.
+- Fixed a regression in CTAD that a friend declaration that befriends itself may cause
+ incorrect constraint substitution. (#GH86769).
+
Bug Fixes to Compiler Builtins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -414,6 +464,10 @@ Bug Fixes to C++ Support
- Clang's __builtin_bit_cast will now produce a constant value for records with empty bases. See:
(#GH82383)
- Fix a crash when instantiating a lambda that captures ``this`` outside of its context. Fixes (#GH85343).
+- Fix an issue where a namespace alias could be defined using a qualified name (all name components
+ following the first `::` were ignored).
+- Fix an out-of-bounds crash when checking the validity of template partial specializations. (part of #GH86757).
+- Fix an issue caused by not handling invalid cases when substituting into the parameter mapping of a constraint. Fixes (#GH86757).
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -497,6 +551,7 @@ RISC-V Support
^^^^^^^^^^^^^^
- ``__attribute__((rvv_vector_bits(N)))`` is now supported for RVV vbool*_t types.
+- Profile names in ``-march`` option are now supported.
CUDA/HIP Language Changes
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -553,6 +608,7 @@ Static Analyzer
- Fixed crashing on loops if the loop variable was declared in switch blocks
but not under any case blocks if ``unroll-loops=true`` analyzer config is
set. (#GH68819)
+- Support C++23 static operator calls. (#GH84972)
New features
^^^^^^^^^^^^
@@ -584,6 +640,10 @@ Sanitizers
manually disable potentially noisy signed integer overflow checks with
``-fno-sanitize=signed-integer-overflow``
+- ``-fsanitize=cfi -fsanitize-cfi-cross-dso`` (cross-DSO CFI instrumentation)
+ now generates the ``__cfi_check`` function with proper target-specific
+ attributes, for example allowing unwind table generation.
+
Python Binding Changes
----------------------
diff --git a/clang/docs/UsersManual.rst b/clang/docs/UsersManual.rst
index 129e75fc9a78..c464bc3a69ad 100644
--- a/clang/docs/UsersManual.rst
+++ b/clang/docs/UsersManual.rst
@@ -2441,20 +2441,39 @@ usual build cycle when using sample profilers for optimization:
1. Build the code with source line table information. You can use all the
usual build flags that you always build your application with. The only
- requirement is that you add ``-gline-tables-only`` or ``-g`` to the
- command line. This is important for the profiler to be able to map
- instructions back to source line locations.
+ requirement is that DWARF debug info including source line information is
+ generated. This DWARF information is important for the profiler to be able
+ to map instructions back to source line locations.
+
+ On Linux, ``-g`` or just ``-gline-tables-only`` is sufficient:
.. code-block:: console
$ clang++ -O2 -gline-tables-only code.cc -o code
+ While MSVC-style targets default to CodeView debug information, DWARF debug
+ information is required to generate source-level LLVM profiles. Use
+ ``-gdwarf`` to include DWARF debug information:
+
+ .. code-block:: console
+
+ $ clang-cl -O2 -gdwarf -gline-tables-only coff-profile.cpp -fuse-ld=lld -link -debug:dwarf
+
2. Run the executable under a sampling profiler. The specific profiler
you use does not really matter, as long as its output can be converted
- into the format that the LLVM optimizer understands. Currently, there
- exists a conversion tool for the Linux Perf profiler
- (https://perf.wiki.kernel.org/), so these examples assume that you
- are using Linux Perf to profile your code.
+ into the format that the LLVM optimizer understands.
+
+  Two such profilers are the Linux Perf profiler
+  (https://perf.wiki.kernel.org/) and Intel's Sampling Enabling Product (SEP),
+  available as part of `Intel VTune
+  <https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/vtune-profiler.html>`_.
+  While Perf is Linux-specific, SEP can be used on Linux, Windows, and FreeBSD.
+
+ The LLVM tool ``llvm-profgen`` can convert output of either Perf or SEP. An
+ external project, `AutoFDO <https://github.com/google/autofdo>`_, also
+ provides a ``create_llvm_prof`` tool which supports Linux Perf output.
+
+ When using Perf:
.. code-block:: console
@@ -2465,11 +2484,19 @@ usual build cycle when using sample profilers for optimization:
it provides better call information, which improves the accuracy of
the profile data.
-3. Convert the collected profile data to LLVM's sample profile format.
- This is currently supported via the AutoFDO converter ``create_llvm_prof``.
- It is available at https://github.com/google/autofdo. Once built and
- installed, you can convert the ``perf.data`` file to LLVM using
- the command:
+ When using SEP:
+
+ .. code-block:: console
+
+ $ sep -start -out code.tb7 -ec BR_INST_RETIRED.NEAR_TAKEN:precise=yes:pdir -lbr no_filter:usr -perf-script brstack -app ./code
+
+ This produces a ``code.perf.data.script`` output which can be used with
+ ``llvm-profgen``'s ``--perfscript`` input option.
+
+3. Convert the collected profile data to LLVM's sample profile format. This is
+ currently supported via the `AutoFDO <https://github.com/google/autofdo>`_
+ converter ``create_llvm_prof``. Once built and installed, you can convert
+ the ``perf.data`` file to LLVM using the command:
.. code-block:: console
@@ -2485,7 +2512,14 @@ usual build cycle when using sample profilers for optimization:
.. code-block:: console
- $ llvm-profgen --binary=./code --output=code.prof--perfdata=perf.data
+ $ llvm-profgen --binary=./code --output=code.prof --perfdata=perf.data
+
+ When using SEP the output is in the textual format corresponding to
+ ``llvm-profgen --perfscript``. For example:
+
+ .. code-block:: console
+
+ $ llvm-profgen --binary=./code --output=code.prof --perfscript=code.perf.data.script
4. Build the code again using the collected profile. This step feeds
diff --git a/clang/docs/analyzer/checkers.rst b/clang/docs/analyzer/checkers.rst
index fe2115149142..f188f18ba555 100644
--- a/clang/docs/analyzer/checkers.rst
+++ b/clang/docs/analyzer/checkers.rst
@@ -340,6 +340,51 @@ cplusplus
C++ Checkers.
+.. _cplusplus-ArrayDelete:
+
+cplusplus.ArrayDelete (C++)
+"""""""""""""""""""""""""""
+
+Reports destructions of arrays of polymorphic objects that are destructed as
+their base class. If the dynamic type of the array is different from its static
+type, calling `delete[]` is undefined.
+
+This checker corresponds to the SEI CERT rule `EXP51-CPP: Do not delete an array through a pointer of the incorrect type <https://wiki.sei.cmu.edu/confluence/display/cplusplus/EXP51-CPP.+Do+not+delete+an+array+through+a+pointer+of+the+incorrect+type>`_.
+
+.. code-block:: cpp
+
+ class Base {
+ public:
+ virtual ~Base() {}
+ };
+ class Derived : public Base {};
+
+ Base *create() {
+ Base *x = new Derived[10]; // note: Casting from 'Derived' to 'Base' here
+ return x;
+ }
+
+ void foo() {
+ Base *x = create();
+ delete[] x; // warn: Deleting an array of 'Derived' objects as their base class 'Base' is undefined
+ }
+
+**Limitations**
+
+The checker does not emit note tags when casting to and from reference types,
+even though the pointer values are tracked across references.
+
+.. code-block:: cpp
+
+ void foo() {
+ Derived *d = new Derived[10];
+ Derived &dref = *d;
+
+ Base &bref = static_cast<Base&>(dref); // no note
+ Base *b = &bref;
+ delete[] b; // warn: Deleting an array of 'Derived' objects as their base class 'Base' is undefined
+ }
+
.. _cplusplus-InnerPointer:
cplusplus.InnerPointer (C++)
@@ -804,10 +849,89 @@ Check for performance anti-patterns when using Grand Central Dispatch.
.. _optin-performance-Padding:
-optin.performance.Padding
-"""""""""""""""""""""""""
+optin.performance.Padding (C, C++, ObjC)
+""""""""""""""""""""""""""""""""""""""""
Check for excessively padded structs.
+This checker detects structs with excessive padding, which can lead to wasted
+memory and thus decreased performance by reducing the effectiveness of the
+processor cache. Padding bytes are added by compilers to align data accesses,
+as some processors require data to be aligned to certain boundaries. On others,
+unaligned data accesses are possible, but impose significantly larger latencies.
+
+To avoid padding bytes, the fields of a struct should be ordered by decreasing
+alignment. Usually, it's easier to think of the ``sizeof`` of the fields, and
+ordering the fields by ``sizeof`` would usually also lead to the same optimal
+layout.
+
+In rare cases, one can use the ``#pragma pack(1)`` directive to enforce a packed
+layout too, but it can significantly increase the access times, so reordering the
+fields is usually a better solution.
+
+
+.. code-block:: cpp
+
+ // warn: Excessive padding in 'struct NonOptimal' (35 padding bytes, where 3 is optimal)
+ struct NonOptimal {
+ char c1;
+ // 7 bytes of padding
+ std::int64_t big1; // 8 bytes
+ char c2;
+ // 7 bytes of padding
+ std::int64_t big2; // 8 bytes
+ char c3;
+ // 7 bytes of padding
+ std::int64_t big3; // 8 bytes
+ char c4;
+ // 7 bytes of padding
+ std::int64_t big4; // 8 bytes
+ char c5;
+ // 7 bytes of padding
+ };
+ static_assert(sizeof(NonOptimal) == 4*8+5+5*7);
+
+ // no-warning: The fields are nicely aligned to have the minimal amount of padding bytes.
+ struct Optimal {
+ std::int64_t big1; // 8 bytes
+ std::int64_t big2; // 8 bytes
+ std::int64_t big3; // 8 bytes
+ std::int64_t big4; // 8 bytes
+ char c1;
+ char c2;
+ char c3;
+ char c4;
+ char c5;
+ // 3 bytes of padding
+ };
+ static_assert(sizeof(Optimal) == 4*8+5+3);
+
+ // no-warning: Bit packing representation is also accepted by this checker, but
+ // it can significantly increase access times, so prefer reordering the fields.
+ #pragma pack(1)
+ struct BitPacked {
+ char c1;
+ std::int64_t big1; // 8 bytes
+ char c2;
+ std::int64_t big2; // 8 bytes
+ char c3;
+ std::int64_t big3; // 8 bytes
+ char c4;
+ std::int64_t big4; // 8 bytes
+ char c5;
+ };
+ static_assert(sizeof(BitPacked) == 4*8+5);
+
+The ``AllowedPad`` option can be used to specify a threshold for the number of
+padding bytes raising the warning. If the number of padding bytes of the struct
+and the optimal number of padding bytes differ by more than the threshold value,
+a warning will be raised.
+
+By default, the ``AllowedPad`` threshold is 24 bytes.
+
+To override this threshold to e.g. 4 bytes, use the
+``-analyzer-config optin.performance.Padding:AllowedPad=4`` option.
+
+
.. _optin-portability-UnixAPI:
optin.portability.UnixAPI
@@ -2139,30 +2263,6 @@ Either the comparison is useless or there is division by zero.
alpha.cplusplus
^^^^^^^^^^^^^^^
-.. _alpha-cplusplus-ArrayDelete:
-
-alpha.cplusplus.ArrayDelete (C++)
-"""""""""""""""""""""""""""""""""
-Reports destructions of arrays of polymorphic objects that are destructed as their base class.
-This checker corresponds to the CERT rule `EXP51-CPP: Do not delete an array through a pointer of the incorrect type <https://wiki.sei.cmu.edu/confluence/display/cplusplus/EXP51-CPP.+Do+not+delete+an+array+through+a+pointer+of+the+incorrect+type>`_.
-
-.. code-block:: cpp
-
- class Base {
- virtual ~Base() {}
- };
- class Derived : public Base {}
-
- Base *create() {
- Base *x = new Derived[10]; // note: Casting from 'Derived' to 'Base' here
- return x;
- }
-
- void foo() {
- Base *x = create();
- delete[] x; // warn: Deleting an array of 'Derived' objects as their base class 'Base' is undefined
- }
-
.. _alpha-cplusplus-DeleteWithNonVirtualDtor:
alpha.cplusplus.DeleteWithNonVirtualDtor (C++)
@@ -3020,44 +3120,82 @@ Check for misuses of stream APIs. Check for misuses of stream APIs: ``fopen, fcl
alpha.unix.Stream (C)
"""""""""""""""""""""
-Check stream handling functions: ``fopen, tmpfile, fclose, fread, fwrite, fseek, ftell, rewind, fgetpos,``
-``fsetpos, clearerr, feof, ferror, fileno``.
+Check C stream handling functions:
+``fopen, fdopen, freopen, tmpfile, fclose, fread, fwrite, fgetc, fgets, fputc, fputs, fprintf, fscanf, ungetc, getdelim, getline, fseek, fseeko, ftell, ftello, fflush, rewind, fgetpos, fsetpos, clearerr, feof, ferror, fileno``.
+
+The checker maintains information about the C stream objects (``FILE *``) and
+can detect error conditions related to use of streams. The following conditions
+are detected:
+
+* The ``FILE *`` pointer passed to the function is NULL (the single exception is
+ ``fflush`` where NULL is allowed).
+* Use of stream after close.
+* Opened stream is not closed.
+* Read from a stream after end-of-file. (This is not a fatal error but reported
+ by the checker. Stream remains in EOF state and the read operation fails.)
+* Use of stream when the file position is indeterminate after a previous failed
+ operation. Some functions (like ``ferror``, ``clearerr``, ``fseek``) are
+ allowed in this state.
+* Invalid 3rd ("``whence``") argument to ``fseek``.
+
+The checker does not track the correspondence between integer file descriptors
+and ``FILE *`` pointers. Operations on standard streams like ``stdin`` are not
+treated specially and are therefore often not recognized (because these streams
+are usually not opened explicitly by the program, and are global variables).
.. code-block:: c
- void test() {
+ void test1() {
FILE *p = fopen("foo", "r");
} // warn: opened file is never closed
- void test() {
+ void test2() {
FILE *p = fopen("foo", "r");
fseek(p, 1, SEEK_SET); // warn: stream pointer might be NULL
fclose(p);
}
- void test() {
+ void test3() {
FILE *p = fopen("foo", "r");
+ if (p) {
+ fseek(p, 1, 3); // warn: third arg should be SEEK_SET, SEEK_END, or SEEK_CUR
+ fclose(p);
+ }
+ }
- if (p)
- fseek(p, 1, 3);
- // warn: third arg should be SEEK_SET, SEEK_END, or SEEK_CUR
+ void test4() {
+ FILE *p = fopen("foo", "r");
+ if (!p)
+ return;
fclose(p);
+ fclose(p); // warn: stream already closed
}
- void test() {
+ void test5() {
FILE *p = fopen("foo", "r");
+ if (!p)
+ return;
+
+ fgetc(p);
+ if (!ferror(p))
+ fgetc(p); // warn: possible read after end-of-file
+
fclose(p);
- fclose(p); // warn: already closed
}
- void test() {
- FILE *p = tmpfile();
- ftell(p); // warn: stream pointer might be NULL
+ void test6() {
+ FILE *p = fopen("foo", "r");
+ if (!p)
+ return;
+
+ fgetc(p);
+ if (!feof(p))
+ fgetc(p); // warn: file position may be indeterminate after I/O error
+
fclose(p);
}
-
.. _alpha-unix-cstring-BufferOverlap:
alpha.unix.cstring.BufferOverlap (C)
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index 60db3cf0966c..7a8bd985a91f 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -2991,6 +2991,7 @@ enum CXCallingConv {
CXCallingConv_AArch64SVEPCS = 18,
CXCallingConv_M68kRTD = 19,
CXCallingConv_PreserveNone = 20,
+ CXCallingConv_RISCVVectorCall = 21,
CXCallingConv_Invalid = 100,
CXCallingConv_Unexposed = 200
diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h
index 47ed6d0d1db0..858450926455 100644
--- a/clang/include/clang/AST/DeclBase.h
+++ b/clang/include/clang/AST/DeclBase.h
@@ -669,9 +669,8 @@ public:
/// Whether this declaration comes from another module unit.
bool isInAnotherModuleUnit() const;
- /// FIXME: Implement discarding declarations actually in global module
- /// fragment. See [module.global.frag]p3,4 for details.
- bool isDiscardedInGlobalModuleFragment() const { return false; }
+ /// Whether this declaration comes from explicit global module.
+ bool isFromExplicitGlobalModule() const;
/// Check if we should skip checking ODRHash for declaration \param D.
///
diff --git a/clang/include/clang/AST/DeclContextInternals.h b/clang/include/clang/AST/DeclContextInternals.h
index 903cdb7bfcc8..c4734ab57895 100644
--- a/clang/include/clang/AST/DeclContextInternals.h
+++ b/clang/include/clang/AST/DeclContextInternals.h
@@ -205,7 +205,7 @@ public:
Data.setPointer(Head);
}
- /// Return an array of all the decls that this list represents.
+ /// Return the list of all the decls.
DeclContext::lookup_result getLookupResult() const {
return DeclContext::lookup_result(Data.getPointer());
}
diff --git a/clang/include/clang/AST/FormatString.h b/clang/include/clang/AST/FormatString.h
index e2232fb4a471..a074dd23e2ad 100644
--- a/clang/include/clang/AST/FormatString.h
+++ b/clang/include/clang/AST/FormatString.h
@@ -284,6 +284,8 @@ public:
/// The conversion specifier and the argument type are disallowed by the C
/// standard, but are in practice harmless. For instance, "%p" and int*.
NoMatchPedantic,
+ /// The conversion specifier and the argument type have different sign.
+ NoMatchSignedness,
/// The conversion specifier and the argument type are compatible, but still
/// seems likely to be an error. For instance, "%hd" and _Bool.
NoMatchTypeConfusion,
diff --git a/clang/include/clang/AST/TextNodeDumper.h b/clang/include/clang/AST/TextNodeDumper.h
index de67f0b57148..efb5bfe7f83d 100644
--- a/clang/include/clang/AST/TextNodeDumper.h
+++ b/clang/include/clang/AST/TextNodeDumper.h
@@ -352,6 +352,7 @@ public:
void VisitEnumConstantDecl(const EnumConstantDecl *D);
void VisitIndirectFieldDecl(const IndirectFieldDecl *D);
void VisitFunctionDecl(const FunctionDecl *D);
+ void VisitCXXDeductionGuideDecl(const CXXDeductionGuideDecl *D);
void VisitFieldDecl(const FieldDecl *D);
void VisitVarDecl(const VarDecl *D);
void VisitBindingDecl(const BindingDecl *D);
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index b3bf23227a47..5d8dde37e769 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -1690,7 +1690,10 @@ protected:
/// Whether we have a stored size expression.
LLVM_PREFERRED_TYPE(bool)
- unsigned HasStoredSizeExpr : 1;
+ unsigned HasExternalSize : 1;
+
+ LLVM_PREFERRED_TYPE(unsigned)
+ unsigned SizeWidth : 5;
};
class BuiltinTypeBitfields {
@@ -3338,35 +3341,93 @@ public:
/// Represents the canonical version of C arrays with a specified constant size.
/// For example, the canonical type for 'int A[4 + 4*100]' is a
/// ConstantArrayType where the element type is 'int' and the size is 404.
-class ConstantArrayType final
- : public ArrayType,
- private llvm::TrailingObjects<ConstantArrayType, const Expr *> {
+class ConstantArrayType final : public ArrayType {
friend class ASTContext; // ASTContext creates these.
- friend TrailingObjects;
- llvm::APInt Size; // Allows us to unique the type.
+ struct ExternalSize {
+ ExternalSize(const llvm::APInt &Sz, const Expr *SE)
+ : Size(Sz), SizeExpr(SE) {}
+ llvm::APInt Size; // Allows us to unique the type.
+ const Expr *SizeExpr;
+ };
- ConstantArrayType(QualType et, QualType can, const llvm::APInt &size,
- const Expr *sz, ArraySizeModifier sm, unsigned tq)
- : ArrayType(ConstantArray, et, can, sm, tq, sz), Size(size) {
- ConstantArrayTypeBits.HasStoredSizeExpr = sz != nullptr;
- if (ConstantArrayTypeBits.HasStoredSizeExpr) {
- assert(!can.isNull() && "canonical constant array should not have size");
- *getTrailingObjects<const Expr*>() = sz;
- }
+ union {
+ uint64_t Size;
+ ExternalSize *SizePtr;
+ };
+
+ ConstantArrayType(QualType Et, QualType Can, uint64_t Width, uint64_t Sz,
+ ArraySizeModifier SM, unsigned TQ)
+ : ArrayType(ConstantArray, Et, Can, SM, TQ, nullptr), Size(Sz) {
+ ConstantArrayTypeBits.HasExternalSize = false;
+ ConstantArrayTypeBits.SizeWidth = Width / 8;
+ // The in-structure size stores the size in bytes rather than bits so we
+ // drop the three least significant bits since they're always zero anyways.
+ assert(Width < 0xFF && "Type width in bits must be less than 8 bits");
}
- unsigned numTrailingObjects(OverloadToken<const Expr*>) const {
- return ConstantArrayTypeBits.HasStoredSizeExpr;
+ ConstantArrayType(QualType Et, QualType Can, ExternalSize *SzPtr,
+ ArraySizeModifier SM, unsigned TQ)
+ : ArrayType(ConstantArray, Et, Can, SM, TQ, SzPtr->SizeExpr),
+ SizePtr(SzPtr) {
+ ConstantArrayTypeBits.HasExternalSize = true;
+ ConstantArrayTypeBits.SizeWidth = 0;
+
+ assert((SzPtr->SizeExpr == nullptr || !Can.isNull()) &&
+ "canonical constant array should not have size expression");
}
+ static ConstantArrayType *Create(const ASTContext &Ctx, QualType ET,
+ QualType Can, const llvm::APInt &Sz,
+ const Expr *SzExpr, ArraySizeModifier SzMod,
+ unsigned Qual);
+
public:
- const llvm::APInt &getSize() const { return Size; }
+ /// Return the constant array size as an APInt.
+ llvm::APInt getSize() const {
+ return ConstantArrayTypeBits.HasExternalSize
+ ? SizePtr->Size
+ : llvm::APInt(ConstantArrayTypeBits.SizeWidth * 8, Size);
+ }
+
+ /// Return the bit width of the size type.
+ unsigned getSizeBitWidth() const {
+ return ConstantArrayTypeBits.HasExternalSize
+ ? SizePtr->Size.getBitWidth()
+ : static_cast<unsigned>(ConstantArrayTypeBits.SizeWidth * 8);
+ }
+
+ /// Return true if the size is zero.
+ bool isZeroSize() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.isZero()
+ : 0 == Size;
+ }
+
+ /// Return the size zero-extended as a uint64_t.
+ uint64_t getZExtSize() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.getZExtValue()
+ : Size;
+ }
+
+ /// Return the size sign-extended as a uint64_t.
+ int64_t getSExtSize() const {
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->Size.getSExtValue()
+ : static_cast<int64_t>(Size);
+ }
+
+ /// Return the size zero-extended to uint64_t or UINT64_MAX if the value is
+ /// larger than UINT64_MAX.
+ uint64_t getLimitedSize() const {
+ return ConstantArrayTypeBits.HasExternalSize
+ ? SizePtr->Size.getLimitedValue()
+ : Size;
+ }
+
+ /// Return a pointer to the size expression.
const Expr *getSizeExpr() const {
- return ConstantArrayTypeBits.HasStoredSizeExpr
- ? *getTrailingObjects<const Expr *>()
- : nullptr;
+ return ConstantArrayTypeBits.HasExternalSize ? SizePtr->SizeExpr : nullptr;
}
+
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -3383,14 +3444,13 @@ public:
static unsigned getMaxSizeBits(const ASTContext &Context);
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
- Profile(ID, Ctx, getElementType(), getSize(), getSizeExpr(),
+ Profile(ID, Ctx, getElementType(), getZExtSize(), getSizeExpr(),
getSizeModifier(), getIndexTypeCVRQualifiers());
}
static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx,
- QualType ET, const llvm::APInt &ArraySize,
- const Expr *SizeExpr, ArraySizeModifier SizeMod,
- unsigned TypeQuals);
+ QualType ET, uint64_t ArraySize, const Expr *SizeExpr,
+ ArraySizeModifier SizeMod, unsigned TypeQuals);
static bool classof(const Type *T) {
return T->getTypeClass() == ConstantArray;
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index 2330697299fd..c30bccd06674 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -744,6 +744,35 @@ RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
std::vector<const FieldDecl *>
getFieldsForInitListExpr(const InitListExpr *InitList);
+/// Helper class for initialization of a record with an `InitListExpr`.
+/// `InitListExpr::inits()` contains the initializers for both the base classes
+/// and the fields of the record; this helper class separates these out into two
+/// different lists. In addition, it deals with special cases associated with
+/// unions.
+class RecordInitListHelper {
+public:
+ // `InitList` must have record type.
+ RecordInitListHelper(const InitListExpr *InitList);
+
+ // Base classes with their associated initializer expressions.
+ ArrayRef<std::pair<const CXXBaseSpecifier *, Expr *>> base_inits() const {
+ return BaseInits;
+ }
+
+ // Fields with their associated initializer expressions.
+ ArrayRef<std::pair<const FieldDecl *, Expr *>> field_inits() const {
+ return FieldInits;
+ }
+
+private:
+ SmallVector<std::pair<const CXXBaseSpecifier *, Expr *>> BaseInits;
+ SmallVector<std::pair<const FieldDecl *, Expr *>> FieldInits;
+
+ // We potentially synthesize an `ImplicitValueInitExpr` for unions. It's a
+ // member variable because we store a pointer to it in `FieldInits`.
+ std::optional<ImplicitValueInitExpr> ImplicitValueInitForUnion;
+};
+
/// Associates a new `RecordValue` with `Loc` and returns the new value.
RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env);
diff --git a/clang/include/clang/Analysis/PathDiagnostic.h b/clang/include/clang/Analysis/PathDiagnostic.h
index 90559e7efb06..5907df022e44 100644
--- a/clang/include/clang/Analysis/PathDiagnostic.h
+++ b/clang/include/clang/Analysis/PathDiagnostic.h
@@ -780,6 +780,9 @@ class PathDiagnostic : public llvm::FoldingSetNode {
PathDiagnosticLocation UniqueingLoc;
const Decl *UniqueingDecl;
+ /// The top-level entry point from which this issue was discovered.
+ const Decl *AnalysisEntryPoint = nullptr;
+
/// Lines executed in the path.
std::unique_ptr<FilesToLineNumsMap> ExecutedLines;
@@ -788,7 +791,7 @@ public:
PathDiagnostic(StringRef CheckerName, const Decl *DeclWithIssue,
StringRef bugtype, StringRef verboseDesc, StringRef shortDesc,
StringRef category, PathDiagnosticLocation LocationToUnique,
- const Decl *DeclToUnique,
+ const Decl *DeclToUnique, const Decl *AnalysisEntryPoint,
std::unique_ptr<FilesToLineNumsMap> ExecutedLines);
~PathDiagnostic();
@@ -852,6 +855,9 @@ public:
return *ExecutedLines;
}
+ /// Get the top-level entry point from which this issue was discovered.
+ const Decl *getAnalysisEntryPoint() const { return AnalysisEntryPoint; }
+
/// Return the semantic context where an issue occurred. If the
/// issue occurs along a path, this represents the "central" area
/// where the bug manifests.
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 3e03e5561264..80e607525a0a 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -3011,6 +3011,13 @@ def PreserveNone : DeclOrTypeAttr, TargetSpecificAttr<TargetAnyX86> {
let Documentation = [PreserveNoneDocs];
}
+def RISCVVectorCC: DeclOrTypeAttr, TargetSpecificAttr<TargetRISCV> {
+ let Spellings = [CXX11<"riscv", "vector_cc">,
+ C23<"riscv", "vector_cc">,
+ Clang<"riscv_vector_cc">];
+ let Documentation = [RISCVVectorCCDocs];
+}
+
def Target : InheritableAttr {
let Spellings = [GCC<"target">];
let Args = [StringArgument<"featuresStr">];
@@ -3088,6 +3095,20 @@ def TargetClones : InheritableAttr {
StringRef getFeatureStr(unsigned Index) const {
return *(featuresStrs_begin() + Index);
}
+ bool isDefaultVersion(unsigned Index) const {
+ return getFeatureStr(Index) == "default";
+ }
+ void getFeatures(llvm::SmallVectorImpl<StringRef> &Out,
+ unsigned Index) const {
+ if (isDefaultVersion(Index)) return;
+ StringRef Features = getFeatureStr(Index);
+ SmallVector<StringRef, 8> AttrFeatures;
+ Features.split(AttrFeatures, "+");
+ for (auto &Feature : AttrFeatures) {
+ Feature = Feature.trim();
+ Out.push_back(Feature);
+ }
+ }
// Given an index into the 'featuresStrs' sequence, compute a unique
// ID to be used with function name mangling for the associated variant.
// This mapping is necessary due to a requirement that the mangling ID
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 9de14f608fd1..3ea4d676b4f8 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -5494,6 +5494,17 @@ for clang builtin functions.
}];
}
+def RISCVVectorCCDocs : Documentation {
+ let Category = DocCatCallingConvs;
+ let Heading = "riscv::vector_cc, riscv_vector_cc, clang::riscv_vector_cc";
+ let Content = [{
+The ``riscv_vector_cc`` attribute can be applied to a function. It preserves 15
+registers namely, v1-v7 and v24-v31 as callee-saved. Callers thus don't need
+to save these registers before function calls, and callees only need to save
+them if they use them.
+ }];
+}
+
def PreferredNameDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
@@ -6069,6 +6080,9 @@ def AlwaysDestroyDocs : Documentation {
The ``always_destroy`` attribute specifies that a variable with static or thread
storage duration should have its exit-time destructor run. This attribute is the
default unless clang was invoked with -fno-c++-static-destructors.
+
+If a variable is explicitly declared with this attribute, Clang will silence
+otherwise applicable ``-Wexit-time-destructors`` warnings.
}];
}
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 491c9d895413..f421223ff087 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -676,7 +676,11 @@ def Clz : Builtin, BitShort_Int_Long_LongLongTemplate {
let Prototype = "int(unsigned T)";
}
-// FIXME: Add int clzimax(uintmax_t)
+def Clzg : Builtin {
+ let Spellings = ["__builtin_clzg"];
+ let Attributes = [NoThrow, Const, Constexpr, CustomTypeChecking];
+ let Prototype = "int(...)";
+}
def Ctz : Builtin, BitShort_Int_Long_LongLongTemplate {
let Spellings = ["__builtin_ctz"];
@@ -684,7 +688,11 @@ def Ctz : Builtin, BitShort_Int_Long_LongLongTemplate {
let Prototype = "int(unsigned T)";
}
-// FIXME: Add int ctzimax(uintmax_t)
+def Ctzg : Builtin {
+ let Spellings = ["__builtin_ctzg"];
+ let Attributes = [NoThrow, Const, Constexpr, CustomTypeChecking];
+ let Prototype = "int(...)";
+}
def FFS : Builtin, BitInt_Long_LongLongTemplate {
let Spellings = ["__builtin_ffs"];
@@ -4591,6 +4599,12 @@ def HLSLWaveActiveCountBits : LangBuiltin<"HLSL_LANG"> {
let Prototype = "unsigned int(bool)";
}
+def HLSLWaveGetLaneIndex : LangBuiltin<"HLSL_LANG"> {
+ let Spellings = ["__builtin_hlsl_wave_get_lane_index"];
+ let Attributes = [NoThrow, Const];
+ let Prototype = "unsigned int()";
+}
+
def HLSLClamp : LangBuiltin<"HLSL_LANG"> {
let Spellings = ["__builtin_hlsl_elementwise_clamp"];
let Attributes = [NoThrow, Const];
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 61ec8b79bf05..c660582cc98e 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -432,13 +432,10 @@ TARGET_BUILTIN(__builtin_amdgcn_s_wakeup_barrier, "vi", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_barrier_leave, "b", "n", "gfx12-insts")
TARGET_BUILTIN(__builtin_amdgcn_s_get_barrier_state, "Uii", "n", "gfx12-insts")
-TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v2i32, "V2iV2i*1", "nc", "gfx12-insts,wavefrontsize32")
-TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v8i16, "V8sV8s*1", "nc", "gfx12-insts,wavefrontsize32")
-TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v8f16, "V8hV8h*1", "nc", "gfx12-insts,wavefrontsize32")
-
-TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_i32, "ii*1", "nc", "gfx12-insts,wavefrontsize64")
-TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v4i16, "V4sV4s*1", "nc", "gfx12-insts,wavefrontsize64")
-TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v4f16, "V4hV4h*1", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_b64_v2i32, "V2iV2i*1", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_b128_v8i16, "V8sV8s*1", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_b64_i32, "ii*1", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_b128_v4i16, "V4sV4s*1", "nc", "gfx12-insts,wavefrontsize64")
//===----------------------------------------------------------------------===//
// WMMA builtins.
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index e33a1f4c45b9..592ed3bda515 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -753,7 +753,8 @@ def err_drv_hlsl_unsupported_target : Error<
"HLSL code generation is unsupported for target '%0'">;
def err_drv_hlsl_bad_shader_required_in_target : Error<
"%select{shader model|Vulkan environment|shader stage}0 is required as %select{OS|environment}1 in target '%2' for HLSL code generation">;
-
+def err_drv_hlsl_16bit_types_unsupported: Error<
+ "'%0' option requires target HLSL Version >= 2018%select{| and shader model >= 6.2}1, but HLSL Version is '%2'%select{| and shader model is '%3'}1">;
def err_drv_hlsl_bad_shader_unsupported : Error<
"%select{shader model|Vulkan environment|shader stage}0 '%1' in target '%2' is invalid for HLSL code generation">;
def warn_drv_dxc_missing_dxv : Warning<"dxv not found. "
diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td
index bf03d4e8f67e..520168f01fd8 100644
--- a/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/clang/include/clang/Basic/DiagnosticGroups.td
@@ -573,7 +573,10 @@ def SelTypeCast : DiagGroup<"cast-of-sel-type">;
def FunctionDefInObjCContainer : DiagGroup<"function-def-in-objc-container">;
def BadFunctionCast : DiagGroup<"bad-function-cast">;
def CastFunctionTypeStrict : DiagGroup<"cast-function-type-strict">;
-def CastFunctionType : DiagGroup<"cast-function-type", [CastFunctionTypeStrict]>;
+def CastFunctionTypeMismatch : DiagGroup<"cast-function-type-mismatch">;
+def CastFunctionType : DiagGroup<"cast-function-type",
+ [CastFunctionTypeStrict,
+ CastFunctionTypeMismatch]>;
def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
def ObjCPropertyNoAttribute : DiagGroup<"objc-property-no-attribute">;
def ObjCPropertyAssignOnObjectType : DiagGroup<"objc-property-assign-on-object-type">;
@@ -982,6 +985,7 @@ def FormatSecurity : DiagGroup<"format-security">;
def FormatNonStandard : DiagGroup<"format-non-iso">;
def FormatY2K : DiagGroup<"format-y2k">;
def FormatPedantic : DiagGroup<"format-pedantic">;
+def FormatSignedness : DiagGroup<"format-signedness">;
def FormatTypeConfusion : DiagGroup<"format-type-confusion">;
def FormatOverflowNonKprintf: DiagGroup<"format-overflow-non-kprintf">;
@@ -1038,7 +1042,7 @@ def Extra : DiagGroup<"extra", [
EmptyInitStatement,
StringConcatation,
FUseLdPath,
- CastFunctionType,
+ CastFunctionTypeMismatch,
]>;
def Most : DiagGroup<"most", [
diff --git a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
index f99a5fca64cb..e3263fe9ccb9 100644
--- a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
+++ b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
@@ -15,6 +15,10 @@ let CategoryName = "Command line" in {
def err_cannot_write_file : Error<"cannot write file '%0': %1">;
def err_no_install_name : Error<"no install name specified: add -install_name <path>">;
def err_no_output_file: Error<"no output file specified">;
+def err_no_such_header_file : Error<"no such %select{public|private|project}1 header file: '%0'">;
+def warn_no_such_excluded_header_file : Warning<"no such excluded %select{public|private}0 header file: '%1'">, InGroup<InstallAPIViolation>;
+def warn_glob_did_not_match: Warning<"glob '%0' did not match any header file">, InGroup<InstallAPIViolation>;
+def err_no_such_umbrella_header_file : Error<"%select{public|private|project}1 umbrella header file not found in input: '%0'">;
} // end of command line category.
let CategoryName = "Verification" in {
@@ -26,6 +30,7 @@ def warn_library_hidden_symbol : Warning<"declaration has external linkage, but
def warn_header_hidden_symbol : Warning<"symbol exported in dynamic library, but marked hidden in declaration '%0'">, InGroup<InstallAPIViolation>;
def err_header_hidden_symbol : Error<"symbol exported in dynamic library, but marked hidden in declaration '%0'">;
def err_header_symbol_missing : Error<"no declaration found for exported symbol '%0' in dynamic library">;
+def warn_header_symbol_missing : Warning<"no declaration was found for exported symbol '%0' in dynamic library">, InGroup<InstallAPIViolation>;
def warn_header_availability_mismatch : Warning<"declaration '%0' is marked %select{available|unavailable}1,"
" but symbol is %select{not |}2exported in dynamic library">, InGroup<InstallAPIViolation>;
def err_header_availability_mismatch : Error<"declaration '%0' is marked %select{available|unavailable}1,"
diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td
index 816c3ff5f8b2..46a44418a315 100644
--- a/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -268,6 +268,8 @@ def err_expected_semi_after_namespace_name : Error<
"expected ';' after namespace name">;
def err_unexpected_namespace_attributes_alias : Error<
"attributes cannot be specified on namespace alias">;
+def err_unexpected_qualified_namespace_alias : Error<
+ "namespace alias must be a single identifier">;
def err_unexpected_nested_namespace_attribute : Error<
"attributes cannot be specified on a nested namespace definition">;
def err_inline_namespace_alias : Error<"namespace alias cannot be inline">;
@@ -1029,6 +1031,7 @@ def err_expected_lambda_body : Error<"expected body of lambda expression">;
def warn_cxx98_compat_lambda : Warning<
"lambda expressions are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def ext_lambda : ExtWarn<"lambdas are a C++11 extension">, InGroup<CXX11>;
def err_lambda_decl_specifier_repeated : Error<
"%select{'mutable'|'static'|'constexpr'|'consteval'}0 cannot "
"appear multiple times in a lambda declarator">;
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 2646942a53e3..df57f5e6ce11 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -1748,8 +1748,8 @@ def err_type_defined_in_condition : Error<
def err_type_defined_in_enum : Error<
"%0 cannot be defined in an enumeration">;
def ext_type_defined_in_offsetof : Extension<
- "defining a type within '%select{__builtin_offsetof|offsetof}0' is a Clang "
- "extension">, InGroup<GNUOffsetofExtensions>;
+ "defining a type within '%select{__builtin_offsetof|offsetof}0' is a C23 "
+ "extension">, InGroup<C23>;
def note_pure_virtual_function : Note<
"unimplemented pure virtual method %0 in %1">;
@@ -6464,9 +6464,6 @@ def ext_c99_flexible_array_member : Extension<
def err_flexible_array_virtual_base : Error<
"flexible array member %0 not allowed in "
"%select{struct|interface|union|class|enum}1 which has a virtual base class">;
-def err_flexible_array_empty_aggregate : Error<
- "flexible array member %0 not allowed in otherwise empty "
- "%select{struct|interface|union|class|enum}1">;
def err_flexible_array_has_nontrivial_dtor : Error<
"flexible array member %0 of type %1 with non-trivial destruction">;
def ext_flexible_array_in_struct : Extension<
@@ -6481,8 +6478,6 @@ def ext_flexible_array_empty_aggregate_ms : Extension<
"flexible array member %0 in otherwise empty "
"%select{struct|interface|union|class|enum}1 is a Microsoft extension">,
InGroup<MicrosoftFlexibleArray>;
-def err_flexible_array_union : Error<
- "flexible array member %0 in a union is not allowed">;
def ext_flexible_array_union_ms : Extension<
"flexible array member %0 in a union is a Microsoft extension">,
InGroup<MicrosoftFlexibleArray>;
@@ -9058,7 +9053,7 @@ def warn_bad_function_cast : Warning<
InGroup<BadFunctionCast>, DefaultIgnore;
def warn_cast_function_type : Warning<
"cast %diff{from $ to $ |}0,1converts to incompatible function type">,
- InGroup<CastFunctionType>, DefaultIgnore;
+ InGroup<CastFunctionTypeMismatch>, DefaultIgnore;
def warn_cast_function_type_strict : Warning<warn_cast_function_type.Summary>,
InGroup<CastFunctionTypeStrict>, DefaultIgnore;
def err_cast_pointer_to_non_pointer_int : Error<
@@ -9826,6 +9821,9 @@ def warn_format_conversion_argument_type_mismatch : Warning<
def warn_format_conversion_argument_type_mismatch_pedantic : Extension<
warn_format_conversion_argument_type_mismatch.Summary>,
InGroup<FormatPedantic>;
+def warn_format_conversion_argument_type_mismatch_signedness : Warning<
+ warn_format_conversion_argument_type_mismatch.Summary>,
+ InGroup<FormatSignedness>, DefaultIgnore;
def warn_format_conversion_argument_type_mismatch_confusion : Warning<
warn_format_conversion_argument_type_mismatch.Summary>,
InGroup<FormatTypeConfusion>, DefaultIgnore;
@@ -12020,13 +12018,14 @@ def err_builtin_launder_invalid_arg : Error<
"'__builtin_launder' is not allowed">;
def err_builtin_invalid_arg_type: Error <
- "%ordinal0 argument must be a "
- "%select{vector, integer or floating point type|matrix|"
- "pointer to a valid matrix element type|"
- "signed integer or floating point type|vector type|"
- "floating point type|"
- "vector of integers|"
- "type of unsigned integer}1 (was %2)">;
+ "%ordinal0 argument must be "
+ "%select{a vector, integer or floating point type|a matrix|"
+ "a pointer to a valid matrix element type|"
+ "a signed integer or floating point type|a vector type|"
+ "a floating point type|"
+ "a vector of integers|"
+ "an unsigned integer|"
+ "an 'int'}1 (was %2)">;
def err_builtin_matrix_disabled: Error<
"matrix types extension is disabled. Pass -fenable-matrix to enable it">;
diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def
index 726ead4b5ab5..b41aadc73f20 100644
--- a/clang/include/clang/Basic/Features.def
+++ b/clang/include/clang/Basic/Features.def
@@ -261,6 +261,7 @@ EXTENSION(cxx_defaulted_functions, LangOpts.CPlusPlus)
EXTENSION(cxx_deleted_functions, LangOpts.CPlusPlus)
EXTENSION(cxx_explicit_conversions, LangOpts.CPlusPlus)
EXTENSION(cxx_inline_namespaces, LangOpts.CPlusPlus)
+EXTENSION(cxx_lambdas, LangOpts.CPlusPlus)
EXTENSION(cxx_local_type_template_args, LangOpts.CPlusPlus)
EXTENSION(cxx_nonstatic_member_init, LangOpts.CPlusPlus)
EXTENSION(cxx_override_control, LangOpts.CPlusPlus)
diff --git a/clang/include/clang/Basic/LangStandard.h b/clang/include/clang/Basic/LangStandard.h
index 199e24c67316..8e25afc83366 100644
--- a/clang/include/clang/Basic/LangStandard.h
+++ b/clang/include/clang/Basic/LangStandard.h
@@ -26,8 +26,9 @@ enum class Language : uint8_t {
/// Assembly: we accept this only so that we can preprocess it.
Asm,
- /// LLVM IR: we accept this so that we can run the optimizer on it,
- /// and compile it to assembly or object code.
+ /// LLVM IR & CIR: we accept these so that we can run the optimizer on them,
+ /// and compile them to assembly or object code (or LLVM for CIR).
+ CIR,
LLVM_IR,
///@{ Languages that the frontend can parse and compile.
diff --git a/clang/include/clang/Basic/Specifiers.h b/clang/include/clang/Basic/Specifiers.h
index 8586405825cf..fb11e8212f8b 100644
--- a/clang/include/clang/Basic/Specifiers.h
+++ b/clang/include/clang/Basic/Specifiers.h
@@ -273,29 +273,30 @@ namespace clang {
/// CallingConv - Specifies the calling convention that a function uses.
enum CallingConv {
- CC_C, // __attribute__((cdecl))
- CC_X86StdCall, // __attribute__((stdcall))
- CC_X86FastCall, // __attribute__((fastcall))
- CC_X86ThisCall, // __attribute__((thiscall))
- CC_X86VectorCall, // __attribute__((vectorcall))
- CC_X86Pascal, // __attribute__((pascal))
- CC_Win64, // __attribute__((ms_abi))
- CC_X86_64SysV, // __attribute__((sysv_abi))
- CC_X86RegCall, // __attribute__((regcall))
- CC_AAPCS, // __attribute__((pcs("aapcs")))
- CC_AAPCS_VFP, // __attribute__((pcs("aapcs-vfp")))
- CC_IntelOclBicc, // __attribute__((intel_ocl_bicc))
- CC_SpirFunction, // default for OpenCL functions on SPIR target
- CC_OpenCLKernel, // inferred for OpenCL kernels
- CC_Swift, // __attribute__((swiftcall))
+ CC_C, // __attribute__((cdecl))
+ CC_X86StdCall, // __attribute__((stdcall))
+ CC_X86FastCall, // __attribute__((fastcall))
+ CC_X86ThisCall, // __attribute__((thiscall))
+ CC_X86VectorCall, // __attribute__((vectorcall))
+ CC_X86Pascal, // __attribute__((pascal))
+ CC_Win64, // __attribute__((ms_abi))
+ CC_X86_64SysV, // __attribute__((sysv_abi))
+ CC_X86RegCall, // __attribute__((regcall))
+ CC_AAPCS, // __attribute__((pcs("aapcs")))
+ CC_AAPCS_VFP, // __attribute__((pcs("aapcs-vfp")))
+ CC_IntelOclBicc, // __attribute__((intel_ocl_bicc))
+ CC_SpirFunction, // default for OpenCL functions on SPIR target
+ CC_OpenCLKernel, // inferred for OpenCL kernels
+ CC_Swift, // __attribute__((swiftcall))
CC_SwiftAsync, // __attribute__((swiftasynccall))
- CC_PreserveMost, // __attribute__((preserve_most))
- CC_PreserveAll, // __attribute__((preserve_all))
+ CC_PreserveMost, // __attribute__((preserve_most))
+ CC_PreserveAll, // __attribute__((preserve_all))
CC_AArch64VectorCall, // __attribute__((aarch64_vector_pcs))
- CC_AArch64SVEPCS, // __attribute__((aarch64_sve_pcs))
- CC_AMDGPUKernelCall, // __attribute__((amdgpu_kernel))
- CC_M68kRTD, // __attribute__((m68k_rtd))
- CC_PreserveNone, // __attribute__((preserve_none))
+ CC_AArch64SVEPCS, // __attribute__((aarch64_sve_pcs))
+ CC_AMDGPUKernelCall, // __attribute__((amdgpu_kernel))
+ CC_M68kRTD, // __attribute__((m68k_rtd))
+ CC_PreserveNone, // __attribute__((preserve_none))
+ CC_RISCVVectorCall, // __attribute__((riscv_vector_cc))
};
/// Checks whether the given calling convention supports variadic
diff --git a/clang/include/clang/Basic/SyncScope.h b/clang/include/clang/Basic/SyncScope.h
index bc7ec7b5cf77..45beff41afa1 100644
--- a/clang/include/clang/Basic/SyncScope.h
+++ b/clang/include/clang/Basic/SyncScope.h
@@ -252,8 +252,7 @@ public:
}
bool isValid(unsigned S) const override {
- return S >= static_cast<unsigned>(System) &&
- S <= static_cast<unsigned>(Last);
+ return S <= static_cast<unsigned>(Last);
}
ArrayRef<unsigned> getRuntimeValues() const override {
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 374595edd2ce..e1ef7454f016 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -267,6 +267,9 @@ protected:
LLVM_PREFERRED_TYPE(bool)
unsigned AllowAMDGPUUnsafeFPAtomics : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasUnalignedAccess : 1;
+
unsigned ARMCDECoprocMask : 8;
unsigned MaxOpenCLWorkGroupSize;
@@ -859,6 +862,18 @@ public:
return PointerWidth;
}
+ /// Return true iff unaligned accesses are a single instruction (rather than
+ /// a synthesized sequence).
+ bool hasUnalignedAccess() const { return HasUnalignedAccess; }
+
+ /// Return true iff unaligned accesses are cheap. This affects placement and
+ /// size of bitfield loads/stores. (Not the ABI-mandated placement of
+ /// the bitfields themselves.)
+ bool hasCheapUnalignedBitFieldAccess() const {
+ // Simply forward to the unaligned access getter.
+ return hasUnalignedAccess();
+ }
+
/// \brief Returns the default value of the __USER_LABEL_PREFIX__ macro,
/// which is the prefix given to user symbols by default.
///
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 4a954258ce40..04eb87f0d5d1 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -3635,6 +3635,9 @@ defm preserve_as_comments : BoolFOption<"preserve-as-comments",
"Do not preserve comments in inline assembly">,
PosFlag<SetTrue>>;
def framework : Separate<["-"], "framework">, Flags<[LinkerInput]>;
+def reexport_framework : Separate<["-"], "reexport_framework">, Flags<[LinkerInput]>;
+def reexport_l : Joined<["-"], "reexport-l">, Flags<[LinkerInput]>;
+def reexport_library : JoinedOrSeparate<["-"], "reexport_library">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<["-"], "frandom-seed=">, Group<clang_ignored_f_Group>;
def freg_struct_return : Flag<["-"], "freg-struct-return">, Group<f_Group>,
Visibility<[ClangOption, CC1Option]>,
@@ -4105,12 +4108,8 @@ defm strict_return : BoolFOption<"strict-return",
" of a non-void function as unreachable">,
PosFlag<SetTrue>>;
-let Group = f_Group in {
- let Visibility = [ClangOption,CC1Option] in {
- def fptrauth_intrinsics : Flag<["-"], "fptrauth-intrinsics">,
- HelpText<"Enable pointer authentication intrinsics">;
- }
- def fno_ptrauth_intrinsics : Flag<["-"], "fno-ptrauth-intrinsics">;
+let Flags = [TargetSpecific] in {
+defm ptrauth_intrinsics : OptInCC1FFlag<"ptrauth-intrinsics", "Enable pointer authentication intrinsics">;
}
def fenable_matrix : Flag<["-"], "fenable-matrix">, Group<f_Group>,
@@ -4511,6 +4510,9 @@ def mwindows : Joined<["-"], "mwindows">, Group<m_Group>;
def mdll : Joined<["-"], "mdll">, Group<m_Group>;
def municode : Joined<["-"], "municode">, Group<m_Group>;
def mthreads : Joined<["-"], "mthreads">, Group<m_Group>;
+def marm64x : Joined<["-"], "marm64x">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>,
+ HelpText<"Link as a hybrid ARM64X image">;
def mguard_EQ : Joined<["-"], "mguard=">, Group<m_Group>,
HelpText<"Enable or disable Control Flow Guard checks and guard tables emission">,
Values<"none,cf,cf-nochecks">;
@@ -6688,6 +6690,9 @@ def analyzer_opt_analyze_headers : Flag<["-"], "analyzer-opt-analyze-headers">,
def analyzer_display_progress : Flag<["-"], "analyzer-display-progress">,
HelpText<"Emit verbose output about the analyzer's progress">,
MarshallingInfoFlag<AnalyzerOpts<"AnalyzerDisplayProgress">>;
+def analyzer_note_analysis_entry_points : Flag<["-"], "analyzer-note-analysis-entry-points">,
+ HelpText<"Add a note for each bug report to denote their analysis entry points">,
+ MarshallingInfoFlag<AnalyzerOpts<"AnalyzerNoteAnalysisEntryPoints">>;
def analyze_function : Separate<["-"], "analyze-function">,
HelpText<"Run analysis on specific function (for C++ include parameters in name)">,
MarshallingInfoString<AnalyzerOpts<"AnalyzeSpecificFunction">>;
diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def
index f72c27e1ee70..0e0cae5fb706 100644
--- a/clang/include/clang/Driver/Types.def
+++ b/clang/include/clang/Driver/Types.def
@@ -90,6 +90,7 @@ TYPE("ir", LLVM_BC, INVALID, "bc", phases
TYPE("lto-ir", LTO_IR, INVALID, "s", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("lto-bc", LTO_BC, INVALID, "o", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("cir", CIR, INVALID, "cir", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
// Misc.
TYPE("ast", AST, INVALID, "ast", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("ifs", IFS, INVALID, "ifs", phases::IfsMerge)
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 7ad2579bf777..0720c8283cd7 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -414,6 +414,21 @@ struct FormatStyle {
/// \version 17
ShortCaseStatementsAlignmentStyle AlignConsecutiveShortCaseStatements;
+ /// Style of aligning consecutive TableGen DAGArg operator colons.
+ /// If enabled, align the colon inside DAGArg which have line break inside.
+ /// This works only when TableGenBreakInsideDAGArg is BreakElements or
+ /// BreakAll and the DAGArg is not excepted by
+ /// TableGenBreakingDAGArgOperators's effect.
+ /// \code
+ /// let dagarg = (ins
+ /// a :$src1,
+ /// aa :$src2,
+ /// aaa:$src3
+ /// )
+ /// \endcode
+ /// \version 19
+ AlignConsecutiveStyle AlignConsecutiveTableGenBreakingDAGArgColons;
+
/// Style of aligning consecutive TableGen cond operator colons.
/// Align the colons of cases inside !cond operators.
/// \code
@@ -4879,6 +4894,8 @@ struct FormatStyle {
AlignConsecutiveMacros == R.AlignConsecutiveMacros &&
AlignConsecutiveShortCaseStatements ==
R.AlignConsecutiveShortCaseStatements &&
+ AlignConsecutiveTableGenBreakingDAGArgColons ==
+ R.AlignConsecutiveTableGenBreakingDAGArgColons &&
AlignConsecutiveTableGenCondOperatorColons ==
R.AlignConsecutiveTableGenCondOperatorColons &&
AlignConsecutiveTableGenDefinitionColons ==
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index cce91862ae3d..3464654284f1 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -133,6 +133,24 @@ class CompilerInstance : public ModuleLoader {
std::vector<std::shared_ptr<DependencyCollector>> DependencyCollectors;
+ /// Records the set of modules
+ class FailedModulesSet {
+ llvm::StringSet<> Failed;
+
+ public:
+ bool hasAlreadyFailed(StringRef module) { return Failed.count(module) > 0; }
+
+ void addFailed(StringRef module) { Failed.insert(module); }
+ };
+
+ /// The set of modules that failed to build.
+ ///
+ /// This pointer will be shared among all of the compiler instances created
+ /// to (re)build modules, so that once a module fails to build anywhere,
+ /// other instances will see that the module has failed and won't try to
+ /// build it again.
+ std::shared_ptr<FailedModulesSet> FailedModules;
+
/// The set of top-level modules that has already been built on the
/// fly as part of this overall compilation action.
std::map<std::string, std::string, std::less<>> BuiltModules;
@@ -619,6 +637,24 @@ public:
}
/// @}
+ /// @name Failed modules set
+ /// @{
+
+ bool hasFailedModulesSet() const { return (bool)FailedModules; }
+
+ void createFailedModulesSet() {
+ FailedModules = std::make_shared<FailedModulesSet>();
+ }
+
+ std::shared_ptr<FailedModulesSet> getFailedModulesSetPtr() const {
+ return FailedModules;
+ }
+
+ void setFailedModulesSet(std::shared_ptr<FailedModulesSet> FMS) {
+ FailedModules = FMS;
+ }
+
+ /// }
/// @name Output Files
/// @{
diff --git a/clang/include/clang/Frontend/FrontendActions.h b/clang/include/clang/Frontend/FrontendActions.h
index a620ddfc4044..0518a8823a03 100644
--- a/clang/include/clang/Frontend/FrontendActions.h
+++ b/clang/include/clang/Frontend/FrontendActions.h
@@ -34,12 +34,18 @@ public:
/// Preprocessor-based frontend action that also loads PCH files.
class ReadPCHAndPreprocessAction : public FrontendAction {
+ llvm::unique_function<void(CompilerInstance &)> AdjustCI;
+
void ExecuteAction() override;
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
public:
+ ReadPCHAndPreprocessAction(
+ llvm::unique_function<void(CompilerInstance &)> AdjustCI)
+ : AdjustCI(std::move(AdjustCI)) {}
+
bool usesPreprocessorOnly() const override { return false; }
};
@@ -321,11 +327,15 @@ protected:
class GetDependenciesByModuleNameAction : public PreprocessOnlyAction {
StringRef ModuleName;
+ llvm::unique_function<void(CompilerInstance &)> AdjustCI;
+
void ExecuteAction() override;
public:
- GetDependenciesByModuleNameAction(StringRef ModuleName)
- : ModuleName(ModuleName) {}
+ GetDependenciesByModuleNameAction(
+ StringRef ModuleName,
+ llvm::unique_function<void(CompilerInstance &)> AdjustCI)
+ : ModuleName(ModuleName), AdjustCI(std::move(AdjustCI)) {}
};
} // end namespace clang
diff --git a/clang/include/clang/InstallAPI/DylibVerifier.h b/clang/include/clang/InstallAPI/DylibVerifier.h
index bbfa8711313e..22cdc234486c 100644
--- a/clang/include/clang/InstallAPI/DylibVerifier.h
+++ b/clang/include/clang/InstallAPI/DylibVerifier.h
@@ -28,9 +28,10 @@ enum class VerificationMode {
/// lifetime of InstallAPI.
/// As declarations are collected during AST traversal, they are
/// compared as symbols against what is available in the binary dylib.
-class DylibVerifier {
+class DylibVerifier : llvm::MachO::RecordVisitor {
private:
struct SymbolContext;
+ struct DWARFContext;
public:
enum class Result { NoVerify, Ignore, Valid, Invalid };
@@ -54,7 +55,7 @@ public:
DiagnosticsEngine *Diag = nullptr;
// Handle diagnostics reporting for target level violations.
- void emitDiag(llvm::function_ref<void()> Report);
+ void emitDiag(llvm::function_ref<void()> Report, RecordLoc *Loc = nullptr);
VerifierContext() = default;
VerifierContext(DiagnosticsEngine *Diag) : Diag(Diag) {}
@@ -63,15 +64,19 @@ public:
DylibVerifier() = default;
DylibVerifier(llvm::MachO::Records &&Dylib, DiagnosticsEngine *Diag,
- VerificationMode Mode, bool Demangle)
+ VerificationMode Mode, bool Demangle, StringRef DSYMPath)
: Dylib(std::move(Dylib)), Mode(Mode), Demangle(Demangle),
- Exports(std::make_unique<SymbolSet>()), Ctx(VerifierContext{Diag}) {}
+ DSYMPath(DSYMPath), Exports(std::make_unique<SymbolSet>()),
+ Ctx(VerifierContext{Diag}) {}
Result verify(GlobalRecord *R, const FrontendAttrs *FA);
Result verify(ObjCInterfaceRecord *R, const FrontendAttrs *FA);
Result verify(ObjCIVarRecord *R, const FrontendAttrs *FA,
const StringRef SuperClass);
+ // Scan through dylib slices and report any remaining missing exports.
+ Result verifyRemainingSymbols();
+
/// Initialize target for verification.
void setTarget(const Target &T);
@@ -128,10 +133,24 @@ private:
/// Find matching dylib slice for target triple that is being parsed.
void assignSlice(const Target &T);
+ /// Shared implementation for verifying exported symbols in dylib.
+ void visitSymbolInDylib(const Record &R, SymbolContext &SymCtx);
+
+ void visitGlobal(const GlobalRecord &R) override;
+ void visitObjCInterface(const ObjCInterfaceRecord &R) override;
+ void visitObjCCategory(const ObjCCategoryRecord &R) override;
+ void visitObjCIVar(const ObjCIVarRecord &R, const StringRef Super);
+
/// Gather annotations for symbol for error reporting.
std::string getAnnotatedName(const Record *R, SymbolContext &SymCtx,
bool ValidSourceLoc = true);
+ /// Extract source location for symbol implementations.
+ /// As this is a relatively expensive operation, it is only used
+ /// when there is a violation to report and there is not a known declaration
+ /// in the interface.
+ void accumulateSrcLocForDylibSymbols();
+
// Symbols in dylib.
llvm::MachO::Records Dylib;
@@ -141,11 +160,17 @@ private:
// Attempt to demangle when reporting violations.
bool Demangle = false;
+ // File path to DSYM file.
+ StringRef DSYMPath;
+
// Valid symbols in final text file.
std::unique_ptr<SymbolSet> Exports = std::make_unique<SymbolSet>();
// Track current state of verification while traversing AST.
VerifierContext Ctx;
+
+ // Track DWARF provided source location for dylibs.
+ DWARFContext *DWARFCtx = nullptr;
};
} // namespace installapi
diff --git a/clang/include/clang/InstallAPI/HeaderFile.h b/clang/include/clang/InstallAPI/HeaderFile.h
index 70e83bbb3e76..c67503d4ad49 100644
--- a/clang/include/clang/InstallAPI/HeaderFile.h
+++ b/clang/include/clang/InstallAPI/HeaderFile.h
@@ -13,7 +13,9 @@
#ifndef LLVM_CLANG_INSTALLAPI_HEADERFILE_H
#define LLVM_CLANG_INSTALLAPI_HEADERFILE_H
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
+#include "clang/InstallAPI/MachO.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
@@ -22,8 +24,6 @@
namespace clang::installapi {
enum class HeaderType {
- /// Unset or unknown type.
- Unknown,
/// Represents declarations accessible to all clients.
Public,
/// Represents declarations accessible to a disclosed set of clients.
@@ -31,6 +31,8 @@ enum class HeaderType {
/// Represents declarations only accessible as implementation details to the
/// input library.
Project,
+ /// Unset or unknown type.
+ Unknown,
};
inline StringRef getName(const HeaderType T) {
@@ -56,6 +58,12 @@ class HeaderFile {
std::string IncludeName;
/// Supported language mode for header.
std::optional<clang::Language> Language;
+ /// Exclude header file from processing.
+ bool Excluded{false};
+ /// Add header file to processing.
+ bool Extra{false};
+ /// Specify that header file is the umbrella header for library.
+ bool Umbrella{false};
public:
HeaderFile() = delete;
@@ -71,17 +79,52 @@ public:
StringRef getIncludeName() const { return IncludeName; }
StringRef getPath() const { return FullPath; }
+ void setExtra(bool V = true) { Extra = V; }
+ void setExcluded(bool V = true) { Excluded = V; }
+ void setUmbrellaHeader(bool V = true) { Umbrella = V; }
+ bool isExtra() const { return Extra; }
+ bool isExcluded() const { return Excluded; }
+ bool isUmbrellaHeader() const { return Umbrella; }
+
bool useIncludeName() const {
return Type != HeaderType::Project && !IncludeName.empty();
}
bool operator==(const HeaderFile &Other) const {
- return std::tie(Type, FullPath, IncludeName, Language) ==
- std::tie(Other.Type, Other.FullPath, Other.IncludeName,
- Other.Language);
+ return std::tie(Type, FullPath, IncludeName, Language, Excluded, Extra,
+ Umbrella) == std::tie(Other.Type, Other.FullPath,
+ Other.IncludeName, Other.Language,
+ Other.Excluded, Other.Extra,
+ Other.Umbrella);
}
};
+/// Glob that represents a pattern of header files to retreive.
+class HeaderGlob {
+private:
+ std::string GlobString;
+ llvm::Regex Rule;
+ HeaderType Type;
+ bool FoundMatch{false};
+
+public:
+ HeaderGlob(StringRef GlobString, llvm::Regex &&, HeaderType Type);
+
+ /// Create a header glob from string for the header access level.
+ static llvm::Expected<std::unique_ptr<HeaderGlob>>
+ create(StringRef GlobString, HeaderType Type);
+
+ /// Query if provided header matches glob.
+ bool match(const HeaderFile &Header);
+
+ /// Query if a header was matched in the glob, used primarily for error
+ /// reporting.
+ bool didMatch() { return FoundMatch; }
+
+ /// Provide back input glob string.
+ StringRef str() { return GlobString; }
+};
+
/// Assemble expected way header will be included by clients.
/// As in what maps inside the brackets of `#include <IncludeName.h>`
/// For example,
@@ -93,6 +136,19 @@ public:
std::optional<std::string> createIncludeHeaderName(const StringRef FullPath);
using HeaderSeq = std::vector<HeaderFile>;
+/// Determine if Path is a header file.
+/// It does not touch the file system.
+///
+/// \param Path File path to file.
+bool isHeaderFile(StringRef Path);
+
+/// Given input directory, collect all header files.
+///
+/// \param FM FileManager for finding input files.
+/// \param Directory Path to the directory to search.
+llvm::Expected<PathSeq> enumerateFiles(clang::FileManager &FM,
+ StringRef Directory);
+
} // namespace clang::installapi
#endif // LLVM_CLANG_INSTALLAPI_HEADERFILE_H
diff --git a/clang/include/clang/InstallAPI/MachO.h b/clang/include/clang/InstallAPI/MachO.h
index f0dea8bbd24c..827220dbf39f 100644
--- a/clang/include/clang/InstallAPI/MachO.h
+++ b/clang/include/clang/InstallAPI/MachO.h
@@ -34,12 +34,14 @@ using ObjCCategoryRecord = llvm::MachO::ObjCCategoryRecord;
using ObjCIVarRecord = llvm::MachO::ObjCIVarRecord;
using ObjCIFSymbolKind = llvm::MachO::ObjCIFSymbolKind;
using Records = llvm::MachO::Records;
+using RecordLoc = llvm::MachO::RecordLoc;
using RecordsSlice = llvm::MachO::RecordsSlice;
using BinaryAttrs = llvm::MachO::RecordsSlice::BinaryAttrs;
using SymbolSet = llvm::MachO::SymbolSet;
using SimpleSymbol = llvm::MachO::SimpleSymbol;
using FileType = llvm::MachO::FileType;
using PackedVersion = llvm::MachO::PackedVersion;
+using PathSeq = llvm::MachO::PathSeq;
using Target = llvm::MachO::Target;
using TargetList = llvm::MachO::TargetList;
diff --git a/clang/include/clang/Interpreter/Interpreter.h b/clang/include/clang/Interpreter/Interpreter.h
index 1dcba1ef9679..970e0245417b 100644
--- a/clang/include/clang/Interpreter/Interpreter.h
+++ b/clang/include/clang/Interpreter/Interpreter.h
@@ -30,6 +30,7 @@
namespace llvm {
namespace orc {
class LLJIT;
+class LLJITBuilder;
class ThreadSafeContext;
} // namespace orc
} // namespace llvm
@@ -127,6 +128,13 @@ protected:
// custom runtime.
virtual std::unique_ptr<RuntimeInterfaceBuilder> FindRuntimeInterface();
+  // Lazily construct the ORCv2 JITBuilder. This is called when the internal
+ // IncrementalExecutor is created. The default implementation populates an
+ // in-process JIT with debugging support. Override this to configure the JIT
+ // engine used for execution.
+ virtual llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+ CreateJITBuilder(CompilerInstance &CI);
+
public:
virtual ~Interpreter();
diff --git a/clang/include/clang/Interpreter/Value.h b/clang/include/clang/Interpreter/Value.h
index c380cd91550d..d70e8f871902 100644
--- a/clang/include/clang/Interpreter/Value.h
+++ b/clang/include/clang/Interpreter/Value.h
@@ -76,6 +76,7 @@ class QualType;
X(bool, Bool) \
X(char, Char_S) \
X(signed char, SChar) \
+ X(unsigned char, Char_U) \
X(unsigned char, UChar) \
X(short, Short) \
X(unsigned short, UShort) \
diff --git a/clang/include/clang/Lex/ModuleMap.h b/clang/include/clang/Lex/ModuleMap.h
index 867cb6eab42f..2e28ff6823cb 100644
--- a/clang/include/clang/Lex/ModuleMap.h
+++ b/clang/include/clang/Lex/ModuleMap.h
@@ -263,8 +263,8 @@ private:
Attributes Attrs;
/// If \c InferModules is non-zero, the module map file that allowed
- /// inferred modules. Otherwise, nullopt.
- OptionalFileEntryRef ModuleMapFile;
+ /// inferred modules. Otherwise, invalid.
+ FileID ModuleMapFID;
/// The names of modules that cannot be inferred within this
/// directory.
@@ -279,8 +279,7 @@ private:
/// A mapping from an inferred module to the module map that allowed the
/// inference.
- // FIXME: Consider making the values non-optional.
- llvm::DenseMap<const Module *, OptionalFileEntryRef> InferredModuleAllowedBy;
+ llvm::DenseMap<const Module *, FileID> InferredModuleAllowedBy;
llvm::DenseMap<const Module *, AdditionalModMapsSet> AdditionalModMaps;
@@ -618,8 +617,9 @@ public:
///
/// \param Module The module whose module map file will be returned, if known.
///
- /// \returns The file entry for the module map file containing the given
- /// module, or nullptr if the module definition was inferred.
+ /// \returns The FileID for the module map file containing the given module,
+ /// invalid if the module definition was inferred.
+ FileID getContainingModuleMapFileID(const Module *Module) const;
OptionalFileEntryRef getContainingModuleMapFile(const Module *Module) const;
/// Get the module map file that (along with the module name) uniquely
@@ -631,9 +631,10 @@ public:
/// of inferred modules, returns the module map that allowed the inference
/// (e.g. contained 'module *'). Otherwise, returns
/// getContainingModuleMapFile().
+ FileID getModuleMapFileIDForUniquing(const Module *M) const;
OptionalFileEntryRef getModuleMapFileForUniquing(const Module *M) const;
- void setInferredModuleAllowedBy(Module *M, OptionalFileEntryRef ModMap);
+ void setInferredModuleAllowedBy(Module *M, FileID ModMapFID);
/// Canonicalize \p Path in a manner suitable for a module map file. In
/// particular, this canonicalizes the parent directory separately from the
diff --git a/clang/include/clang/Lex/Preprocessor.h b/clang/include/clang/Lex/Preprocessor.h
index 0836b7d439bb..24e146a589a7 100644
--- a/clang/include/clang/Lex/Preprocessor.h
+++ b/clang/include/clang/Lex/Preprocessor.h
@@ -736,6 +736,19 @@ private:
State ConditionalStackState = Off;
} PreambleConditionalStack;
+ /// Function for getting the dependency preprocessor directives of a file.
+ ///
+ /// These are directives derived from a special form of lexing where the
+ /// source input is scanned for the preprocessor directives that might have an
+ /// effect on the dependencies for a compilation unit.
+ ///
+ /// Enables a client to cache the directives for a file and provide them
+ /// across multiple compiler invocations.
+ /// FIXME: Allow returning an error.
+ using DependencyDirectivesFn = llvm::unique_function<std::optional<
+ ArrayRef<dependency_directives_scan::Directive>>(FileEntryRef)>;
+ DependencyDirectivesFn DependencyDirectivesForFile;
+
/// The current top of the stack that we're lexing from if
/// not expanding a macro and we are lexing directly from source code.
///
@@ -1270,6 +1283,11 @@ public:
/// false if it is producing tokens to be consumed by Parse and Sema.
bool isPreprocessedOutput() const { return PreprocessedOutput; }
+ /// Set the function used to get dependency directives for a file.
+ void setDependencyDirectivesFn(DependencyDirectivesFn Fn) {
+ DependencyDirectivesForFile = std::move(Fn);
+ }
+
/// Return true if we are lexing directly from the specified lexer.
bool isCurrentLexer(const PreprocessorLexer *L) const {
return CurPPLexer == L;
diff --git a/clang/include/clang/Lex/PreprocessorOptions.h b/clang/include/clang/Lex/PreprocessorOptions.h
index f841e4a028df..50b5fba0ff77 100644
--- a/clang/include/clang/Lex/PreprocessorOptions.h
+++ b/clang/include/clang/Lex/PreprocessorOptions.h
@@ -186,41 +186,6 @@ public:
/// with support for lifetime-qualified pointers.
ObjCXXARCStandardLibraryKind ObjCXXARCStandardLibrary = ARCXX_nolib;
- /// Records the set of modules
- class FailedModulesSet {
- llvm::StringSet<> Failed;
-
- public:
- bool hasAlreadyFailed(StringRef module) {
- return Failed.count(module) > 0;
- }
-
- void addFailed(StringRef module) {
- Failed.insert(module);
- }
- };
-
- /// The set of modules that failed to build.
- ///
- /// This pointer will be shared among all of the compiler instances created
- /// to (re)build modules, so that once a module fails to build anywhere,
- /// other instances will see that the module has failed and won't try to
- /// build it again.
- std::shared_ptr<FailedModulesSet> FailedModules;
-
- /// Function for getting the dependency preprocessor directives of a file.
- ///
- /// These are directives derived from a special form of lexing where the
- /// source input is scanned for the preprocessor directives that might have an
- /// effect on the dependencies for a compilation unit.
- ///
- /// Enables a client to cache the directives for a file and provide them
- /// across multiple compiler invocations.
- /// FIXME: Allow returning an error.
- std::function<std::optional<ArrayRef<dependency_directives_scan::Directive>>(
- FileEntryRef)>
- DependencyDirectivesForFile;
-
/// Set up preprocessor for RunAnalysis action.
bool SetUpStaticAnalyzer = false;
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 5ecd2f9eb288..3a1abd4c7892 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -2234,7 +2234,8 @@ private:
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
- void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D);
+ void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap);
bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
diff --git a/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td b/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
index 686e5e99f4a6..5fe5c9286dab 100644
--- a/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
+++ b/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
@@ -622,6 +622,11 @@ def BlockInCriticalSectionChecker : Checker<"BlockInCriticalSection">,
let ParentPackage = Cplusplus in {
+def ArrayDeleteChecker : Checker<"ArrayDelete">,
+ HelpText<"Reports destructions of arrays of polymorphic objects that are "
+ "destructed as their base class.">,
+ Documentation<HasDocumentation>;
+
def InnerPointerChecker : Checker<"InnerPointer">,
HelpText<"Check for inner pointers of C++ containers used after "
"re/deallocation">,
@@ -777,11 +782,6 @@ def ContainerModeling : Checker<"ContainerModeling">,
Documentation<NotDocumented>,
Hidden;
-def CXXArrayDeleteChecker : Checker<"ArrayDelete">,
- HelpText<"Reports destructions of arrays of polymorphic objects that are "
- "destructed as their base class.">,
- Documentation<HasDocumentation>;
-
def DeleteWithNonVirtualDtorChecker : Checker<"DeleteWithNonVirtualDtor">,
HelpText<"Reports destructions of polymorphic objects with a non-virtual "
"destructor in their base class">,
@@ -908,7 +908,7 @@ def PaddingChecker : Checker<"Padding">,
"24",
Released>
]>,
- Documentation<NotDocumented>;
+ Documentation<HasDocumentation>;
} // end: "padding"
diff --git a/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h b/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
index 276d11e80a5b..3a3c1a13d67d 100644
--- a/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
+++ b/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
@@ -227,6 +227,7 @@ public:
unsigned ShouldEmitErrorsOnInvalidConfigValue : 1;
unsigned AnalyzeAll : 1;
unsigned AnalyzerDisplayProgress : 1;
+ unsigned AnalyzerNoteAnalysisEntryPoints : 1;
unsigned eagerlyAssumeBinOpBifurcation : 1;
@@ -291,10 +292,10 @@ public:
ShowCheckerOptionDeveloperList(false), ShowEnabledCheckerList(false),
ShowConfigOptionsList(false),
ShouldEmitErrorsOnInvalidConfigValue(false), AnalyzeAll(false),
- AnalyzerDisplayProgress(false), eagerlyAssumeBinOpBifurcation(false),
- TrimGraph(false), visualizeExplodedGraphWithGraphViz(false),
- UnoptimizedCFG(false), PrintStats(false), NoRetryExhausted(false),
- AnalyzerWerror(false) {}
+ AnalyzerDisplayProgress(false), AnalyzerNoteAnalysisEntryPoints(false),
+ eagerlyAssumeBinOpBifurcation(false), TrimGraph(false),
+ visualizeExplodedGraphWithGraphViz(false), UnoptimizedCFG(false),
+ PrintStats(false), NoRetryExhausted(false), AnalyzerWerror(false) {}
/// Interprets an option's string value as a boolean. The "true" string is
/// interpreted as true and the "false" string is interpreted as false.
diff --git a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index e762f7548e0b..ead96ce6891c 100644
--- a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -586,6 +586,9 @@ class BugReporter {
private:
BugReporterData& D;
+ /// The top-level entry point for the issue to be reported.
+ const Decl *AnalysisEntryPoint = nullptr;
+
/// Generate and flush the diagnostics for the given bug report.
void FlushReport(BugReportEquivClass& EQ);
@@ -623,6 +626,14 @@ public:
Preprocessor &getPreprocessor() { return D.getPreprocessor(); }
+ /// Get the top-level entry point for the issue to be reported.
+ const Decl *getAnalysisEntryPoint() const { return AnalysisEntryPoint; }
+
+ void setAnalysisEntryPoint(const Decl *EntryPoint) {
+ assert(EntryPoint);
+ AnalysisEntryPoint = EntryPoint;
+ }
+
/// Add the given report to the set of reports tracked by BugReporter.
///
/// The reports are usually generated by the checkers. Further, they are
@@ -713,6 +724,7 @@ public:
virtual ~BugReporterContext() = default;
PathSensitiveBugReporter& getBugReporter() { return BR; }
+ const PathSensitiveBugReporter &getBugReporter() const { return BR; }
ProgramStateManager& getStateManager() const {
return BR.getStateManager();
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h
index 3432d2648633..b4e1636130ca 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h
@@ -41,12 +41,8 @@ public:
/// - We also accept calls where the number of arguments or parameters is
/// greater than the specified value.
/// For the exact heuristics, see CheckerContext::isCLibraryFunction().
- /// Note that functions whose declaration context is not a TU (e.g.
- /// methods, functions in namespaces) are not accepted as C library
- /// functions.
- /// FIXME: If I understand it correctly, this discards calls where C++ code
- /// refers a C library function through the namespace `std::` via headers
- /// like <cstdlib>.
+ /// (This mode only matches functions that are declared either directly
+ /// within a TU or in the namespace `std`.)
CLibrary,
/// Matches "simple" functions that are not methods. (Static methods are
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index 0d36587484bf..549c864dc91e 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -59,6 +59,7 @@ namespace ento {
enum CallEventKind {
CE_Function,
+ CE_CXXStaticOperator,
CE_CXXMember,
CE_CXXMemberOperator,
CE_CXXDestructor,
@@ -709,6 +710,77 @@ public:
}
};
+/// Represents a static C++ operator call.
+///
+/// "A" in this example.
+/// However, "B" and "C" are represented by SimpleFunctionCall.
+/// \code
+/// struct S {
+/// int pad;
+/// static void operator()(int x, int y);
+/// };
+/// S s{10};
+/// void (*fptr)(int, int) = &S::operator();
+///
+/// s(1, 2); // A
+/// S::operator()(1, 2); // B
+/// fptr(1, 2); // C
+/// \endcode
+class CXXStaticOperatorCall : public SimpleFunctionCall {
+ friend class CallEventManager;
+
+protected:
+ CXXStaticOperatorCall(const CXXOperatorCallExpr *CE, ProgramStateRef St,
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : SimpleFunctionCall(CE, St, LCtx, ElemRef) {}
+ CXXStaticOperatorCall(const CXXStaticOperatorCall &Other) = default;
+
+ void cloneTo(void *Dest) const override {
+ new (Dest) CXXStaticOperatorCall(*this);
+ }
+
+public:
+ const CXXOperatorCallExpr *getOriginExpr() const override {
+ return cast<CXXOperatorCallExpr>(SimpleFunctionCall::getOriginExpr());
+ }
+
+ unsigned getNumArgs() const override {
+ // Ignore the object parameter that is not used for static member functions.
+ assert(getOriginExpr()->getNumArgs() > 0);
+ return getOriginExpr()->getNumArgs() - 1;
+ }
+
+ const Expr *getArgExpr(unsigned Index) const override {
+ // Ignore the object parameter that is not used for static member functions.
+ return getOriginExpr()->getArg(Index + 1);
+ }
+
+ std::optional<unsigned>
+ getAdjustedParameterIndex(unsigned ASTArgumentIndex) const override {
+ // Ignore the object parameter that is not used for static member functions.
+ if (ASTArgumentIndex == 0)
+ return std::nullopt;
+ return ASTArgumentIndex - 1;
+ }
+
+ unsigned getASTArgumentIndex(unsigned CallArgumentIndex) const override {
+ // Account for the object parameter for the static member function.
+ return CallArgumentIndex + 1;
+ }
+
+ OverloadedOperatorKind getOverloadedOperator() const {
+ return getOriginExpr()->getOperator();
+ }
+
+ Kind getKind() const override { return CE_CXXStaticOperator; }
+ StringRef getKindAsString() const override { return "CXXStaticOperatorCall"; }
+
+ static bool classof(const CallEvent *CA) {
+ return CA->getKind() == CE_CXXStaticOperator;
+ }
+};
+
/// Represents a non-static C++ member function call.
///
/// Example: \c obj.fun()
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
index 60421e5437d8..d053a9718912 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -15,7 +15,6 @@
#include "ProgramState_Fwd.h"
#include "SVals.h"
-
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OperatorKinds.h"
@@ -113,8 +112,7 @@ public:
OperatorKind operationKindFromOverloadedOperator(OverloadedOperatorKind OOK,
bool IsBinary);
-std::optional<DefinedSVal> getPointeeDefVal(SVal PtrSVal,
- ProgramStateRef State);
+std::optional<SVal> getPointeeVal(SVal PtrSVal, ProgramStateRef State);
} // namespace ento
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 859c1497d7e6..e38a3bb56ece 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -187,6 +187,8 @@ public:
/// Returns true if there is still simulation state on the worklist.
bool ExecuteWorkList(const LocationContext *L, unsigned Steps = 150000) {
+ assert(L->inTopFrame());
+ BR.setAnalysisEntryPoint(L->getDecl());
return Engine.ExecuteWorkList(L, Steps, nullptr);
}
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index ca75c2a756a4..51d76dc257ee 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -494,6 +494,8 @@ private:
InvalidatedSymbols *IS,
RegionAndSymbolInvalidationTraits *HTraits,
const CallEvent *Call) const;
+
+ SVal wrapSymbolicRegion(SVal Base) const;
};
//===----------------------------------------------------------------------===//
@@ -782,20 +784,6 @@ inline SVal ProgramState::getLValue(const ObjCIvarDecl *D, SVal Base) const {
return getStateManager().StoreMgr->getLValueIvar(D, Base);
}
-inline SVal ProgramState::getLValue(const FieldDecl *D, SVal Base) const {
- return getStateManager().StoreMgr->getLValueField(D, Base);
-}
-
-inline SVal ProgramState::getLValue(const IndirectFieldDecl *D,
- SVal Base) const {
- StoreManager &SM = *getStateManager().StoreMgr;
- for (const auto *I : D->chain()) {
- Base = SM.getLValueField(cast<FieldDecl>(I), Base);
- }
-
- return Base;
-}
-
inline SVal ProgramState::getLValue(QualType ElementType, SVal Idx, SVal Base) const{
if (std::optional<NonLoc> N = Idx.getAs<NonLoc>())
return getStateManager().StoreMgr->getLValueElement(ElementType, *N, Base);
diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
index 846fdc725397..9a522a3e2fe2 100644
--- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
+++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
@@ -242,6 +242,8 @@ class EntryRef {
/// The underlying cached entry.
const CachedFileSystemEntry &Entry;
+ friend class DependencyScanningWorkerFilesystem;
+
public:
EntryRef(StringRef Name, const CachedFileSystemEntry &Entry)
: Filename(Name), Entry(Entry) {}
@@ -300,14 +302,15 @@ public:
///
/// Attempts to use the local and shared caches first, then falls back to
/// using the underlying filesystem.
- llvm::ErrorOr<EntryRef>
- getOrCreateFileSystemEntry(StringRef Filename,
- bool DisableDirectivesScanning = false);
+ llvm::ErrorOr<EntryRef> getOrCreateFileSystemEntry(StringRef Filename);
-private:
- /// Check whether the file should be scanned for preprocessor directives.
- bool shouldScanForDirectives(StringRef Filename);
+ /// Ensure the directive tokens are populated for this file entry.
+ ///
+ /// Returns true if the directive tokens are populated for this file entry,
+ /// false if not (i.e. this entry is not a file or its scan fails).
+ bool ensureDirectiveTokensArePopulated(EntryRef Entry);
+private:
/// For a filename that's not yet associated with any entry in the caches,
/// uses the underlying filesystem to either look up the entry based in the
/// shared cache indexed by unique ID, or creates new entry from scratch.
@@ -317,11 +320,6 @@ private:
computeAndStoreResult(StringRef OriginalFilename,
StringRef FilenameForLookup);
- /// Scan for preprocessor directives for the given entry if necessary and
- /// returns a wrapper object with reference semantics.
- EntryRef scanForDirectivesIfNecessary(const CachedFileSystemEntry &Entry,
- StringRef Filename, bool Disable);
-
/// Represents a filesystem entry that has been stat-ed (and potentially read)
/// and that's about to be inserted into the cache as `CachedFileSystemEntry`.
struct TentativeEntry {
diff --git a/clang/lib/APINotes/APINotesManager.cpp b/clang/lib/APINotes/APINotesManager.cpp
index f60f09e2b3c2..789bb97d81de 100644
--- a/clang/lib/APINotes/APINotesManager.cpp
+++ b/clang/lib/APINotes/APINotesManager.cpp
@@ -221,6 +221,7 @@ APINotesManager::getCurrentModuleAPINotes(Module *M, bool LookInModule,
ArrayRef<std::string> SearchPaths) {
FileManager &FM = SM.getFileManager();
auto ModuleName = M->getTopLevelModuleName();
+ auto ExportedModuleName = M->getTopLevelModule()->ExportAsModule;
llvm::SmallVector<FileEntryRef, 2> APINotes;
// First, look relative to the module itself.
@@ -233,6 +234,10 @@ APINotesManager::getCurrentModuleAPINotes(Module *M, bool LookInModule,
APINotes.push_back(*File);
}
+ // If module FooCore is re-exported through module Foo, try Foo.apinotes.
+ if (!ExportedModuleName.empty())
+ if (auto File = findAPINotesFile(Dir, ExportedModuleName, WantPublic))
+ APINotes.push_back(*File);
};
if (M->IsFramework) {
diff --git a/clang/lib/APINotes/APINotesWriter.cpp b/clang/lib/APINotes/APINotesWriter.cpp
index 76fd24ccfae9..e3f5d102fcd0 100644
--- a/clang/lib/APINotes/APINotesWriter.cpp
+++ b/clang/lib/APINotes/APINotesWriter.cpp
@@ -441,7 +441,7 @@ void emitVersionedInfo(
std::sort(VI.begin(), VI.end(),
[](const std::pair<VersionTuple, T> &LHS,
const std::pair<VersionTuple, T> &RHS) -> bool {
- assert(LHS.first != RHS.first &&
+ assert((&LHS == &RHS || LHS.first != RHS.first) &&
"two entries for the same version");
return LHS.first < RHS.first;
});
diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp
index 4eae308ef5b3..d8042321319a 100644
--- a/clang/lib/AST/APValue.cpp
+++ b/clang/lib/AST/APValue.cpp
@@ -704,6 +704,9 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
return;
}
+ if (const auto *AT = Ty->getAs<AtomicType>())
+ Ty = AT->getValueType();
+
switch (getKind()) {
case APValue::None:
Out << "<out of lifetime>";
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index fcf801adeaf5..c90fafb6f653 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1766,7 +1766,7 @@ TypeInfoChars
static getConstantArrayInfoInChars(const ASTContext &Context,
const ConstantArrayType *CAT) {
TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
- uint64_t Size = CAT->getSize().getZExtValue();
+ uint64_t Size = CAT->getZExtSize();
assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
(uint64_t)(-1)/Size) &&
"Overflow in array type char size evaluation");
@@ -1910,7 +1910,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// Model non-constant sized arrays as size zero, but track the alignment.
uint64_t Size = 0;
if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
- Size = CAT->getSize().getZExtValue();
+ Size = CAT->getZExtSize();
TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
@@ -3560,8 +3560,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
llvm::FoldingSetNodeID ID;
- ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
- IndexTypeQuals);
+ ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr,
+ ASM, IndexTypeQuals);
void *InsertPos = nullptr;
if (ConstantArrayType *ATP =
@@ -3585,11 +3585,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- void *Mem = Allocate(
- ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
- alignof(ConstantArrayType));
- auto *New = new (Mem)
- ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
+ auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr,
+ ASM, IndexTypeQuals);
ConstantArrayTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -7051,7 +7048,7 @@ uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
uint64_t ElementCount = 1;
do {
- ElementCount *= CA->getSize().getZExtValue();
+ ElementCount *= CA->getZExtSize();
CA = dyn_cast_or_null<ConstantArrayType>(
CA->getElementType()->getAsArrayTypeUnsafe());
} while (CA);
@@ -8374,7 +8371,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
S += '[';
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
- S += llvm::utostr(CAT->getSize().getZExtValue());
+ S += llvm::utostr(CAT->getZExtSize());
else {
//Variable length arrays are encoded as a regular array with 0 elements.
assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
@@ -9603,11 +9600,11 @@ static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
- unsigned EltSize = Context.getTypeSize(Info.ElementType);
+ uint64_t EltSize = Context.getTypeSize(Info.ElementType);
if (Info.ElementType == Context.BoolTy)
EltSize = 1;
- unsigned MinElts = Info.EC.getKnownMinValue();
+ uint64_t MinElts = Info.EC.getKnownMinValue();
return VScale->first * MinElts * EltSize;
}
@@ -10808,7 +10805,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
{
const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
- if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
+ if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize())
return {};
QualType LHSElem = getAsArrayType(LHS)->getElementType();
@@ -13676,22 +13673,19 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
std::vector<std::string> Features;
- StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
if (Target->getTriple().isAArch64()) {
// TargetClones for AArch64
- if (VersionStr != "default") {
- SmallVector<StringRef, 1> VersionFeatures;
- VersionStr.split(VersionFeatures, "+");
- for (auto &VFeature : VersionFeatures) {
- VFeature = VFeature.trim();
+ llvm::SmallVector<StringRef, 8> Feats;
+ TC->getFeatures(Feats, GD.getMultiVersionIndex());
+ for (StringRef Feat : Feats)
+ if (Target->validateCpuSupports(Feat.str()))
// Use '?' to mark features that came from AArch64 TargetClones.
- Features.push_back((StringRef{"?"} + VFeature).str());
- }
- }
+ Features.push_back("?" + Feat.str());
Features.insert(Features.begin(),
Target->getTargetOpts().FeaturesAsWritten.begin(),
Target->getTargetOpts().FeaturesAsWritten.end());
} else {
+ StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
if (VersionStr.starts_with("arch="))
TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
else if (VersionStr != "default")
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 95900afdd2c5..131f82985e90 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -2840,7 +2840,7 @@ bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType());
if (!InitTy)
return false;
- return InitTy->getSize() != 0;
+ return !InitTy->isZeroSize();
}
CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index 04bbc49ab2f3..2cbb86b31b5e 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -1102,9 +1102,13 @@ bool Decl::isInAnotherModuleUnit() const {
return M != getASTContext().getCurrentNamedModule();
}
+bool Decl::isFromExplicitGlobalModule() const {
+ return getOwningModule() && getOwningModule()->isExplicitGlobalModule();
+}
+
bool Decl::shouldSkipCheckingODR() const {
- return getASTContext().getLangOpts().SkipODRCheckInGMF && getOwningModule() &&
- getOwningModule()->isExplicitGlobalModule();
+ return getASTContext().getLangOpts().SkipODRCheckInGMF &&
+ isFromExplicitGlobalModule();
}
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 592d43597dc1..dae8f32fc029 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -209,7 +209,7 @@ namespace {
IsArray = true;
if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
- ArraySize = CAT->getSize().getZExtValue();
+ ArraySize = CAT->getZExtSize();
} else {
assert(I == 0 && "unexpected unsized array designator");
FirstEntryIsUnsizedArray = true;
@@ -401,7 +401,7 @@ namespace {
// This is a most-derived object.
MostDerivedType = CAT->getElementType();
MostDerivedIsArrayElement = true;
- MostDerivedArraySize = CAT->getSize().getZExtValue();
+ MostDerivedArraySize = CAT->getZExtSize();
MostDerivedPathLength = Entries.size();
}
/// Update this designator to refer to the first element within the array of
@@ -3476,7 +3476,7 @@ static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
QualType CharType = CAT->getElementType();
assert(CharType->isIntegerType() && "unexpected character type");
- unsigned Elts = CAT->getSize().getZExtValue();
+ unsigned Elts = CAT->getZExtSize();
Result = APValue(APValue::UninitArray(),
std::min(S->getLength(), Elts), Elts);
APSInt Value(Info.Ctx.getTypeSize(CharType),
@@ -3619,7 +3619,7 @@ static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT,
SourceLocation CallLoc = {}) {
return Info.CheckArraySize(
CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc,
- CAT->getNumAddressingBits(Info.Ctx), CAT->getSize().getZExtValue(),
+ CAT->getNumAddressingBits(Info.Ctx), CAT->getZExtSize(),
/*Diag=*/true);
}
@@ -4908,7 +4908,7 @@ static bool handleDefaultInitValue(QualType T, APValue &Result) {
if (auto *AT =
dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) {
- Result = APValue(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
+ Result = APValue(APValue::UninitArray(), 0, AT->getZExtSize());
if (Result.hasArrayFiller())
Success &=
handleDefaultInitValue(AT->getElementType(), Result.getArrayFiller());
@@ -6595,7 +6595,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
// For arrays, destroy elements right-to-left.
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
- uint64_t Size = CAT->getSize().getZExtValue();
+ uint64_t Size = CAT->getZExtSize();
QualType ElemT = CAT->getElementType();
if (!CheckArraySize(Info, CAT, CallRange.getBegin()))
@@ -7396,7 +7396,7 @@ class BufferToAPValueConverter {
}
std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
- size_t Size = Ty->getSize().getLimitedValue();
+ size_t Size = Ty->getLimitedSize();
CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
APValue ArrayValue(APValue::UninitArray(), Size, Size);
@@ -9951,7 +9951,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
assert(CAT && "unexpected type for array initializer");
unsigned Bits =
- std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
+ std::max(CAT->getSizeBitWidth(), ArrayBound.getBitWidth());
llvm::APInt InitBound = CAT->getSize().zext(Bits);
llvm::APInt AllocBound = ArrayBound.zext(Bits);
if (InitBound.ugt(AllocBound)) {
@@ -10410,7 +10410,7 @@ bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
if (Field->getType()->isIncompleteArrayType()) {
if (auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType())) {
- if (!CAT->getSize().isZero()) {
+ if (!CAT->isZeroSize()) {
// Bail out for now. This might sort of "work", but the rest of the
// code isn't really prepared to handle it.
Info.FFDiag(Init, diag::note_constexpr_unsupported_flexible_array);
@@ -10554,7 +10554,7 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
// End pointer.
if (!HandleLValueArrayAdjustment(Info, E, Array,
ArrayType->getElementType(),
- ArrayType->getSize().getZExtValue()))
+ ArrayType->getZExtSize()))
return false;
Array.moveInto(Result.getStructField(1));
} else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType()))
@@ -10996,8 +10996,7 @@ namespace {
return Error(E);
}
- Result = APValue(APValue::UninitArray(), 0,
- CAT->getSize().getZExtValue());
+ Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize());
if (!Result.hasArrayFiller())
return true;
@@ -11122,7 +11121,7 @@ bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr(
Filler = Result.getArrayFiller();
unsigned NumEltsToInit = Args.size();
- unsigned NumElts = CAT->getSize().getZExtValue();
+ unsigned NumElts = CAT->getZExtSize();
// If the initializer might depend on the array index, run it for each
// array element.
@@ -11180,7 +11179,7 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
auto *CAT = cast<ConstantArrayType>(E->getType()->castAsArrayTypeUnsafe());
- uint64_t Elements = CAT->getSize().getZExtValue();
+ uint64_t Elements = CAT->getZExtSize();
Result = APValue(APValue::UninitArray(), Elements, Elements);
LValue Subobject = This;
@@ -11225,7 +11224,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool HadZeroInit = Value->hasValue();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
- unsigned FinalSize = CAT->getSize().getZExtValue();
+ unsigned FinalSize = CAT->getZExtSize();
// Preserve the array filler if we had prior zero-initialization.
APValue Filler =
@@ -11940,7 +11939,7 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
return true;
const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
uint64_t Index = Entry.getAsArrayIndex();
- if (Index + 1 != CAT->getSize())
+ if (Index + 1 != CAT->getZExtSize())
return false;
BaseType = CAT->getElementType();
} else if (BaseType->isAnyComplexType()) {
@@ -12354,6 +12353,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll:
case Builtin::BI__builtin_clzs:
+ case Builtin::BI__builtin_clzg:
case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
case Builtin::BI__lzcnt:
case Builtin::BI__lzcnt64: {
@@ -12361,14 +12361,28 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- // When the argument is 0, the result of GCC builtins is undefined, whereas
- // for Microsoft intrinsics, the result is the bit-width of the argument.
- bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
- BuiltinOp != Builtin::BI__lzcnt &&
- BuiltinOp != Builtin::BI__lzcnt64;
+ std::optional<APSInt> Fallback;
+ if (BuiltinOp == Builtin::BI__builtin_clzg && E->getNumArgs() > 1) {
+ APSInt FallbackTemp;
+ if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info))
+ return false;
+ Fallback = FallbackTemp;
+ }
- if (ZeroIsUndefined && !Val)
- return Error(E);
+ if (!Val) {
+ if (Fallback)
+ return Success(*Fallback, E);
+
+ // When the argument is 0, the result of GCC builtins is undefined,
+ // whereas for Microsoft intrinsics, the result is the bit-width of the
+ // argument.
+ bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
+ BuiltinOp != Builtin::BI__lzcnt &&
+ BuiltinOp != Builtin::BI__lzcnt64;
+
+ if (ZeroIsUndefined)
+ return Error(E);
+ }
return Success(Val.countl_zero(), E);
}
@@ -12410,12 +12424,26 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll:
- case Builtin::BI__builtin_ctzs: {
+ case Builtin::BI__builtin_ctzs:
+ case Builtin::BI__builtin_ctzg: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- if (!Val)
+
+ std::optional<APSInt> Fallback;
+ if (BuiltinOp == Builtin::BI__builtin_ctzg && E->getNumArgs() > 1) {
+ APSInt FallbackTemp;
+ if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info))
+ return false;
+ Fallback = FallbackTemp;
+ }
+
+ if (!Val) {
+ if (Fallback)
+ return Success(*Fallback, E);
+
return Error(E);
+ }
return Success(Val.countr_zero(), E);
}
diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp
index 0c80ad109ccb..da8164bad518 100644
--- a/clang/lib/AST/FormatString.cpp
+++ b/clang/lib/AST/FormatString.cpp
@@ -413,7 +413,7 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
return Match;
if (const auto *BT = argTy->getAs<BuiltinType>()) {
// Check if the only difference between them is signed vs unsigned
- // if true, we consider they are compatible.
+ // if so, report a signedness-only mismatch (NoMatchSignedness).
switch (BT->getKind()) {
default:
break;
@@ -423,44 +423,53 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
[[fallthrough]];
case BuiltinType::Char_S:
case BuiltinType::SChar:
+ if (T == C.UnsignedShortTy || T == C.ShortTy)
+ return NoMatchTypeConfusion;
+ if (T == C.UnsignedCharTy)
+ return NoMatchSignedness;
+ if (T == C.SignedCharTy)
+ return Match;
+ break;
case BuiltinType::Char_U:
case BuiltinType::UChar:
if (T == C.UnsignedShortTy || T == C.ShortTy)
return NoMatchTypeConfusion;
- if (T == C.UnsignedCharTy || T == C.SignedCharTy)
+ if (T == C.UnsignedCharTy)
return Match;
+ if (T == C.SignedCharTy)
+ return NoMatchSignedness;
break;
case BuiltinType::Short:
if (T == C.UnsignedShortTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::UShort:
if (T == C.ShortTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::Int:
if (T == C.UnsignedIntTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::UInt:
if (T == C.IntTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::Long:
if (T == C.UnsignedLongTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::ULong:
if (T == C.LongTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::LongLong:
if (T == C.UnsignedLongLongTy)
- return Match;
+ return NoMatchSignedness;
break;
case BuiltinType::ULongLong:
if (T == C.LongLongTy)
- return Match;
+ return NoMatchSignedness;
break;
}
// "Partially matched" because of promotions?
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 73831eefba45..46182809810b 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -819,7 +819,7 @@ bool ByteCodeExprGen<Emitter>::VisitImplicitValueInitExpr(const ImplicitValueIni
const ArrayType *AT = QT->getAsArrayTypeUnsafe();
assert(AT);
const auto *CAT = cast<ConstantArrayType>(AT);
- size_t NumElems = CAT->getSize().getZExtValue();
+ size_t NumElems = CAT->getZExtSize();
PrimType ElemT = classifyPrim(CAT->getElementType());
for (size_t I = 0; I != NumElems; ++I) {
@@ -992,7 +992,7 @@ bool ByteCodeExprGen<Emitter>::VisitInitListExpr(const InitListExpr *E) {
if (const Expr *Filler = E->getArrayFiller()) {
const ConstantArrayType *CAT =
Ctx.getASTContext().getAsConstantArrayType(E->getType());
- uint64_t NumElems = CAT->getSize().getZExtValue();
+ uint64_t NumElems = CAT->getZExtSize();
for (; ElementIndex != NumElems; ++ElementIndex) {
if (!this->visitArrayElemInit(ElementIndex, Filler))
@@ -1318,7 +1318,7 @@ bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
// If the initializer string is too long, a diagnostic has already been
// emitted. Read only the array length from the string literal.
- unsigned ArraySize = CAT->getSize().getZExtValue();
+ unsigned ArraySize = CAT->getZExtSize();
unsigned N = std::min(ArraySize, E->getLength());
size_t CharWidth = E->getCharByteWidth();
@@ -1919,7 +1919,7 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
const ConstantArrayType *CAT =
Ctx.getASTContext().getAsConstantArrayType(E->getType());
assert(CAT);
- size_t NumElems = CAT->getSize().getZExtValue();
+ size_t NumElems = CAT->getZExtSize();
const Function *Func = getFunction(E->getConstructor());
if (!Func || !Func->isConstexpr())
return false;
diff --git a/clang/lib/AST/Interp/ByteCodeStmtGen.h b/clang/lib/AST/Interp/ByteCodeStmtGen.h
index ab7a591fb798..d7e6e5042c27 100644
--- a/clang/lib/AST/Interp/ByteCodeStmtGen.h
+++ b/clang/lib/AST/Interp/ByteCodeStmtGen.h
@@ -82,6 +82,7 @@ private:
OptLabelTy DefaultLabel;
};
+extern template class ByteCodeStmtGen<ByteCodeEmitter>;
extern template class ByteCodeExprGen<EvalEmitter>;
} // namespace interp
diff --git a/clang/lib/AST/Interp/EvaluationResult.cpp b/clang/lib/AST/Interp/EvaluationResult.cpp
index 07b28d07326f..d567b551f7f6 100644
--- a/clang/lib/AST/Interp/EvaluationResult.cpp
+++ b/clang/lib/AST/Interp/EvaluationResult.cpp
@@ -66,7 +66,7 @@ static bool CheckArrayInitialized(InterpState &S, SourceLocation Loc,
const Pointer &BasePtr,
const ConstantArrayType *CAT) {
bool Result = true;
- size_t NumElems = CAT->getSize().getZExtValue();
+ size_t NumElems = CAT->getZExtSize();
QualType ElemType = CAT->getElementType();
if (ElemType->isRecordType()) {
diff --git a/clang/lib/AST/Interp/Program.cpp b/clang/lib/AST/Interp/Program.cpp
index da6f72c62115..25e938e01503 100644
--- a/clang/lib/AST/Interp/Program.cpp
+++ b/clang/lib/AST/Interp/Program.cpp
@@ -355,7 +355,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
QualType ElemTy = ArrayType->getElementType();
// Array of well-known bounds.
if (auto CAT = dyn_cast<ConstantArrayType>(ArrayType)) {
- size_t NumElems = CAT->getSize().getZExtValue();
+ size_t NumElems = CAT->getZExtSize();
if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
// Arrays of primitives.
unsigned ElemSize = primSize(*T);
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index f619d657ae9f..425f84e8af1f 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -3445,6 +3445,7 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_PreserveAll:
case CC_M68kRTD:
case CC_PreserveNone:
+ case CC_RISCVVectorCall:
// FIXME: we should be mangling all of the above.
return "";
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index e27d44fc2ffe..5861d5a7ea0d 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -695,7 +695,7 @@ void JSONNodeDumper::VisitArrayType(const ArrayType *AT) {
void JSONNodeDumper::VisitConstantArrayType(const ConstantArrayType *CAT) {
// FIXME: this should use ZExt instead of SExt, but JSON doesn't allow a
// narrowing conversion to int64_t so it cannot be expressed.
- JOS.attribute("size", CAT->getSize().getSExtValue());
+ JOS.attribute("size", CAT->getSExtSize());
VisitArrayType(CAT);
}
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index aa26bb7ed46f..addc3140546a 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -3911,7 +3911,8 @@ void MicrosoftMangleContextImpl::mangleReferenceTemporary(
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "?$RT" << ManglingNumber << '@';
+ Mangler.getStream() << "?";
+ Mangler.mangleSourceName("$RT" + llvm::utostr(ManglingNumber));
Mangler.mangle(VD, "");
}
@@ -4022,10 +4023,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// char bar[42] = "foobar";
// Where it is truncated or zero-padded to fit the array. This is the length
// used for mangling, and any trailing null-bytes also need to be mangled.
- unsigned StringLength = getASTContext()
- .getAsConstantArrayType(SL->getType())
- ->getSize()
- .getZExtValue();
+ unsigned StringLength =
+ getASTContext().getAsConstantArrayType(SL->getType())->getZExtSize();
unsigned StringByteLength = StringLength * SL->getCharByteWidth();
// <char-type>: The "kind" of string literal is encoded into the mangled name.
diff --git a/clang/lib/AST/ScanfFormatString.cpp b/clang/lib/AST/ScanfFormatString.cpp
index 64c430e623b5..7ee21c8c6195 100644
--- a/clang/lib/AST/ScanfFormatString.cpp
+++ b/clang/lib/AST/ScanfFormatString.cpp
@@ -448,9 +448,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) {
if (CAT->getSizeModifier() == ArraySizeModifier::Normal)
FieldWidth = OptionalAmount(OptionalAmount::Constant,
- CAT->getSize().getZExtValue() - 1,
- "", 0, false);
-
+ CAT->getZExtSize() - 1, "", 0, false);
}
return true;
}
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index b683eb1edd8f..413e452146bd 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -1990,6 +1990,19 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
}
}
+void TextNodeDumper::VisitCXXDeductionGuideDecl(
+ const CXXDeductionGuideDecl *D) {
+ VisitFunctionDecl(D);
+ switch (D->getDeductionCandidateKind()) {
+ case DeductionCandidate::Normal:
+ case DeductionCandidate::Copy:
+ return;
+ case DeductionCandidate::Aggregate:
+ OS << " aggregate ";
+ break;
+ }
+}
+
void TextNodeDumper::VisitLifetimeExtendedTemporaryDecl(
const LifetimeExtendedTemporaryDecl *D) {
OS << " extended by ";
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index c6fe90ba5e29..8f3e26d46019 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -159,6 +159,22 @@ ArrayType::ArrayType(TypeClass tc, QualType et, QualType can,
ArrayTypeBits.SizeModifier = llvm::to_underlying(sm);
}
+ConstantArrayType *
+ConstantArrayType::Create(const ASTContext &Ctx, QualType ET, QualType Can,
+ const llvm::APInt &Sz, const Expr *SzExpr,
+ ArraySizeModifier SzMod, unsigned Qual) {
+ bool NeedsExternalSize = SzExpr != nullptr || Sz.ugt(0x0FFFFFFFFFFFFFFF) ||
+ Sz.getBitWidth() > 0xFF;
+ if (!NeedsExternalSize)
+ return new (Ctx, alignof(ConstantArrayType)) ConstantArrayType(
+ ET, Can, Sz.getBitWidth(), Sz.getZExtValue(), SzMod, Qual);
+
+ auto *SzPtr = new (Ctx, alignof(ConstantArrayType::ExternalSize))
+ ConstantArrayType::ExternalSize(Sz, SzExpr);
+ return new (Ctx, alignof(ConstantArrayType))
+ ConstantArrayType(ET, Can, SzPtr, SzMod, Qual);
+}
+
unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
QualType ElementType,
const llvm::APInt &NumElements) {
@@ -213,11 +229,10 @@ unsigned ConstantArrayType::getMaxSizeBits(const ASTContext &Context) {
void ConstantArrayType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context, QualType ET,
- const llvm::APInt &ArraySize,
- const Expr *SizeExpr, ArraySizeModifier SizeMod,
- unsigned TypeQuals) {
+ uint64_t ArraySize, const Expr *SizeExpr,
+ ArraySizeModifier SizeMod, unsigned TypeQuals) {
ID.AddPointer(ET.getAsOpaquePtr());
- ID.AddInteger(ArraySize.getZExtValue());
+ ID.AddInteger(ArraySize);
ID.AddInteger(llvm::to_underlying(SizeMod));
ID.AddInteger(TypeQuals);
ID.AddBoolean(SizeExpr != nullptr);
@@ -452,12 +467,8 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
// Check that no type class has a non-trival destructor. Types are
// allocated with the BumpPtrAllocator from ASTContext and therefore
// their destructor is not executed.
-//
-// FIXME: ConstantArrayType is not trivially destructible because of its
-// APInt member. It should be replaced in favor of ASTContext allocation.
#define TYPE(CLASS, BASE) \
- static_assert(std::is_trivially_destructible<CLASS##Type>::value || \
- std::is_same<CLASS##Type, ConstantArrayType>::value, \
+ static_assert(std::is_trivially_destructible<CLASS##Type>::value, \
#CLASS "Type should be trivially destructible!");
#include "clang/AST/TypeNodes.inc"
@@ -3473,6 +3484,9 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_PreserveAll: return "preserve_all";
case CC_M68kRTD: return "m68k_rtd";
case CC_PreserveNone: return "preserve_none";
+ // clang-format off
+ case CC_RISCVVectorCall: return "riscv_vector_cc";
+ // clang-format on
}
llvm_unreachable("Invalid calling convention.");
@@ -4063,6 +4077,7 @@ bool AttributedType::isCallingConv() const {
case attr::PreserveAll:
case attr::M68kRTD:
case attr::PreserveNone:
+ case attr::RISCVVectorCC:
return true;
}
llvm_unreachable("invalid attr kind");
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 7032ff2f1846..0aa1d9327d77 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -538,7 +538,7 @@ void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
if (T->getSizeModifier() == ArraySizeModifier::Static)
OS << "static ";
- OS << T->getSize().getZExtValue() << ']';
+ OS << T->getZExtSize() << ']';
printAfter(T->getElementType(), OS);
}
@@ -1071,6 +1071,9 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_PreserveNone:
OS << " __attribute__((preserve_none))";
break;
+ case CC_RISCVVectorCall:
+ OS << " __attribute__((riscv_vector_cc))";
+ break;
}
}
@@ -1960,6 +1963,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::PreserveNone:
OS << "preserve_none";
break;
+ case attr::RISCVVectorCC:
+ OS << "riscv_vector_cc";
+ break;
case attr::NoDeref:
OS << "noderef";
break;
@@ -2303,15 +2309,10 @@ printTo(raw_ostream &OS, ArrayRef<TA> Args, const PrintingPolicy &Policy,
} else {
if (!FirstArg)
OS << Comma;
- if (!Policy.SuppressTagKeyword &&
- Argument.getKind() == TemplateArgument::Type &&
- isa<TagType>(Argument.getAsType()))
- OS << Argument.getAsType().getAsString();
- else
- // Tries to print the argument with location info if exists.
- printArgument(Arg, Policy, ArgOS,
- TemplateParameterList::shouldIncludeTypeForArgument(
- Policy, TPL, ParmIndex));
+ // Tries to print the argument with location info if exists.
+ printArgument(Arg, Policy, ArgOS,
+ TemplateParameterList::shouldIncludeTypeForArgument(
+ Policy, TPL, ParmIndex));
}
StringRef ArgString = ArgOS.str();
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index de70cbbf6cdb..64e6155de090 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -2039,7 +2039,7 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
QualType QT = FI->getType();
// It may be a multidimensional array.
while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
- if (AT->getSize() == 0)
+ if (AT->isZeroSize())
break;
QT = AT->getElementType();
}
@@ -2133,7 +2133,7 @@ bool CFGBuilder::hasTrivialDestructor(const VarDecl *VD) const {
// Check for constant size array. Set type to array element type.
while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
- if (AT->getSize() == 0)
+ if (AT->isZeroSize())
return true;
QT = AT->getElementType();
}
diff --git a/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp b/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp
index daa73bed1bd9..255543021a99 100644
--- a/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp
+++ b/clang/lib/Analysis/FlowSensitive/AdornedCFG.cpp
@@ -144,7 +144,7 @@ llvm::Expected<AdornedCFG> AdornedCFG::build(const Decl &D, Stmt &S,
// The shape of certain elements of the AST can vary depending on the
// language. We currently only support C++.
- if (!C.getLangOpts().CPlusPlus)
+ if (!C.getLangOpts().CPlusPlus || C.getLangOpts().ObjC)
return llvm::createStringError(
std::make_error_code(std::errc::invalid_argument),
"Can only analyze C++");
diff --git a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index cc1ebd511191..f729d676dd0d 100644
--- a/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -416,7 +416,7 @@ void Environment::initialize() {
assert(Parent != nullptr);
if (Parent->isLambda()) {
- for (auto Capture : Parent->captures()) {
+ for (const auto &Capture : Parent->captures()) {
if (Capture.capturesVariable()) {
const auto *VarDecl = Capture.getCapturedVar();
assert(VarDecl != nullptr);
@@ -1169,6 +1169,42 @@ getFieldsForInitListExpr(const InitListExpr *InitList) {
return Fields;
}
+RecordInitListHelper::RecordInitListHelper(const InitListExpr *InitList) {
+ auto *RD = InitList->getType()->getAsCXXRecordDecl();
+ assert(RD != nullptr);
+
+ std::vector<const FieldDecl *> Fields = getFieldsForInitListExpr(InitList);
+ ArrayRef<Expr *> Inits = InitList->inits();
+
+ // Unions initialized with an empty initializer list need special treatment.
+ // For structs/classes initialized with an empty initializer list, Clang
+ // puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions,
+ // it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves.
+ SmallVector<Expr *> InitsForUnion;
+ if (InitList->getType()->isUnionType() && Inits.empty()) {
+ assert(Fields.size() == 1);
+ ImplicitValueInitForUnion.emplace(Fields.front()->getType());
+ InitsForUnion.push_back(&*ImplicitValueInitForUnion);
+ Inits = InitsForUnion;
+ }
+
+ size_t InitIdx = 0;
+
+ assert(Fields.size() + RD->getNumBases() == Inits.size());
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ assert(InitIdx < Inits.size());
+ Expr *Init = Inits[InitIdx++];
+ BaseInits.emplace_back(&Base, Init);
+ }
+
+ assert(Fields.size() == Inits.size() - InitIdx);
+ for (const FieldDecl *Field : Fields) {
+ assert(InitIdx < Inits.size());
+ Expr *Init = Inits[InitIdx++];
+ FieldInits.emplace_back(Field, Init);
+ }
+}
+
RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env) {
auto &NewVal = Env.create<RecordValue>(Loc);
Env.setValue(Loc, NewVal);
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index dbf4878622eb..cadb1ceb2d85 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -512,27 +512,26 @@ void constructOptionalValue(const Expr &E, Environment &Env,
/// Returns a symbolic value for the "has_value" property of an `optional<T>`
/// value that is constructed/assigned from a value of type `U` or `optional<U>`
/// where `T` is constructible from `U`.
-BoolValue &valueOrConversionHasValue(const FunctionDecl &F, const Expr &E,
+BoolValue &valueOrConversionHasValue(QualType DestType, const Expr &E,
const MatchFinder::MatchResult &MatchRes,
LatticeTransferState &State) {
- assert(F.getTemplateSpecializationArgs() != nullptr);
- assert(F.getTemplateSpecializationArgs()->size() > 0);
-
- const int TemplateParamOptionalWrappersCount =
- countOptionalWrappers(*MatchRes.Context, F.getTemplateSpecializationArgs()
- ->get(0)
- .getAsType()
- .getNonReferenceType());
+ const int DestTypeOptionalWrappersCount =
+ countOptionalWrappers(*MatchRes.Context, DestType);
const int ArgTypeOptionalWrappersCount = countOptionalWrappers(
*MatchRes.Context, E.getType().getNonReferenceType());
- // Check if this is a constructor/assignment call for `optional<T>` with
- // argument of type `U` such that `T` is constructible from `U`.
- if (TemplateParamOptionalWrappersCount == ArgTypeOptionalWrappersCount)
+ // Is this a constructor of the form `template<class U> optional(U &&)` /
+ // assignment of the form `template<class U> optional& operator=(U &&)`
+ // (where `T` is assignable / constructible from `U`)?
+ // We recognize this because the number of optionals in the optional being
+ // assigned to is different from the function argument type.
+ if (DestTypeOptionalWrappersCount != ArgTypeOptionalWrappersCount)
return State.Env.getBoolLiteralValue(true);
- // This is a constructor/assignment call for `optional<T>` with argument of
- // type `optional<U>` such that `T` is constructible from `U`.
+ // Otherwise, this must be a constructor of the form
+ // `template <class U> optional(optional<U> &&)` / assignment of the form
+ // `template <class U> optional& operator=(optional<U> &&)`
+ // (where, again, `T` is assignable / constructible from `U`).
auto *Loc = State.Env.get<RecordStorageLocation>(E);
if (auto *HasValueVal = getHasValue(State.Env, Loc))
return *HasValueVal;
@@ -544,10 +543,11 @@ void transferValueOrConversionConstructor(
LatticeTransferState &State) {
assert(E->getNumArgs() > 0);
- constructOptionalValue(*E, State.Env,
- valueOrConversionHasValue(*E->getConstructor(),
- *E->getArg(0), MatchRes,
- State));
+ constructOptionalValue(
+ *E, State.Env,
+ valueOrConversionHasValue(
+ E->getConstructor()->getThisType()->getPointeeType(), *E->getArg(0),
+ MatchRes, State));
}
void transferAssignment(const CXXOperatorCallExpr *E, BoolValue &HasValueVal,
@@ -566,10 +566,11 @@ void transferValueOrConversionAssignment(
const CXXOperatorCallExpr *E, const MatchFinder::MatchResult &MatchRes,
LatticeTransferState &State) {
assert(E->getNumArgs() > 1);
- transferAssignment(E,
- valueOrConversionHasValue(*E->getDirectCallee(),
- *E->getArg(1), MatchRes, State),
- State);
+ transferAssignment(
+ E,
+ valueOrConversionHasValue(E->getArg(0)->getType().getNonReferenceType(),
+ *E->getArg(1), MatchRes, State),
+ State);
}
void transferNulloptAssignment(const CXXOperatorCallExpr *E,
diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 960e9688ffb7..0a2e8368d541 100644
--- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -689,51 +689,22 @@ public:
}
llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
-
- // This only contains the direct fields for the given type.
- std::vector<const FieldDecl *> FieldsForInit = getFieldsForInitListExpr(S);
-
- // `S->inits()` contains all the initializer expressions, including the
- // ones for direct base classes.
- ArrayRef<Expr *> Inits = S->inits();
- size_t InitIdx = 0;
-
- // Unions initialized with an empty initializer list need special treatment.
- // For structs/classes initialized with an empty initializer list, Clang
- // puts `ImplicitValueInitExpr`s in `InitListExpr::inits()`, but for unions,
- // it doesn't do this -- so we create an `ImplicitValueInitExpr` ourselves.
- std::optional<ImplicitValueInitExpr> ImplicitValueInitForUnion;
- SmallVector<Expr *> InitsForUnion;
- if (S->getType()->isUnionType() && Inits.empty()) {
- assert(FieldsForInit.size() == 1);
- ImplicitValueInitForUnion.emplace(FieldsForInit.front()->getType());
- InitsForUnion.push_back(&*ImplicitValueInitForUnion);
- Inits = InitsForUnion;
- }
-
- // Initialize base classes.
- if (auto* R = S->getType()->getAsCXXRecordDecl()) {
- assert(FieldsForInit.size() + R->getNumBases() == Inits.size());
- for ([[maybe_unused]] const CXXBaseSpecifier &Base : R->bases()) {
- assert(InitIdx < Inits.size());
- auto Init = Inits[InitIdx++];
- assert(Base.getType().getCanonicalType() ==
- Init->getType().getCanonicalType());
- auto *BaseVal = Env.get<RecordValue>(*Init);
- if (!BaseVal)
- BaseVal = cast<RecordValue>(Env.createValue(Init->getType()));
- // Take ownership of the fields of the `RecordValue` for the base class
- // and incorporate them into the "flattened" set of fields for the
- // derived class.
- auto Children = BaseVal->getLoc().children();
- FieldLocs.insert(Children.begin(), Children.end());
- }
- }
-
- assert(FieldsForInit.size() == Inits.size() - InitIdx);
- for (auto Field : FieldsForInit) {
- assert(InitIdx < Inits.size());
- auto Init = Inits[InitIdx++];
+ RecordInitListHelper InitListHelper(S);
+
+ for (auto [Base, Init] : InitListHelper.base_inits()) {
+ assert(Base->getType().getCanonicalType() ==
+ Init->getType().getCanonicalType());
+ auto *BaseVal = Env.get<RecordValue>(*Init);
+ if (!BaseVal)
+ BaseVal = cast<RecordValue>(Env.createValue(Init->getType()));
+ // Take ownership of the fields of the `RecordValue` for the base class
+ // and incorporate them into the "flattened" set of fields for the
+ // derived class.
+ auto Children = BaseVal->getLoc().children();
+ FieldLocs.insert(Children.begin(), Children.end());
+ }
+
+ for (auto [Field, Init] : InitListHelper.field_inits()) {
assert(
// The types are same, or
Field->getType().getCanonicalType().getUnqualifiedType() ==
diff --git a/clang/lib/Analysis/PathDiagnostic.cpp b/clang/lib/Analysis/PathDiagnostic.cpp
index 79f337a91ec8..35472e705cfd 100644
--- a/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/clang/lib/Analysis/PathDiagnostic.cpp
@@ -115,14 +115,17 @@ PathDiagnostic::PathDiagnostic(
StringRef CheckerName, const Decl *declWithIssue, StringRef bugtype,
StringRef verboseDesc, StringRef shortDesc, StringRef category,
PathDiagnosticLocation LocationToUnique, const Decl *DeclToUnique,
+ const Decl *AnalysisEntryPoint,
std::unique_ptr<FilesToLineNumsMap> ExecutedLines)
: CheckerName(CheckerName), DeclWithIssue(declWithIssue),
BugType(StripTrailingDots(bugtype)),
VerboseDesc(StripTrailingDots(verboseDesc)),
ShortDesc(StripTrailingDots(shortDesc)),
Category(StripTrailingDots(category)), UniqueingLoc(LocationToUnique),
- UniqueingDecl(DeclToUnique), ExecutedLines(std::move(ExecutedLines)),
- path(pathImpl) {}
+ UniqueingDecl(DeclToUnique), AnalysisEntryPoint(AnalysisEntryPoint),
+ ExecutedLines(std::move(ExecutedLines)), path(pathImpl) {
+ assert(AnalysisEntryPoint);
+}
void PathDiagnosticConsumer::anchor() {}
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index e1ff0d92f6b2..e03fe1b68300 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -403,10 +403,11 @@ AST_MATCHER(CXXConstructExpr, isSafeSpanTwoParamConstruct) {
QualType Arg0Ty = Arg0->IgnoreImplicit()->getType();
if (Arg0Ty->isConstantArrayType()) {
- const APInt &ConstArrSize = cast<ConstantArrayType>(Arg0Ty)->getSize();
+ const APSInt ConstArrSize =
+ APSInt(cast<ConstantArrayType>(Arg0Ty)->getSize());
// Check form 4:
- return Arg1CV && APSInt::compareValues(APSInt(ConstArrSize), *Arg1CV) == 0;
+ return Arg1CV && APSInt::compareValues(ConstArrSize, *Arg1CV) == 0;
}
return false;
}
@@ -429,14 +430,13 @@ AST_MATCHER(ArraySubscriptExpr, isSafeArraySubscript) {
BaseDRE->getDecl()->getType());
if (!CATy)
return false;
- const APInt ArrSize = CATy->getSize();
if (const auto *IdxLit = dyn_cast<IntegerLiteral>(Node.getIdx())) {
const APInt ArrIdx = IdxLit->getValue();
// FIXME: ArrIdx.isNegative() we could immediately emit an error as that's a
// bug
if (ArrIdx.isNonNegative() &&
- ArrIdx.getLimitedValue() < ArrSize.getLimitedValue())
+ ArrIdx.getLimitedValue() < CATy->getLimitedSize())
return true;
}
diff --git a/clang/lib/Basic/LangStandards.cpp b/clang/lib/Basic/LangStandards.cpp
index cb2c07723499..c8c9292abcb2 100644
--- a/clang/lib/Basic/LangStandards.cpp
+++ b/clang/lib/Basic/LangStandards.cpp
@@ -21,6 +21,8 @@ StringRef clang::languageToString(Language L) {
return "Asm";
case Language::LLVM_IR:
return "LLVM IR";
+ case Language::CIR:
+ return "ClangIR";
case Language::C:
return "C";
case Language::CXX:
@@ -92,6 +94,7 @@ LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang,
switch (Lang) {
case Language::Unknown:
case Language::LLVM_IR:
+ case Language::CIR:
llvm_unreachable("Invalid input kind!");
case Language::OpenCL:
return LangStandard::lang_opencl12;
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 5d9055174c08..f96956f31d50 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -157,6 +157,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
HasAArch64SVETypes = false;
HasRISCVVTypes = false;
AllowAMDGPUUnsafeFPAtomics = false;
+ HasUnalignedAccess = false;
ARMCDECoprocMask = 0;
// Default to no types using fpret.
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 1c3199bd76ee..1569b5e04b77 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -188,6 +188,8 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
UseZeroLengthBitfieldAlignment = true;
+ HasUnalignedAccess = true;
+
// AArch64 targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericAArch64);
@@ -496,7 +498,7 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasPAuthLR)
Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");
- if (HasUnaligned)
+ if (HasUnalignedAccess)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
if ((FPU & NeonMode) && HasFullFP16)
@@ -921,7 +923,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasSM4 = true;
}
if (Feature == "+strict-align")
- HasUnaligned = false;
+ HasUnalignedAccess = false;
+
// All predecessor archs are added but select the latest one for ArchKind.
if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
ArchInfo = &llvm::AArch64::ARMV8A;
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 542894c66412..12fb50286f75 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -38,7 +38,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasSHA2 = false;
bool HasSHA3 = false;
bool HasSM4 = false;
- bool HasUnaligned = true;
bool HasFullFP16 = false;
bool HasDotProd = false;
bool HasFP16FML = false;
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 55b71557452f..877799c66ec4 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -509,7 +509,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
SHA2 = 0;
AES = 0;
DSP = 0;
- Unaligned = 1;
+ HasUnalignedAccess = true;
SoftFloat = false;
// Note that SoftFloatABI is initialized in our constructor.
HWDiv = 0;
@@ -576,7 +576,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
return false;
}
} else if (Feature == "+strict-align") {
- Unaligned = 0;
+ HasUnalignedAccess = false;
} else if (Feature == "+fp16") {
HW_FP |= HW_FP_HP;
} else if (Feature == "+fullfp16") {
@@ -785,7 +785,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + CPUProfile + "'");
// ACLE 6.4.3 Unaligned access supported in hardware
- if (Unaligned)
+ if (HasUnalignedAccess)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
// ACLE 6.4.4 LDREX/STREX
diff --git a/clang/lib/Basic/Targets/ARM.h b/clang/lib/Basic/Targets/ARM.h
index 71322a094f5e..e69adbe75473 100644
--- a/clang/lib/Basic/Targets/ARM.h
+++ b/clang/lib/Basic/Targets/ARM.h
@@ -88,8 +88,6 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
LLVM_PREFERRED_TYPE(bool)
unsigned DSP : 1;
LLVM_PREFERRED_TYPE(bool)
- unsigned Unaligned : 1;
- LLVM_PREFERRED_TYPE(bool)
unsigned DotProd : 1;
LLVM_PREFERRED_TYPE(bool)
unsigned HasMatMul : 1;
diff --git a/clang/lib/Basic/Targets/LoongArch.cpp b/clang/lib/Basic/Targets/LoongArch.cpp
index 88537989a051..280bd1d8033c 100644
--- a/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/clang/lib/Basic/Targets/LoongArch.cpp
@@ -285,6 +285,8 @@ bool LoongArchTargetInfo::handleTargetFeatures(
HasFeatureLSX = true;
else if (Feature == "+lasx")
HasFeatureLASX = true;
+ else if (Feature == "-ual")
+ HasUnalignedAccess = false;
}
return true;
}
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 3313102492cb..68572843f2d7 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -132,6 +132,7 @@ public:
: LoongArchTargetInfo(Triple, Opts) {
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = Int64Type = SignedLong;
+ HasUnalignedAccess = true;
resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
// TODO: select appropriate ABI.
setABI("lp64d");
diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h
index 23d4e1b598fa..c9dcf434c93b 100644
--- a/clang/lib/Basic/Targets/Mips.h
+++ b/clang/lib/Basic/Targets/Mips.h
@@ -328,6 +328,8 @@ public:
IsMips16 = true;
else if (Feature == "+micromips")
IsMicromips = true;
+ else if (Feature == "+mips32r6" || Feature == "+mips64r6")
+ HasUnalignedAccess = true;
else if (Feature == "+dsp")
DspRev = std::max(DspRev, DSP1);
else if (Feature == "+dspr2")
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index 70683916a8b0..fa2f442e2584 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -92,6 +92,7 @@ public:
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
HasStrictFP = true;
HasIbm128 = true;
+ HasUnalignedAccess = true;
}
// Set the language option for altivec based on our value.
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index a6d4af2b8811..f3d705e1551f 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -467,3 +467,14 @@ ParsedTargetAttr RISCVTargetInfo::parseTargetAttr(StringRef Features) const {
}
return Ret;
}
+
+TargetInfo::CallingConvCheckResult
+RISCVTargetInfo::checkCallingConvention(CallingConv CC) const {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_RISCVVectorCall:
+ return CCCR_OK;
+ }
+}
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index bfbdafb682c8..78580b5b1c10 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -110,6 +110,8 @@ public:
bool hasBFloat16Type() const override { return true; }
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
+
bool useFP16ConversionIntrinsics() const override {
return false;
}
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 3e08b27972fa..8e302acd51b8 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -47,6 +47,7 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEquad();
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
+ HasUnalignedAccess = true;
if (Triple.isOSzOS()) {
TLSSupported = false;
// All vector types are default aligned on an 8-byte boundary, even if the
diff --git a/clang/lib/Basic/Targets/VE.h b/clang/lib/Basic/Targets/VE.h
index ea9a092cad80..7e8fdf6096ef 100644
--- a/clang/lib/Basic/Targets/VE.h
+++ b/clang/lib/Basic/Targets/VE.h
@@ -40,6 +40,7 @@ public:
Int64Type = SignedLong;
RegParmMax = 8;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ HasUnalignedAccess = true;
WCharType = UnsignedInt;
WIntType = UnsignedInt;
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 83b1711f9fdf..5568aa28eaef 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -84,6 +84,7 @@ public:
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
+ HasUnalignedAccess = true;
}
StringRef getABI() const override;
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index d2232c7d5275..c14e4d5f433d 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -188,6 +188,7 @@ public:
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
HasStrictFP = true;
+ HasUnalignedAccess = true;
bool IsWinCOFF =
getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
diff --git a/clang/lib/CodeGen/ABIInfo.cpp b/clang/lib/CodeGen/ABIInfo.cpp
index efcff958ce54..acaae9f8c3d8 100644
--- a/clang/lib/CodeGen/ABIInfo.cpp
+++ b/clang/lib/CodeGen/ABIInfo.cpp
@@ -61,7 +61,7 @@ bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
+ uint64_t NElements = AT->getZExtSize();
if (NElements == 0)
return false;
if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
@@ -98,7 +98,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
QualType FT = FD->getType();
while (const ConstantArrayType *AT =
getContext().getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() == 0)
+ if (AT->isZeroSize())
return false;
FT = AT->getElementType();
}
diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp
index 2b20d5a13346..3e34d82cb399 100644
--- a/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -187,7 +187,7 @@ CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
Address NextPtr =
CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
- CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+ CGF.Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);
// If the argument is smaller than a slot, and this is a big-endian
// target, the argument will be right-adjusted in its slot.
@@ -239,8 +239,8 @@ Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
const llvm::Twine &Name) {
assert(Addr1.getType() == Addr2.getType());
llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
- PHI->addIncoming(Addr1.getPointer(), Block1);
- PHI->addIncoming(Addr2.getPointer(), Block2);
+ PHI->addIncoming(Addr1.emitRawPointer(CGF), Block1);
+ PHI->addIncoming(Addr2.emitRawPointer(CGF), Block2);
CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
return Address(PHI, Addr1.getElementType(), Align);
}
@@ -257,7 +257,7 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
bool WasArray = false;
if (AllowArrays)
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize() == 0)
+ if (AT->isZeroSize())
return true;
FT = AT->getElementType();
// The [[no_unique_address]] special case below does not apply to
@@ -352,7 +352,7 @@ const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
// Treat single element arrays as the element.
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() != 1)
+ if (AT->getZExtSize() != 1)
break;
FT = AT->getElementType();
}
@@ -400,7 +400,7 @@ Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
llvm::Value *Addr =
- CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
+ CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF), BaseTy);
return Address(Addr, ElementTy, TyAlignForABI);
} else {
assert((AI.isDirect() || AI.isExtend()) &&
@@ -416,7 +416,7 @@ Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
"Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
Address Temp = CGF.CreateMemTemp(Ty, "varet");
- Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
+ Val = CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Ty));
CGF.Builder.CreateStore(Val, Temp);
return Temp;
diff --git a/clang/lib/CodeGen/Address.h b/clang/lib/CodeGen/Address.h
index cf48df8f5e73..35ec370a139c 100644
--- a/clang/lib/CodeGen/Address.h
+++ b/clang/lib/CodeGen/Address.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/MathExtras.h"
@@ -22,28 +23,41 @@
namespace clang {
namespace CodeGen {
+class Address;
+class CGBuilderTy;
+class CodeGenFunction;
+class CodeGenModule;
+
// Indicates whether a pointer is known not to be null.
enum KnownNonNull_t { NotKnownNonNull, KnownNonNull };
-/// An aligned address.
-class Address {
+/// An abstract representation of an aligned address. This is designed to be an
+/// IR-level abstraction, carrying just the information necessary to perform IR
+/// operations on an address like loads and stores. In particular, it doesn't
+/// carry C type information or allow the representation of things like
+/// bit-fields; clients working at that level should generally be using
+/// `LValue`.
+/// The pointer contained in this class is known to be unsigned.
+class RawAddress {
llvm::PointerIntPair<llvm::Value *, 1, bool> PointerAndKnownNonNull;
llvm::Type *ElementType;
CharUnits Alignment;
protected:
- Address(std::nullptr_t) : ElementType(nullptr) {}
+ RawAddress(std::nullptr_t) : ElementType(nullptr) {}
public:
- Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment,
- KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ RawAddress(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
: PointerAndKnownNonNull(Pointer, IsKnownNonNull),
ElementType(ElementType), Alignment(Alignment) {
assert(Pointer != nullptr && "Pointer cannot be null");
assert(ElementType != nullptr && "Element type cannot be null");
}
- static Address invalid() { return Address(nullptr); }
+ inline RawAddress(Address Addr);
+
+ static RawAddress invalid() { return RawAddress(nullptr); }
bool isValid() const {
return PointerAndKnownNonNull.getPointer() != nullptr;
}
@@ -80,6 +94,133 @@ public:
return Alignment;
}
+ /// Return address with different element type, but same pointer and
+ /// alignment.
+ RawAddress withElementType(llvm::Type *ElemTy) const {
+ return RawAddress(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
+ }
+
+ KnownNonNull_t isKnownNonNull() const {
+ assert(isValid());
+ return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
+ }
+};
+
+/// Like RawAddress, an abstract representation of an aligned address, but the
+/// pointer contained in this class is possibly signed.
+class Address {
+ friend class CGBuilderTy;
+
+ // The boolean flag indicates whether the pointer is known to be non-null.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Pointer;
+
+ /// The expected IR type of the pointer. Carrying accurate element type
+ /// information in Address makes it more convenient to work with Address
+ /// values and allows frontend assertions to catch simple mistakes.
+ llvm::Type *ElementType = nullptr;
+
+ CharUnits Alignment;
+
+ /// Offset from the base pointer.
+ llvm::Value *Offset = nullptr;
+
+ llvm::Value *emitRawPointerSlow(CodeGenFunction &CGF) const;
+
+protected:
+ Address(std::nullptr_t) : ElementType(nullptr) {}
+
+public:
+ Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ : Pointer(pointer, IsKnownNonNull), ElementType(elementType),
+ Alignment(alignment) {
+ assert(pointer != nullptr && "Pointer cannot be null");
+ assert(elementType != nullptr && "Element type cannot be null");
+ assert(!alignment.isZero() && "Alignment cannot be zero");
+ }
+
+ Address(llvm::Value *BasePtr, llvm::Type *ElementType, CharUnits Alignment,
+ llvm::Value *Offset, KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ : Pointer(BasePtr, IsKnownNonNull), ElementType(ElementType),
+ Alignment(Alignment), Offset(Offset) {}
+
+ Address(RawAddress RawAddr)
+ : Pointer(RawAddr.isValid() ? RawAddr.getPointer() : nullptr),
+ ElementType(RawAddr.isValid() ? RawAddr.getElementType() : nullptr),
+ Alignment(RawAddr.isValid() ? RawAddr.getAlignment()
+ : CharUnits::Zero()) {}
+
+ static Address invalid() { return Address(nullptr); }
+ bool isValid() const { return Pointer.getPointer() != nullptr; }
+
+ /// This function is used in situations where the caller is doing some sort of
+ /// opaque "laundering" of the pointer.
+ void replaceBasePointer(llvm::Value *P) {
+ assert(isValid() && "pointer isn't valid");
+ assert(P->getType() == Pointer.getPointer()->getType() &&
+ "Pointer's type changed");
+ Pointer.setPointer(P);
+ assert(isValid() && "pointer is invalid after replacement");
+ }
+
+ CharUnits getAlignment() const { return Alignment; }
+
+ void setAlignment(CharUnits Value) { Alignment = Value; }
+
+ llvm::Value *getBasePointer() const {
+ assert(isValid() && "pointer isn't valid");
+ return Pointer.getPointer();
+ }
+
+ /// Return the type of the pointer value.
+ llvm::PointerType *getType() const {
+ return llvm::PointerType::get(
+ ElementType,
+ llvm::cast<llvm::PointerType>(Pointer.getPointer()->getType())
+ ->getAddressSpace());
+ }
+
+ /// Return the type of the values stored in this address.
+ llvm::Type *getElementType() const {
+ assert(isValid());
+ return ElementType;
+ }
+
+ /// Return the address space that this address resides in.
+ unsigned getAddressSpace() const { return getType()->getAddressSpace(); }
+
+ /// Return the IR name of the pointer value.
+ llvm::StringRef getName() const { return Pointer.getPointer()->getName(); }
+
+ // This function is called only in CGBuilderBaseTy::CreateElementBitCast.
+ void setElementType(llvm::Type *Ty) {
+ assert(hasOffset() &&
+ "this funcion shouldn't be called when there is no offset");
+ ElementType = Ty;
+ }
+
+ /// Whether the pointer is known not to be null.
+ KnownNonNull_t isKnownNonNull() const {
+ assert(isValid());
+ return (KnownNonNull_t)Pointer.getInt();
+ }
+
+ Address setKnownNonNull() {
+ assert(isValid());
+ Pointer.setInt(KnownNonNull);
+ return *this;
+ }
+
+ bool hasOffset() const { return Offset; }
+
+ llvm::Value *getOffset() const { return Offset; }
+
+ /// Return the pointer contained in this class after authenticating it and
+ /// adding offset to it if necessary.
+ llvm::Value *emitRawPointer(CodeGenFunction &CGF) const {
+ return getBasePointer();
+ }
+
/// Return address with different pointer, but same element type and
/// alignment.
Address withPointer(llvm::Value *NewPointer,
@@ -91,61 +232,59 @@ public:
/// Return address with different alignment, but same pointer and element
/// type.
Address withAlignment(CharUnits NewAlignment) const {
- return Address(getPointer(), getElementType(), NewAlignment,
+ return Address(Pointer.getPointer(), getElementType(), NewAlignment,
isKnownNonNull());
}
/// Return address with different element type, but same pointer and
/// alignment.
Address withElementType(llvm::Type *ElemTy) const {
- return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
- }
-
- /// Whether the pointer is known not to be null.
- KnownNonNull_t isKnownNonNull() const {
- assert(isValid());
- return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
- }
-
- /// Set the non-null bit.
- Address setKnownNonNull() {
- assert(isValid());
- PointerAndKnownNonNull.setInt(true);
- return *this;
+ if (!hasOffset())
+ return Address(getBasePointer(), ElemTy, getAlignment(), nullptr,
+ isKnownNonNull());
+ Address A(*this);
+ A.ElementType = ElemTy;
+ return A;
}
};
+inline RawAddress::RawAddress(Address Addr)
+ : PointerAndKnownNonNull(Addr.isValid() ? Addr.getBasePointer() : nullptr,
+ Addr.isValid() ? Addr.isKnownNonNull()
+ : NotKnownNonNull),
+ ElementType(Addr.isValid() ? Addr.getElementType() : nullptr),
+ Alignment(Addr.isValid() ? Addr.getAlignment() : CharUnits::Zero()) {}
+
/// A specialization of Address that requires the address to be an
/// LLVM Constant.
-class ConstantAddress : public Address {
- ConstantAddress(std::nullptr_t) : Address(nullptr) {}
+class ConstantAddress : public RawAddress {
+ ConstantAddress(std::nullptr_t) : RawAddress(nullptr) {}
public:
ConstantAddress(llvm::Constant *pointer, llvm::Type *elementType,
CharUnits alignment)
- : Address(pointer, elementType, alignment) {}
+ : RawAddress(pointer, elementType, alignment) {}
static ConstantAddress invalid() {
return ConstantAddress(nullptr);
}
llvm::Constant *getPointer() const {
- return llvm::cast<llvm::Constant>(Address::getPointer());
+ return llvm::cast<llvm::Constant>(RawAddress::getPointer());
}
ConstantAddress withElementType(llvm::Type *ElemTy) const {
return ConstantAddress(getPointer(), ElemTy, getAlignment());
}
- static bool isaImpl(Address addr) {
+ static bool isaImpl(RawAddress addr) {
return llvm::isa<llvm::Constant>(addr.getPointer());
}
- static ConstantAddress castImpl(Address addr) {
+ static ConstantAddress castImpl(RawAddress addr) {
return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
addr.getElementType(), addr.getAlignment());
}
};
-
}
// Present a minimal LLVM-like casting interface.
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index fb03d013e8af..56198385de9d 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -80,7 +80,7 @@ namespace {
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.alignTo(lvalue.getAlignment()));
- llvm::Value *BitFieldPtr = lvalue.getBitFieldPointer();
+ llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
@@ -139,13 +139,13 @@ namespace {
const LValue &getAtomicLValue() const { return LVal; }
llvm::Value *getAtomicPointer() const {
if (LVal.isSimple())
- return LVal.getPointer(CGF);
+ return LVal.emitRawPointer(CGF);
else if (LVal.isBitField())
- return LVal.getBitFieldPointer();
+ return LVal.getRawBitFieldPointer(CGF);
else if (LVal.isVectorElt())
- return LVal.getVectorPointer();
+ return LVal.getRawVectorPointer(CGF);
assert(LVal.isExtVectorElt());
- return LVal.getExtVectorPointer();
+ return LVal.getRawExtVectorPointer(CGF);
}
Address getAtomicAddress() const {
llvm::Type *ElTy;
@@ -368,7 +368,7 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
return false;
CGF.Builder.CreateMemSet(
- addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
LVal.getAlignment().getAsAlign());
return true;
@@ -1055,7 +1055,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
return getTargetHooks().performAddrSpaceCast(
*this, V, AS, LangAS::opencl_generic, DestType, false);
};
- Args.add(RValue::get(CastToGenericAddrSpace(Ptr.getPointer(),
+
+ Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
E->getPtr()->getType())),
getContext().VoidPtrTy);
@@ -1086,10 +1087,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(),
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
E->getVal1()->getType())),
getContext().VoidPtrTy);
- Args.add(RValue::get(CastToGenericAddrSpace(Val2.getPointer(),
+ Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
E->getVal2()->getType())),
getContext().VoidPtrTy);
Args.add(RValue::get(Order), getContext().IntTy);
@@ -1105,7 +1106,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__scoped_atomic_exchange:
case AtomicExpr::AO__scoped_atomic_exchange_n:
LibCallName = "__atomic_exchange";
- Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(),
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
E->getVal1()->getType())),
getContext().VoidPtrTy);
break;
@@ -1120,7 +1121,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LibCallName = "__atomic_store";
RetTy = getContext().VoidTy;
HaveRetTy = true;
- Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(),
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
E->getVal1()->getType())),
getContext().VoidPtrTy);
break;
@@ -1199,7 +1200,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (!HaveRetTy) {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(CastToGenericAddrSpace(Dest.getPointer(), RetTy)),
+ Args.add(RValue::get(
+ CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
getContext().VoidPtrTy);
}
// Order is always the last parameter.
@@ -1513,7 +1515,7 @@ RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
} else
TempAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
+ EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);
// Okay, turn that back into the original value or whole atomic (for
// non-simple lvalues) type.
@@ -1673,9 +1675,9 @@ std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
if (shouldUseLibcall()) {
// Produce a source address.
Address ExpectedAddr = materializeRValue(Expected);
- Address DesiredAddr = materializeRValue(Desired);
- auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
- DesiredAddr.getPointer(),
+ llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
+ llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
+ auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
Success, Failure);
return std::make_pair(
convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
@@ -1757,7 +1759,7 @@ void AtomicInfo::EmitAtomicUpdateLibcall(
Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
@@ -1771,10 +1773,10 @@ void AtomicInfo::EmitAtomicUpdateLibcall(
AggValueSlot::ignored(),
SourceLocation(), /*AsValue=*/false);
EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
+ llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
+ llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
- DesiredAddr.getPointer(),
- AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -1843,7 +1845,7 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
Address ExpectedAddr = CreateTempAlloca();
- EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
+ EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
auto *ContBB = CGF.createBasicBlock("atomic_cont");
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
CGF.EmitBlock(ContBB);
@@ -1854,10 +1856,10 @@ void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
CGF.Builder.CreateStore(OldVal, DesiredAddr);
}
EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
+ llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
+ llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
auto *Res =
- EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
- DesiredAddr.getPointer(),
- AO, Failure);
+ EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -1957,7 +1959,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
- args.add(RValue::get(srcAddr.getPointer()), getContext().VoidPtrTy);
+ args.add(RValue::get(srcAddr.emitRawPointer(*this)),
+ getContext().VoidPtrTy);
args.add(
RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
getContext().IntTy);
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index ad0b50d79961..a01f2c7c9798 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -36,7 +36,8 @@ CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
NoEscape(false), HasCXXObject(false), UsesStret(false),
HasCapturedVariableLayout(false), CapturesNonExternalType(false),
- LocalAddress(Address::invalid()), StructureType(nullptr), Block(block) {
+ LocalAddress(RawAddress::invalid()), StructureType(nullptr),
+ Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -794,7 +795,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Otherwise, we have to emit this as a local block.
- Address blockAddr = blockInfo.LocalAddress;
+ RawAddress blockAddr = blockInfo.LocalAddress;
assert(blockAddr.isValid() && "block has no address!");
llvm::Constant *isa;
@@ -939,7 +940,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (CI.isNested())
byrefPointer = Builder.CreateLoad(src, "byref.capture");
else
- byrefPointer = src.getPointer();
+ byrefPointer = src.emitRawPointer(*this);
// Write that void* into the capture field.
Builder.CreateStore(byrefPointer, blockField);
@@ -961,10 +962,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
}
// If it's a reference variable, copy the reference into the block field.
- } else if (type->isReferenceType()) {
- Builder.CreateStore(src.getPointer(), blockField);
+ } else if (auto refType = type->getAs<ReferenceType>()) {
+ Builder.CreateStore(src.emitRawPointer(*this), blockField);
- // If type is const-qualified, copy the value into the block field.
+ // If type is const-qualified, copy the value into the block field.
} else if (type.isConstQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Strong &&
CGM.getCodeGenOpts().OptimizationLevel != 0) {
@@ -1377,7 +1378,7 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
// Allocate a stack slot like for any local variable to guarantee optimal
// debug info at -O0. The mem2reg pass will eliminate it when optimizing.
- Address alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
+ RawAddress alloc = CreateMemTemp(D->getType(), D->getName() + ".addr");
Builder.CreateStore(arg, alloc);
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
@@ -1497,7 +1498,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
// frame setup instruction by llvm::DwarfDebug::beginFunction().
auto NL = ApplyDebugLocation::CreateEmpty(*this);
Builder.CreateStore(BlockPointer, Alloca);
- BlockPointerDbgLoc = Alloca.getPointer();
+ BlockPointerDbgLoc = Alloca.emitRawPointer(*this);
}
// If we have a C++ 'this' reference, go ahead and force it into
@@ -1557,8 +1558,8 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
auto addr = LocalDeclMap.find(variable)->second;
- (void)DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
- Builder);
+ (void)DI->EmitDeclareOfAutoVariable(
+ variable, addr.emitRawPointer(*this), Builder);
continue;
}
@@ -1662,7 +1663,7 @@ struct CallBlockRelease final : EHScopeStack::Cleanup {
if (LoadBlockVarAddr) {
BlockVarAddr = CGF.Builder.CreateLoad(Addr);
} else {
- BlockVarAddr = Addr.getPointer();
+ BlockVarAddr = Addr.emitRawPointer(CGF);
}
CGF.BuildBlockRelease(BlockVarAddr, FieldFlags, CanThrow);
@@ -1962,13 +1963,15 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// it. It's not quite worth the annoyance to avoid creating it in the
// first place.
if (!needsEHCleanup(captureType.isDestructedType()))
- cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
+ if (auto *I =
+ cast_or_null<llvm::Instruction>(dstField.getBasePointer()))
+ I->eraseFromParent();
}
break;
}
case BlockCaptureEntityKind::BlockObject: {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- llvm::Value *dstAddr = dstField.getPointer();
+ llvm::Value *dstAddr = dstField.emitRawPointer(*this);
llvm::Value *args[] = {
dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
@@ -2139,7 +2142,7 @@ public:
llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
llvm::FunctionCallee fn = CGF.CGM.getBlockObjectAssign();
- llvm::Value *args[] = { destField.getPointer(), srcValue, flagsVal };
+ llvm::Value *args[] = {destField.emitRawPointer(CGF), srcValue, flagsVal};
CGF.EmitNounwindRuntimeCall(fn, args);
}
@@ -2696,7 +2699,8 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
storeHeaderField(V, getPointerSize(), "byref.isa");
// Store the address of the variable into its own forwarding pointer.
- storeHeaderField(addr.getPointer(), getPointerSize(), "byref.forwarding");
+ storeHeaderField(addr.emitRawPointer(*this), getPointerSize(),
+ "byref.forwarding");
// Blocks ABI:
// c) the flags field is set to either 0 if no helper functions are
diff --git a/clang/lib/CodeGen/CGBlocks.h b/clang/lib/CodeGen/CGBlocks.h
index 4ef1ae9f3365..8d10c4f69b20 100644
--- a/clang/lib/CodeGen/CGBlocks.h
+++ b/clang/lib/CodeGen/CGBlocks.h
@@ -271,7 +271,8 @@ public:
/// The block's captures. Non-constant captures are sorted by their offsets.
llvm::SmallVector<Capture, 4> SortedCaptures;
- Address LocalAddress;
+ // Currently we assume that block-pointer types are never signed.
+ RawAddress LocalAddress;
llvm::StructType *StructureType;
const BlockDecl *Block;
const BlockExpr *BlockExpression;
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
index bf5ab171d720..6dd9da7c4cad 100644
--- a/clang/lib/CodeGen/CGBuilder.h
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -10,7 +10,9 @@
#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "Address.h"
+#include "CGValue.h"
#include "CodeGenTypeCache.h"
+#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Type.h"
@@ -18,12 +20,15 @@
namespace clang {
namespace CodeGen {
+class CGBuilderTy;
class CodeGenFunction;
/// This is an IRBuilder insertion helper that forwards to
/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
class CGBuilderInserter final : public llvm::IRBuilderDefaultInserter {
+ friend CGBuilderTy;
+
public:
CGBuilderInserter() = default;
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
@@ -43,10 +48,42 @@ typedef llvm::IRBuilder<llvm::ConstantFolder, CGBuilderInserterTy>
CGBuilderBaseTy;
class CGBuilderTy : public CGBuilderBaseTy {
+ friend class Address;
+
/// Storing a reference to the type cache here makes it a lot easier
/// to build natural-feeling, target-specific IR.
const CodeGenTypeCache &TypeCache;
+ CodeGenFunction *getCGF() const { return getInserter().CGF; }
+
+ llvm::Value *emitRawPointerFromAddress(Address Addr) const {
+ return Addr.getBasePointer();
+ }
+
+ template <bool IsInBounds>
+ Address createConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
+ const llvm::Twine &Name) {
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ llvm::GetElementPtrInst *GEP;
+ if (IsInBounds)
+ GEP = cast<llvm::GetElementPtrInst>(CreateConstInBoundsGEP2_32(
+ Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1,
+ Name));
+ else
+ GEP = cast<llvm::GetElementPtrInst>(CreateConstGEP2_32(
+ Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1,
+ Name));
+ llvm::APInt Offset(
+ DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
+ /*isSigned=*/true);
+ if (!GEP->accumulateConstantOffset(DL, Offset))
+ llvm_unreachable("offset of GEP with constants is always computable");
+ return Address(GEP, GEP->getResultElementType(),
+ Addr.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(Offset.getSExtValue())),
+ IsInBounds ? Addr.isKnownNonNull() : NotKnownNonNull);
+ }
+
public:
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
: CGBuilderBaseTy(C), TypeCache(TypeCache) {}
@@ -69,20 +106,22 @@ public:
// Note that we intentionally hide the CreateLoad APIs that don't
// take an alignment.
llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
+ return CreateAlignedLoad(Addr.getElementType(),
+ emitRawPointerFromAddress(Addr),
Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
// This overload is required to prevent string literals from
// ending up in the IsVolatile overload.
- return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
+ return CreateAlignedLoad(Addr.getElementType(),
+ emitRawPointerFromAddress(Addr),
Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
- Addr.getAlignment().getAsAlign(), IsVolatile,
- Name);
+ return CreateAlignedLoad(
+ Addr.getElementType(), emitRawPointerFromAddress(Addr),
+ Addr.getAlignment().getAsAlign(), IsVolatile, Name);
}
using CGBuilderBaseTy::CreateAlignedLoad;
@@ -96,7 +135,7 @@ public:
// take an alignment.
llvm::StoreInst *CreateStore(llvm::Value *Val, Address Addr,
bool IsVolatile = false) {
- return CreateAlignedStore(Val, Addr.getPointer(),
+ return CreateAlignedStore(Val, emitRawPointerFromAddress(Addr),
Addr.getAlignment().getAsAlign(), IsVolatile);
}
@@ -132,33 +171,41 @@ public:
llvm::AtomicOrdering FailureOrdering,
llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
return CGBuilderBaseTy::CreateAtomicCmpXchg(
- Addr.getPointer(), Cmp, New, Addr.getAlignment().getAsAlign(),
- SuccessOrdering, FailureOrdering, SSID);
+ Addr.emitRawPointer(*getCGF()), Cmp, New,
+ Addr.getAlignment().getAsAlign(), SuccessOrdering, FailureOrdering,
+ SSID);
}
llvm::AtomicRMWInst *
CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
llvm::AtomicOrdering Ordering,
llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
- return CGBuilderBaseTy::CreateAtomicRMW(Op, Addr.getPointer(), Val,
- Addr.getAlignment().getAsAlign(),
- Ordering, SSID);
+ return CGBuilderBaseTy::CreateAtomicRMW(
+ Op, Addr.emitRawPointer(*getCGF()), Val,
+ Addr.getAlignment().getAsAlign(), Ordering, SSID);
}
using CGBuilderBaseTy::CreateAddrSpaceCast;
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ llvm::Type *ElementTy,
const llvm::Twine &Name = "") {
- return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
- Addr.isKnownNonNull());
+ if (!Addr.hasOffset())
+ return Address(CreateAddrSpaceCast(Addr.getBasePointer(), Ty, Name),
+ ElementTy, Addr.getAlignment(), nullptr,
+ Addr.isKnownNonNull());
+    // Eagerly force a raw address if there is an offset.
+ return RawAddress(
+ CreateAddrSpaceCast(Addr.emitRawPointer(*getCGF()), Ty, Name),
+ ElementTy, Addr.getAlignment(), Addr.isKnownNonNull());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
llvm::Type *ElementTy,
const llvm::Twine &Name = "") {
- llvm::Value *Ptr =
- CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
- return Address(Ptr, ElementTy, Addr.getAlignment(), Addr.isKnownNonNull());
+ if (Addr.getType()->getAddressSpace() == Ty->getPointerAddressSpace())
+ return Addr.withElementType(ElementTy);
+ return CreateAddrSpaceCast(Addr, Ty, ElementTy, Name);
}
/// Given
@@ -176,10 +223,11 @@ public:
const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return Address(
- CreateStructGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
- ElTy->getElementType(Index),
- Addr.getAlignment().alignmentAtOffset(Offset), Addr.isKnownNonNull());
+ return Address(CreateStructGEP(Addr.getElementType(), Addr.getBasePointer(),
+ Index, Name),
+ ElTy->getElementType(Index),
+ Addr.getAlignment().alignmentAtOffset(Offset),
+ Addr.isKnownNonNull());
}
/// Given
@@ -198,7 +246,7 @@ public:
CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy->getElementType()));
return Address(
- CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
+ CreateInBoundsGEP(Addr.getElementType(), Addr.getBasePointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
ElTy->getElementType(),
Addr.getAlignment().alignmentAtOffset(Index * EltSize),
@@ -216,10 +264,10 @@ public:
const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));
- return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
- getSize(Index), Name),
- ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
- Addr.isKnownNonNull());
+ return Address(
+ CreateInBoundsGEP(ElTy, Addr.getBasePointer(), getSize(Index), Name),
+ ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ Addr.isKnownNonNull());
}
/// Given
@@ -229,110 +277,133 @@ public:
/// where i64 is actually the target word size.
Address CreateConstGEP(Address Addr, uint64_t Index,
const llvm::Twine &Name = "") {
+ llvm::Type *ElTy = Addr.getElementType();
const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
- CharUnits EltSize =
- CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
+ CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));
- return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
- getSize(Index), Name),
+ return Address(CreateGEP(ElTy, Addr.getBasePointer(), getSize(Index), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize),
- NotKnownNonNull);
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
/// Create GEP with single dynamic index. The address alignment is reduced
/// according to the element size.
using CGBuilderBaseTy::CreateGEP;
- Address CreateGEP(Address Addr, llvm::Value *Index,
+ Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index,
const llvm::Twine &Name = "") {
const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
CharUnits EltSize =
CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
return Address(
- CreateGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
+ CreateGEP(Addr.getElementType(), Addr.emitRawPointer(CGF), Index, Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentOfArrayElement(EltSize), NotKnownNonNull);
+ Addr.getAlignment().alignmentOfArrayElement(EltSize));
}
/// Given a pointer to i8, adjust it by a given constant offset.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
- return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
- getSize(Offset), Name),
- Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Offset),
- Addr.isKnownNonNull());
+ return Address(
+ CreateInBoundsGEP(Addr.getElementType(), Addr.getBasePointer(),
+ getSize(Offset), Name),
+ Addr.getElementType(), Addr.getAlignment().alignmentAtOffset(Offset),
+ Addr.isKnownNonNull());
}
+
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
- return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
+ return Address(CreateGEP(Addr.getElementType(), Addr.getBasePointer(),
getSize(Offset), Name),
Addr.getElementType(),
- Addr.getAlignment().alignmentAtOffset(Offset),
- NotKnownNonNull);
+ Addr.getAlignment().alignmentAtOffset(Offset));
}
using CGBuilderBaseTy::CreateConstInBoundsGEP2_32;
Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
const llvm::Twine &Name = "") {
- const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ return createConstGEP2_32<true>(Addr, Idx0, Idx1, Name);
+ }
- auto *GEP = cast<llvm::GetElementPtrInst>(CreateConstInBoundsGEP2_32(
- Addr.getElementType(), Addr.getPointer(), Idx0, Idx1, Name));
- llvm::APInt Offset(
- DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
- /*isSigned=*/true);
- if (!GEP->accumulateConstantOffset(DL, Offset))
- llvm_unreachable("offset of GEP with constants is always computable");
- return Address(GEP, GEP->getResultElementType(),
- Addr.getAlignment().alignmentAtOffset(
- CharUnits::fromQuantity(Offset.getSExtValue())),
- Addr.isKnownNonNull());
+ using CGBuilderBaseTy::CreateConstGEP2_32;
+ Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
+ const llvm::Twine &Name = "") {
+ return createConstGEP2_32<false>(Addr, Idx0, Idx1, Name);
+ }
+
+ Address CreateGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *ElementType, CharUnits Align,
+ const Twine &Name = "") {
+ llvm::Value *Ptr = emitRawPointerFromAddress(Addr);
+ return RawAddress(CreateGEP(Addr.getElementType(), Ptr, IdxList, Name),
+ ElementType, Align);
+ }
+
+ using CGBuilderBaseTy::CreateInBoundsGEP;
+ Address CreateInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *ElementType, CharUnits Align,
+ const Twine &Name = "") {
+ return RawAddress(CreateInBoundsGEP(Addr.getElementType(),
+ emitRawPointerFromAddress(Addr),
+ IdxList, Name),
+ ElementType, Align, Addr.isKnownNonNull());
+ }
+
+ using CGBuilderBaseTy::CreateIsNull;
+ llvm::Value *CreateIsNull(Address Addr, const Twine &Name = "") {
+ if (!Addr.hasOffset())
+ return CreateIsNull(Addr.getBasePointer(), Name);
+ // The pointer isn't null if Addr has an offset since offsets can always
+ // be applied inbound.
+ return llvm::ConstantInt::getFalse(Context);
}
using CGBuilderBaseTy::CreateMemCpy;
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
- return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(), Size,
- IsVolatile);
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemCpy(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), Size, IsVolatile);
}
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
bool IsVolatile = false) {
- return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(), Size,
- IsVolatile);
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemCpy(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), Size, IsVolatile);
}
using CGBuilderBaseTy::CreateMemCpyInline;
llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) {
- return CreateMemCpyInline(
- Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(),
- Src.getAlignment().getAsAlign(), getInt64(Size));
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemCpyInline(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), getInt64(Size));
}
using CGBuilderBaseTy::CreateMemMove;
llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
- return CreateMemMove(Dest.getPointer(), Dest.getAlignment().getAsAlign(),
- Src.getPointer(), Src.getAlignment().getAsAlign(),
- Size, IsVolatile);
+ llvm::Value *DestPtr = emitRawPointerFromAddress(Dest);
+ llvm::Value *SrcPtr = emitRawPointerFromAddress(Src);
+ return CreateMemMove(DestPtr, Dest.getAlignment().getAsAlign(), SrcPtr,
+ Src.getAlignment().getAsAlign(), Size, IsVolatile);
}
using CGBuilderBaseTy::CreateMemSet;
llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value,
llvm::Value *Size, bool IsVolatile = false) {
- return CreateMemSet(Dest.getPointer(), Value, Size,
+ return CreateMemSet(emitRawPointerFromAddress(Dest), Value, Size,
Dest.getAlignment().getAsAlign(), IsVolatile);
}
using CGBuilderBaseTy::CreateMemSetInline;
llvm::CallInst *CreateMemSetInline(Address Dest, llvm::Value *Value,
uint64_t Size) {
- return CreateMemSetInline(Dest.getPointer(),
+ return CreateMemSetInline(emitRawPointerFromAddress(Dest),
Dest.getAlignment().getAsAlign(), Value,
getInt64(Size));
}
@@ -346,16 +417,31 @@ public:
const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return Address(CreatePreserveStructAccessIndex(ElTy, Addr.getPointer(),
- Index, FieldIndex, DbgInfo),
- ElTy->getElementType(Index),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ return Address(
+ CreatePreserveStructAccessIndex(ElTy, emitRawPointerFromAddress(Addr),
+ Index, FieldIndex, DbgInfo),
+ ElTy->getElementType(Index),
+ Addr.getAlignment().alignmentAtOffset(Offset));
+ }
+
+ using CGBuilderBaseTy::CreatePreserveUnionAccessIndex;
+ Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex,
+ llvm::MDNode *DbgInfo) {
+ Addr.replaceBasePointer(CreatePreserveUnionAccessIndex(
+ Addr.getBasePointer(), FieldIndex, DbgInfo));
+ return Addr;
}
using CGBuilderBaseTy::CreateLaunderInvariantGroup;
Address CreateLaunderInvariantGroup(Address Addr) {
- return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()),
- Addr.isKnownNonNull());
+ Addr.replaceBasePointer(CreateLaunderInvariantGroup(Addr.getBasePointer()));
+ return Addr;
+ }
+
+ using CGBuilderBaseTy::CreateStripInvariantGroup;
+ Address CreateStripInvariantGroup(Address Addr) {
+ Addr.replaceBasePointer(CreateStripInvariantGroup(Addr.getBasePointer()));
+ return Addr;
}
};
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 77cb269d43c5..bb007231c0b7 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -792,7 +792,8 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
- return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
+ return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
+ ArgValue);
}
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
@@ -1130,8 +1131,92 @@ struct BitTest {
static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
+
+// Returns the first convergence entry/loop/anchor instruction found in |BB|.
+// Returns nullptr otherwise.
+llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
+ for (auto &I : *BB) {
+ auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
+ if (II && isConvergenceControlIntrinsic(II->getIntrinsicID()))
+ return II;
+ }
+ return nullptr;
+}
+
} // namespace
+llvm::CallBase *
+CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
+ llvm::Value *ParentToken) {
+ llvm::Value *bundleArgs[] = {ParentToken};
+ llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
+ auto Output = llvm::CallBase::addOperandBundle(
+ Input, llvm::LLVMContext::OB_convergencectrl, OB, Input);
+ Input->replaceAllUsesWith(Output);
+ Input->eraseFromParent();
+ return Output;
+}
+
+llvm::IntrinsicInst *
+CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
+ llvm::Value *ParentToken) {
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ Builder.SetInsertPoint(&BB->front());
+ auto CB = Builder.CreateIntrinsic(
+ llvm::Intrinsic::experimental_convergence_loop, {}, {});
+ Builder.restoreIP(IP);
+
+ auto I = addConvergenceControlToken(CB, ParentToken);
+ return cast<llvm::IntrinsicInst>(I);
+}
+
+llvm::IntrinsicInst *
+CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
+ auto *BB = &F->getEntryBlock();
+ auto *token = getConvergenceToken(BB);
+ if (token)
+ return token;
+
+ // Adding a convergence token requires the function to be marked as
+ // convergent.
+ F->setConvergent();
+
+ CGBuilderTy::InsertPoint IP = Builder.saveIP();
+ Builder.SetInsertPoint(&BB->front());
+ auto I = Builder.CreateIntrinsic(
+ llvm::Intrinsic::experimental_convergence_entry, {}, {});
+ assert(isa<llvm::IntrinsicInst>(I));
+ Builder.restoreIP(IP);
+
+ return cast<llvm::IntrinsicInst>(I);
+}
+
+llvm::IntrinsicInst *
+CodeGenFunction::getOrEmitConvergenceLoopToken(const LoopInfo *LI) {
+ assert(LI != nullptr);
+
+ auto *token = getConvergenceToken(LI->getHeader());
+ if (token)
+ return token;
+
+ llvm::IntrinsicInst *PII =
+ LI->getParent()
+ ? emitConvergenceLoopToken(
+ LI->getHeader(), getOrEmitConvergenceLoopToken(LI->getParent()))
+ : getOrEmitConvergenceEntryToken(LI->getHeader()->getParent());
+
+ return emitConvergenceLoopToken(LI->getHeader(), PII);
+}
+
+llvm::CallBase *
+CodeGenFunction::addControlledConvergenceToken(llvm::CallBase *Input) {
+ llvm::Value *ParentToken =
+ LoopStack.hasInfo()
+ ? getOrEmitConvergenceLoopToken(&LoopStack.getInfo())
+ : getOrEmitConvergenceEntryToken(Input->getFunction());
+ return addConvergenceControlToken(Input, ParentToken);
+}
+
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
switch (BuiltinID) {
// Main portable variants.
@@ -2116,9 +2201,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
- Address BufAddr =
- Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
- BufferAlignment);
+ Address BufAddr = makeNaturalAddressForPointer(
+ Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
+ BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
@@ -2161,7 +2246,7 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
// Ignore argument 1, the format string. It is not currently used.
CallArgList Args;
- Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
+ Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
for (const auto &Item : Layout.Items) {
int Size = Item.getSizeByte();
@@ -2201,8 +2286,8 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
if (!isa<Constant>(ArgVal)) {
CleanupKind Cleanup = getARCCleanupKind();
QualType Ty = TheExpr->getType();
- Address Alloca = Address::invalid();
- Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
+ RawAddress Alloca = RawAddress::invalid();
+ RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
ArgVal = EmitARCRetain(Ty, ArgVal);
Builder.CreateStore(ArgVal, Addr);
pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
@@ -2235,7 +2320,7 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
Layout, BufAddr.getAlignment());
EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
- return RValue::get(BufAddr.getPointer());
+ return RValue::get(BufAddr, *this);
}
static bool isSpecialUnsignedMultiplySignedResult(
@@ -2983,7 +3068,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Check NonnullAttribute/NullabilityArg and Alignment.
auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
unsigned ParmNum) {
- Value *Val = A.getPointer();
+ Value *Val = A.emitRawPointer(*this);
EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
ParmNum);
@@ -3012,13 +3097,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_va_end:
EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
? EmitScalarExpr(E->getArg(0))
- : EmitVAListRef(E->getArg(0)).getPointer(),
+ : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
BuiltinID != Builtin::BI__builtin_va_end);
return RValue::get(nullptr);
case Builtin::BI__builtin_va_copy: {
- Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
- Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), {DstPtr, SrcPtr});
+ Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
+ Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
+ {DstPtr, SrcPtr});
return RValue::get(nullptr);
}
case Builtin::BIabs:
@@ -3128,36 +3214,66 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
- case Builtin::BI__builtin_ctzll: {
- Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
+ case Builtin::BI__builtin_ctzll:
+ case Builtin::BI__builtin_ctzg: {
+ bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg &&
+ E->getNumArgs() > 1;
+
+ Value *ArgValue =
+ HasFallback ? EmitScalarExpr(E->getArg(0))
+ : EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
+ Value *ZeroUndef =
+ Builder.getInt1(HasFallback || getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
- return RValue::get(Result);
+ if (!HasFallback)
+ return RValue::get(Result);
+
+ Value *Zero = Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *FallbackValue = EmitScalarExpr(E->getArg(1));
+ Value *ResultOrFallback =
+ Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
+ return RValue::get(ResultOrFallback);
}
case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
- case Builtin::BI__builtin_clzll: {
- Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
+ case Builtin::BI__builtin_clzll:
+ case Builtin::BI__builtin_clzg: {
+ bool HasFallback = BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg &&
+ E->getNumArgs() > 1;
+
+ Value *ArgValue =
+ HasFallback ? EmitScalarExpr(E->getArg(0))
+ : EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
+ Value *ZeroUndef =
+ Builder.getInt1(HasFallback || getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
- return RValue::get(Result);
+ if (!HasFallback)
+ return RValue::get(Result);
+
+ Value *Zero = Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *FallbackValue = EmitScalarExpr(E->getArg(1));
+ Value *ResultOrFallback =
+ Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
+ return RValue::get(ResultOrFallback);
}
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
@@ -3817,13 +3933,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
Address Src = EmitPointerWithAlignment(E->getArg(0));
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
+ E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
+ 0);
Value *Result = MB.CreateColumnMajorLoad(
- Src.getElementType(), Src.getPointer(),
+ Src.getElementType(), Src.emitRawPointer(*this),
Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
- ResultTy->getNumRows(), ResultTy->getNumColumns(),
- "matrix");
+ ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
return RValue::get(Result);
}
@@ -3838,11 +3954,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
assert(PtrTy && "arg1 must be of pointer type");
bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
- EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
+ E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
+ 0);
Value *Result = MB.CreateColumnMajorStore(
- Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
- Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
+ Matrix, Dst.emitRawPointer(*this),
+ Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
+ MatrixTy->getNumRows(), MatrixTy->getNumColumns());
return RValue::get(Result);
}
@@ -4001,7 +4119,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
return RValue::get(nullptr);
@@ -4012,10 +4130,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(0));
Address Dest = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
+ E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
+ 0);
+ EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
+ E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
+ 0);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
return RValue::get(nullptr);
}
@@ -4032,10 +4152,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
if (BuiltinID == Builtin::BImempcpy ||
BuiltinID == Builtin::BI__builtin_mempcpy)
- return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
- Dest.getPointer(), SizeVal));
+ return RValue::get(Builder.CreateInBoundsGEP(
+ Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
else
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_memcpy_inline: {
@@ -4067,7 +4187,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_objc_memmove_collectable: {
@@ -4076,7 +4196,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
DestAddr, SrcAddr, SizeVal);
- return RValue::get(DestAddr.getPointer());
+ return RValue::get(DestAddr, *this);
}
case Builtin::BI__builtin___memmove_chk: {
@@ -4093,7 +4213,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BImemmove:
@@ -4104,7 +4224,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
@@ -4112,10 +4232,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_memset_inline: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -4123,8 +4243,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
uint64_t Size =
E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
+ E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
+ 0);
Builder.CreateMemSetInline(Dest, ByteVal, Size);
return RValue::get(nullptr);
}
@@ -4143,7 +4264,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
- return RValue::get(Dest.getPointer());
+ return RValue::get(Dest, *this);
}
case Builtin::BI__builtin_wmemchr: {
// The MSVC runtime library does not provide a definition of wmemchr, so we
@@ -4365,14 +4486,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Store the stack pointer to the setjmp buffer.
Value *StackAddr = Builder.CreateStackSave();
- assert(Buf.getPointer()->getType() == StackAddr->getType());
+ assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
+ return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
@@ -5545,7 +5666,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
- llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
+ llvm::Value *Range = NDRangeL.getAddress(*this).emitRawPointer(*this);
llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
if (NumArgs == 4) {
@@ -5654,9 +5775,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
} else {
- EventWaitList = E->getArg(4)->getType()->isArrayType()
- ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
- : EmitScalarExpr(E->getArg(4));
+ EventWaitList =
+ E->getArg(4)->getType()->isArrayType()
+ ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
+ : EmitScalarExpr(E->getArg(4));
// Convert to generic address space.
EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
}
@@ -5752,7 +5874,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
- llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
+ llvm::Value *NDRange = NDRangeL.getAddress(*this).emitRawPointer(*this);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
Value *Kernel =
@@ -5770,7 +5892,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Name),
{NDRange, Kernel, Block}));
}
-
case Builtin::BI__builtin_store_half:
case Builtin::BI__builtin_store_halff: {
Value *Val = EmitScalarExpr(E->getArg(0));
@@ -5837,7 +5958,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto PTy0 = FTy->getParamType(0);
if (PTy0 != Arg0Val->getType()) {
if (Arg0Ty->isArrayType())
- Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
+ Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
else
Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
}
@@ -5875,7 +5996,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto PTy1 = FTy->getParamType(1);
if (PTy1 != Arg1Val->getType()) {
if (Arg1Ty->isArrayType())
- Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
+ Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
else
Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
}
@@ -5889,7 +6010,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_ms_va_end:
return RValue::get(
- EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
+ EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
BuiltinID == Builtin::BI__builtin_ms_va_start));
case Builtin::BI__builtin_ms_va_copy: {
@@ -5931,8 +6052,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return emitLibraryCall(*this, FD, E,
- cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
+ return emitLibraryCall(
+ *this, FD, E, cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
// Check that a call to a target specific builtin has the correct target
// features.
@@ -6049,7 +6170,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(nullptr);
return RValue::get(V);
case TEK_Aggregate:
- return RValue::getAggregate(ReturnValue.getValue(),
+ return RValue::getAggregate(ReturnValue.getAddress(),
ReturnValue.isVolatile());
case TEK_Complex:
llvm_unreachable("No current target builtin returns complex");
@@ -8819,7 +8940,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(PtrOp0.getPointer());
+ Ops.push_back(PtrOp0.emitRawPointer(*this));
continue;
}
}
@@ -8846,7 +8967,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
- Ops.push_back(PtrOp1.getPointer());
+ Ops.push_back(PtrOp1.emitRawPointer(*this));
continue;
}
}
@@ -9267,7 +9388,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
if (ReturnValue.isNull())
return MvecOut;
else
- return Builder.CreateStore(MvecOut, ReturnValue.getValue());
+ return Builder.CreateStore(MvecOut, ReturnValue.getAddress());
}
case CustomCodeGen::VST24: {
@@ -11447,7 +11568,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
- Ops.push_back(PtrOp0.getPointer());
+ Ops.push_back(PtrOp0.emitRawPointer(*this));
continue;
}
}
@@ -13313,15 +13434,15 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
if (!getDebugInfo()) {
CGM.Error(E->getExprLoc(),
"using __builtin_preserve_field_info() without -g");
- return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
+ return IsBitField ? EmitLValue(Arg).getRawBitFieldPointer(*this)
+ : EmitLValue(Arg).emitRawPointer(*this);
}
// Enable underlying preserve_*_access_index() generation.
bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
IsInPreservedAIRegion = true;
- Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
- : EmitLValue(Arg).getPointer(*this);
+ Value *FieldAddr = IsBitField ? EmitLValue(Arg).getRawBitFieldPointer(*this)
+ : EmitLValue(Arg).emitRawPointer(*this);
IsInPreservedAIRegion = OldIsInPreservedAIRegion;
ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
@@ -14313,14 +14434,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
- Address Tmp = CreateMemTemp(E->getArg(0)->getType());
+ RawAddress Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Tmp.getPointer());
}
case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
- Address Tmp = CreateMemTemp(E->getType());
+ RawAddress Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Tmp.getPointer());
return Builder.CreateLoad(Tmp, "stmxcsr");
@@ -17595,7 +17716,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
if (E->getArg(i)->getType()->isArrayType())
- Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
+ Ops.push_back(
+ EmitArrayToPointerDecay(E->getArg(i)).emitRawPointer(*this));
else
Ops.push_back(EmitScalarExpr(E->getArg(i)));
// The first argument of these two builtins is a pointer used to store their
@@ -18036,15 +18158,22 @@ llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
return Arg;
}
-Intrinsic::ID getDotProductIntrinsic(QualType QT) {
+Intrinsic::ID getDotProductIntrinsic(QualType QT, int elementCount) {
+ if (QT->hasFloatingRepresentation()) {
+ switch (elementCount) {
+ case 2:
+ return Intrinsic::dx_dot2;
+ case 3:
+ return Intrinsic::dx_dot3;
+ case 4:
+ return Intrinsic::dx_dot4;
+ }
+ }
if (QT->hasSignedIntegerRepresentation())
return Intrinsic::dx_sdot;
- if (QT->hasUnsignedIntegerRepresentation())
- return Intrinsic::dx_udot;
- assert(QT->hasFloatingRepresentation());
- return Intrinsic::dx_dot;
- ;
+ assert(QT->hasUnsignedIntegerRepresentation());
+ return Intrinsic::dx_udot;
}
Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
@@ -18098,8 +18227,7 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
assert(T0->getScalarType() == T1->getScalarType() &&
"Dot product of vectors need the same element types.");
- [[maybe_unused]] auto *VecTy0 =
- E->getArg(0)->getType()->getAs<VectorType>();
+ auto *VecTy0 = E->getArg(0)->getType()->getAs<VectorType>();
[[maybe_unused]] auto *VecTy1 =
E->getArg(1)->getType()->getAs<VectorType>();
// A HLSLVectorTruncation should have happend
@@ -18108,7 +18236,8 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
return Builder.CreateIntrinsic(
/*ReturnType=*/T0->getScalarType(),
- getDotProductIntrinsic(E->getArg(0)->getType()),
+ getDotProductIntrinsic(E->getArg(0)->getType(),
+ VecTy0->getNumElements()),
ArrayRef<Value *>{Op0, Op1}, nullptr, "dx.dot");
} break;
case Builtin::BI__builtin_hlsl_lerp: {
@@ -18178,6 +18307,14 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
/*ReturnType=*/Op0->getType(), Intrinsic::dx_rsqrt,
ArrayRef<Value *>{Op0}, nullptr, "dx.rsqrt");
}
+ case Builtin::BI__builtin_hlsl_wave_get_lane_index: {
+ auto *CI = EmitRuntimeCall(CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, {}, false), "__hlsl_wave_get_lane_index",
+ {}, false, true));
+ if (getTarget().getTriple().isSPIRVLogical())
+ CI = dyn_cast<CallInst>(addControlledConvergenceToken(CI));
+ return CI;
+ }
}
return nullptr;
}
@@ -18501,43 +18638,25 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
}
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_i32:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v2i32:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4f16:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4i16:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8f16:
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8i16: {
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16: {
- llvm::Type *ArgTy;
+ Intrinsic::ID IID;
switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_i32:
- ArgTy = llvm::Type::getInt32Ty(getLLVMContext());
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v2i32:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getInt32Ty(getLLVMContext()), 2);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4f16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getHalfTy(getLLVMContext()), 4);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4i16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getInt16Ty(getLLVMContext()), 4);
- break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8f16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getHalfTy(getLLVMContext()), 8);
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
+ IID = Intrinsic::amdgcn_global_load_tr_b64;
break;
- case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8i16:
- ArgTy = llvm::FixedVectorType::get(
- llvm::Type::getInt16Ty(getLLVMContext()), 8);
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v8i16:
+ IID = Intrinsic::amdgcn_global_load_tr_b128;
break;
}
-
+ llvm::Type *LoadTy = ConvertType(E->getType());
llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
- llvm::Function *F =
- CGM.getIntrinsic(Intrinsic::amdgcn_global_load_tr, {ArgTy});
+ llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
return Builder.CreateCall(F, {Addr});
}
case AMDGPU::BI__builtin_amdgcn_get_fpenv: {
@@ -20068,14 +20187,14 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
// Save returned values.
assert(II.NumResults);
if (II.NumResults == 1) {
- Builder.CreateAlignedStore(Result, Dst.getPointer(),
+ Builder.CreateAlignedStore(Result, Dst.emitRawPointer(*this),
CharUnits::fromQuantity(4));
} else {
for (unsigned i = 0; i < II.NumResults; ++i) {
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
Dst.getElementType()),
- Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
+ Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
}
@@ -20115,7 +20234,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < II.NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
Src.getElementType(),
- Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
+ Builder.CreateGEP(Src.getElementType(), Src.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ParamType));
@@ -20187,7 +20306,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsA; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcA.getElementType(),
- Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
+ Builder.CreateGEP(SrcA.getElementType(), SrcA.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, AType));
@@ -20197,7 +20316,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsB; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcB.getElementType(),
- Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
+ Builder.CreateGEP(SrcB.getElementType(), SrcB.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, BType));
@@ -20208,7 +20327,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsC; ++i) {
Value *V = Builder.CreateAlignedLoad(
SrcC.getElementType(),
- Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
+ Builder.CreateGEP(SrcC.getElementType(), SrcC.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, CType));
@@ -20218,7 +20337,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < MI.NumEltsD; ++i)
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
- Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
+ Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
return Result;
@@ -20476,7 +20595,7 @@ struct BuiltinAlignArgs {
BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
QualType AstType = E->getArg(0)->getType();
if (AstType->isArrayType())
- Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
else
Src = CGF.EmitScalarExpr(E->getArg(0));
SrcType = Src->getType();
@@ -21094,7 +21213,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_get: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Index = EmitScalarExpr(E->getArg(1));
Function *Callee;
if (E->getType().isWebAssemblyExternrefType())
@@ -21108,7 +21227,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_set: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Index = EmitScalarExpr(E->getArg(1));
Value *Val = EmitScalarExpr(E->getArg(2));
Function *Callee;
@@ -21123,13 +21242,13 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_size: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Value = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
return Builder.CreateCall(Callee, Value);
}
case WebAssembly::BI__builtin_wasm_table_grow: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Val = EmitScalarExpr(E->getArg(1));
Value *NElems = EmitScalarExpr(E->getArg(2));
@@ -21146,7 +21265,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_fill: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
Value *Index = EmitScalarExpr(E->getArg(1));
Value *Val = EmitScalarExpr(E->getArg(2));
Value *NElems = EmitScalarExpr(E->getArg(3));
@@ -21164,8 +21283,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_table_copy: {
assert(E->getArg(0)->getType()->isArrayType());
- Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
- Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
+ Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this);
+ Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).emitRawPointer(*this);
Value *DstIdx = EmitScalarExpr(E->getArg(2));
Value *SrcIdx = EmitScalarExpr(E->getArg(3));
Value *NElems = EmitScalarExpr(E->getArg(4));
@@ -21244,7 +21363,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
Address A = EmitPointerWithAlignment(E->getArg(0));
- Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment());
+ Address BP = Address(A.emitRawPointer(*this), Int8PtrTy, A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// The treatment of both loads and stores is the same: the arguments for
// the builtin are the same as the arguments for the intrinsic.
@@ -21285,8 +21404,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// EmitPointerWithAlignment and EmitScalarExpr evaluates the expression
// per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
- DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment());
- llvm::Value *DestAddress = DestAddr.getPointer();
+ DestAddr = DestAddr.withElementType(Int8Ty);
+ llvm::Value *DestAddress = DestAddr.emitRawPointer(*this);
// Operands are Base, Dest, Modifier.
// The intrinsic format in LLVM IR is defined as
@@ -21337,8 +21456,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
- Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
- PredAddr.getAlignment());
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.emitRawPointer(*this),
+ PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
// These are identical to the builtins above, except they don't consume
@@ -21356,8 +21475,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
- Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
- PredAddr.getAlignment());
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.emitRawPointer(*this),
+ PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index d3f2573fd5e3..0cb5b06a519c 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -331,11 +331,11 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
// Store pointers to the arguments in a locally allocated launch_args.
for (unsigned i = 0; i < Args.size(); ++i) {
- llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
+ llvm::Value *VarPtr = CGF.GetAddrOfLocalVar(Args[i]).emitRawPointer(CGF);
llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, PtrTy);
CGF.Builder.CreateDefaultAlignedStore(
- VoidVarPtr,
- CGF.Builder.CreateConstGEP1_32(PtrTy, KernelArgs.getPointer(), i));
+ VoidVarPtr, CGF.Builder.CreateConstGEP1_32(
+ PtrTy, KernelArgs.emitRawPointer(CGF), i));
}
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
@@ -393,9 +393,10 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
/*isVarArg=*/false),
addUnderscoredPrefixToName("PopCallConfiguration"));
- CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn,
- {GridDim.getPointer(), BlockDim.getPointer(),
- ShmemSize.getPointer(), Stream.getPointer()});
+ CGF.EmitRuntimeCallOrInvoke(cudaPopConfigFn, {GridDim.emitRawPointer(CGF),
+ BlockDim.emitRawPointer(CGF),
+ ShmemSize.emitRawPointer(CGF),
+ Stream.emitRawPointer(CGF)});
// Emit the call to cudaLaunch
llvm::Value *Kernel =
@@ -405,7 +406,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
cudaLaunchKernelFD->getParamDecl(0)->getType());
LaunchKernelArgs.add(RValue::getAggregate(GridDim), Dim3Ty);
LaunchKernelArgs.add(RValue::getAggregate(BlockDim), Dim3Ty);
- LaunchKernelArgs.add(RValue::get(KernelArgs.getPointer()),
+ LaunchKernelArgs.add(RValue::get(KernelArgs, CGF),
cudaLaunchKernelFD->getParamDecl(3)->getType());
LaunchKernelArgs.add(RValue::get(CGF.Builder.CreateLoad(ShmemSize)),
cudaLaunchKernelFD->getParamDecl(4)->getType());
@@ -438,8 +439,8 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType());
Offset = Offset.alignTo(TInfo.Align);
llvm::Value *Args[] = {
- CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
- PtrTy),
+ CGF.Builder.CreatePointerCast(
+ CGF.GetAddrOfLocalVar(A).emitRawPointer(CGF), PtrTy),
llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
@@ -605,20 +606,10 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
if (Info.Flags.isManaged()) {
- auto *ManagedVar = new llvm::GlobalVariable(
- CGM.getModule(), Var->getType(),
- /*isConstant=*/false, Var->getLinkage(),
- /*Init=*/Var->isDeclaration()
- ? nullptr
- : llvm::ConstantPointerNull::get(Var->getType()),
- /*Name=*/"", /*InsertBefore=*/nullptr,
- llvm::GlobalVariable::NotThreadLocal);
- ManagedVar->setDSOLocal(Var->isDSOLocal());
- ManagedVar->setVisibility(Var->getVisibility());
- ManagedVar->setExternallyInitialized(true);
- ManagedVar->takeName(Var);
- Var->setName(Twine(ManagedVar->getName() + ".managed"));
- replaceManagedVar(Var, ManagedVar);
+ assert(Var->getName().ends_with(".managed") &&
+ "HIP managed variables not transformed");
+ auto *ManagedVar = CGM.getModule().getNamedGlobal(
+ Var->getName().drop_back(StringRef(".managed").size()));
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
ManagedVar,
@@ -1093,7 +1084,9 @@ void CGNVCUDARuntime::transformManagedVars() {
: llvm::ConstantPointerNull::get(Var->getType()),
/*Name=*/"", /*InsertBefore=*/nullptr,
llvm::GlobalVariable::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_device));
+ CGM.getContext().getTargetAddressSpace(CGM.getLangOpts().CUDAIsDevice
+ ? LangAS::cuda_device
+ : LangAS::Default));
ManagedVar->setDSOLocal(Var->isDSOLocal());
ManagedVar->setVisibility(Var->getVisibility());
ManagedVar->setExternallyInitialized(true);
@@ -1102,7 +1095,7 @@ void CGNVCUDARuntime::transformManagedVars() {
Var->setName(Twine(ManagedVar->getName()) + ".managed");
// Keep managed variables even if they are not used in device code since
// they need to be allocated by the runtime.
- if (!Var->isDeclaration()) {
+ if (CGM.getLangOpts().CUDAIsDevice && !Var->isDeclaration()) {
assert(!ManagedVar->isDeclaration());
CGM.addCompilerUsedGlobal(Var);
CGM.addCompilerUsedGlobal(ManagedVar);
@@ -1160,9 +1153,8 @@ void CGNVCUDARuntime::createOffloadingEntries() {
// Returns module constructor to be added.
llvm::Function *CGNVCUDARuntime::finalizeModule() {
+ transformManagedVars();
if (CGM.getLangOpts().CUDAIsDevice) {
- transformManagedVars();
-
// Mark ODR-used device variables as compiler used to prevent it from being
// eliminated by optimization. This is necessary for device variables
// ODR-used by host functions. Sema correctly marks them as ODR-used no
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index a8bf57a277e9..7c6dfc3e59d8 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -20,6 +20,12 @@ using namespace CodeGen;
CGCXXABI::~CGCXXABI() { }
+Address CGCXXABI::getThisAddress(CodeGenFunction &CGF) {
+ return CGF.makeNaturalAddressForPointer(
+ CGF.CXXABIThisValue, CGF.CXXABIThisDecl->getType()->getPointeeType(),
+ CGF.CXXABIThisAlignment);
+}
+
void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -44,8 +50,12 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *MemPtr, const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "calls through member pointers");
- ThisPtrForCall = This.getPointer();
- const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const auto *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
+ ThisPtrForCall =
+ CGF.getAsNaturalPointerTo(This, CGF.getContext().getRecordType(RD));
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
llvm::Constant *FnPtr = llvm::Constant::getNullValue(
llvm::PointerType::getUnqual(CGM.getLLVMContext()));
return CGCallee::forDirect(FnPtr, FPT);
@@ -251,16 +261,15 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
- allocPtr = ptr.getPointer();
+ allocPtr = ptr.emitRawPointer(CGF);
numElements = nullptr;
cookieSize = CharUnits::Zero();
return;
}
cookieSize = getArrayCookieSizeImpl(eltTy);
- Address allocAddr =
- CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
- allocPtr = allocAddr.getPointer();
+ Address allocAddr = CGF.Builder.CreateConstInBoundsByteGEP(ptr, -cookieSize);
+ allocPtr = allocAddr.emitRawPointer(CGF);
numElements = readArrayCookieImpl(CGF, allocAddr, cookieSize);
}
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
index ad1ad08d0856..c7eccbd0095a 100644
--- a/clang/lib/CodeGen/CGCXXABI.h
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -57,12 +57,8 @@ protected:
llvm::Value *getThisValue(CodeGenFunction &CGF) {
return CGF.CXXABIThisValue;
}
- Address getThisAddress(CodeGenFunction &CGF) {
- return Address(
- CGF.CXXABIThisValue,
- CGF.ConvertTypeForMem(CGF.CXXABIThisDecl->getType()->getPointeeType()),
- CGF.CXXABIThisAlignment);
- }
+
+ Address getThisAddress(CodeGenFunction &CGF);
/// Issue a diagnostic about unsupported features in the ABI.
void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
@@ -475,12 +471,6 @@ public:
BaseSubobject Base,
const CXXRecordDecl *NearestVBase) = 0;
- /// Get the address point of the vtable for the given base subobject while
- /// building a constexpr.
- virtual llvm::Constant *
- getVTableAddressPointForConstExpr(BaseSubobject Base,
- const CXXRecordDecl *VTableClass) = 0;
-
/// Get the address of the vtable for the given record decl which should be
/// used for the vptr at the given offset in RD.
virtual llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index a28d7888715d..a5fe39633679 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -74,6 +74,9 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
+ // clang-format off
+ case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall;
+ // clang-format on
}
}
@@ -260,6 +263,9 @@ static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
if (D->hasAttr<PreserveNoneAttr>())
return CC_PreserveNone;
+ if (D->hasAttr<RISCVVectorCCAttr>())
+ return CC_RISCVVectorCall;
+
return CC_C;
}
@@ -933,8 +939,8 @@ struct NoExpansion : TypeExpansion {
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- return std::make_unique<ConstantArrayExpansion>(
- AT->getElementType(), AT->getSize().getZExtValue());
+ return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
+ AT->getZExtSize());
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
@@ -1031,15 +1037,9 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
ConstantArrayExpansion *CAE,
Address BaseAddr,
llvm::function_ref<void(Address)> Fn) {
- CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
- CharUnits EltAlign =
- BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
- llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
-
for (int i = 0, n = CAE->NumElts; i < n; i++) {
- llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
- BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
- Fn(Address(EltAddr, EltTy, EltAlign));
+ Address EltAddr = CGF.Builder.CreateConstGEP2_32(BaseAddr, 0, i);
+ Fn(EltAddr);
}
}
@@ -1154,9 +1154,10 @@ void CodeGenFunction::ExpandTypeToArgs(
}
/// Create a temporary allocation for the purposes of coercion.
-static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
- CharUnits MinAlign,
- const Twine &Name = "tmp") {
+static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF,
+ llvm::Type *Ty,
+ CharUnits MinAlign,
+ const Twine &Name = "tmp") {
// Don't use an alignment that's worse than what LLVM would prefer.
auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
@@ -1326,11 +1327,11 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
}
// Otherwise do coercion through memory. This is stupid, but simple.
- Address Tmp =
+ RawAddress Tmp =
CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
CGF.Builder.CreateMemCpy(
- Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
- Src.getAlignment().getAsAlign(),
+ Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+ Src.emitRawPointer(CGF), Src.getAlignment().getAsAlign(),
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
return CGF.Builder.CreateLoad(Tmp);
}
@@ -1414,11 +1415,12 @@ static void CreateCoercedStore(llvm::Value *Src,
//
// FIXME: Assert that we aren't truncating non-padding bits when have access
// to that information.
- Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
+ RawAddress Tmp =
+ CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
CGF.Builder.CreateStore(Src, Tmp);
CGF.Builder.CreateMemCpy(
- Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
- Tmp.getAlignment().getAsAlign(),
+ Dst.emitRawPointer(CGF), Dst.getAlignment().getAsAlign(),
+ Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
}
}
@@ -3018,17 +3020,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
- ArgI.getIndirectAlign(), KnownNonNull);
+ Address ParamAddr = makeNaturalAddressForPointer(
+ Fn->getArg(FirstIRArg), Ty, ArgI.getIndirectAlign(), false, nullptr,
+ nullptr, KnownNonNull);
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
// need to do is realign the value, if requested. Also, if the address
// may be aliased, copy it to ensure that the parameter variable is
// mutable and has a unique adress, as C requires.
- Address V = ParamAddr;
if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
- Address AlignedTemp = CreateMemTemp(Ty, "coerce");
+ RawAddress AlignedTemp = CreateMemTemp(Ty, "coerce");
// Copy from the incoming argument pointer to the temporary with the
// appropriate alignment.
@@ -3038,11 +3040,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
CharUnits Size = getContext().getTypeSizeInChars(Ty);
Builder.CreateMemCpy(
AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
- ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
+ ParamAddr.emitRawPointer(*this),
+ ParamAddr.getAlignment().getAsAlign(),
llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
- V = AlignedTemp;
+ ParamAddr = AlignedTemp;
}
- ArgVals.push_back(ParamValue::forIndirect(V));
+ ArgVals.push_back(ParamValue::forIndirect(ParamAddr));
} else {
// Load scalar value from indirect argument.
llvm::Value *V =
@@ -3086,7 +3089,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
- uint64_t ArrSize = ArrTy->getSize().getZExtValue();
+ uint64_t ArrSize = ArrTy->getZExtSize();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
llvm::AttrBuilder Attrs(getLLVMContext());
@@ -3156,10 +3159,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
== ParameterABI::SwiftErrorResult) {
QualType pointeeTy = Ty->getPointeeType();
assert(pointeeTy->isPointerType());
- Address temp =
- CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
- Address arg(V, ConvertTypeForMem(pointeeTy),
- getContext().getTypeAlignInChars(pointeeTy));
+ RawAddress temp =
+ CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
+ Address arg = makeNaturalAddressForPointer(
+ V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
Builder.CreateStore(incomingErrorValue, temp);
V = temp.getPointer();
@@ -3496,7 +3499,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::LoadInst *load =
dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
if (!load || load->isAtomic() || load->isVolatile() ||
- load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getBasePointer())
return nullptr;
// Okay! Burn it all down. This relies for correctness on the
@@ -3533,12 +3536,15 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ llvm::Value *ReturnValuePtr = CGF.ReturnValue.getBasePointer();
+
// Check if a User is a store which pointerOperand is the ReturnValue.
// We are looking for stores to the ReturnValue, not for stores of the
// ReturnValue to some other location.
- auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
+ auto GetStoreIfValid = [&CGF,
+ ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
auto *SI = dyn_cast<llvm::StoreInst>(U);
- if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
+ if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
return nullptr;
// These aren't actually possible for non-coerced returns, and we
@@ -3552,7 +3558,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// for something immediately preceding the IP. Sometimes this can
// happen with how we generate implicit-returns; it can also happen
// with noreturn cleanups.
- if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
+ if (!ReturnValuePtr->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
@@ -3570,8 +3576,7 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
return nullptr;
}
- llvm::StoreInst *store =
- GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
+ llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
if (!store) return nullptr;
// Now do a first-and-dirty dominance check: just walk up the
@@ -4115,7 +4120,11 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
}
static bool isProvablyNull(llvm::Value *addr) {
- return isa<llvm::ConstantPointerNull>(addr);
+ return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
+ return llvm::isKnownNonZero(Addr.getBasePointer(), CGF.CGM.getDataLayout());
}
/// Emit the actual writing-back of a writeback.
@@ -4123,21 +4132,20 @@ static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
Address srcAddr = srcLV.getAddress(CGF);
- assert(!isProvablyNull(srcAddr.getPointer()) &&
+ assert(!isProvablyNull(srcAddr.getBasePointer()) &&
"shouldn't have writeback for provably null argument");
llvm::BasicBlock *contBB = nullptr;
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
- bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
- CGF.CGM.getDataLayout());
+ bool provablyNonNull = isProvablyNonNull(srcAddr, CGF);
+
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
- llvm::Value *isNull =
- CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
CGF.EmitBlock(writebackBB);
}
@@ -4241,7 +4249,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
// If the address is a constant null, just pass the appropriate null.
- if (isProvablyNull(srcAddr.getPointer())) {
+ if (isProvablyNull(srcAddr.getBasePointer())) {
args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
CRE->getType());
return;
@@ -4270,17 +4278,16 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
- bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
- CGF.CGM.getDataLayout());
+ bool provablyNonNull = isProvablyNonNull(srcAddr, CGF);
+
if (provablyNonNull) {
- finalArgument = temp.getPointer();
+ finalArgument = temp.emitRawPointer(CGF);
} else {
- llvm::Value *isNull =
- CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
- finalArgument = CGF.Builder.CreateSelect(isNull,
- llvm::ConstantPointerNull::get(destType),
- temp.getPointer(), "icr.argument");
+ finalArgument = CGF.Builder.CreateSelect(
+ isNull, llvm::ConstantPointerNull::get(destType),
+ temp.emitRawPointer(CGF), "icr.argument");
// If we need to copy, then the load has to be conditional, which
// means we need control flow.
@@ -4404,6 +4411,16 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
}
+void CodeGenFunction::EmitNonNullArgCheck(Address Addr, QualType ArgType,
+ SourceLocation ArgLoc,
+ AbstractCallee AC, unsigned ParmNum) {
+ if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
+ SanOpts.has(SanitizerKind::NullabilityArg)))
+ return;
+
+ EmitNonNullArgCheck(RValue::get(Addr, *this), ArgType, ArgLoc, AC, ParmNum);
+}
+
// Check if the call is going to use the inalloca convention. This needs to
// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
// later, so we can't check it directly.
@@ -4744,12 +4761,22 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const llvm::Twine &name) {
- return EmitNounwindRuntimeCall(callee, std::nullopt, name);
+ return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value *>(), name);
}
/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<Address> args,
+ const llvm::Twine &name) {
+ SmallVector<llvm::Value *, 3> values;
+ for (auto arg : args)
+ values.push_back(arg.emitRawPointer(*this));
+ return EmitNounwindRuntimeCall(callee, values, name);
+}
+
+llvm::CallInst *
+CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
ArrayRef<llvm::Value *> args,
const llvm::Twine &name) {
llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
@@ -5026,7 +5053,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
- Address ArgMemory = Address::invalid();
+ RawAddress ArgMemory = RawAddress::invalid();
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
const llvm::DataLayout &DL = CGM.getDataLayout();
llvm::Instruction *IP = CallArgs.getStackBase();
@@ -5042,7 +5069,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
AI->setAlignment(Align.getAsAlign());
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
- ArgMemory = Address(AI, ArgStruct, Align);
+ ArgMemory = RawAddress(AI, ArgStruct, Align);
}
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
@@ -5051,11 +5078,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
Address SRetPtr = Address::invalid();
- Address SRetAlloca = Address::invalid();
+ RawAddress SRetAlloca = RawAddress::invalid();
llvm::Value *UnusedReturnSizePtr = nullptr;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
if (!ReturnValue.isNull()) {
- SRetPtr = ReturnValue.getValue();
+ SRetPtr = ReturnValue.getAddress();
} else {
SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
@@ -5065,15 +5092,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
if (IRFunctionArgs.hasSRetArg()) {
- IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
+ getAsNaturalPointerTo(SRetPtr, RetTy);
} else if (RetAI.isInAlloca()) {
Address Addr =
Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
- Builder.CreateStore(SRetPtr.getPointer(), Addr);
+ Builder.CreateStore(getAsNaturalPointerTo(SRetPtr, RetTy), Addr);
}
}
- Address swiftErrorTemp = Address::invalid();
+ RawAddress swiftErrorTemp = RawAddress::invalid();
Address swiftErrorArg = Address::invalid();
// When passing arguments using temporary allocas, we need to add the
@@ -5106,9 +5134,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (I->isAggregate()) {
- Address Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
- : I->getKnownRValue().getAggregateAddress();
+ RawAddress Addr = I->hasLValue()
+ ? I->getKnownLValue().getAddress(*this)
+ : I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(Addr.getPointer());
@@ -5132,7 +5160,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else if (ArgInfo.getInAllocaIndirect()) {
// Make a temporary alloca and store the address of it into the argument
// struct.
- Address Addr = CreateMemTempWithoutCast(
+ RawAddress Addr = CreateMemTempWithoutCast(
I->Ty, getContext().getTypeAlignInChars(I->Ty),
"indirect-arg-temp");
I->copyInto(*this, Addr);
@@ -5154,12 +5182,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(NumIRArgs == 1);
if (!I->isAggregate()) {
// Make a temporary alloca to pass the argument.
- Address Addr = CreateMemTempWithoutCast(
+ RawAddress Addr = CreateMemTempWithoutCast(
I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
- llvm::Value *Val = Addr.getPointer();
+ llvm::Value *Val = getAsNaturalPointerTo(Addr, I->Ty);
if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(Addr.getPointer());
+ Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
I->copyInto(*this, Addr);
@@ -5175,7 +5203,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Address Addr = I->hasLValue()
? I->getKnownLValue().getAddress(*this)
: I->getKnownRValue().getAggregateAddress();
- llvm::Value *V = Addr.getPointer();
CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
@@ -5186,8 +5213,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
bool NeedCopy = false;
if (Addr.getAlignment() < Align &&
- llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
- Align.getAsAlign()) {
+ llvm::getOrEnforceKnownAlignment(Addr.emitRawPointer(*this),
+ Align.getAsAlign(),
+ *TD) < Align.getAsAlign()) {
NeedCopy = true;
} else if (I->hasLValue()) {
auto LV = I->getKnownLValue();
@@ -5218,11 +5246,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (NeedCopy) {
// Create an aligned temporary, and copy to it.
- Address AI = CreateMemTempWithoutCast(
+ RawAddress AI = CreateMemTempWithoutCast(
I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
- llvm::Value *Val = AI.getPointer();
+ llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(AI.getPointer());
+ Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
// Emit lifetime markers for the temporary alloca.
@@ -5239,6 +5267,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
+ llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty);
auto *T = llvm::PointerType::get(
CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
@@ -5278,8 +5307,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
QualType pointeeTy = I->Ty->getPointeeType();
- swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
- getContext().getTypeAlignInChars(pointeeTy));
+ swiftErrorArg = makeNaturalAddressForPointer(
+ V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
swiftErrorTemp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
@@ -5416,7 +5445,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
- Address AllocaAddr = Address::invalid();
+ RawAddress AllocaAddr = RawAddress::invalid();
if (I->isAggregate()) {
addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
: I->getKnownRValue().getAggregateAddress();
@@ -5686,6 +5715,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!CI->getType()->isVoidTy())
CI->setName("call");
+ if (getTarget().getTriple().isSPIRVLogical() && CI->isConvergent())
+ CI = addControlledConvergenceToken(CI);
+
// Update largest vector width from the return type.
LargestVectorWidth =
std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
@@ -5850,7 +5882,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return RValue::getComplex(std::make_pair(Real, Imag));
}
case TEK_Aggregate: {
- Address DestPtr = ReturnValue.getValue();
+ Address DestPtr = ReturnValue.getAddress();
bool DestIsVolatile = ReturnValue.isVolatile();
if (!DestPtr.isValid()) {
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 1bd48a072593..6b676ac196db 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -377,6 +377,7 @@ public:
Address getValue() const { return Addr; }
bool isUnused() const { return IsUnused; }
bool isExternallyDestructed() const { return IsExternallyDestructed; }
+ Address getAddress() const { return Addr; }
};
/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 34319381901a..8c1c8ee455d2 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -139,8 +139,9 @@ Address CodeGenFunction::LoadCXXThisAddress() {
CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent());
}
- llvm::Type *Ty = ConvertType(MD->getFunctionObjectParameterType());
- return Address(LoadCXXThis(), Ty, CXXThisAlignment, KnownNonNull);
+ return makeNaturalAddressForPointer(
+ LoadCXXThis(), MD->getFunctionObjectParameterType(), CXXThisAlignment,
+ false, nullptr, nullptr, KnownNonNull);
}
/// Emit the address of a field using a member data pointer.
@@ -270,7 +271,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
}
// Apply the base offset.
- llvm::Value *ptr = addr.getPointer();
+ llvm::Value *ptr = addr.emitRawPointer(CGF);
ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr");
// If we have a virtual component, the alignment of the result will
@@ -338,8 +339,8 @@ Address CodeGenFunction::GetAddressOfBaseClass(
if (sanitizePerformTypeCheck()) {
SanitizerSet SkippedChecks;
SkippedChecks.set(SanitizerKind::Null, !NullCheckValue);
- EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
- DerivedTy, DerivedAlign, SkippedChecks);
+ EmitTypeCheck(TCK_Upcast, Loc, Value.emitRawPointer(*this), DerivedTy,
+ DerivedAlign, SkippedChecks);
}
return Value.withElementType(BaseValueTy);
}
@@ -354,7 +355,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
endBB = createBasicBlock("cast.end");
- llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer());
+ llvm::Value *isNull = Builder.CreateIsNull(Value);
Builder.CreateCondBr(isNull, endBB, notNullBB);
EmitBlock(notNullBB);
}
@@ -363,14 +364,15 @@ Address CodeGenFunction::GetAddressOfBaseClass(
SanitizerSet SkippedChecks;
SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
- Value.getPointer(), DerivedTy, DerivedAlign, SkippedChecks);
+ Value.emitRawPointer(*this), DerivedTy, DerivedAlign,
+ SkippedChecks);
}
// Compute the virtual offset.
llvm::Value *VirtualOffset = nullptr;
if (VBase) {
VirtualOffset =
- CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
+ CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
}
// Apply both offsets.
@@ -387,7 +389,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
EmitBlock(endBB);
llvm::PHINode *PHI = Builder.CreatePHI(PtrTy, 2, "cast.result");
- PHI->addIncoming(Value.getPointer(), notNullBB);
+ PHI->addIncoming(Value.emitRawPointer(*this), notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(PtrTy), origBB);
Value = Value.withPointer(PHI, NotKnownNonNull);
}
@@ -424,15 +426,19 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
CastNotNull = createBasicBlock("cast.notnull");
CastEnd = createBasicBlock("cast.end");
- llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer());
+ llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
// Apply the offset.
- llvm::Value *Value = BaseAddr.getPointer();
- Value = Builder.CreateInBoundsGEP(
- Int8Ty, Value, Builder.CreateNeg(NonVirtualOffset), "sub.ptr");
+ Address Addr = BaseAddr.withElementType(Int8Ty);
+ Addr = Builder.CreateInBoundsGEP(
+ Addr, Builder.CreateNeg(NonVirtualOffset), Int8Ty,
+ CGM.getClassPointerAlignment(Derived), "sub.ptr");
+
+ // Just cast.
+ Addr = Addr.withElementType(DerivedValueTy);
// Produce a PHI if we had a null-check.
if (NullCheckValue) {
@@ -441,13 +447,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
Builder.CreateBr(CastEnd);
EmitBlock(CastEnd);
+ llvm::Value *Value = Addr.emitRawPointer(*this);
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
- Value = PHI;
+ return Address(PHI, Addr.getElementType(),
+ CGM.getClassPointerAlignment(Derived));
}
- return Address(Value, DerivedValueTy, CGM.getClassPointerAlignment(Derived));
+ return Addr;
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -1719,7 +1727,7 @@ namespace {
// Use the base class declaration location as inline DebugLocation. All
// fields of the class are destroyed.
DeclAsInlineDebugLocation InlineHere(CGF, *BaseClass);
- EmitSanitizerDtorFieldsCallback(CGF, Addr.getPointer(),
+ EmitSanitizerDtorFieldsCallback(CGF, Addr.emitRawPointer(CGF),
BaseSize.getQuantity());
// Prevent the current stack frame from disappearing from the stack trace.
@@ -2022,7 +2030,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Find the end of the array.
llvm::Type *elementType = arrayBase.getElementType();
- llvm::Value *arrayBegin = arrayBase.getPointer();
+ llvm::Value *arrayBegin = arrayBase.emitRawPointer(*this);
llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(
elementType, arrayBegin, numElements, "arrayctor.end");
@@ -2118,14 +2126,15 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
Address This = ThisAVS.getAddress();
LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace();
LangAS ThisAS = D->getFunctionObjectParameterType().getAddressSpace();
- llvm::Value *ThisPtr = This.getPointer();
+ llvm::Value *ThisPtr =
+ getAsNaturalPointerTo(This, D->getThisType()->getPointeeType());
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
llvm::Type *NewType =
llvm::PointerType::get(getLLVMContext(), TargetThisAS);
- ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
- ThisAS, SlotAS, NewType);
+ ThisPtr = getTargetHooks().performAddrSpaceCast(*this, ThisPtr, ThisAS,
+ SlotAS, NewType);
}
// Push the this ptr.
@@ -2194,7 +2203,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const CXXRecordDecl *ClassDecl = D->getParent();
if (!NewPointerIsChecked)
- EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This.getPointer(),
+ EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This,
getContext().getRecordType(ClassDecl), CharUnits::Zero());
if (D->isTrivial() && D->isDefaultConstructor()) {
@@ -2207,10 +2216,9 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
// model that copy.
if (isMemcpyEquivalentSpecialMember(D)) {
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
-
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
- Address Src = Address(Args[1].getRValue(*this).getScalarVal(), ConvertTypeForMem(SrcTy),
- CGM.getNaturalTypeAlignment(SrcTy));
+ Address Src = makeNaturalAddressForPointer(
+ Args[1].getRValue(*this).getScalarVal(), SrcTy);
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2263,7 +2271,9 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall(
const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
CallArgList Args;
- CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType());
+ CallArg ThisArg(RValue::get(getAsNaturalPointerTo(
+ This, D->getThisType()->getPointeeType())),
+ D->getThisType());
// Forward the parameters.
if (InheritedFromVBase &&
@@ -2388,12 +2398,14 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This.getPointer()), D->getThisType());
+ Args.add(RValue::get(getAsNaturalPointerTo(This, D->getThisType())),
+ D->getThisType());
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
llvm::Type *t = CGM.getTypes().ConvertType(QT);
- llvm::Value *SrcVal = Builder.CreateBitCast(Src.getPointer(), t);
+ llvm::Value *Val = getAsNaturalPointerTo(Src, D->getThisType());
+ llvm::Value *SrcVal = Builder.CreateBitCast(Val, t);
Args.add(RValue::get(SrcVal), QT);
// Skip over first argument (Src).
@@ -2418,7 +2430,9 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
// this
Address This = LoadCXXThisAddress();
- DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType());
+ DelegateArgs.add(RValue::get(getAsNaturalPointerTo(
+ This, (*I)->getType()->getPointeeType())),
+ (*I)->getType());
++I;
// FIXME: The location of the VTT parameter in the parameter list is
@@ -2775,7 +2789,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
if (MayBeNull) {
llvm::Value *DerivedNotNull =
- Builder.CreateIsNotNull(Derived.getPointer(), "cast.nonnull");
+ Builder.CreateIsNotNull(Derived.emitRawPointer(*this), "cast.nonnull");
llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
ContBlock = createBasicBlock("cast.cont");
@@ -2976,7 +2990,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
Address ThisPtr = GetAddrOfBlockDecl(variable);
- CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
+ CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType);
// Add the rest of the parameters.
for (auto *param : BD->parameters())
@@ -3004,7 +3018,7 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
QualType LambdaType = getContext().getRecordType(Lambda);
QualType ThisType = getContext().getPointerType(LambdaType);
Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
- CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
+ CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType);
EmitLambdaDelegatingInvokeBody(MD, CallArgs);
}
diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp
index f87caf050eea..e6f8e6873004 100644
--- a/clang/lib/CodeGen/CGCleanup.cpp
+++ b/clang/lib/CodeGen/CGCleanup.cpp
@@ -27,7 +27,7 @@ bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
if (rv.isScalar())
return DominatingLLVMValue::needsSaving(rv.getScalarVal());
if (rv.isAggregate())
- return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
+ return DominatingValue<Address>::needsSaving(rv.getAggregateAddress());
return true;
}
@@ -35,69 +35,40 @@ DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
if (rv.isScalar()) {
llvm::Value *V = rv.getScalarVal();
-
- // These automatically dominate and don't need to be saved.
- if (!DominatingLLVMValue::needsSaving(V))
- return saved_type(V, nullptr, ScalarLiteral);
-
- // Everything else needs an alloca.
- Address addr =
- CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
- CGF.Builder.CreateStore(V, addr);
- return saved_type(addr.getPointer(), nullptr, ScalarAddress);
+ return saved_type(DominatingLLVMValue::save(CGF, V),
+ DominatingLLVMValue::needsSaving(V) ? ScalarAddress
+ : ScalarLiteral);
}
if (rv.isComplex()) {
CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
- llvm::Type *ComplexTy =
- llvm::StructType::get(V.first->getType(), V.second->getType());
- Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
- CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
- CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
- return saved_type(addr.getPointer(), nullptr, ComplexAddress);
+ return saved_type(DominatingLLVMValue::save(CGF, V.first),
+ DominatingLLVMValue::save(CGF, V.second));
}
assert(rv.isAggregate());
- Address V = rv.getAggregateAddress(); // TODO: volatile?
- if (!DominatingLLVMValue::needsSaving(V.getPointer()))
- return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral,
- V.getAlignment().getQuantity());
-
- Address addr =
- CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
- CGF.Builder.CreateStore(V.getPointer(), addr);
- return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress,
- V.getAlignment().getQuantity());
+ Address V = rv.getAggregateAddress();
+ return saved_type(
+ DominatingValue<Address>::save(CGF, V), rv.isVolatileQualified(),
+ DominatingValue<Address>::needsSaving(V) ? AggregateAddress
+ : AggregateLiteral);
}
/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
- auto getSavingAddress = [&](llvm::Value *value) {
- auto *AI = cast<llvm::AllocaInst>(value);
- return Address(value, AI->getAllocatedType(),
- CharUnits::fromQuantity(AI->getAlign().value()));
- };
switch (K) {
case ScalarLiteral:
- return RValue::get(Value);
case ScalarAddress:
- return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
+ return RValue::get(DominatingLLVMValue::restore(CGF, Vals.first));
case AggregateLiteral:
+ case AggregateAddress:
return RValue::getAggregate(
- Address(Value, ElementType, CharUnits::fromQuantity(Align)));
- case AggregateAddress: {
- auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
- return RValue::getAggregate(
- Address(addr, ElementType, CharUnits::fromQuantity(Align)));
- }
+ DominatingValue<Address>::restore(CGF, AggregateAddr), IsVolatile);
case ComplexAddress: {
- Address address = getSavingAddress(Value);
- llvm::Value *real =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0));
- llvm::Value *imag =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1));
+ llvm::Value *real = DominatingLLVMValue::restore(CGF, Vals.first);
+ llvm::Value *imag = DominatingLLVMValue::restore(CGF, Vals.second);
return RValue::getComplex(real, imag);
}
}
@@ -294,14 +265,14 @@ void EHScopeStack::popNullFixups() {
BranchFixups.pop_back();
}
-Address CodeGenFunction::createCleanupActiveFlag() {
+RawAddress CodeGenFunction::createCleanupActiveFlag() {
// Create a variable to decide whether the cleanup needs to be run.
- Address active = CreateTempAllocaWithoutCast(
+ RawAddress active = CreateTempAllocaWithoutCast(
Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond");
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
- setBeforeOutermostConditional(Builder.getFalse(), active);
+ setBeforeOutermostConditional(Builder.getFalse(), active, *this);
// Initialize it to true at the current location.
Builder.CreateStore(Builder.getTrue(), active);
@@ -309,7 +280,7 @@ Address CodeGenFunction::createCleanupActiveFlag() {
return active;
}
-void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
+void CodeGenFunction::initFullExprCleanupWithFlag(RawAddress ActiveFlag) {
// Set that as the active flag in the cleanup.
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
@@ -322,15 +293,17 @@ void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
void EHScopeStack::Cleanup::anchor() {}
static void createStoreInstBefore(llvm::Value *value, Address addr,
- llvm::Instruction *beforeInst) {
- auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
+ llvm::Instruction *beforeInst,
+ CodeGenFunction &CGF) {
+ auto store = new llvm::StoreInst(value, addr.emitRawPointer(CGF), beforeInst);
store->setAlignment(addr.getAlignment().getAsAlign());
}
static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
- llvm::Instruction *beforeInst) {
- return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name,
- false, addr.getAlignment().getAsAlign(),
+ llvm::Instruction *beforeInst,
+ CodeGenFunction &CGF) {
+ return new llvm::LoadInst(addr.getElementType(), addr.emitRawPointer(CGF),
+ name, false, addr.getAlignment().getAsAlign(),
beforeInst);
}
@@ -357,8 +330,8 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF,
// entry which we're currently popping.
if (Fixup.OptimisticBranchBlock == nullptr) {
createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
- CGF.getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ CGF.getNormalCleanupDestSlot(), Fixup.InitialBranch,
+ CGF);
Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
}
@@ -385,7 +358,7 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
assert(Br->isUnconditional());
auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
- "cleanup.dest", Term);
+ "cleanup.dest", Term, CGF);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
Br->eraseFromParent();
@@ -513,8 +486,8 @@ void CodeGenFunction::PopCleanupBlocks(
I += Header.getSize();
if (Header.isConditional()) {
- Address ActiveFlag =
- reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]);
+ RawAddress ActiveFlag =
+ reinterpret_cast<RawAddress &>(LifetimeExtendedCleanupStack[I]);
initFullExprCleanupWithFlag(ActiveFlag);
I += sizeof(ActiveFlag);
}
@@ -888,7 +861,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (NormalCleanupDestSlot->hasOneUse()) {
NormalCleanupDestSlot->user_back()->eraseFromParent();
NormalCleanupDestSlot->eraseFromParent();
- NormalCleanupDest = Address::invalid();
+ NormalCleanupDest = RawAddress::invalid();
}
llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
@@ -912,9 +885,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// pass the abnormal exit flag to Fn (SEH cleanup)
cleanupFlags.setHasExitSwitch();
- llvm::LoadInst *Load =
- createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
- nullptr);
+ llvm::LoadInst *Load = createLoadInstBefore(
+ getNormalCleanupDestSlot(), "cleanup.dest", nullptr, *this);
llvm::SwitchInst *Switch =
llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
@@ -961,8 +933,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!Fixup.Destination) continue;
if (!Fixup.OptimisticBranchBlock) {
createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
+ getNormalCleanupDestSlot(), Fixup.InitialBranch,
+ *this);
Fixup.InitialBranch->setSuccessor(0, NormalEntry);
}
Fixup.OptimisticBranchBlock = NormalExit;
@@ -1135,7 +1107,7 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
// Store the index at the start.
llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);
+ createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI, *this);
// Adjust BI to point to the first cleanup block.
{
@@ -1269,9 +1241,9 @@ static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
// If we're in a conditional block, ignore the dominating IP and
// use the outermost conditional branch.
if (CGF.isInConditionalBranch()) {
- CGF.setBeforeOutermostConditional(value, var);
+ CGF.setBeforeOutermostConditional(value, var, CGF);
} else {
- createStoreInstBefore(value, var, dominatingIP);
+ createStoreInstBefore(value, var, dominatingIP, CGF);
}
}
@@ -1321,7 +1293,7 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
Scope.setActive(false);
}
-Address CodeGenFunction::getNormalCleanupDestSlot() {
+RawAddress CodeGenFunction::getNormalCleanupDestSlot() {
if (!NormalCleanupDest.isValid())
NormalCleanupDest =
CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
index 7a7344c07160..03e4a29d7b3d 100644
--- a/clang/lib/CodeGen/CGCleanup.h
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -333,7 +333,7 @@ public:
Address getActiveFlag() const {
return ActiveFlag;
}
- void setActiveFlag(Address Var) {
+ void setActiveFlag(RawAddress Var) {
assert(Var.getAlignment().isOne());
ActiveFlag = Var;
}
diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp
index b7142ec08af9..93ca711f716f 100644
--- a/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/clang/lib/CodeGen/CGCoroutine.cpp
@@ -867,8 +867,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
EmitStmt(S.getPromiseDeclStmt());
Address PromiseAddr = GetAddrOfLocalVar(S.getPromiseDecl());
- auto *PromiseAddrVoidPtr =
- new llvm::BitCastInst(PromiseAddr.getPointer(), VoidPtrTy, "", CoroId);
+ auto *PromiseAddrVoidPtr = new llvm::BitCastInst(
+ PromiseAddr.emitRawPointer(*this), VoidPtrTy, "", CoroId);
// Update CoroId to refer to the promise. We could not do it earlier because
// promise local variable was not emitted yet.
CoroId->setArgOperand(1, PromiseAddrVoidPtr);
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 07ecaa81c47d..691fde8b0d8b 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -1440,8 +1440,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_Swift:
return llvm::dwarf::DW_CC_LLVM_Swift;
case CC_SwiftAsync:
- // [FIXME: swiftasynccc] Update to SwiftAsync once LLVM support lands.
- return llvm::dwarf::DW_CC_LLVM_Swift;
+ return llvm::dwarf::DW_CC_LLVM_SwiftTail;
case CC_PreserveMost:
return llvm::dwarf::DW_CC_LLVM_PreserveMost;
case CC_PreserveAll:
@@ -1452,6 +1451,8 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_M68kRTD;
case CC_PreserveNone:
return llvm::dwarf::DW_CC_LLVM_PreserveNone;
+ case CC_RISCVVectorCall:
+ return llvm::dwarf::DW_CC_LLVM_RISCVVectorCall;
}
return 0;
}
@@ -3239,7 +3240,7 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
// };
int64_t Count = -1; // Count == -1 is an unbounded array.
if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty))
- Count = CAT->getSize().getZExtValue();
+ Count = CAT->getZExtSize();
else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
if (Expr *Size = VAT->getSizeExpr()) {
Expr::EvalResult Result;
@@ -3463,6 +3464,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::BTFTagAttributed:
T = cast<BTFTagAttributedType>(T)->getWrappedType();
break;
+ case Type::CountAttributed:
+ T = cast<CountAttributedType>(T)->desugar();
+ break;
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index dc42faf8dbb9..267f2e40a7bb 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -1242,27 +1242,38 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
return;
}
- // If the initializer is small, use a handful of stores.
+ // If the initializer is small or trivialAutoVarInit is set, use a handful of
+ // stores.
+ bool IsTrivialAutoVarInitPattern =
+ CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
+ LangOptions::TrivialAutoVarInitKind::Pattern;
if (shouldSplitConstantStore(CGM, ConstantSize)) {
if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
- const llvm::StructLayout *Layout =
- CGM.getDataLayout().getStructLayout(STy);
- for (unsigned i = 0; i != constant->getNumOperands(); i++) {
- CharUnits CurOff = CharUnits::fromQuantity(Layout->getElementOffset(i));
- Address EltPtr = Builder.CreateConstInBoundsByteGEP(
- Loc.withElementType(CGM.Int8Ty), CurOff);
- emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
- constant->getAggregateElement(i), IsAutoInit);
+ if (STy == Loc.getElementType() ||
+ (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
+ const llvm::StructLayout *Layout =
+ CGM.getDataLayout().getStructLayout(STy);
+ for (unsigned i = 0; i != constant->getNumOperands(); i++) {
+ CharUnits CurOff =
+ CharUnits::fromQuantity(Layout->getElementOffset(i));
+ Address EltPtr = Builder.CreateConstInBoundsByteGEP(
+ Loc.withElementType(CGM.Int8Ty), CurOff);
+ emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
+ constant->getAggregateElement(i), IsAutoInit);
+ }
+ return;
}
- return;
} else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
- for (unsigned i = 0; i != ATy->getNumElements(); i++) {
- Address EltPtr = Builder.CreateConstGEP(
- Loc.withElementType(ATy->getElementType()), i);
- emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
- constant->getAggregateElement(i), IsAutoInit);
+ if (ATy == Loc.getElementType() ||
+ (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
+ for (unsigned i = 0; i != ATy->getNumElements(); i++) {
+ Address EltPtr = Builder.CreateConstGEP(
+ Loc.withElementType(ATy->getElementType()), i);
+ emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
+ constant->getAggregateElement(i), IsAutoInit);
+ }
+ return;
}
- return;
}
}
@@ -1450,7 +1461,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
Address address = Address::invalid();
- Address AllocaAddr = Address::invalid();
+ RawAddress AllocaAddr = RawAddress::invalid();
Address OpenMPLocalAddr = Address::invalid();
if (CGM.getLangOpts().OpenMPIRBuilder)
OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
@@ -1513,7 +1524,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
address = ReturnValue;
- AllocaAddr = ReturnValue;
+ AllocaAddr =
+ RawAddress(ReturnValue.emitRawPointer(*this),
+ ReturnValue.getElementType(), ReturnValue.getAlignment());
+ ;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
const auto *RD = RecordTy->getDecl();
@@ -1524,7 +1538,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
llvm::Value *Zero = Builder.getFalse();
- Address NRVOFlag =
+ RawAddress NRVOFlag =
CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
@@ -1667,7 +1681,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
}
if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
- EmitVarAnnotations(&D, address.getPointer());
+ EmitVarAnnotations(&D, address.emitRawPointer(*this));
// Make sure we call @llvm.lifetime.end.
if (emission.useLifetimeMarkers())
@@ -1840,12 +1854,13 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
llvm::Value *BaseSizeInChars =
llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
Address Begin = Loc.withElementType(Int8Ty);
- llvm::Value *End = Builder.CreateInBoundsGEP(
- Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
+ llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
+ Begin.emitRawPointer(*this),
+ SizeVal, "vla.end");
llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
EmitBlock(LoopBB);
llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
- Cur->addIncoming(Begin.getPointer(), OriginBB);
+ Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
auto *I =
Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
@@ -2272,7 +2287,7 @@ void CodeGenFunction::emitDestroy(Address addr, QualType type,
checkZeroLength = false;
}
- llvm::Value *begin = addr.getPointer();
+ llvm::Value *begin = addr.emitRawPointer(*this);
llvm::Value *end =
Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
emitArrayDestroy(begin, end, type, elementAlign, destroyer,
@@ -2532,7 +2547,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
}
Address DeclPtr = Address::invalid();
- Address AllocaPtr = Address::invalid();
+ RawAddress AllocaPtr = Address::invalid();
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
bool UseIndirectDebugAddress = false;
@@ -2544,8 +2559,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Indirect argument is in alloca address space, which may be different
// from the default address space.
auto AllocaAS = CGM.getASTAllocaAddressSpace();
- auto *V = DeclPtr.getPointer();
- AllocaPtr = DeclPtr;
+ auto *V = DeclPtr.emitRawPointer(*this);
+ AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());
// For truly ABI indirect arguments -- those that are not `byval` -- store
// the address of the argument on the stack to preserve debug information.
@@ -2684,7 +2699,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
}
if (D.hasAttr<AnnotateAttr>())
- EmitVarAnnotations(&D, DeclPtr.getPointer());
+ EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));
// We can only check return value nullability if all arguments to the
// function satisfy their nullability preconditions. This makes it necessary
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index 5a9d06da12de..34f289334a7d 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -397,7 +397,7 @@ namespace {
void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
+ pushFullExprCleanup<FreeException>(EHCleanup, addr.emitRawPointer(*this));
EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
@@ -416,8 +416,8 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
/*IsInit*/ true);
// Deactivate the cleanup block.
- DeactivateCleanupBlock(cleanup,
- cast<llvm::Instruction>(typedAddr.getPointer()));
+ DeactivateCleanupBlock(
+ cleanup, cast<llvm::Instruction>(typedAddr.emitRawPointer(*this)));
}
Address CodeGenFunction::getExceptionSlot() {
@@ -1834,7 +1834,8 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
llvm::Value *ParentFP) {
llvm::CallInst *RecoverCall = nullptr;
CGBuilderTy Builder(*this, AllocaInsertPt);
- if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar.getPointer())) {
+ if (auto *ParentAlloca =
+ dyn_cast_or_null<llvm::AllocaInst>(ParentVar.getBasePointer())) {
// Mark the variable escaped if nobody else referenced it and compute the
// localescape index.
auto InsertPair = ParentCGF.EscapedLocals.insert(
@@ -1851,8 +1852,8 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
// If the parent didn't have an alloca, we're doing some nested outlining.
// Just clone the existing localrecover call, but tweak the FP argument to
// use our FP value. All other arguments are constants.
- auto *ParentRecover =
- cast<llvm::IntrinsicInst>(ParentVar.getPointer()->stripPointerCasts());
+ auto *ParentRecover = cast<llvm::IntrinsicInst>(
+ ParentVar.emitRawPointer(*this)->stripPointerCasts());
assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
"expected alloca or localrecover in parent LocalDeclMap");
RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
@@ -1925,7 +1926,8 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
if (isa<ImplicitParamDecl>(D) &&
D->getType() == getContext().VoidPtrTy) {
assert(D->getName().starts_with("frame_pointer"));
- FramePtrAddrAlloca = cast<llvm::AllocaInst>(I.second.getPointer());
+ FramePtrAddrAlloca =
+ cast<llvm::AllocaInst>(I.second.getBasePointer());
break;
}
}
@@ -1986,7 +1988,8 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
LValue ThisFieldLValue =
EmitLValueForLambdaField(LambdaThisCaptureField);
if (!LambdaThisCaptureField->getType()->isPointerType()) {
- CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
+ CXXThisValue =
+ ThisFieldLValue.getAddress(*this).emitRawPointer(*this);
} else {
CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
.getScalarVal();
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 85f5d739cef4..36872c0fedb7 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -65,21 +65,21 @@ static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block.
-Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
- CharUnits Align,
- const Twine &Name,
- llvm::Value *ArraySize) {
+RawAddress
+CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name,
+ llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getAsAlign());
- return Address(Alloca, Ty, Align, KnownNonNull);
+ return RawAddress(Alloca, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
-Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
- const Twine &Name,
- llvm::Value *ArraySize,
- Address *AllocaAddr) {
+RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
+ const Twine &Name,
+ llvm::Value *ArraySize,
+ RawAddress *AllocaAddr) {
auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
if (AllocaAddr)
*AllocaAddr = Alloca;
@@ -101,7 +101,7 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
- return Address(V, Ty, Align, KnownNonNull);
+ return RawAddress(V, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -120,28 +120,29 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
-Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
- const Twine &Name) {
+RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
CharUnits Align =
CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
return CreateTempAlloca(Ty, Align, Name);
}
-Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
+RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
CharUnits Align = getContext().getTypeAlignInChars(Ty);
return CreateTempAlloca(ConvertType(Ty), Align, Name);
}
-Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
- Address *Alloca) {
+RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
+ RawAddress *Alloca) {
// FIXME: Should we prefer the preferred type alignment here?
return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}
-Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
- const Twine &Name, Address *Alloca) {
- Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
- /*ArraySize=*/nullptr, Alloca);
+RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
+ const Twine &Name,
+ RawAddress *Alloca) {
+ RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
+ /*ArraySize=*/nullptr, Alloca);
if (Ty->isConstantMatrixType()) {
auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
@@ -154,13 +155,14 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
return Result;
}
-Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
- const Twine &Name) {
+RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
+ CharUnits Align,
+ const Twine &Name) {
return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}
-Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
- const Twine &Name) {
+RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
+ const Twine &Name) {
return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
Name);
}
@@ -359,7 +361,7 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
} else {
CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
- CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
+ CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
}
CGF.CGM.getCXXABI().registerGlobalDtor(
CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
@@ -384,10 +386,10 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
}
}
-static Address createReferenceTemporary(CodeGenFunction &CGF,
- const MaterializeTemporaryExpr *M,
- const Expr *Inner,
- Address *Alloca = nullptr) {
+static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
+ const MaterializeTemporaryExpr *M,
+ const Expr *Inner,
+ RawAddress *Alloca = nullptr) {
auto &TCG = CGF.getTargetHooks();
switch (M->getStorageDuration()) {
case SD_FullExpression:
@@ -416,7 +418,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
GV->getValueType()->getPointerTo(
CGF.getContext().getTargetAddressSpace(LangAS::Default)));
// FIXME: Should we put the new global into a COMDAT?
- return Address(C, GV->getValueType(), alignment);
+ return RawAddress(C, GV->getValueType(), alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
@@ -448,7 +450,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
auto ownership = M->getType().getObjCLifetime();
if (ownership != Qualifiers::OCL_None &&
ownership != Qualifiers::OCL_ExplicitNone) {
- Address Object = createReferenceTemporary(*this, M, E);
+ RawAddress Object = createReferenceTemporary(*this, M, E);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
llvm::Type *Ty = ConvertTypeForMem(E->getType());
Object = Object.withElementType(Ty);
@@ -502,8 +504,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
}
// Create and initialize the reference temporary.
- Address Alloca = Address::invalid();
- Address Object = createReferenceTemporary(*this, M, E, &Alloca);
+ RawAddress Alloca = Address::invalid();
+ RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
@@ -1111,12 +1113,12 @@ llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
LValue LV = EmitMemberExpr(ME);
Address Addr = LV.getAddress(*this);
- Res = Addr.getPointer();
+ Res = Addr.emitRawPointer(*this);
} else if (StructBase->getType()->isPointerType()) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
- Res = Addr.getPointer();
+ Res = Addr.emitRawPointer(*this);
} else {
return nullptr;
}
@@ -1282,8 +1284,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
- Addr = Address(Addr.getPointer(), Addr.getElementType(), Align,
- IsKnownNonNull);
+ Addr.setAlignment(Align);
}
}
@@ -1300,8 +1301,8 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
CGF.ConvertTypeForMem(E->getType()->getPointeeType());
Addr = Addr.withElementType(ElemTy);
if (CE->getCastKind() == CK_AddressSpaceConversion)
- Addr = CGF.Builder.CreateAddrSpaceCast(Addr,
- CGF.ConvertType(E->getType()));
+ Addr = CGF.Builder.CreateAddrSpaceCast(
+ Addr, CGF.ConvertType(E->getType()), ElemTy);
return Addr;
}
break;
@@ -1364,10 +1365,9 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
- CharUnits Align =
- CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
- llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType());
- return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull);
+ return CGF.makeNaturalAddressForPointer(
+ CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
+ /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}
/// EmitPointerWithAlignment - Given an expression of pointer type, try to
@@ -1468,8 +1468,7 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
SkippedChecks.set(SanitizerKind::Null, true);
}
- EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
- LV.getAlignment(), SkippedChecks);
+ EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
}
return LV;
}
@@ -1581,11 +1580,11 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
Address Addr = LV.getAddress(*this);
- llvm::Value *V = Addr.getPointer();
+ llvm::Value *V = Addr.getBasePointer();
Scope.ForceCleanup({&V});
- return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()),
- LV.getType(), getContext(), LV.getBaseInfo(),
- LV.getTBAAInfo());
+ Addr.replaceBasePointer(V);
+ return LValue::MakeAddr(Addr, LV.getType(), getContext(),
+ LV.getBaseInfo(), LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
@@ -1929,7 +1928,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isNontemporal) {
- if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
if (GV->isThreadLocal())
Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
NotKnownNonNull);
@@ -2039,8 +2038,9 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to a array (the memory type of MatrixType).
-static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
- bool IsVector = true) {
+static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
+ CodeGenFunction &CGF,
+ bool IsVector = true) {
auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
if (ArrayTy && IsVector) {
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
@@ -2077,7 +2077,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isInit, bool isNontemporal) {
- if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
if (GV->isThreadLocal())
Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
NotKnownNonNull);
@@ -2432,14 +2432,12 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
llvm::Type *ResultType = IntPtrTy;
Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
- llvm::Value *RHS = dst.getPointer();
+ llvm::Value *RHS = dst.emitRawPointer(*this);
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- llvm::Value *LHS =
- Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
- "sub.ptr.lhs.cast");
+ llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
+ ResultType, "sub.ptr.lhs.cast");
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
- CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
- BytesBetween);
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
} else if (Dst.isGlobalObjCRef()) {
CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
Dst.isThreadLocalRef());
@@ -2770,12 +2768,9 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
llvm::LoadInst *Load =
Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
-
- QualType PointeeType = RefLVal.getType()->getPointeeType();
- CharUnits Align = CGM.getNaturalTypeAlignment(
- PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
- /* forPointeeType= */ true);
- return Address(Load, ConvertTypeForMem(PointeeType), Align);
+ return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
+ CharUnits(), /*ForPointeeType=*/true,
+ PointeeBaseInfo, PointeeTBAAInfo);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
@@ -2792,10 +2787,9 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
- return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
- CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
- TBAAInfo,
- /*forPointeeType=*/true));
+ return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
+ CharUnits(), /*ForPointeeType=*/true,
+ BaseInfo, TBAAInfo);
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
@@ -2991,7 +2985,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
/* BaseInfo= */ nullptr,
/* TBAAInfo= */ nullptr,
/* forPointeeType= */ true);
- Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
+ Addr = makeNaturalAddressForPointer(Val, T, Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -3023,11 +3017,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
Address LValueAddress = CapLVal.getAddress(*this);
- CapLVal = MakeAddrLValue(
- Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
- getContext().getDeclAlign(VD)),
- CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
- CapLVal.getTBAAInfo());
+ CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
+ LValueAddress.getElementType(),
+ getContext().getDeclAlign(VD)),
+ CapLVal.getType(),
+ LValueBaseInfo(AlignmentSource::Decl),
+ CapLVal.getTBAAInfo());
// Mark lvalue as nontemporal if the variable is marked as nontemporal
// in simd context.
if (getLangOpts().OpenMP &&
@@ -3083,7 +3078,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Handle threadlocal function locals.
if (VD->getTLSKind() != VarDecl::TLS_None)
addr = addr.withPointer(
- Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull);
+ Builder.CreateThreadLocalAddress(addr.getBasePointer()),
+ NotKnownNonNull);
// Check for OpenMP threadprivate variables.
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
@@ -3351,7 +3347,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Pointers are passed directly, everything else is passed by address.
if (!V->getType()->isPointerTy()) {
- Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
+ RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
Builder.CreateStore(V, Ptr);
V = Ptr.getPointer();
}
@@ -3663,12 +3659,29 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
llvm::Module *M = &CGM.getModule();
- auto &Ctx = M->getContext();
+ ASTContext &C = getContext();
+ QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
+
+ FunctionArgList FnArgs;
+ ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
+ ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
+ ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
+ ImplicitParamKind::Other);
+ FnArgs.push_back(&ArgCallsiteTypeId);
+ FnArgs.push_back(&ArgAddr);
+ FnArgs.push_back(&ArgCFICheckFailData);
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
+
llvm::Function *F = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
+ llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
+ CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
F->setAlignment(llvm::Align(4096));
CGM.setDSOLocal(F);
+
+ llvm::LLVMContext &Ctx = M->getContext();
llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
// CrossDSOCFI pass is not executed if there is no executable code.
SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
@@ -3907,6 +3920,21 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
}
}
+static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
+ ArrayRef<llvm::Value *> indices,
+ llvm::Type *elementType, bool inbounds,
+ bool signedIndices, SourceLocation loc,
+ CharUnits align,
+ const llvm::Twine &name = "arrayidx") {
+ if (inbounds) {
+ return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
+ CodeGenFunction::NotSubtraction, loc,
+ align, name);
+ } else {
+ return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
+ }
+}
+
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
llvm::Value *idx,
CharUnits eltSize) {
@@ -3954,7 +3982,7 @@ static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
llvm::Function *Fn =
CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
- llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()});
+ llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
return Address(Call, Addr.getElementType(), Addr.getAlignment());
}
@@ -4017,7 +4045,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
// We can use that to compute the best alignment of the element.
CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
CharUnits eltAlign =
- getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+ getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
if (hasBPFPreserveStaticOffset(Base))
addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
@@ -4026,19 +4054,19 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
if (!LastIndex ||
(!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
- eltPtr = emitArraySubscriptGEP(
- CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
- signedIndices, loc, name);
+ addr = emitArraySubscriptGEP(CGF, addr, indices,
+ CGF.ConvertTypeForMem(eltType), inbounds,
+ signedIndices, loc, eltAlign, name);
+ return addr;
} else {
// Remember the original array subscript for bpf target
unsigned idx = LastIndex->getZExtValue();
llvm::DIType *DbgInfo = nullptr;
if (arrayType)
DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
- eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
- addr.getPointer(),
- indices.size() - 1,
- idx, DbgInfo);
+ eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
+ addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
+ idx, DbgInfo);
}
return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
@@ -4207,8 +4235,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx,
- false, SignedIndices, E->getExprLoc());
+ emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
+ ScaledIdx, false, SignedIndices, E->getExprLoc());
Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
@@ -4254,7 +4282,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
llvm::Type *CountTy = ConvertType(CountFD->getType());
llvm::Value *Res = Builder.CreateInBoundsGEP(
- Int8Ty, Addr.getPointer(),
+ Int8Ty, Addr.emitRawPointer(*this),
Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
".counted_by.load");
@@ -4500,9 +4528,9 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
BaseInfo = ArrayLV.getBaseInfo();
TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
} else {
- Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
- TBAAInfo, BaseTy, ResultExprTy,
- IsLowerBound);
+ Address Base =
+ emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
+ ResultExprTy, IsLowerBound);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
!getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
@@ -4589,7 +4617,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
SkippedChecks.set(SanitizerKind::Alignment, true);
if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
SkippedChecks.set(SanitizerKind::Null, true);
- EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
/*Alignment=*/CharUnits::Zero(), SkippedChecks);
BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
} else
@@ -4638,8 +4666,8 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
AlignmentSource::Decl);
else
- LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(),
- D->getType().getNonReferenceType());
+ LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
+ D->getType().getNonReferenceType());
} else {
QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
@@ -4829,7 +4857,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// information provided by invariant.group. This is because accessing
// fields may leak the real address of dynamic object, which could result
// in miscompilation when leaked pointer would be compared.
- auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
+ auto *stripped =
+ Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
addr = Address(stripped, addr.getElementType(), addr.getAlignment());
}
}
@@ -4848,10 +4877,11 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// Remember the original union field index
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
rec->getLocation());
- addr = Address(
- Builder.CreatePreserveUnionAccessIndex(
- addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
- addr.getElementType(), addr.getAlignment());
+ addr =
+ Address(Builder.CreatePreserveUnionAccessIndex(
+ addr.emitRawPointer(*this),
+ getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
+ addr.getElementType(), addr.getAlignment());
}
if (FieldType->isReferenceType())
@@ -5088,11 +5118,9 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue(
if (Info.LHS && Info.RHS) {
Address lhsAddr = Info.LHS->getAddress(*this);
Address rhsAddr = Info.RHS->getAddress(*this);
- llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
- phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
- phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
- Address result(phi, lhsAddr.getElementType(),
- std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
+ Address result = mergeAddressesInConditionalExpr(
+ lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
+ Builder.GetInsertBlock(), expr->getType());
AlignmentSource alignSource =
std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
Info.RHS->getBaseInfo().getAlignmentSource());
@@ -5179,7 +5207,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
Address V = LV.getAddress(*this);
const auto *DCE = cast<CXXDynamicCastExpr>(E);
- return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
case CK_ConstructorConversion:
@@ -5244,8 +5272,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (sanitizePerformTypeCheck())
- EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
- Derived.getPointer(), E->getType());
+ EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
+ E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
EmitVTablePtrCheckForCast(E->getType(), Derived,
@@ -5601,7 +5629,7 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
- return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+ return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 5190b22bcc16..143855aa84ca 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -294,10 +294,10 @@ void AggExprEmitter::withReturnValueSlot(
// Otherwise, EmitCall will emit its own, notice that it's "unused", and end
// its lifetime before we have the chance to emit a proper destructor call.
bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
- (RequiresDestruction && !Dest.getAddress().isValid());
+ (RequiresDestruction && Dest.isIgnored());
Address RetAddr = Address::invalid();
- Address RetAllocaAddr = Address::invalid();
+ RawAddress RetAllocaAddr = RawAddress::invalid();
EHScopeStack::stable_iterator LifetimeEndBlock;
llvm::Value *LifetimeSizePtr = nullptr;
@@ -329,7 +329,8 @@ void AggExprEmitter::withReturnValueSlot(
if (!UseTemp)
return;
- assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
+ assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
+ Src.getAggregatePointer(E->getType(), CGF));
EmitFinalDestCopy(E->getType(), Src);
if (!RequiresDestruction && LifetimeStartInst) {
@@ -448,7 +449,8 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxStart[] = { Zero, Zero };
llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
- ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
+ ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxStart,
+ "arraystart");
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
@@ -465,7 +467,8 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
// End pointer.
llvm::Value *IdxEnd[] = { Zero, Size };
llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
- ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
+ ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
+ "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
} else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
// Length.
@@ -516,9 +519,9 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
- llvm::Value *begin = Builder.CreateInBoundsGEP(
- DestPtr.getElementType(), DestPtr.getPointer(), indices,
- "arrayinit.begin");
+ llvm::Value *begin = Builder.CreateInBoundsGEP(DestPtr.getElementType(),
+ DestPtr.emitRawPointer(CGF),
+ indices, "arrayinit.begin");
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
@@ -1059,7 +1062,7 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
if (RV.isScalar())
return {RV.getScalarVal(), nullptr};
if (RV.isAggregate())
- return {RV.getAggregatePointer(), nullptr};
+ return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
assert(RV.isComplex());
return RV.getComplexVal();
};
@@ -1818,7 +1821,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// else, clean it up for -O0 builds and general tidiness.
if (!pushedCleanup && LV.isSimple())
if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
+ dyn_cast<llvm::GetElementPtrInst>(LV.emitRawPointer(CGF)))
if (GEP->use_empty())
GEP->eraseFromParent();
}
@@ -1849,9 +1852,9 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// destPtr is an array*. Construct an elementType* by drilling down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = {zero, zero};
- llvm::Value *begin = Builder.CreateInBoundsGEP(
- destPtr.getElementType(), destPtr.getPointer(), indices,
- "arrayinit.begin");
+ llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
+ destPtr.emitRawPointer(CGF),
+ indices, "arrayinit.begin");
// Prepare to special-case multidimensional array initialization: we avoid
// emitting multiple destructor loops in that case.
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 2adbef6d5512..a4fb673284ce 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -280,7 +280,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
- This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
+ This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
+ BaseInfo, TBAAInfo);
} else {
This = EmitLValue(Base);
}
@@ -353,10 +354,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
SkippedChecks.set(SanitizerKind::Null, true);
}
- EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
- This.getPointer(*this),
- C.getRecordType(CalleeDecl->getParent()),
- /*Alignment=*/CharUnits::Zero(), SkippedChecks);
+
+ if (sanitizePerformTypeCheck())
+ EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
+ This.emitRawPointer(*this),
+ C.getRecordType(CalleeDecl->getParent()),
+ /*Alignment=*/CharUnits::Zero(), SkippedChecks);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -455,7 +458,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
else
This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);
- EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
QualType(MPT->getClass(), 0));
// Get the member function pointer.
@@ -1073,8 +1076,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Move past these elements.
InitListElements =
cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
- ->getSize()
- .getZExtValue();
+ ->getZExtSize();
CurPtr = Builder.CreateConstInBoundsGEP(
CurPtr, InitListElements, "string.init.end");
@@ -1110,9 +1112,10 @@ void CodeGenFunction::EmitNewArrayInitializer(
// alloca.
EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
"array.init.end");
- CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
- pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
- ElementType, ElementAlign,
+ CleanupDominator =
+ Builder.CreateStore(BeginPtr.emitRawPointer(*this), EndOfInit);
+ pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
+ EndOfInit, ElementType, ElementAlign,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
}
@@ -1124,16 +1127,17 @@ void CodeGenFunction::EmitNewArrayInitializer(
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
if (EndOfInit.isValid()) {
- Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+ Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
}
// FIXME: If the last initializer is an incomplete initializer list for
// an array, and we have an array filler, we can fold together the two
// initialization loops.
StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
AggValueSlot::DoesNotOverlap);
- CurPtr = Address(Builder.CreateInBoundsGEP(
- CurPtr.getElementType(), CurPtr.getPointer(),
- Builder.getSize(1), "array.exp.next"),
+ CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
+ CurPtr.emitRawPointer(*this),
+ Builder.getSize(1),
+ "array.exp.next"),
CurPtr.getElementType(),
StartAlign.alignmentAtOffset((++i) * ElementSize));
}
@@ -1187,7 +1191,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
// FIXME: Share this cleanup with the constructor call emission rather than
// having it create a cleanup of its own.
if (EndOfInit.isValid())
- Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+ Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
// Emit a constructor call loop to initialize the remaining elements.
if (InitListElements)
@@ -1250,15 +1254,15 @@ void CodeGenFunction::EmitNewArrayInitializer(
llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
// Find the end of the array, hoisted out of the loop.
- llvm::Value *EndPtr =
- Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(),
- NumElements, "array.end");
+ llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
+ BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
+ "array.end");
// If the number of elements isn't constant, we have to now check if there is
// anything left to initialize.
if (!ConstNum) {
- llvm::Value *IsEmpty =
- Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
+ llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this),
+ EndPtr, "array.isempty");
Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
}
@@ -1268,19 +1272,20 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
- CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
+ CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB);
CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);
// Store the new Cleanup position for irregular Cleanups.
if (EndOfInit.isValid())
- Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
+ Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
// Enter a partial-destruction Cleanup if necessary.
if (!CleanupDominator && needsEHCleanup(DtorKind)) {
- pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
- ElementType, ElementAlign,
- getDestroyer(DtorKind));
+ llvm::Value *BeginPtrRaw = BeginPtr.emitRawPointer(*this);
+ llvm::Value *CurPtrRaw = CurPtr.emitRawPointer(*this);
+ pushRegularPartialArrayCleanup(BeginPtrRaw, CurPtrRaw, ElementType,
+ ElementAlign, getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
CleanupDominator = Builder.CreateUnreachable();
}
@@ -1296,9 +1301,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Advance to the next element by adjusting the pointer type as necessary.
- llvm::Value *NextPtr =
- Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
- "array.next");
+ llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
+ ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next");
// Check whether we've gotten to the end of the array and, if so,
// exit the loop.
@@ -1524,14 +1528,9 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
- DirectCleanup *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- NewPtr.getPointer(),
- AllocSize,
- E->passAlignment(),
- AllocAlign);
+ DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
+ EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
+ NewPtr.emitRawPointer(CGF), AllocSize, E->passAlignment(), AllocAlign);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
auto &Arg = NewArgs[I + NumNonPlacementArgs];
Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
@@ -1542,7 +1541,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
// Otherwise, we need to save all this stuff.
DominatingValue<RValue>::saved_type SavedNewPtr =
- DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
@@ -1591,8 +1590,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
isa<StringLiteral>(IgnoreParen) || isa<ObjCEncodeExpr>(IgnoreParen)) {
minElements =
cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
- ->getSize()
- .getZExtValue();
+ ->getZExtSize();
} else if (ILE || CPLIE) {
minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
}
@@ -1620,14 +1618,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
- allocation = allocation.withAlignment(allocAlign);
+ allocation.setAlignment(allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
- allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
+ allocatorArgs.add(RValue::get(allocation, *this), arg->getType());
}
} else {
@@ -1715,8 +1713,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
contBB = createBasicBlock("new.cont");
- llvm::Value *isNull =
- Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
+ llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
Builder.CreateCondBr(isNull, contBB, notNullBB);
EmitBlock(notNullBB);
}
@@ -1762,12 +1759,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
SkippedChecks.set(SanitizerKind::Null, nullCheck);
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
- result.getPointer(), allocType, result.getAlignment(),
- SkippedChecks, numElements);
+ result, allocType, result.getAlignment(), SkippedChecks,
+ numElements);
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
- llvm::Value *resultPtr = result.getPointer();
+ llvm::Value *resultPtr = result.emitRawPointer(*this);
if (E->isArray()) {
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
@@ -1911,7 +1908,8 @@ static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
Dtor);
else
- CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
+ CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.emitRawPointer(CGF),
+ ElementType);
}
/// Emit the code for deleting a single object.
@@ -1927,8 +1925,7 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// dynamic type, the static type shall be a base class of the dynamic type
// of the object to be deleted and the static type shall have a virtual
// destructor or the behavior is undefined.
- CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
- DE->getExprLoc(), Ptr.getPointer(),
+ CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr,
ElementType);
const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
@@ -1977,9 +1974,8 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// Make sure that we call delete even if the dtor throws.
// This doesn't have to a conditional cleanup because we're going
// to pop it off in a second.
- CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- Ptr.getPointer(),
- OperatorDelete, ElementType);
+ CGF.EHStack.pushCleanup<CallObjectDelete>(
+ NormalAndEHCleanup, Ptr.emitRawPointer(CGF), OperatorDelete, ElementType);
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
@@ -2066,7 +2062,7 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
CharUnits elementAlign =
deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
- llvm::Value *arrayBegin = deletedPtr.getPointer();
+ llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
@@ -2097,7 +2093,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
@@ -2132,10 +2128,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
GEP.push_back(Zero);
}
- Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
- Ptr.getPointer(), GEP, "del.first"),
- ConvertTypeForMem(DeleteTy), Ptr.getAlignment(),
- Ptr.isKnownNonNull());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
+ Ptr.getAlignment(), "del.first");
}
assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
@@ -2193,7 +2187,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
// destruction and the static type of the operand is neither the constructor
// or destructor’s class nor one of its bases, the behavior is undefined.
CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
- ThisPtr.getPointer(), SrcRecordTy);
+ ThisPtr, SrcRecordTy);
// C++ [expr.typeid]p2:
// If the glvalue expression is obtained by applying the unary * operator to
@@ -2209,7 +2203,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
CGF.createBasicBlock("typeid.bad_typeid");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
- llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
CGF.EmitBlock(BadTypeidBlock);
@@ -2295,8 +2289,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
// construction or destruction and the static type of the operand is not a
// pointer to or object of the constructor or destructor’s own class or one
// of its bases, the dynamic_cast results in undefined behavior.
- EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
- SrcRecordTy);
+ EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy);
if (DCE->isAlwaysNull()) {
if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
@@ -2331,7 +2324,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
CastNull = createBasicBlock("dynamic_cast.null");
CastNotNull = createBasicBlock("dynamic_cast.notnull");
- llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
+ llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index b873bc6737bb..c3774d0cb75e 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -289,7 +289,7 @@ public:
const BinOpInfo &Op);
QualType GetHigherPrecisionFPType(QualType ElementType) {
- const auto *CurrentBT = dyn_cast<BuiltinType>(ElementType);
+ const auto *CurrentBT = cast<BuiltinType>(ElementType);
switch (CurrentBT->getKind()) {
case BuiltinType::Kind::Float16:
return CGF.getContext().FloatTy;
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index 75286dceb13a..36d7493d9a6b 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -656,7 +656,7 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
}
unsigned NumElementsToUpdate =
- FillC ? CAT->getSize().getZExtValue() : Updater->getNumInits();
+ FillC ? CAT->getZExtSize() : Updater->getNumInits();
for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
Expr *Init = nullptr;
if (I < Updater->getNumInits())
@@ -800,8 +800,8 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
// Add a vtable pointer, if we need one and it hasn't already been added.
if (Layout.hasOwnVFPtr()) {
llvm::Constant *VTableAddressPoint =
- CGM.getCXXABI().getVTableAddressPointForConstExpr(
- BaseSubobject(CD, Offset), VTableClass);
+ CGM.getCXXABI().getVTableAddressPoint(BaseSubobject(CD, Offset),
+ VTableClass);
if (!AppendBytes(Offset, VTableAddressPoint))
return false;
}
@@ -1249,7 +1249,7 @@ public:
auto *CAT = CGM.getContext().getAsConstantArrayType(ILE->getType());
assert(CAT && "can't emit array init for non-constant-bound array");
unsigned NumInitElements = ILE->getNumInits();
- unsigned NumElements = CAT->getSize().getZExtValue();
+ unsigned NumElements = CAT->getZExtSize();
// Initialising an array requires us to automatically
// initialise any elements that have not been initialised explicitly
@@ -1374,7 +1374,7 @@ public:
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
- Str.resize(CAT->getSize().getZExtValue(), '\0');
+ Str.resize(CAT->getZExtSize(), '\0');
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
@@ -2382,7 +2382,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
llvm::Constant *Element =
ConstantEmitter::emitNullForMemory(*this, ElementTy);
- unsigned NumElements = CAT->getSize().getZExtValue();
+ unsigned NumElements = CAT->getZExtSize();
SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
return llvm::ConstantArray::get(ATy, Array);
}
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 8536570087ad..83247aa48f86 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2250,7 +2250,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// performed and the object is not of the derived type.
if (CGF.sanitizePerformTypeCheck())
CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
- Derived.getPointer(), DestTy->getPointeeType());
+ Derived, DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
@@ -2258,13 +2258,14 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CodeGenFunction::CFITCK_DerivedCast,
CE->getBeginLoc());
- return Derived.getPointer();
+ return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
// The EmitPointerWithAlignment path does this fine; just discard
// the alignment.
- return CGF.EmitPointerWithAlignment(CE).getPointer();
+ return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),
+ CE->getType()->getPointeeType());
}
case CK_Dynamic: {
@@ -2274,7 +2275,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
case CK_ArrayToPointerDecay:
- return CGF.EmitArrayToPointerDecay(E).getPointer();
+ return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),
+ CE->getType()->getPointeeType());
case CK_FunctionToPointerDecay:
return EmitLValue(E).getPointer(CGF);
@@ -5588,3 +5590,16 @@ CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
return GEPVal;
}
+
+Address CodeGenFunction::EmitCheckedInBoundsGEP(
+ Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
+ bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
+ const Twine &Name) {
+ if (!SanOpts.has(SanitizerKind::PointerOverflow))
+ return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);
+
+ return RawAddress(
+ EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
+ IdxList, SignedIndices, IsSubtraction, Loc, Name),
+ elementType, Align);
+}
diff --git a/clang/lib/CodeGen/CGLoopInfo.h b/clang/lib/CodeGen/CGLoopInfo.h
index a1c8c7e5307f..0fe33b289130 100644
--- a/clang/lib/CodeGen/CGLoopInfo.h
+++ b/clang/lib/CodeGen/CGLoopInfo.h
@@ -110,6 +110,10 @@ public:
/// been processed.
void finish();
+ /// Returns the first outer loop containing this loop if any, nullptr
+ /// otherwise.
+ const LoopInfo *getParent() const { return Parent; }
+
private:
/// Loop ID metadata.
llvm::TempMDTuple TempLoopID;
@@ -291,12 +295,13 @@ public:
/// Set no progress for the next loop pushed.
void setMustProgress(bool P) { StagedAttrs.MustProgress = P; }
-private:
/// Returns true if there is LoopInfo on the stack.
bool hasInfo() const { return !Active.empty(); }
/// Return the LoopInfo for the current loop. HasInfo should be called
/// first to ensure LoopInfo is present.
const LoopInfo &getInfo() const { return *Active.back(); }
+
+private:
/// The set of attributes that will be applied to the next pushed loop.
LoopAttributes StagedAttrs;
/// Stack of active loops.
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index 75c1d7fbea84..8fade0fac21e 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -366,7 +366,7 @@ template <class Derived> struct GenFuncBase {
llvm::Value *SizeInBytes =
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
llvm::Value *DstArrayEnd = CGF.Builder.CreateInBoundsGEP(
- CGF.Int8Ty, DstAddr.getPointer(), SizeInBytes);
+ CGF.Int8Ty, DstAddr.emitRawPointer(CGF), SizeInBytes);
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
// Create the header block and insert the phi instructions.
@@ -376,7 +376,7 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
PHIs[I] = CGF.Builder.CreatePHI(CGF.CGM.Int8PtrPtrTy, 2, "addr.cur");
- PHIs[I]->addIncoming(StartAddrs[I].getPointer(), PreheaderBB);
+ PHIs[I]->addIncoming(StartAddrs[I].emitRawPointer(CGF), PreheaderBB);
}
// Create the exit and loop body blocks.
@@ -410,7 +410,7 @@ template <class Derived> struct GenFuncBase {
// Instrs to update the destination and source addresses.
// Update phi instructions.
NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize);
- PHIs[I]->addIncoming(NewAddrs[I].getPointer(), LoopBB);
+ PHIs[I]->addIncoming(NewAddrs[I].emitRawPointer(CGF), LoopBB);
}
// Insert an unconditional branch to the header block.
@@ -488,7 +488,7 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
Alignments[I] = Addrs[I].getAlignment();
- Ptrs[I] = Addrs[I].getPointer();
+ Ptrs[I] = Addrs[I].emitRawPointer(CallerCGF);
}
if (llvm::Function *F =
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index f3a948cf13f9..c7f497a7c845 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -94,8 +94,8 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// and cast value to correct type
Address Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
- llvm::Value *BitCast =
- Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT));
+ llvm::Value *BitCast = Builder.CreateBitCast(
+ Temporary.emitRawPointer(*this), ConvertType(ArgQT));
Args.add(RValue::get(BitCast), ArgQT);
// Create char array to store type encoding
@@ -204,11 +204,11 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
const ParmVarDecl *argDecl = *PI++;
QualType ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Objects.getPointer()), ArgQT);
+ Args.add(RValue::get(Objects, *this), ArgQT);
if (DLE) {
argDecl = *PI++;
ArgQT = argDecl->getType().getUnqualifiedType();
- Args.add(RValue::get(Keys.getPointer()), ArgQT);
+ Args.add(RValue::get(Keys, *this), ArgQT);
}
argDecl = *PI;
ArgQT = argDecl->getType().getUnqualifiedType();
@@ -827,7 +827,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
- llvm::Value *dest = CGF.ReturnValue.getPointer();
+ llvm::Value *dest = CGF.ReturnValue.emitRawPointer(CGF);
args.add(RValue::get(dest), Context.VoidPtrTy);
args.add(RValue::get(src), Context.VoidPtrTy);
@@ -1147,8 +1147,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
callCStructCopyConstructor(Dst, Src);
} else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar,
- AtomicHelperFn);
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
+ ivar, AtomicHelperFn);
}
return;
}
@@ -1163,7 +1163,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.emitRawPointer(*this),
ivar, AtomicHelperFn);
}
return;
@@ -1287,7 +1287,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
- value = LV.getAddress(*this).getPointer();
+ value = LV.getAddress(*this).emitRawPointer(*this);
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
@@ -1821,16 +1821,14 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CallArgList Args;
// The first argument is a temporary of the enumeration-state type.
- Args.add(RValue::get(StatePtr.getPointer()),
- getContext().getPointerType(StateTy));
+ Args.add(RValue::get(StatePtr, *this), getContext().getPointerType(StateTy));
// The second argument is a temporary array with space for NumItems
// pointers. We'll actually be loading elements from the array
// pointer written into the control state; this buffer is so that
// collections that *aren't* backed by arrays can still queue up
// batches of elements.
- Args.add(RValue::get(ItemsPtr.getPointer()),
- getContext().getPointerType(ItemsTy));
+ Args.add(RValue::get(ItemsPtr, *this), getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
@@ -2198,7 +2196,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
if (!fn)
fn = getARCIntrinsic(IntID, CGF.CGM);
- return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
+ return CGF.EmitNounwindRuntimeCall(fn, addr.emitRawPointer(CGF));
}
/// Perform an operation having the following signature:
@@ -2216,9 +2214,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
llvm::Type *origType = value->getType();
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
- CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
- };
+ CGF.Builder.CreateBitCast(addr.emitRawPointer(CGF), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)};
llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
if (ignored) return nullptr;
@@ -2237,9 +2234,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
fn = getARCIntrinsic(IntID, CGF.CGM);
llvm::Value *args[] = {
- CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
- CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
- };
+ CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF), CGF.Int8PtrPtrTy),
+ CGF.Builder.CreateBitCast(src.emitRawPointer(CGF), CGF.Int8PtrPtrTy)};
CGF.EmitNounwindRuntimeCall(fn, args);
}
@@ -2490,9 +2486,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM);
llvm::Value *args[] = {
- Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
- Builder.CreateBitCast(value, Int8PtrTy)
- };
+ Builder.CreateBitCast(addr.emitRawPointer(*this), Int8PtrPtrTy),
+ Builder.CreateBitCast(value, Int8PtrTy)};
EmitNounwindRuntimeCall(fn, args);
if (ignored) return nullptr;
@@ -2643,7 +2638,7 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
if (!fn)
fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);
- EmitNounwindRuntimeCall(fn, addr.getPointer());
+ EmitNounwindRuntimeCall(fn, addr.emitRawPointer(*this));
}
/// void \@objc_moveWeak(i8** %dest, i8** %src)
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index a36b0cdddaf0..4e7f777ba1d9 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -706,7 +706,8 @@ protected:
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {
- EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd};
+ EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), PtrToObjCSuperTy),
+ cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -761,8 +762,8 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::FunctionCallee LookupFn = SlotLookupFn;
// Store the receiver on the stack so that we can reload it later
- Address ReceiverPtr =
- CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
+ RawAddress ReceiverPtr =
+ CGF.CreateTempAlloca(Receiver->getType(), CGF.getPointerAlign());
Builder.CreateStore(Receiver, ReceiverPtr);
llvm::Value *self;
@@ -778,9 +779,9 @@ class CGObjCGNUstep : public CGObjCGNU {
LookupFn2->addParamAttr(0, llvm::Attribute::NoCapture);
llvm::Value *args[] = {
- EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
- EnforceType(Builder, cmd, SelectorTy),
- EnforceType(Builder, self, IdTy) };
+ EnforceType(Builder, ReceiverPtr.getPointer(), PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy)};
llvm::CallBase *slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args);
slot->setOnlyReadsMemory();
slot->setMetadata(msgSendMDKind, node);
@@ -800,7 +801,7 @@ class CGObjCGNUstep : public CGObjCGNU {
llvm::Value *cmd,
MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {ObjCSuper.getPointer(), cmd};
+ llvm::Value *lookupArgs[] = {ObjCSuper.emitRawPointer(CGF), cmd};
llvm::CallInst *slot =
CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
@@ -1221,10 +1222,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Value *cmd, MessageSendInfo &MSI) override {
// Don't access the slot unless we're trying to cache the result.
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder,
- ObjCSuper.getPointer(),
- PtrToObjCSuperTy),
- cmd};
+ llvm::Value *lookupArgs[] = {
+ CGObjCGNU::EnforceType(Builder, ObjCSuper.emitRawPointer(CGF),
+ PtrToObjCSuperTy),
+ cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -2186,7 +2187,8 @@ protected:
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {
- EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd,
+ EnforceType(Builder, ObjCSuper.emitRawPointer(CGF), PtrToObjCSuperTy),
+ cmd,
};
if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
@@ -4201,15 +4203,15 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
- return B.CreateCall(WeakReadFn,
- EnforceType(B, AddrWeakObj.getPointer(), PtrToIdTy));
+ return B.CreateCall(
+ WeakReadFn, EnforceType(B, AddrWeakObj.emitRawPointer(CGF), PtrToIdTy));
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy);
B.CreateCall(WeakAssignFn, {src, dstVal});
}
@@ -4218,7 +4220,7 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
bool threadlocal) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy);
// FIXME. Add threadloca assign API
assert(!threadlocal && "EmitObjCGlobalAssign - Threal Local API NYI");
B.CreateCall(GlobalAssignFn, {src, dstVal});
@@ -4229,7 +4231,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), IdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), IdTy);
B.CreateCall(IvarAssignFn, {src, dstVal, ivarOffset});
}
@@ -4237,7 +4239,7 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.emitRawPointer(CGF), PtrToIdTy);
B.CreateCall(StrongCastAssignFn, {src, dstVal});
}
@@ -4246,8 +4248,8 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
Address SrcPtr,
llvm::Value *Size) {
CGBuilderTy &B = CGF.Builder;
- llvm::Value *DestPtrVal = EnforceType(B, DestPtr.getPointer(), PtrTy);
- llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.getPointer(), PtrTy);
+ llvm::Value *DestPtrVal = EnforceType(B, DestPtr.emitRawPointer(CGF), PtrTy);
+ llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.emitRawPointer(CGF), PtrTy);
B.CreateCall(MemMoveFn, {DestPtrVal, SrcPtrVal, Size});
}
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index e815e097e1fb..8a599c10e1ca 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1310,7 +1310,7 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
- Address EmitSelectorAddr(Selector Sel);
+ ConstantAddress EmitSelectorAddr(Selector Sel);
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
@@ -1538,7 +1538,7 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel);
- Address EmitSelectorAddr(Selector Sel);
+ ConstantAddress EmitSelectorAddr(Selector Sel);
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
@@ -2064,9 +2064,8 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- Address ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
- "objc_super");
+ RawAddress ObjCSuper = CGF.CreateTempAlloca(
+ ObjCTypes.SuperTy, CGF.getPointerAlign(), "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateStore(ReceiverAsObject,
@@ -2501,12 +2500,12 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
auto *CArray = cast<ConstantArrayType>(Array);
- uint64_t ElCount = CArray->getSize().getZExtValue();
+ uint64_t ElCount = CArray->getZExtSize();
assert(CArray && "only array with known element size is supported");
FQT = CArray->getElementType();
while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
auto *CArray = cast<ConstantArrayType>(Array);
- ElCount *= CArray->getSize().getZExtValue();
+ ElCount *= CArray->getZExtSize();
FQT = CArray->getElementType();
}
if (FQT->isRecordType() && ElCount) {
@@ -4259,7 +4258,7 @@ namespace {
CGF.EmitBlock(FinallyCallExit);
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(),
- ExceptionData.getPointer());
+ ExceptionData.emitRawPointer(CGF));
CGF.EmitBlock(FinallyNoCallExit);
@@ -4425,7 +4424,9 @@ void FragileHazards::emitHazardsInNewBlocks() {
}
static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, Address V) {
- if (V.isValid()) S.insert(V.getPointer());
+ if (V.isValid())
+ if (llvm::Value *Ptr = V.getBasePointer())
+ S.insert(Ptr);
}
void FragileHazards::collectLocals() {
@@ -4628,13 +4629,13 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// - Call objc_exception_try_enter to push ExceptionData on top of
// the EH stack.
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData.getPointer());
+ ExceptionData.emitRawPointer(CGF));
// - Call setjmp on the exception data buffer.
llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP(
- ObjCTypes.ExceptionDataTy, ExceptionData.getPointer(), GEPIndexes,
+ ObjCTypes.ExceptionDataTy, ExceptionData.emitRawPointer(CGF), GEPIndexes,
"setjmp_buffer");
llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(
ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
@@ -4673,9 +4674,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
} else {
// Retrieve the exception object. We may emit multiple blocks but
// nothing can cross this so the value is already in SSA form.
- llvm::CallInst *Caught =
- CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData.getPointer(), "caught");
+ llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF),
+ "caught");
// Push the exception to rethrow onto the EH value stack for the
// benefit of any @throws in the handlers.
@@ -4698,7 +4699,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Enter a new exception try block (in case a @catch block
// throws an exception).
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(),
- ExceptionData.getPointer());
+ ExceptionData.emitRawPointer(CGF));
llvm::CallInst *SetJmpResult =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(),
@@ -4829,9 +4830,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Extract the new exception and save it to the
// propagating-exception slot.
assert(PropagatingExnVar.isValid());
- llvm::CallInst *NewCaught =
- CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData.getPointer(), "caught");
+ llvm::CallInst *NewCaught = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF),
+ "caught");
CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
// Don't pop the catch handler; the throw already did.
@@ -4861,9 +4862,8 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Otherwise, just look in the buffer for the exception to throw.
} else {
- llvm::CallInst *Caught =
- CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData.getPointer());
+ llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall(
+ ObjCTypes.getExceptionExtractFn(), ExceptionData.emitRawPointer(CGF));
PropagatingExn = Caught;
}
@@ -4906,7 +4906,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
Address AddrWeakObj) {
llvm::Type* DestTy = AddrWeakObj.getElementType();
llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
- AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ AddrWeakObj.emitRawPointer(CGF), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
AddrWeakObjVal, "weakread");
@@ -4928,8 +4928,8 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = { src, dstVal };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
@@ -4950,8 +4950,8 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
@@ -4977,8 +4977,8 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -4997,8 +4997,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "strongassign");
@@ -5007,7 +5007,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
Address DestPtr, Address SrcPtr,
llvm::Value *size) {
- llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
+ llvm::Value *args[] = {DestPtr.emitRawPointer(CGF),
+ SrcPtr.emitRawPointer(CGF), size};
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -5243,7 +5244,7 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel) {
return CGF.Builder.CreateLoad(EmitSelectorAddr(Sel));
}
-Address CGObjCMac::EmitSelectorAddr(Selector Sel) {
+ConstantAddress CGObjCMac::EmitSelectorAddr(Selector Sel) {
CharUnits Align = CGM.getPointerAlign();
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
@@ -5254,7 +5255,7 @@ Address CGObjCMac::EmitSelectorAddr(Selector Sel) {
Entry->setExternallyInitialized(true);
}
- return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
+ return ConstantAddress(Entry, ObjCTypes.SelectorPtrTy, Align);
}
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
@@ -5326,7 +5327,7 @@ void IvarLayoutBuilder::visitField(const FieldDecl *field,
}
// Unlike incomplete arrays, constant arrays can be nested.
while (auto arrayType = CGM.getContext().getAsConstantArrayType(fieldType)) {
- numElts *= arrayType->getSize().getZExtValue();
+ numElts *= arrayType->getZExtSize();
fieldType = arrayType->getElementType();
}
@@ -7323,7 +7324,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
ObjCTypes.MessageRefTy, CGF.getPointerAlign());
// Update the message ref argument.
- args[1].setRValue(RValue::get(mref.getPointer()));
+ args[1].setRValue(RValue::get(mref, CGF));
// Load the function to call from the message ref table.
Address calleeAddr = CGF.Builder.CreateStructGEP(mref, 0);
@@ -7552,9 +7553,8 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// ...
// Create and init a super structure; this is a (receiver, class)
// pair we will pass to objc_msgSendSuper.
- Address ObjCSuper =
- CGF.CreateTempAlloca(ObjCTypes.SuperTy, CGF.getPointerAlign(),
- "objc_super");
+ RawAddress ObjCSuper = CGF.CreateTempAlloca(
+ ObjCTypes.SuperTy, CGF.getPointerAlign(), "objc_super");
llvm::Value *ReceiverAsObject =
CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
@@ -7594,7 +7594,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
return LI;
}
-Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
+ConstantAddress CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
CharUnits Align = CGM.getPointerAlign();
if (!Entry) {
@@ -7610,7 +7610,7 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
CGM.addCompilerUsedGlobal(Entry);
}
- return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
+ return ConstantAddress(Entry, ObjCTypes.SelectorPtrTy, Align);
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
@@ -7629,8 +7629,8 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -7650,8 +7650,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "weakassign");
@@ -7660,7 +7660,8 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr,
llvm::Value *Size) {
- llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
+ llvm::Value *args[] = {DestPtr.emitRawPointer(CGF),
+ SrcPtr.emitRawPointer(CGF), Size};
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -7672,7 +7673,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
Address AddrWeakObj) {
llvm::Type *DestTy = AddrWeakObj.getElementType();
llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
- AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ AddrWeakObj.emitRawPointer(CGF), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
AddrWeakObjVal, "weakread");
@@ -7694,8 +7695,8 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
@@ -7716,8 +7717,8 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- llvm::Value *dstVal =
- CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *dstVal = CGF.Builder.CreateBitCast(dst.emitRawPointer(CGF),
+ ObjCTypes.PtrObjectPtrTy);
llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 424564f97599..01d0f35da196 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -67,7 +67,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr");
if (!Ivar->isBitField()) {
- LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
+ LValue LV = CGF.MakeNaturalAlignRawAddrLValue(V, IvarTy);
return LV;
}
@@ -233,7 +233,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
llvm::Instruction *CPICandidate = Handler.Block->getFirstNonPHI();
if (auto *CPI = dyn_cast_or_null<llvm::CatchPadInst>(CPICandidate)) {
CGF.CurrentFuncletPad = CPI;
- CPI->setOperand(2, CGF.getExceptionSlot().getPointer());
+ CPI->setOperand(2, CGF.getExceptionSlot().emitRawPointer(CGF));
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
}
}
@@ -405,7 +405,7 @@ bool CGObjCRuntime::canMessageReceiverBeNull(CodeGenFunction &CGF,
auto self = curMethod->getSelfDecl();
if (self->getType().isConstQualified()) {
if (auto LI = dyn_cast<llvm::LoadInst>(receiver->stripPointerCasts())) {
- llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).getPointer();
+ llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).emitRawPointer(CGF);
if (selfAddr == LI->getPointerOperand()) {
return false;
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index e8a68dbcc687..bc363313dec6 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -622,7 +622,7 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, Init, Name);
- LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
+ LValue LV = CGF.MakeNaturalAlignRawAddrLValue(GV, Ty);
RValue InitRVal;
switch (CGF.getEvaluationKind(Ty)) {
case TEK_Scalar:
@@ -668,8 +668,8 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
llvm::Value *SrcBegin = nullptr;
if (DRD)
- SrcBegin = SrcAddr.getPointer();
- llvm::Value *DestBegin = DestAddr.getPointer();
+ SrcBegin = SrcAddr.emitRawPointer(CGF);
+ llvm::Value *DestBegin = DestAddr.emitRawPointer(CGF);
// Cast from pointer to array type to pointer to single element.
llvm::Value *DestEnd =
CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
@@ -912,7 +912,7 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
Address OriginalBaseAddress, llvm::Value *Addr) {
- Address Tmp = Address::invalid();
+ RawAddress Tmp = RawAddress::invalid();
Address TopTmp = Address::invalid();
Address MostTopTmp = Address::invalid();
BaseTy = BaseTy.getNonReferenceType();
@@ -971,10 +971,10 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
- SharedAddr.getPointer());
+ SharedAddr.emitRawPointer(CGF));
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivateAddr.getPointer(), SharedAddr.getType());
+ PrivateAddr.emitRawPointer(CGF), SharedAddr.getType());
llvm::Value *Ptr = CGF.Builder.CreateGEP(
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
@@ -1557,7 +1557,7 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(
return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName);
}
-Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
+ConstantAddress CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
auto LinkageForVariable = [&VD, this]() {
@@ -1579,8 +1579,8 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
LinkageForVariable);
if (!addr)
- return Address::invalid();
- return Address(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
+ return ConstantAddress::invalid();
+ return ConstantAddress(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
}
llvm::Constant *
@@ -1604,7 +1604,7 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
llvm::Type *VarTy = VDAddr.getElementType();
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy),
+ CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy),
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
return Address(
@@ -1627,7 +1627,8 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
llvm::Value *Args[] = {
- OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
+ OMPLoc,
+ CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
@@ -1900,13 +1901,13 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
// OutlinedFn(&GTid, &zero_bound, CapturedStruct);
Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
- Address ZeroAddrBound =
+ RawAddress ZeroAddrBound =
CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
/*Name=*/".bound.zero.addr");
CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
// ThreadId for serialized parallels is 0.
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ThreadIDAddr.emitRawPointer(CGF));
OutlinedFnArgs.push_back(ZeroAddrBound.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
@@ -2272,7 +2273,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc), // i32 <gtid>
BufSize, // size_t <buf_size>
- CL.getPointer(), // void *<copyprivate list>
+ CL.emitRawPointer(CGF), // void *<copyprivate list>
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
@@ -2591,10 +2592,10 @@ static void emitForStaticInitCall(
ThreadId,
CGF.Builder.getInt32(addMonoNonMonoModifier(CGF.CGM, Schedule, M1,
M2)), // Schedule type
- Values.IL.getPointer(), // &isLastIter
- Values.LB.getPointer(), // &LB
- Values.UB.getPointer(), // &UB
- Values.ST.getPointer(), // &Stride
+ Values.IL.emitRawPointer(CGF), // &isLastIter
+ Values.LB.emitRawPointer(CGF), // &LB
+ Values.UB.emitRawPointer(CGF), // &UB
+ Values.ST.emitRawPointer(CGF), // &Stride
CGF.Builder.getIntN(Values.IVSize, 1), // Incr
Chunk // Chunk
};
@@ -2697,12 +2698,11 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
// kmp_int[32|64] *p_stride);
llvm::Value *Args[] = {
- emitUpdateLocation(CGF, Loc),
- getThreadID(CGF, Loc),
- IL.getPointer(), // &isLastIter
- LB.getPointer(), // &Lower
- UB.getPointer(), // &Upper
- ST.getPointer() // &Stride
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ IL.emitRawPointer(CGF), // &isLastIter
+ LB.emitRawPointer(CGF), // &Lower
+ UB.emitRawPointer(CGF), // &Upper
+ ST.emitRawPointer(CGF) // &Stride
};
llvm::Value *Call = CGF.EmitRuntimeCall(
OMPBuilder.createDispatchNextFunction(IVSize, IVSigned), Args);
@@ -3047,7 +3047,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
CGF.Builder
.CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
CGF.VoidPtrTy, CGF.Int8Ty)
- .getPointer()};
+ .emitRawPointer(CGF)};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
std::end(CommonArgs));
if (isOpenMPTaskLoopDirective(Kind)) {
@@ -3574,7 +3574,8 @@ getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
- UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1);
+ UpAddrAddress.getElementType(), UpAddrAddress.emitRawPointer(CGF),
+ /*Idx0=*/1);
llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
@@ -3888,8 +3889,9 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *Size;
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- LValue Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
+ LValue Base =
+ CGF.MakeAddrLValue(CGF.Builder.CreateGEP(CGF, AffinitiesArray, Idx),
+ KmpTaskAffinityInfoTy);
// affs[i].base_addr = &<Affinities[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
@@ -3910,7 +3912,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *LocRef = emitUpdateLocation(CGF, Loc);
llvm::Value *GTid = getThreadID(CGF, Loc);
llvm::Value *AffinListPtr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AffinitiesArray.getPointer(), CGM.VoidPtrTy);
+ AffinitiesArray.emitRawPointer(CGF), CGM.VoidPtrTy);
// FIXME: Emit the function and ignore its result for now unless the
// runtime function is properly implemented.
(void)CGF.EmitRuntimeCall(
@@ -3921,8 +3923,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *NewTaskNewTaskTTy =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
NewTask, KmpTaskTWithPrivatesPtrTy);
- LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
- KmpTaskTWithPrivatesQTy);
+ LValue Base = CGF.MakeNaturalAlignRawAddrLValue(NewTaskNewTaskTTy,
+ KmpTaskTWithPrivatesQTy);
LValue TDBase =
CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
// Fill the data in the resulting kmp_task_t record.
@@ -4047,7 +4049,7 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
- Base.getAddress(CGF),
+ CGF, Base.getAddress(CGF),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
@@ -4097,7 +4099,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
+ CGF.Builder.CreateGEP(CGF, DependenciesArray, Idx), KmpDependInfoTy);
}
// deps[i].base_addr = &<Dependencies[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
@@ -4195,7 +4197,7 @@ void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
ElSize,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
+ Address DepAddr = CGF.Builder.CreateGEP(CGF, DependenciesArray, Pos);
CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
// Increase pos.
@@ -4430,7 +4432,7 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
+ Addr.getElementType(), Addr.emitRawPointer(CGF),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
CGF.VoidPtrTy);
@@ -4460,8 +4462,8 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
Address Begin = Base.getAddress(CGF);
// Cast from pointer to array type to pointer to single element.
- llvm::Value *End = CGF.Builder.CreateGEP(
- Begin.getElementType(), Begin.getPointer(), NumDeps);
+ llvm::Value *End = CGF.Builder.CreateGEP(Begin.getElementType(),
+ Begin.emitRawPointer(CGF), NumDeps);
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
@@ -4469,7 +4471,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
CGF.EmitBlock(BodyBB);
llvm::PHINode *ElementPHI =
CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
- ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
+ ElementPHI->addIncoming(Begin.emitRawPointer(CGF), EntryBB);
Begin = Begin.withPointer(ElementPHI, KnownNonNull);
Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
@@ -4483,12 +4485,12 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
FlagsLVal);
// Shift the address forward by one element.
- Address ElementNext =
- CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext");
- ElementPHI->addIncoming(ElementNext.getPointer(),
- CGF.Builder.GetInsertBlock());
+ llvm::Value *ElementNext =
+ CGF.Builder.CreateConstGEP(Begin, /*Index=*/1, "omp.elementNext")
+ .emitRawPointer(CGF);
+ ElementPHI->addIncoming(ElementNext, CGF.Builder.GetInsertBlock());
llvm::Value *IsEmpty =
- CGF.Builder.CreateICmpEQ(ElementNext.getPointer(), End, "omp.isempty");
+ CGF.Builder.CreateICmpEQ(ElementNext, End, "omp.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
// Done.
CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
@@ -4531,7 +4533,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
DepTaskArgs[1] = ThreadID;
DepTaskArgs[2] = NewTask;
DepTaskArgs[3] = NumOfElements;
- DepTaskArgs[4] = DependenciesArray.getPointer();
+ DepTaskArgs[4] = DependenciesArray.emitRawPointer(CGF);
DepTaskArgs[5] = CGF.Builder.getInt32(0);
DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
@@ -4563,7 +4565,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
DepWaitTaskArgs[2] = NumOfElements;
- DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[3] = DependenciesArray.emitRawPointer(CGF);
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
DepWaitTaskArgs[6] =
@@ -4725,8 +4727,8 @@ static void EmitOMPAggregateReduction(
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
- llvm::Value *RHSBegin = RHSAddr.getPointer();
- llvm::Value *LHSBegin = LHSAddr.getPointer();
+ llvm::Value *RHSBegin = RHSAddr.emitRawPointer(CGF);
+ llvm::Value *LHSBegin = LHSAddr.emitRawPointer(CGF);
// Cast from pointer to array type to pointer to single element.
llvm::Value *LHSEnd =
CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
@@ -4990,7 +4992,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
QualType ReductionArrayTy = C.getConstantArrayType(
C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
- Address ReductionList =
+ RawAddress ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
const auto *IPriv = Privates.begin();
unsigned Idx = 0;
@@ -5462,7 +5464,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
C.getConstantArrayType(RDType, ArraySize, nullptr,
ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
// kmp_task_red_input_t .rd_input.[Size];
- Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
+ RawAddress TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
Data.ReductionCopies, Data.ReductionOps);
for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
@@ -5473,7 +5475,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
/*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
".rd_input.gep.");
- LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
+ LValue ElemLVal = CGF.MakeNaturalAlignRawAddrLValue(GEP, RDType);
// ElemLVal.reduce_shar = &Shareds[Cnt];
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
RCG.emitSharedOrigLValue(CGF, Cnt);
@@ -5629,7 +5631,7 @@ void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
DepWaitTaskArgs[2] = NumOfElements;
- DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[3] = DependenciesArray.emitRawPointer(CGF);
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
DepWaitTaskArgs[6] =
@@ -5852,7 +5854,7 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
- llvm::Value *Traits = Addr.getPointer();
+ llvm::Value *Traits = Addr.emitRawPointer(CGF);
llvm::Value *AllocatorVal =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
@@ -6796,7 +6798,7 @@ private:
OASE->getBase()->IgnoreParenImpCasts())
.getCanonicalType();
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
+ return ATy->getSExtSize() != 1;
// If we don't have a constant dimension length, we have to consider
// the current section as having any size, so it is not necessarily
// unitary. If it happen to be unity size, that's user fault.
@@ -7312,17 +7314,19 @@ private:
CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
.getAddress(CGF);
}
- Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, ComponentLB.getPointer(), LB.getPointer());
+ llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
+ Size = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr,
+ LBPtr);
break;
}
}
assert(Size && "Failed to determine structure size");
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.Types.push_back(Flags);
@@ -7332,13 +7336,14 @@ private:
LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
- LB.getPointer());
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF),
+ LBPtr);
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.Types.push_back(Flags);
@@ -7356,20 +7361,21 @@ private:
(Next == CE && MapType != OMPC_MAP_unknown)) {
if (!IsMappingWholeStruct) {
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
: 1);
} else {
StructBaseCombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- StructBaseCombinedInfo.BasePointers.push_back(BP.getPointer());
+ StructBaseCombinedInfo.BasePointers.push_back(
+ BP.emitRawPointer(CGF));
StructBaseCombinedInfo.DevicePtrDecls.push_back(nullptr);
StructBaseCombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- StructBaseCombinedInfo.Pointers.push_back(LB.getPointer());
+ StructBaseCombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
StructBaseCombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
StructBaseCombinedInfo.NonContigInfo.Dims.push_back(
@@ -7546,8 +7552,8 @@ private:
// it.
if (DimSizes.size() < Components.size() - 1) {
if (CAT)
- DimSizes.push_back(llvm::ConstantInt::get(
- CGF.Int64Ty, CAT->getSize().getZExtValue()));
+ DimSizes.push_back(
+ llvm::ConstantInt::get(CGF.Int64Ty, CAT->getZExtSize()));
else if (VAT)
DimSizes.push_back(CGF.Builder.CreateIntCast(
CGF.EmitScalarExpr(VAT->getSizeExpr()), CGF.Int64Ty,
@@ -8211,11 +8217,11 @@ public:
}
CombinedInfo.Exprs.push_back(VD);
// Base is the base of the struct
- CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.BasePointers.push_back(PartialStruct.Base.emitRawPointer(CGF));
CombinedInfo.DevicePtrDecls.push_back(nullptr);
CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
// Pointer is the address of the lowest element
- llvm::Value *LB = LBAddr.getPointer();
+ llvm::Value *LB = LBAddr.emitRawPointer(CGF);
const CXXMethodDecl *MD =
CGF.CurFuncDecl ? dyn_cast<CXXMethodDecl>(CGF.CurFuncDecl) : nullptr;
const CXXRecordDecl *RD = MD ? MD->getParent() : nullptr;
@@ -8229,7 +8235,7 @@ public:
// if the this[:1] expression had appeared in a map clause with a map-type
// of tofrom.
// Emit this[:1]
- CombinedInfo.Pointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.Pointers.push_back(PartialStruct.Base.emitRawPointer(CGF));
QualType Ty = MD->getFunctionObjectParameterType();
llvm::Value *Size =
CGF.Builder.CreateIntCast(CGF.getTypeSize(Ty), CGF.Int64Ty,
@@ -8238,7 +8244,7 @@ public:
} else {
CombinedInfo.Pointers.push_back(LB);
// Size is (addr of {highest+1} element) - (addr of lowest element)
- llvm::Value *HB = HBAddr.getPointer();
+ llvm::Value *HB = HBAddr.emitRawPointer(CGF);
llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(
HBAddr.getElementType(), HB, /*Idx0=*/1);
llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
@@ -8747,7 +8753,7 @@ public:
Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
CV, ElementType, CGF.getContext().getDeclAlign(VD),
AlignmentSource::Decl));
- CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
+ CombinedInfo.Pointers.push_back(PtrAddr.emitRawPointer(CGF));
} else {
CombinedInfo.Pointers.push_back(CV);
}
@@ -9558,10 +9564,11 @@ static void emitTargetCallKernelLaunch(
bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
unsigned NumTargetItems = InputInfo.NumberOfTargetItems;
- llvm::Value *BasePointersArray = InputInfo.BasePointersArray.getPointer();
- llvm::Value *PointersArray = InputInfo.PointersArray.getPointer();
- llvm::Value *SizesArray = InputInfo.SizesArray.getPointer();
- llvm::Value *MappersArray = InputInfo.MappersArray.getPointer();
+ llvm::Value *BasePointersArray =
+ InputInfo.BasePointersArray.emitRawPointer(CGF);
+ llvm::Value *PointersArray = InputInfo.PointersArray.emitRawPointer(CGF);
+ llvm::Value *SizesArray = InputInfo.SizesArray.emitRawPointer(CGF);
+ llvm::Value *MappersArray = InputInfo.MappersArray.emitRawPointer(CGF);
auto &&EmitTargetCallFallbackCB =
[&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
@@ -10309,15 +10316,16 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
// Source location for the ident struct
llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer()};
+ llvm::Value *OffloadingArgs[] = {
+ RTLoc,
+ DeviceID,
+ PointerNum,
+ InputInfo.BasePointersArray.emitRawPointer(CGF),
+ InputInfo.PointersArray.emitRawPointer(CGF),
+ InputInfo.SizesArray.emitRawPointer(CGF),
+ MapTypesArray,
+ MapNamesArray,
+ InputInfo.MappersArray.emitRawPointer(CGF)};
// Select the right runtime function call for each standalone
// directive.
@@ -11128,7 +11136,7 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
getThreadID(CGF, D.getBeginLoc()),
llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).getPointer(),
+ CGF.Builder.CreateConstArrayGEP(DimsAddr, 0).emitRawPointer(CGF),
CGM.VoidPtrTy)};
llvm::FunctionCallee RTLFn = OMPBuilder.getOrCreateRuntimeFunction(
@@ -11162,7 +11170,8 @@ static void EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM,
/*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
- ULoc, ThreadID, CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
+ ULoc, ThreadID,
+ CGF.Builder.CreateConstArrayGEP(CntAddr, 0).emitRawPointer(CGF)};
llvm::FunctionCallee RTLFn;
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
OMPDoacrossKind<T> ODK;
@@ -11332,7 +11341,7 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
Args[0] = CGF.CGM.getOpenMPRuntime().getThreadID(
CGF, SourceLocation::getFromRawEncoding(LocEncoding));
Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Addr.getPointer(), CGF.VoidPtrTy);
+ Addr.emitRawPointer(CGF), CGF.VoidPtrTy);
llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
Args[2] = AllocVal;
CGF.EmitRuntimeCall(RTLFn, Args);
@@ -11690,15 +11699,17 @@ void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
LLIVTy, getName({UniqueDeclName, "iv"}));
cast<llvm::GlobalVariable>(LastIV)->setAlignment(
IVLVal.getAlignment().getAsAlign());
- LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
+ LValue LastIVLVal =
+ CGF.MakeNaturalAlignRawAddrLValue(LastIV, IVLVal.getType());
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
llvm::GlobalVariable *Last = OMPBuilder.getOrCreateInternalVariable(
CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
- Last->setAlignment(LVal.getAlignment().getAsAlign());
- LValue LastLVal = CGF.MakeAddrLValue(
- Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
+ cast<llvm::GlobalVariable>(Last)->setAlignment(
+ LVal.getAlignment().getAsAlign());
+ LValue LastLVal =
+ CGF.MakeRawAddrLValue(Last, LVal.getType(), LVal.getAlignment());
// Global loop counter. Required to handle inner parallel-for regions.
// iv
@@ -11871,9 +11882,8 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
// The variable was not updated in the region - exit.
if (!GV)
return;
- LValue LPLVal = CGF.MakeAddrLValue(
- Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
- PrivLVal.getType().getNonReferenceType());
+ LValue LPLVal = CGF.MakeRawAddrLValue(
+ GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
CGF.EmitStoreOfScalar(Res, PrivLVal);
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index c3206427b143..522ae3d35d22 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -1068,13 +1068,12 @@ public:
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
- const VarDecl *VD,
- Address VDAddr,
+ const VarDecl *VD, Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
- virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
+ virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 299ee1460b3d..5baac8f0e3e2 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1096,7 +1096,8 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
- LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
+ LValue VarAddr =
+ CGF.MakeNaturalAlignPointeeRawAddrLValue(CastedVoidPtr, VarTy);
Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
Rec.second.GlobalizedVal = VoidPtr;
@@ -1206,8 +1207,8 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
+ RawAddress ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name=*/".zero.addr");
CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
// We don't emit any thread id function call in bare kernel, but because the
@@ -1215,7 +1216,7 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
if (IsBareKernel)
OutlinedFnArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
else
- OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
+ OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).emitRawPointer(CGF));
OutlinedFnArgs.push_back(ZeroAddr.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
@@ -1289,7 +1290,7 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
- Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
+ Bld.CreateBitOrPointerCast(CapturedVarsAddrs.emitRawPointer(CGF),
CGF.VoidPtrPtrTy),
llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
@@ -1503,17 +1504,18 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF.EmitBlock(PreCondBB);
llvm::PHINode *PhiSrc =
Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
- PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
+ PhiSrc->addIncoming(Ptr.emitRawPointer(CGF), CurrentBB);
llvm::PHINode *PhiDest =
Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
- PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
+ PhiDest->addIncoming(ElemPtr.emitRawPointer(CGF), CurrentBB);
Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
ElemPtr =
Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
+ llvm::Value *PtrEndRaw = PtrEnd.emitRawPointer(CGF);
+ llvm::Value *PtrRaw = Ptr.emitRawPointer(CGF);
llvm::Value *PtrDiff = Bld.CreatePtrDiff(
- CGF.Int8Ty, PtrEnd.getPointer(),
- Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
- CGF.VoidPtrTy));
+ CGF.Int8Ty, PtrEndRaw,
+ Bld.CreatePointerBitCastOrAddrSpaceCast(PtrRaw, CGF.VoidPtrTy));
Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
ThenBB, ExitBB);
CGF.EmitBlock(ThenBB);
@@ -1528,8 +1530,8 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
TBAAAccessInfo());
Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
- PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
- PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
+ PhiSrc->addIncoming(LocalPtr.emitRawPointer(CGF), ThenBB);
+ PhiDest->addIncoming(LocalElemPtr.emitRawPointer(CGF), ThenBB);
CGF.EmitBranch(PreCondBB);
CGF.EmitBlock(ExitBB);
} else {
@@ -1676,10 +1678,10 @@ static void emitReductionListCopy(
// scope and that of functions it invokes (i.e., reduce_function).
// RemoteReduceData[i] = (void*)&RemoteElem
if (UpdateDestListPtr) {
- CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
- DestElementAddr.getPointer(), CGF.VoidPtrTy),
- DestElementPtrAddr, /*Volatile=*/false,
- C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ DestElementAddr.emitRawPointer(CGF), CGF.VoidPtrTy),
+ DestElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy);
}
++Idx;
@@ -1830,7 +1832,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// elemptr = ((CopyType*)(elemptrptr)) + I
Address ElemPtr(ElemPtrPtr, CopyType, Align);
if (NumIters > 1)
- ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
+ ElemPtr = Bld.CreateGEP(CGF, ElemPtr, Cnt);
// Get pointer to location in transfer medium.
// MediumPtr = &medium[warp_id]
@@ -1894,7 +1896,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
Address TargetElemPtr(TargetElemPtrVal, CopyType, Align);
if (NumIters > 1)
- TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
+ TargetElemPtr = Bld.CreateGEP(CGF, TargetElemPtr, Cnt);
// *TargetElemPtr = SrcMediumVal;
llvm::Value *SrcMediumValue =
@@ -2105,9 +2107,9 @@ static llvm::Function *emitShuffleAndReduceFunction(
CGF.EmitBlock(ThenBB);
// reduce_function(LocalReduceList, RemoteReduceList)
llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- LocalReduceList.getPointer(), CGF.VoidPtrTy);
+ LocalReduceList.emitRawPointer(CGF), CGF.VoidPtrTy);
llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- RemoteReduceList.getPointer(), CGF.VoidPtrTy);
+ RemoteReduceList.emitRawPointer(CGF), CGF.VoidPtrTy);
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
Bld.CreateBr(MergeBB);
@@ -2218,9 +2220,9 @@ static llvm::Value *emitListToGlobalCopyFunction(
llvm::Value *BufferPtr =
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- GlobLVal.setAddress(Address(GlobAddr.getPointer(),
+ GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Private->getType()),
GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
@@ -2304,7 +2306,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
// 1. Build a list of reduction variables.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- Address ReductionList =
+ RawAddress ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
auto IPriv = Privates.begin();
llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
@@ -2319,10 +2321,10 @@ static llvm::Value *emitListToGlobalReduceFunction(
llvm::Value *BufferPtr =
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false,
- C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem,
+ /*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
@@ -2425,9 +2427,9 @@ static llvm::Value *emitGlobalToListCopyFunction(
llvm::Value *BufferPtr =
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- GlobLVal.setAddress(Address(GlobAddr.getPointer(),
+ GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Private->getType()),
GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
@@ -2526,10 +2528,10 @@ static llvm::Value *emitGlobalToListReduceFunction(
llvm::Value *BufferPtr =
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false,
- C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem,
+ /*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
@@ -2545,7 +2547,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
}
// Call reduce_function(ReduceList, GlobalReduceList)
- llvm::Value *GlobalReduceList = ReductionList.getPointer();
+ llvm::Value *GlobalReduceList = ReductionList.emitRawPointer(CGF);
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
@@ -2876,7 +2878,7 @@ void CGOpenMPRuntimeGPU::emitReduction(
}
llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
+ ReductionList.emitRawPointer(CGF), CGF.VoidPtrTy);
llvm::Function *ReductionFn = emitReductionFunction(
CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
Privates, LHSExprs, RHSExprs, ReductionOps);
@@ -3106,15 +3108,15 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
// Get the array of arguments.
SmallVector<llvm::Value *, 8> Args;
- Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
- Args.emplace_back(ZeroAddr.getPointer());
+ Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).emitRawPointer(CGF));
+ Args.emplace_back(ZeroAddr.emitRawPointer(CGF));
CGBuilderTy &Bld = CGF.Builder;
auto CI = CS.capture_begin();
// Use global memory for data sharing.
// Handle passing of global args to workers.
- Address GlobalArgs =
+ RawAddress GlobalArgs =
CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
@@ -3400,7 +3402,7 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
VD->getType().getCanonicalType())
.getAddress(CGF);
- CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
+ CGF.EmitStoreOfScalar(VDAddr.emitRawPointer(CGF), VarLVal);
}
}
}
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 7822903b89ce..e32023aeac1e 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -47,8 +47,10 @@ namespace {
/// [i8 x 3] instead of i24. The function clipTailPadding does this.
/// C++ examples that require clipping:
/// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
-/// struct A { int a : 24; }; // a must be clipped because a struct like B
-// could exist: struct B : A { char b; }; // b goes at offset 3
+/// struct A { int a : 24; ~A(); }; // a must be clipped because:
+/// struct B : A { char b; }; // b goes at offset 3
+/// * The allocation of bitfield access units is described in more detail in
+/// CGRecordLowering::accumulateBitFields.
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
/// fields. The existing asserts suggest that LLVM assumes that *every* field
/// has an underlying storage type. Therefore empty structures containing
@@ -184,8 +186,9 @@ struct CGRecordLowering {
void lower(bool NonVirtualBaseType);
void lowerUnion(bool isNoUniqueAddress);
void accumulateFields();
- void accumulateBitFields(RecordDecl::field_iterator Field,
- RecordDecl::field_iterator FieldEnd);
+ RecordDecl::field_iterator
+ accumulateBitFields(RecordDecl::field_iterator Field,
+ RecordDecl::field_iterator FieldEnd);
void computeVolatileBitfields();
void accumulateBases();
void accumulateVPtrs();
@@ -378,13 +381,15 @@ void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
void CGRecordLowering::accumulateFields() {
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end();
- Field != FieldEnd;) {
+ Field != FieldEnd;) {
if (Field->isBitField()) {
- RecordDecl::field_iterator Start = Field;
- // Iterate to gather the list of bitfields.
- for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
- accumulateBitFields(Start, Field);
- } else if (!Field->isZeroSize(Context)) {
+ Field = accumulateBitFields(Field, FieldEnd);
+ assert((Field == FieldEnd || !Field->isBitField()) &&
+ "Failed to accumulate all the bitfields");
+ } else if (Field->isZeroSize(Context)) {
+ // Empty fields have no storage.
+ ++Field;
+ } else {
// Use base subobject layout for the potentially-overlapping field,
// as it is done in RecordLayoutBuilder
Members.push_back(MemberInfo(
@@ -394,33 +399,33 @@ void CGRecordLowering::accumulateFields() {
: getStorageType(*Field),
*Field));
++Field;
- } else {
- ++Field;
}
}
}
-void
+// Create members for bitfields. Field is a bitfield, and FieldEnd is the end
+// iterator of the record. Return the first non-bitfield encountered.
+RecordDecl::field_iterator
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd) {
- // Run stores the first element of the current run of bitfields. FieldEnd is
- // used as a special value to note that we don't have a current run. A
- // bitfield run is a contiguous collection of bitfields that can be stored in
- // the same storage block. Zero-sized bitfields and bitfields that would
- // cross an alignment boundary break a run and start a new one.
- RecordDecl::field_iterator Run = FieldEnd;
- // Tail is the offset of the first bit off the end of the current run. It's
- // used to determine if the ASTRecordLayout is treating these two bitfields as
- // contiguous. StartBitOffset is offset of the beginning of the Run.
- uint64_t StartBitOffset, Tail = 0;
if (isDiscreteBitFieldABI()) {
- for (; Field != FieldEnd; ++Field) {
- uint64_t BitOffset = getFieldBitOffset(*Field);
+ // Run stores the first element of the current run of bitfields. FieldEnd is
+ // used as a special value to note that we don't have a current run. A
+ // bitfield run is a contiguous collection of bitfields that can be stored
+ // in the same storage block. Zero-sized bitfields and bitfields that would
+ // cross an alignment boundary break a run and start a new one.
+ RecordDecl::field_iterator Run = FieldEnd;
+ // Tail is the offset of the first bit off the end of the current run. It's
+ // used to determine if the ASTRecordLayout is treating these two bitfields
+ // as contiguous. StartBitOffset is offset of the beginning of the Run.
+ uint64_t StartBitOffset, Tail = 0;
+ for (; Field != FieldEnd && Field->isBitField(); ++Field) {
// Zero-width bitfields end runs.
if (Field->isZeroLengthBitField(Context)) {
Run = FieldEnd;
continue;
}
+ uint64_t BitOffset = getFieldBitOffset(*Field);
llvm::Type *Type =
Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
// If we don't have a run yet, or don't live within the previous run's
@@ -439,82 +444,248 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
MemberInfo::Field, nullptr, *Field));
}
- return;
+ return Field;
}
- // Check if OffsetInRecord (the size in bits of the current run) is better
- // as a single field run. When OffsetInRecord has legal integer width, and
- // its bitfield offset is naturally aligned, it is better to make the
- // bitfield a separate storage component so as it can be accessed directly
- // with lower cost.
- auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
- uint64_t StartBitOffset) {
- if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
- return false;
- if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
- !DataLayout.fitsInLegalInteger(OffsetInRecord))
- return false;
- // Make sure StartBitOffset is naturally aligned if it is treated as an
- // IType integer.
- if (StartBitOffset %
- Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
- 0)
- return false;
- return true;
- };
+ // The SysV ABI can overlap bitfield storage units with both other bitfield
+ // storage units /and/ other non-bitfield data members. Accessing a sequence
+ // of bitfields mustn't interfere with adjacent non-bitfields -- they're
+ // permitted to be accessed in separate threads for instance.
+
+ // We split runs of bit-fields into a sequence of "access units". When we emit
+ // a load or store of a bit-field, we'll load/store the entire containing
+ // access unit. As mentioned, the standard requires that these loads and
+ // stores must not interfere with accesses to other memory locations, and it
+ // defines the bit-field's memory location as the current run of
+ // non-zero-width bit-fields. So an access unit must never overlap with
+ // non-bit-field storage or cross a zero-width bit-field. Otherwise, we're
+ // free to draw the lines as we see fit.
+
+ // Drawing these lines well can be complicated. LLVM generally can't modify a
+ // program to access memory that it didn't before, so using very narrow access
+ // units can prevent the compiler from using optimal access patterns. For
+ // example, suppose a run of bit-fields occupies four bytes in a struct. If we
+ // split that into four 1-byte access units, then a sequence of assignments
+ // that doesn't touch all four bytes may have to be emitted with multiple
+ // 8-bit stores instead of a single 32-bit store. On the other hand, if we use
+ // very wide access units, we may find ourselves emitting accesses to
+ // bit-fields we didn't really need to touch, just because LLVM was unable to
+ // clean up after us.
+
+ // It is desirable to have access units be aligned powers of 2 no larger than
+ // a register. (On non-strict alignment ISAs, the alignment requirement can be
+ // dropped.) A three byte access unit will be accessed using 2-byte and 1-byte
+ // accesses and bit manipulation. If no bitfield straddles across the two
+ // separate accesses, it is better to have separate 2-byte and 1-byte access
+ // units, as then LLVM will not generate unnecessary memory accesses, or bit
+ // manipulation. Similarly, on a strict-alignment architecture, it is better
+ // to keep access-units naturally aligned, to avoid similar bit
+ // manipulation synthesizing larger unaligned accesses.
+
+ // Bitfields that share parts of a single byte are, of necessity, placed in
+ // the same access unit. That unit will encompass a consecutive run where
+ // adjacent bitfields share parts of a byte. (The first bitfield of such an
+ // access unit will start at the beginning of a byte.)
+
+ // We then try and accumulate adjacent access units when the combined unit is
+ // naturally sized, no larger than a register, and (on a strict alignment
+ // ISA), naturally aligned. Note that this requires lookahead to one or more
+ // subsequent access units. For instance, consider a 2-byte access-unit
+ // followed by 2 1-byte units. We can merge that into a 4-byte access-unit,
+ // but we would not want to merge a 2-byte followed by a single 1-byte (and no
+ // available tail padding). We keep track of the best access unit seen so far,
+ // and use that when we determine we cannot accumulate any more. Then we start
+ // again at the bitfield following that best one.
+
+ // The accumulation is also prevented when:
+ // *) it would cross a character-aigned zero-width bitfield, or
+ // *) fine-grained bitfield access option is in effect.
+
+ CharUnits RegSize =
+ bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
+ unsigned CharBits = Context.getCharWidth();
+
+ // Data about the start of the span we're accumulating to create an access
+ // unit from. Begin is the first bitfield of the span. If Begin is FieldEnd,
+ // we've not got a current span. The span starts at the BeginOffset character
+ // boundary. BitSizeSinceBegin is the size (in bits) of the span -- this might
+ // include padding when we've advanced to a subsequent bitfield run.
+ RecordDecl::field_iterator Begin = FieldEnd;
+ CharUnits BeginOffset;
+ uint64_t BitSizeSinceBegin;
+
+ // The (non-inclusive) end of the largest acceptable access unit we've found
+ // since Begin. If this is Begin, we're gathering the initial set of bitfields
+ // of a new span. BestEndOffset is the end of that acceptable access unit --
+ // it might extend beyond the last character of the bitfield run, using
+ // available padding characters.
+ RecordDecl::field_iterator BestEnd = Begin;
+ CharUnits BestEndOffset;
- // The start field is better as a single field run.
- bool StartFieldAsSingleRun = false;
for (;;) {
- // Check to see if we need to start a new run.
- if (Run == FieldEnd) {
- // If we're out of fields, return.
- if (Field == FieldEnd)
+ // AtAlignedBoundary is true iff Field is the (potential) start of a new
+ // span (or the end of the bitfields). When true, LimitOffset is the
+ // character offset of that span and Barrier indicates whether the new
+ // span cannot be merged into the current one.
+ bool AtAlignedBoundary = false;
+ bool Barrier = false;
+
+ if (Field != FieldEnd && Field->isBitField()) {
+ uint64_t BitOffset = getFieldBitOffset(*Field);
+ if (Begin == FieldEnd) {
+ // Beginning a new span.
+ Begin = Field;
+ BestEnd = Begin;
+
+ assert((BitOffset % CharBits) == 0 && "Not at start of char");
+ BeginOffset = bitsToCharUnits(BitOffset);
+ BitSizeSinceBegin = 0;
+ } else if ((BitOffset % CharBits) != 0) {
+ // Bitfield occupies the same character as previous bitfield, it must be
+ // part of the same span. This can include zero-length bitfields, should
+ // the target not align them to character boundaries. Such non-alignment
+ // is at variance with the standards, which require zero-length
+ // bitfields be a barrier between access units. But of course we can't
+ // achieve that in the middle of a character.
+ assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
+ "Concatenating non-contiguous bitfields");
+ } else {
+ // Bitfield potentially begins a new span. This includes zero-length
+ // bitfields on non-aligning targets that lie at character boundaries
+ // (those are barriers to merging).
+ if (Field->isZeroLengthBitField(Context))
+ Barrier = true;
+ AtAlignedBoundary = true;
+ }
+ } else {
+ // We've reached the end of the bitfield run. Either we're done, or this
+ // is a barrier for the current span.
+ if (Begin == FieldEnd)
break;
- // Any non-zero-length bitfield can start a new run.
- if (!Field->isZeroLengthBitField(Context)) {
- Run = Field;
- StartBitOffset = getFieldBitOffset(*Field);
- Tail = StartBitOffset + Field->getBitWidthValue(Context);
- StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
- StartBitOffset);
+
+ Barrier = true;
+ AtAlignedBoundary = true;
+ }
+
+ // InstallBest indicates whether we should create an access unit for the
+ // current best span: fields [Begin, BestEnd) occupying characters
+ // [BeginOffset, BestEndOffset).
+ bool InstallBest = false;
+ if (AtAlignedBoundary) {
+ // Field is the start of a new span or the end of the bitfields. The
+ // just-seen span now extends to BitSizeSinceBegin.
+
+ // Determine if we can accumulate that just-seen span into the current
+ // accumulation.
+ CharUnits AccessSize = bitsToCharUnits(BitSizeSinceBegin + CharBits - 1);
+ if (BestEnd == Begin) {
+ // This is the initial run at the start of a new span. By definition,
+ // this is the best seen so far.
+ BestEnd = Field;
+ BestEndOffset = BeginOffset + AccessSize;
+ if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+ // Fine-grained access, so no merging of spans.
+ InstallBest = true;
+ else if (!BitSizeSinceBegin)
+ // A zero-sized initial span -- this will install nothing and reset
+ // for another.
+ InstallBest = true;
+ } else if (AccessSize > RegSize)
+ // Accumulating the just-seen span would create a multi-register access
+ // unit, which would increase register pressure.
+ InstallBest = true;
+
+ if (!InstallBest) {
+ // Determine if accumulating the just-seen span will create an expensive
+ // access unit or not.
+ llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+ if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
+ // Unaligned accesses are expensive. Only accumulate if the new unit
+ // is naturally aligned. Otherwise install the best we have, which is
+ // either the initial access unit (can't do better), or a naturally
+ // aligned accumulation (since we would have already installed it if
+ // it wasn't naturally aligned).
+ CharUnits Align = getAlignment(Type);
+ if (Align > Layout.getAlignment())
+ // The alignment required is greater than the containing structure
+ // itself.
+ InstallBest = true;
+ else if (!BeginOffset.isMultipleOf(Align))
+ // The access unit is not at a naturally aligned offset within the
+ // structure.
+ InstallBest = true;
+ }
+
+ if (!InstallBest) {
+ // Find the next used storage offset to determine what the limit of
+ // the current span is. That's either the offset of the next field
+ // with storage (which might be Field itself) or the end of the
+ // non-reusable tail padding.
+ CharUnits LimitOffset;
+ for (auto Probe = Field; Probe != FieldEnd; ++Probe)
+ if (!Probe->isZeroSize(Context)) {
+ // A member with storage sets the limit.
+ assert((getFieldBitOffset(*Probe) % CharBits) == 0 &&
+ "Next storage is not byte-aligned");
+ LimitOffset = bitsToCharUnits(getFieldBitOffset(*Probe));
+ goto FoundLimit;
+ }
+ // We reached the end of the fields. We can't necessarily use tail
+ // padding in C++ structs, so the NonVirtual size is what we must
+ // use there.
+ LimitOffset = RD ? Layout.getNonVirtualSize() : Layout.getDataSize();
+ FoundLimit:;
+
+ CharUnits TypeSize = getSize(Type);
+ if (BeginOffset + TypeSize <= LimitOffset) {
+ // There is space before LimitOffset to create a naturally-sized
+ // access unit.
+ BestEndOffset = BeginOffset + TypeSize;
+ BestEnd = Field;
+ }
+
+ if (Barrier)
+ // The next field is a barrier that we cannot merge across.
+ InstallBest = true;
+ else
+ // Otherwise, we're not installing. Update the bit size
+ // of the current span to go all the way to LimitOffset, which is
+ // the (aligned) offset of next bitfield to consider.
+ BitSizeSinceBegin = Context.toBits(LimitOffset - BeginOffset);
+ }
}
- ++Field;
- continue;
}
- // If the start field of a new run is better as a single run, or
- // if current field (or consecutive fields) is better as a single run, or
- // if current field has zero width bitfield and either
- // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
- // true, or
- // if the offset of current field is inconsistent with the offset of
- // previous field plus its offset,
- // skip the block below and go ahead to emit the storage.
- // Otherwise, try to add bitfields to the run.
- if (!StartFieldAsSingleRun && Field != FieldEnd &&
- !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
- (!Field->isZeroLengthBitField(Context) ||
- (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
- !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
- Tail == getFieldBitOffset(*Field)) {
- Tail += Field->getBitWidthValue(Context);
+ if (InstallBest) {
+ assert((Field == FieldEnd || !Field->isBitField() ||
+ (getFieldBitOffset(*Field) % CharBits) == 0) &&
+ "Installing but not at an aligned bitfield or limit");
+ CharUnits AccessSize = BestEndOffset - BeginOffset;
+ if (!AccessSize.isZero()) {
+ // Add the storage member for the access unit to the record. The
+ // bitfields get the offset of their storage but come afterward and
+ // remain there after a stable sort.
+ llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
+ Members.push_back(StorageInfo(BeginOffset, Type));
+ for (; Begin != BestEnd; ++Begin)
+ if (!Begin->isZeroLengthBitField(Context))
+ Members.push_back(
+ MemberInfo(BeginOffset, MemberInfo::Field, nullptr, *Begin));
+ }
+ // Reset to start a new span.
+ Field = BestEnd;
+ Begin = FieldEnd;
+ } else {
+ assert(Field != FieldEnd && Field->isBitField() &&
+ "Accumulating past end of bitfields");
+ assert(!Barrier && "Accumulating across barrier");
+ // Accumulate this bitfield into the current (potential) span.
+ BitSizeSinceBegin += Field->getBitWidthValue(Context);
++Field;
- continue;
}
-
- // We've hit a break-point in the run and need to emit a storage field.
- llvm::Type *Type = getIntNType(Tail - StartBitOffset);
- // Add the storage member to the record and set the bitfield info for all of
- // the bitfields in the run. Bitfields get the offset of their storage but
- // come afterward and remain there after a stable sort.
- Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
- for (; Run != Field; ++Run)
- Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
- MemberInfo::Field, nullptr, *Run));
- Run = FieldEnd;
- StartFieldAsSingleRun = false;
}
+
+ return Field;
}
void CGRecordLowering::accumulateBases() {
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index cb5a004e4f4a..576fe2f7a2d4 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2294,7 +2294,7 @@ std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
Address Addr = InputValue.getAddress(*this);
ConstraintStr += '*';
- return {Addr.getPointer(), Addr.getElementType()};
+ return {InputValue.getPointer(*this), Addr.getElementType()};
}
std::pair<llvm::Value *, llvm::Type *>
@@ -2701,7 +2701,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ArgTypes.push_back(DestAddr.getType());
ArgElemTypes.push_back(DestAddr.getElementType());
- Args.push_back(DestAddr.getPointer());
+ Args.push_back(DestAddr.emitRawPointer(*this));
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -3076,8 +3076,8 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
// Initialize variable-length arrays.
- LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
- Ctx.getTagDeclType(RD));
+ LValue Base = MakeNaturalAlignRawAddrLValue(
+ CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
for (auto *FD : RD->fields()) {
if (FD->hasCapturedVLAType()) {
auto *ExprArg =
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index f37ac549d10a..e6d504bcdeca 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -350,7 +350,8 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
llvm::Value *SrcAddrVal = EmitScalarConversion(
- DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
+ DstAddr.emitRawPointer(*this),
+ Ctx.getPointerType(Ctx.getUIntPtrType()),
Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
LValue SrcLV =
MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
@@ -364,7 +365,8 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
CapturedVars.push_back(CV);
} else {
assert(CurCap->capturesVariable() && "Expected capture by reference.");
- CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
+ CapturedVars.push_back(
+ EmitLValue(*I).getAddress(*this).emitRawPointer(*this));
}
}
}
@@ -375,8 +377,9 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
ASTContext &Ctx = CGF.getContext();
llvm::Value *CastedPtr = CGF.EmitScalarConversion(
- AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
+ AddrLV.getAddress(CGF).emitRawPointer(CGF), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
+ // FIXME: should the pointee type (DstType) be passed?
Address TmpAddr =
CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
return TmpAddr;
@@ -702,8 +705,8 @@ void CodeGenFunction::EmitOMPAggregateAssign(
llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());
- llvm::Value *SrcBegin = SrcAddr.getPointer();
- llvm::Value *DestBegin = DestAddr.getPointer();
+ llvm::Value *SrcBegin = SrcAddr.emitRawPointer(*this);
+ llvm::Value *DestBegin = DestAddr.emitRawPointer(*this);
// Cast from pointer to array type to pointer to single element.
llvm::Value *DestEnd = Builder.CreateInBoundsGEP(DestAddr.getElementType(),
DestBegin, NumElements);
@@ -1007,10 +1010,10 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
CopyBegin = createBasicBlock("copyin.not.master");
CopyEnd = createBasicBlock("copyin.not.master.end");
// TODO: Avoid ptrtoint conversion.
- auto *MasterAddrInt =
- Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
- auto *PrivateAddrInt =
- Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
+ auto *MasterAddrInt = Builder.CreatePtrToInt(
+ MasterAddr.emitRawPointer(*this), CGM.IntPtrTy);
+ auto *PrivateAddrInt = Builder.CreatePtrToInt(
+ PrivateAddr.emitRawPointer(*this), CGM.IntPtrTy);
Builder.CreateCondBr(
Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
CopyEnd);
@@ -1666,7 +1669,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
llvm::Type *VarTy = VDAddr.getElementType();
llvm::Value *Data =
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
+ CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy);
llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
std::string Suffix = getNameWithSeparators({"cache", ""});
llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
@@ -2045,7 +2048,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
->getParam(0)
->getType()
.getNonReferenceType();
- Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
+ RawAddress CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");
@@ -2061,7 +2064,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
LValue LCVal = EmitLValue(LoopVarRef);
Address LoopVarAddress = LCVal.getAddress(*this);
emitCapturedStmtCall(*this, LoopVarClosure,
- {LoopVarAddress.getPointer(), IndVar});
+ {LoopVarAddress.emitRawPointer(*this), IndVar});
RunCleanupsScope BodyScope(*this);
EmitStmt(BodyStmt);
@@ -4795,7 +4798,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
ParamTypes.push_back(PrivatesPtr->getType());
for (const Expr *E : Data.PrivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr = CGF.CreateMemTemp(
+ RawAddress PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
@@ -4803,7 +4806,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr =
+ RawAddress PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
@@ -4813,7 +4816,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
for (const Expr *E : Data.LastprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr =
+ RawAddress PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".lastpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
@@ -4826,7 +4829,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Ty = CGF.getContext().getPointerType(Ty);
if (isAllocatableDecl(VD))
Ty = CGF.getContext().getPointerType(Ty);
- Address PrivatePtr = CGF.CreateMemTemp(
+ RawAddress PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
auto Result = UntiedLocalVars.insert(
std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
@@ -4859,7 +4862,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
if (auto *DI = CGF.getDebugInfo())
if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
(void)DI->EmitDeclareOfAutoVariable(
- Pair.first, Pair.second.getPointer(), CGF.Builder,
+ Pair.first, Pair.second.getBasePointer(), CGF.Builder,
/*UsePointerValue*/ true);
}
// Adjust mapping for internal locals by mapping actual memory instead of
@@ -4912,14 +4915,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
- Replacement =
- Address(CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
- CGF.getContext().getPointerType(
- Data.ReductionCopies[Cnt]->getType()),
- Data.ReductionCopies[Cnt]->getExprLoc()),
- CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
- Replacement.getAlignment());
+ Replacement = Address(
+ CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
+ CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(
+ Data.ReductionCopies[Cnt]->getType()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
+ Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
@@ -4970,7 +4973,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
InRedPrivs[Cnt]->getExprLoc()),
CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
@@ -5089,7 +5092,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// If there is no user-defined mapper, the mapper array will be nullptr. In
// this case, we don't need to privatize it.
if (!isa_and_nonnull<llvm::ConstantPointerNull>(
- InputInfo.MappersArray.getPointer())) {
+ InputInfo.MappersArray.emitRawPointer(*this))) {
MVD = createImplicitFirstprivateForType(
getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
TargetScope.addPrivate(MVD, InputInfo.MappersArray);
@@ -5115,7 +5118,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
ParamTypes.push_back(PrivatesPtr->getType());
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Address PrivatePtr =
+ RawAddress PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
@@ -5194,14 +5197,14 @@ void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
- Replacement =
- Address(CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
- CGF.getContext().getPointerType(
- Data.ReductionCopies[Cnt]->getType()),
- Data.ReductionCopies[Cnt]->getExprLoc()),
- CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
- Replacement.getAlignment());
+ Replacement = Address(
+ CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
+ CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(
+ Data.ReductionCopies[Cnt]->getType()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
+ Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
@@ -5247,7 +5250,7 @@ void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
CGF.EmitScalarConversion(
- Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
InRedPrivs[Cnt]->getExprLoc()),
CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
@@ -5394,7 +5397,7 @@ void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
*this, Dependencies, DC->getBeginLoc());
- EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
+ EmitStoreOfScalar(DepAddr.emitRawPointer(*this), DOLVal);
return;
}
if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
@@ -6471,21 +6474,21 @@ static void emitOMPAtomicCompareExpr(
D->getType()->hasSignedIntegerRepresentation());
llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
- XAddr.getPointer(), XAddr.getElementType(),
+ XAddr.emitRawPointer(CGF), XAddr.getElementType(),
X->getType()->hasSignedIntegerRepresentation(),
X->getType().isVolatileQualified()};
llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
if (V) {
LValue LV = CGF.EmitLValue(V);
Address Addr = LV.getAddress(CGF);
- VOpVal = {Addr.getPointer(), Addr.getElementType(),
+ VOpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
V->getType()->hasSignedIntegerRepresentation(),
V->getType().isVolatileQualified()};
}
if (R) {
LValue LV = CGF.EmitLValue(R);
Address Addr = LV.getAddress(CGF);
- ROpVal = {Addr.getPointer(), Addr.getElementType(),
+ ROpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
R->getType()->hasSignedIntegerRepresentation(),
R->getType().isVolatileQualified()};
}
@@ -7029,7 +7032,7 @@ void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
std::tie(NumDependences, DependenciesArray) =
CGM.getOpenMPRuntime().emitDependClause(*this, Data.Dependences,
S.getBeginLoc());
- DependenceList = DependenciesArray.getPointer();
+ DependenceList = DependenciesArray.emitRawPointer(*this);
}
Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 8dee3f74b44b..862369ae009f 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -201,14 +201,13 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
// Find the first store of "this", which will be to the alloca associated
// with "this".
- Address ThisPtr =
- Address(&*AI, ConvertTypeForMem(MD->getFunctionObjectParameterType()),
- CGM.getClassPointerAlignment(MD->getParent()));
+ Address ThisPtr = makeNaturalAddressForPointer(
+ &*AI, MD->getFunctionObjectParameterType(),
+ CGM.getClassPointerAlignment(MD->getParent()));
llvm::BasicBlock *EntryBB = &Fn->front();
llvm::BasicBlock::iterator ThisStore =
llvm::find_if(*EntryBB, [&](llvm::Instruction &I) {
- return isa<llvm::StoreInst>(I) &&
- I.getOperand(0) == ThisPtr.getPointer();
+ return isa<llvm::StoreInst>(I) && I.getOperand(0) == &*AI;
});
assert(ThisStore != EntryBB->end() &&
"Store of this should be in entry block?");
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 1e6f67250583..cc9ad10ae596 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -14,12 +14,13 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
+#include "Address.h"
+#include "CodeGenTBAA.h"
+#include "EHScopeStack.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
-#include "llvm/IR/Value.h"
#include "llvm/IR/Type.h"
-#include "Address.h"
-#include "CodeGenTBAA.h"
+#include "llvm/IR/Value.h"
namespace llvm {
class Constant;
@@ -28,57 +29,64 @@ namespace llvm {
namespace clang {
namespace CodeGen {
- class AggValueSlot;
- class CodeGenFunction;
- struct CGBitFieldInfo;
+class AggValueSlot;
+class CGBuilderTy;
+class CodeGenFunction;
+struct CGBitFieldInfo;
/// RValue - This trivial value class is used to represent the result of an
/// expression that is evaluated. It can be one of three things: either a
/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
/// address of an aggregate value in memory.
class RValue {
- enum Flavor { Scalar, Complex, Aggregate };
+ friend struct DominatingValue<RValue>;
- // The shift to make to an aggregate's alignment to make it look
- // like a pointer.
- enum { AggAlignShift = 4 };
+ enum FlavorEnum { Scalar, Complex, Aggregate };
- // Stores first value and flavor.
- llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
- // Stores second value and volatility.
- llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
- // Stores element type for aggregate values.
- llvm::Type *ElementType;
+ union {
+ // Stores first and second value.
+ struct {
+ llvm::Value *first;
+ llvm::Value *second;
+ } Vals;
+
+ // Stores aggregate address.
+ Address AggregateAddr;
+ };
+
+ unsigned IsVolatile : 1;
+ unsigned Flavor : 2;
public:
- bool isScalar() const { return V1.getInt() == Scalar; }
- bool isComplex() const { return V1.getInt() == Complex; }
- bool isAggregate() const { return V1.getInt() == Aggregate; }
+ RValue() : Vals{nullptr, nullptr}, Flavor(Scalar) {}
+
+ bool isScalar() const { return Flavor == Scalar; }
+ bool isComplex() const { return Flavor == Complex; }
+ bool isAggregate() const { return Flavor == Aggregate; }
- bool isVolatileQualified() const { return V2.getInt(); }
+ bool isVolatileQualified() const { return IsVolatile; }
/// getScalarVal() - Return the Value* of this scalar value.
llvm::Value *getScalarVal() const {
assert(isScalar() && "Not a scalar!");
- return V1.getPointer();
+ return Vals.first;
}
/// getComplexVal - Return the real/imag components of this complex value.
///
std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
- return std::make_pair(V1.getPointer(), V2.getPointer());
+ return std::make_pair(Vals.first, Vals.second);
}
/// getAggregateAddr() - Return the Value* of the address of the aggregate.
Address getAggregateAddress() const {
assert(isAggregate() && "Not an aggregate!");
- auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
- return Address(
- V1.getPointer(), ElementType, CharUnits::fromQuantity(align));
+ return AggregateAddr;
}
- llvm::Value *getAggregatePointer() const {
- assert(isAggregate() && "Not an aggregate!");
- return V1.getPointer();
+
+ llvm::Value *getAggregatePointer(QualType PointeeType,
+ CodeGenFunction &CGF) const {
+ return getAggregateAddress().getBasePointer();
}
static RValue getIgnored() {
@@ -88,17 +96,19 @@ public:
static RValue get(llvm::Value *V) {
RValue ER;
- ER.V1.setPointer(V);
- ER.V1.setInt(Scalar);
- ER.V2.setInt(false);
+ ER.Vals.first = V;
+ ER.Flavor = Scalar;
+ ER.IsVolatile = false;
return ER;
}
+ static RValue get(Address Addr, CodeGenFunction &CGF) {
+ return RValue::get(Addr.emitRawPointer(CGF));
+ }
static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
RValue ER;
- ER.V1.setPointer(V1);
- ER.V2.setPointer(V2);
- ER.V1.setInt(Complex);
- ER.V2.setInt(false);
+ ER.Vals = {V1, V2};
+ ER.Flavor = Complex;
+ ER.IsVolatile = false;
return ER;
}
static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
@@ -107,15 +117,15 @@ public:
// FIXME: Aggregate rvalues need to retain information about whether they are
// volatile or not. Remove default to find all places that probably get this
// wrong.
+
+ /// Convert an Address to an RValue. If the Address is not
+ /// signed, create an RValue using the unsigned address. Otherwise, resign the
+ /// address using the provided type.
static RValue getAggregate(Address addr, bool isVolatile = false) {
RValue ER;
- ER.V1.setPointer(addr.getPointer());
- ER.V1.setInt(Aggregate);
- ER.ElementType = addr.getElementType();
-
- auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
- ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
- ER.V2.setInt(isVolatile);
+ ER.AggregateAddr = addr;
+ ER.Flavor = Aggregate;
+ ER.IsVolatile = isVolatile;
return ER;
}
};
@@ -178,8 +188,10 @@ class LValue {
MatrixElt // This is a matrix element, use getVector*
} LVType;
- llvm::Value *V;
- llvm::Type *ElementType;
+ union {
+ Address Addr = Address::invalid();
+ llvm::Value *V;
+ };
union {
// Index into a vector subscript: V[i]
@@ -197,10 +209,6 @@ class LValue {
// 'const' is unused here
Qualifiers Quals;
- // The alignment to use when accessing this lvalue. (For vector elements,
- // this is the alignment of the whole vector.)
- unsigned Alignment;
-
// objective-c's ivar
bool Ivar:1;
@@ -234,23 +242,19 @@ class LValue {
Expr *BaseIvarExp;
private:
- void Initialize(QualType Type, Qualifiers Quals, CharUnits Alignment,
+ void Initialize(QualType Type, Qualifiers Quals, Address Addr,
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- assert((!Alignment.isZero() || Type->isIncompleteType()) &&
- "initializing l-value with zero alignment!");
- if (isGlobalReg())
- assert(ElementType == nullptr && "Global reg does not store elem type");
- else
- assert(ElementType != nullptr && "Must have elem type");
-
this->Type = Type;
this->Quals = Quals;
const unsigned MaxAlign = 1U << 31;
- this->Alignment = Alignment.getQuantity() <= MaxAlign
- ? Alignment.getQuantity()
- : MaxAlign;
- assert(this->Alignment == Alignment.getQuantity() &&
- "Alignment exceeds allowed max!");
+ CharUnits Alignment = Addr.getAlignment();
+ assert((isGlobalReg() || !Alignment.isZero() || Type->isIncompleteType()) &&
+ "initializing l-value with zero alignment!");
+ if (Alignment.getQuantity() > MaxAlign) {
+ assert(false && "Alignment exceeds allowed max!");
+ Alignment = CharUnits::fromQuantity(MaxAlign);
+ }
+ this->Addr = Addr;
this->BaseInfo = BaseInfo;
this->TBAAInfo = TBAAInfo;
@@ -259,9 +263,20 @@ private:
this->ImpreciseLifetime = false;
this->Nontemporal = false;
this->ThreadLocalRef = false;
+ this->IsKnownNonNull = false;
this->BaseIvarExp = nullptr;
}
+ void initializeSimpleLValue(Address Addr, QualType Type,
+ LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
+ ASTContext &Context) {
+ Qualifiers QS = Type.getQualifiers();
+ QS.setObjCGCAttr(Context.getObjCGCAttrKind(Type));
+ LVType = Simple;
+ Initialize(Type, QS, Addr, BaseInfo, TBAAInfo);
+ assert(Addr.getBasePointer()->getType()->isPointerTy());
+ }
+
public:
bool isSimple() const { return LVType == Simple; }
bool isVectorElt() const { return LVType == VectorElt; }
@@ -328,8 +343,8 @@ public:
LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
- CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
- void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+ CharUnits getAlignment() const { return Addr.getAlignment(); }
+ void setAlignment(CharUnits A) { Addr.setAlignment(A); }
LValueBaseInfo getBaseInfo() const { return BaseInfo; }
void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; }
@@ -345,28 +360,32 @@ public:
// simple lvalue
llvm::Value *getPointer(CodeGenFunction &CGF) const {
assert(isSimple());
- return V;
+ return Addr.getBasePointer();
}
- Address getAddress(CodeGenFunction &CGF) const {
- return Address(getPointer(CGF), ElementType, getAlignment(),
- isKnownNonNull());
- }
- void setAddress(Address address) {
+ llvm::Value *emitRawPointer(CodeGenFunction &CGF) const {
assert(isSimple());
- V = address.getPointer();
- ElementType = address.getElementType();
- Alignment = address.getAlignment().getQuantity();
- IsKnownNonNull = address.isKnownNonNull();
+ return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr;
}
+ Address getAddress(CodeGenFunction &CGF) const {
+ // FIXME: remove parameter.
+ return Addr;
+ }
+
+ void setAddress(Address address) { Addr = address; }
+
// vector elt lvalue
Address getVectorAddress() const {
- return Address(getVectorPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isVectorElt());
+ return Addr;
+ }
+ llvm::Value *getRawVectorPointer(CodeGenFunction &CGF) const {
+ assert(isVectorElt());
+ return Addr.emitRawPointer(CGF);
}
llvm::Value *getVectorPointer() const {
assert(isVectorElt());
- return V;
+ return Addr.getBasePointer();
}
llvm::Value *getVectorIdx() const {
assert(isVectorElt());
@@ -374,12 +393,12 @@ public:
}
Address getMatrixAddress() const {
- return Address(getMatrixPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isMatrixElt());
+ return Addr;
}
llvm::Value *getMatrixPointer() const {
assert(isMatrixElt());
- return V;
+ return Addr.getBasePointer();
}
llvm::Value *getMatrixIdx() const {
assert(isMatrixElt());
@@ -388,12 +407,12 @@ public:
// extended vector elements.
Address getExtVectorAddress() const {
- return Address(getExtVectorPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isExtVectorElt());
+ return Addr;
}
- llvm::Value *getExtVectorPointer() const {
+ llvm::Value *getRawExtVectorPointer(CodeGenFunction &CGF) const {
assert(isExtVectorElt());
- return V;
+ return Addr.emitRawPointer(CGF);
}
llvm::Constant *getExtVectorElts() const {
assert(isExtVectorElt());
@@ -402,10 +421,14 @@ public:
// bitfield lvalue
Address getBitFieldAddress() const {
- return Address(getBitFieldPointer(), ElementType, getAlignment(),
- (KnownNonNull_t)isKnownNonNull());
+ assert(isBitField());
+ return Addr;
+ }
+ llvm::Value *getRawBitFieldPointer(CodeGenFunction &CGF) const {
+ assert(isBitField());
+ return Addr.emitRawPointer(CGF);
}
- llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
+
const CGBitFieldInfo &getBitFieldInfo() const {
assert(isBitField());
return *BitFieldInfo;
@@ -414,18 +437,13 @@ public:
// global register lvalue
llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
- static LValue MakeAddr(Address address, QualType type, ASTContext &Context,
+ static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context,
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- Qualifiers qs = type.getQualifiers();
- qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
-
LValue R;
R.LVType = Simple;
- assert(address.getPointer()->getType()->isPointerTy());
- R.V = address.getPointer();
- R.ElementType = address.getElementType();
- R.IsKnownNonNull = address.isKnownNonNull();
- R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo);
+ R.initializeSimpleLValue(Addr, type, BaseInfo, TBAAInfo, Context);
+ R.Addr = Addr;
+ assert(Addr.getType()->isPointerTy());
return R;
}
@@ -434,26 +452,18 @@ public:
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = VectorElt;
- R.V = vecAddress.getPointer();
- R.ElementType = vecAddress.getElementType();
R.VectorIdx = Idx;
- R.IsKnownNonNull = vecAddress.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
- BaseInfo, TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), vecAddress, BaseInfo, TBAAInfo);
return R;
}
- static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts,
+ static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts,
QualType type, LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = ExtVectorElt;
- R.V = vecAddress.getPointer();
- R.ElementType = vecAddress.getElementType();
R.VectorElts = Elts;
- R.IsKnownNonNull = vecAddress.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
- BaseInfo, TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), Addr, BaseInfo, TBAAInfo);
return R;
}
@@ -468,12 +478,8 @@ public:
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = BitField;
- R.V = Addr.getPointer();
- R.ElementType = Addr.getElementType();
R.BitFieldInfo = &Info;
- R.IsKnownNonNull = Addr.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo,
- TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), Addr, BaseInfo, TBAAInfo);
return R;
}
@@ -481,11 +487,9 @@ public:
QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = V;
- R.ElementType = nullptr;
- R.IsKnownNonNull = true;
- R.Initialize(type, type.getQualifiers(), alignment,
+ R.Initialize(type, type.getQualifiers(), Address::invalid(),
LValueBaseInfo(AlignmentSource::Decl), TBAAAccessInfo());
+ R.V = V;
return R;
}
@@ -494,12 +498,8 @@ public:
TBAAAccessInfo TBAAInfo) {
LValue R;
R.LVType = MatrixElt;
- R.V = matAddress.getPointer();
- R.ElementType = matAddress.getElementType();
R.VectorIdx = Idx;
- R.IsKnownNonNull = matAddress.isKnownNonNull();
- R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
- BaseInfo, TBAAInfo);
+ R.Initialize(type, type.getQualifiers(), matAddress, BaseInfo, TBAAInfo);
return R;
}
@@ -643,17 +643,17 @@ public:
return NeedsGCBarriers_t(ObjCGCFlag);
}
- llvm::Value *getPointer() const {
- return Addr.getPointer();
+ llvm::Value *getPointer(QualType PointeeTy, CodeGenFunction &CGF) const;
+
+ llvm::Value *emitRawPointer(CodeGenFunction &CGF) const {
+ return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr;
}
Address getAddress() const {
return Addr;
}
- bool isIgnored() const {
- return !Addr.isValid();
- }
+ bool isIgnored() const { return !Addr.isValid(); }
CharUnits getAlignment() const {
return Addr.getAlignment();
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index fad26c43da3d..44103884940f 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -193,26 +193,35 @@ CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
-LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+static LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
+ bool ForPointeeType,
+ CodeGenFunction &CGF) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
- CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
- Address Addr(V, ConvertTypeForMem(T), Alignment);
- return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
+ CharUnits Alignment =
+ CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
+ Address Addr = Address(V, CGF.ConvertTypeForMem(T), Alignment);
+ return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
+}
+
+LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this);
}
-/// Given a value of type T* that may not be to a complete object,
-/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
- LValueBaseInfo BaseInfo;
- TBAAAccessInfo TBAAInfo;
- CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
- /* forPointeeType= */ true);
- Address Addr(V, ConvertTypeForMem(T), Align);
- return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
+ return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this);
+}
+
+LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
+ QualType T) {
+ return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false, *this);
}
+LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
+ QualType T) {
+ return ::MakeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true, *this);
+}
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
@@ -525,7 +534,8 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
ReturnBlock.getBlock()->eraseFromParent();
}
if (ReturnValue.isValid()) {
- auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
+ auto *RetAlloca =
+ dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
if (RetAlloca && RetAlloca->use_empty()) {
RetAlloca->eraseFromParent();
ReturnValue = Address::invalid();
@@ -1122,13 +1132,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue =
- Address(&*AI, ConvertType(RetTy),
- CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
+ ReturnValue = makeNaturalAddressForPointer(
+ &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
+ nullptr, nullptr, KnownNonNull);
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
- ReturnValuePointer = CreateDefaultAlignTempAlloca(
- ReturnValue.getPointer()->getType(), "result.ptr");
- Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer);
+ ReturnValuePointer =
+ CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
+ Builder.CreateStore(ReturnValue.emitRawPointer(*this),
+ ReturnValuePointer);
}
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
@@ -1189,8 +1200,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// or contains the address of the enclosing object).
LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
if (!LambdaThisCaptureField->getType()->isPointerType()) {
- // If the enclosing object was captured by value, just use its address.
- CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
+ // If the enclosing object was captured by value, just use its
+ // address. Sign this pointer.
+ CXXThisValue = ThisFieldLValue.getPointer(*this);
} else {
// Load the lvalue pointed to by the field, since '*this' was captured
// by reference.
@@ -2012,8 +2024,9 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
Address begin = dest.withElementType(CGF.Int8Ty);
- llvm::Value *end = Builder.CreateInBoundsGEP(
- begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
+ begin.emitRawPointer(CGF),
+ sizeInChars, "vla.end");
llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
@@ -2024,7 +2037,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
CGF.EmitBlock(loopBB);
llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
- cur->addIncoming(begin.getPointer(), originBB);
+ cur->addIncoming(begin.emitRawPointer(CGF), originBB);
CharUnits curAlign =
dest.getAlignment().alignmentOfArrayElement(baseSize);
@@ -2189,8 +2202,8 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
dyn_cast<llvm::ArrayType>(addr.getElementType());
while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
- assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
- == llvmArrayType->getNumElements());
+ assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
+ llvmArrayType->getNumElements());
gepIndices.push_back(zero);
countFromCLAs *= llvmArrayType->getNumElements();
@@ -2208,8 +2221,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// as some other type (probably a packed struct). Compute the array
// size, and just emit the 'begin' expression as a bitcast.
while (arrayType) {
- countFromCLAs *=
- cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
+ countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
eltType = arrayType->getElementType();
arrayType = getContext().getAsArrayType(eltType);
}
@@ -2218,10 +2230,10 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
addr = addr.withElementType(baseType);
} else {
// Create the actual GEP.
- addr = Address(Builder.CreateInBoundsGEP(
- addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
- ConvertTypeForMem(eltType),
- addr.getAlignment());
+ addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
+ addr.emitRawPointer(*this),
+ gepIndices, "array.begin"),
+ ConvertTypeForMem(eltType), addr.getAlignment());
}
baseType = eltType;
@@ -2562,7 +2574,7 @@ void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
Address Addr) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
- llvm::Value *V = Addr.getPointer();
+ llvm::Value *V = Addr.emitRawPointer(*this);
llvm::Type *VTy = V->getType();
auto *PTy = dyn_cast<llvm::PointerType>(VTy);
unsigned AS = PTy ? PTy->getAddressSpace() : 0;
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index e8f8aa601ed0..e2a7e28c8211 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -151,6 +151,9 @@ struct DominatingLLVMValue {
/// Answer whether the given value needs extra work to be saved.
static bool needsSaving(llvm::Value *value) {
+ if (!value)
+ return false;
+
// If it's not an instruction, we don't need to save.
if (!isa<llvm::Instruction>(value)) return false;
@@ -177,21 +180,28 @@ template <> struct DominatingValue<Address> {
typedef Address type;
struct saved_type {
- DominatingLLVMValue::saved_type SavedValue;
+ DominatingLLVMValue::saved_type BasePtr;
llvm::Type *ElementType;
CharUnits Alignment;
+ DominatingLLVMValue::saved_type Offset;
+ llvm::PointerType *EffectiveType;
};
static bool needsSaving(type value) {
- return DominatingLLVMValue::needsSaving(value.getPointer());
+ if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
+ DominatingLLVMValue::needsSaving(value.getOffset()))
+ return true;
+ return false;
}
static saved_type save(CodeGenFunction &CGF, type value) {
- return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getElementType(), value.getAlignment() };
+ return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
+ value.getElementType(), value.getAlignment(),
+ DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
}
static type restore(CodeGenFunction &CGF, saved_type value) {
- return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.ElementType, value.Alignment);
+ return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
+ value.ElementType, value.Alignment,
+ DominatingLLVMValue::restore(CGF, value.Offset));
}
};
@@ -201,14 +211,26 @@ template <> struct DominatingValue<RValue> {
class saved_type {
enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
AggregateAddress, ComplexAddress };
-
- llvm::Value *Value;
- llvm::Type *ElementType;
+ union {
+ struct {
+ DominatingLLVMValue::saved_type first, second;
+ } Vals;
+ DominatingValue<Address>::saved_type AggregateAddr;
+ };
LLVM_PREFERRED_TYPE(Kind)
unsigned K : 3;
- unsigned Align : 29;
- saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0)
- : Value(v), ElementType(e), K(k), Align(a) {}
+ unsigned IsVolatile : 1;
+
+ saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
+ : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
+
+ saved_type(DominatingLLVMValue::saved_type Val1,
+ DominatingLLVMValue::saved_type Val2)
+ : Vals{Val1, Val2}, K(ComplexAddress) {}
+
+ saved_type(DominatingValue<Address>::saved_type AggregateAddr,
+ bool IsVolatile, unsigned K)
+ : AggregateAddr(AggregateAddr), K(K) {}
public:
static bool needsSaving(RValue value);
@@ -659,7 +681,7 @@ public:
llvm::Value *Size;
public:
- CallLifetimeEnd(Address addr, llvm::Value *size)
+ CallLifetimeEnd(RawAddress addr, llvm::Value *size)
: Addr(addr.getPointer()), Size(size) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -684,7 +706,7 @@ public:
};
/// i32s containing the indexes of the cleanup destinations.
- Address NormalCleanupDest = Address::invalid();
+ RawAddress NormalCleanupDest = RawAddress::invalid();
unsigned NextCleanupDestIndex = 1;
@@ -819,10 +841,10 @@ public:
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
if (!isInConditionalBranch())
- return pushCleanupAfterFullExprWithActiveFlag<T>(Kind, Address::invalid(),
- A...);
+ return pushCleanupAfterFullExprWithActiveFlag<T>(
+ Kind, RawAddress::invalid(), A...);
- Address ActiveFlag = createCleanupActiveFlag();
+ RawAddress ActiveFlag = createCleanupActiveFlag();
assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
"cleanup active flag should never need saving");
@@ -835,7 +857,7 @@ public:
template <class T, class... As>
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
- Address ActiveFlag, As... A) {
+ RawAddress ActiveFlag, As... A) {
LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
ActiveFlag.isValid()};
@@ -850,7 +872,7 @@ public:
new (Buffer) LifetimeExtendedCleanupHeader(Header);
new (Buffer + sizeof(Header)) T(A...);
if (Header.IsConditional)
- new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
+ new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
}
/// Set up the last cleanup that was pushed as a conditional
@@ -859,8 +881,8 @@ public:
initFullExprCleanupWithFlag(createCleanupActiveFlag());
}
- void initFullExprCleanupWithFlag(Address ActiveFlag);
- Address createCleanupActiveFlag();
+ void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
+ RawAddress createCleanupActiveFlag();
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
@@ -1048,7 +1070,7 @@ public:
QualType VarTy = LocalVD->getType();
if (VarTy->isReferenceType()) {
Address Temp = CGF.CreateMemTemp(VarTy);
- CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
+ CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
TempAddr = Temp;
}
SavedTempAddresses.try_emplace(LocalVD, TempAddr);
@@ -1243,10 +1265,12 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
- void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
+ void setBeforeOutermostConditional(llvm::Value *value, Address addr,
+ CodeGenFunction &CGF) {
assert(isInConditionalBranch());
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
+ auto store =
+ new llvm::StoreInst(value, addr.emitRawPointer(CGF), &block->back());
store->setAlignment(addr.getAlignment().getAsAlign());
}
@@ -1601,7 +1625,7 @@ public:
/// If \p StepV is null, the default increment is 1.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E) {
if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) {
- PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr);
+ PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
PGO.setCurrentStmt(E);
}
}
@@ -1609,7 +1633,7 @@ public:
/// Update the MCDC temp value with the condition's evaluated result.
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
if (isMCDCCoverageEnabled()) {
- PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val);
+ PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
PGO.setCurrentStmt(E);
}
}
@@ -1704,7 +1728,7 @@ public:
: CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
OldCXXThisAlignment(CGF.CXXThisAlignment),
SourceLocScope(E, CGF.CurSourceLocExprScope) {
- CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
}
~CXXDefaultInitExprScope() {
@@ -2090,7 +2114,7 @@ public:
llvm::Value *getExceptionFromSlot();
llvm::Value *getSelectorFromSlot();
- Address getNormalCleanupDestSlot();
+ RawAddress getNormalCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
@@ -2579,10 +2603,40 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
+ Address mergeAddressesInConditionalExpr(Address LHS, Address RHS,
+ llvm::BasicBlock *LHSBlock,
+ llvm::BasicBlock *RHSBlock,
+ llvm::BasicBlock *MergeBlock,
+ QualType MergedType) {
+ Builder.SetInsertPoint(MergeBlock);
+ llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
+ PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
+ PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
+ LHS.replaceBasePointer(PtrPhi);
+ LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
+ return LHS;
+ }
+
+ /// Construct an address with the natural alignment of T. If a pointer to T
+ /// is expected to be signed, the pointer passed to this function must have
+ /// been signed, and the returned Address will have the pointer authentication
+ /// information needed to authenticate the signed pointer.
+ Address makeNaturalAddressForPointer(
+ llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
+ bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
+ if (Alignment.isZero())
+ Alignment =
+ CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
+ return Address(Ptr, ConvertTypeForMem(T), Alignment, nullptr,
+ IsKnownNonNull);
+ }
+
LValue MakeAddrLValue(Address Addr, QualType T,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
- CGM.getTBAAAccessInfo(T));
+ return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
}
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
@@ -2592,6 +2646,14 @@ public:
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
AlignmentSource Source = AlignmentSource::Type) {
+ return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
+ LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
+ }
+
+ /// Same as MakeAddrLValue above except that the pointer is known to be
+ /// unsigned.
+ LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
+ AlignmentSource Source = AlignmentSource::Type) {
Address Addr(V, ConvertTypeForMem(T), Alignment);
return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
CGM.getTBAAAccessInfo(T));
@@ -2604,9 +2666,18 @@ public:
TBAAAccessInfo());
}
+ /// Given a value of type T* that may not be to a complete object, construct
+ /// an l-value with the natural pointee alignment of T.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
+
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
+ /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
+ /// to be unsigned.
+ LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);
+
+ LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);
+
Address EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo = nullptr,
TBAAAccessInfo *PointeeTBAAInfo = nullptr);
@@ -2655,13 +2726,13 @@ public:
/// more efficient if the caller knows that the address will not be exposed.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr);
- Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp",
- llvm::Value *ArraySize = nullptr,
- Address *Alloca = nullptr);
- Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp",
- llvm::Value *ArraySize = nullptr);
+ RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr,
+ RawAddress *Alloca = nullptr);
+ RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr);
/// CreateDefaultAlignedTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
@@ -2673,8 +2744,8 @@ public:
/// not hand this address off to arbitrary IRGen routines, and especially
/// do not pass it as an argument to a function that might expect a
/// properly ABI-aligned value.
- Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
- const Twine &Name = "tmp");
+ RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when an temporary
@@ -2684,32 +2755,31 @@ public:
///
/// That is, this is exactly equivalent to CreateMemTemp, but calling
/// ConvertType instead of ConvertTypeForMem.
- Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
+ RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignmen and cast it to the default address space. Returns
/// the original alloca instruction by \p Alloca if it is not nullptr.
- Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
- Address *Alloca = nullptr);
- Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
- Address *Alloca = nullptr);
+ RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
+ RawAddress *Alloca = nullptr);
+ RawAddress CreateMemTemp(QualType T, CharUnits Align,
+ const Twine &Name = "tmp",
+ RawAddress *Alloca = nullptr);
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignmen without casting it to the default address space.
- Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
- Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
- const Twine &Name = "tmp");
+ RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
+ RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
+ const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
- Address *Alloca = nullptr) {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca),
- T.getQualifiers(),
- AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased,
- AggValueSlot::DoesNotOverlap);
+ RawAddress *Alloca = nullptr) {
+ return AggValueSlot::forAddr(
+ CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
+ AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
@@ -3083,6 +3153,25 @@ public:
/// calls to EmitTypeCheck can be skipped.
bool sanitizePerformTypeCheck() const;
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
+ QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr) {
+ if (!sanitizePerformTypeCheck())
+ return;
+ EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
+ SkippedChecks, ArraySize);
+ }
+
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
+ QualType Type, CharUnits Alignment = CharUnits::Zero(),
+ SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr) {
+ if (!sanitizePerformTypeCheck())
+ return;
+ EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
+ SkippedChecks, ArraySize);
+ }
+
/// Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type
/// (or if ArraySize is provided, for an array of that bound).
@@ -3183,17 +3272,17 @@ public:
/// Address with original alloca instruction. Invalid if the variable was
/// emitted as a global constant.
- Address AllocaAddr;
+ RawAddress AllocaAddr;
struct Invalid {};
AutoVarEmission(Invalid)
: Variable(nullptr), Addr(Address::invalid()),
- AllocaAddr(Address::invalid()) {}
+ AllocaAddr(RawAddress::invalid()) {}
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsEscapingByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
+ SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -3216,7 +3305,7 @@ public:
}
/// Returns the address for the original alloca instruction.
- Address getOriginalAllocatedAddress() const { return AllocaAddr; }
+ RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
@@ -3246,23 +3335,32 @@ public:
llvm::GlobalValue::LinkageTypes Linkage);
class ParamValue {
- llvm::Value *Value;
- llvm::Type *ElementType;
- unsigned Alignment;
- ParamValue(llvm::Value *V, llvm::Type *T, unsigned A)
- : Value(V), ElementType(T), Alignment(A) {}
+ union {
+ Address Addr;
+ llvm::Value *Value;
+ };
+
+ bool IsIndirect;
+
+ ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
+ ParamValue(Address A) : Addr(A), IsIndirect(true) {}
+
public:
static ParamValue forDirect(llvm::Value *value) {
- return ParamValue(value, nullptr, 0);
+ return ParamValue(value);
}
static ParamValue forIndirect(Address addr) {
assert(!addr.getAlignment().isZero());
- return ParamValue(addr.getPointer(), addr.getElementType(),
- addr.getAlignment().getQuantity());
+ return ParamValue(addr);
}
- bool isIndirect() const { return Alignment != 0; }
- llvm::Value *getAnyValue() const { return Value; }
+ bool isIndirect() const { return IsIndirect; }
+ llvm::Value *getAnyValue() const {
+ if (!isIndirect())
+ return Value;
+ assert(!Addr.hasOffset() && "unexpected offset");
+ return Addr.getBasePointer();
+ }
llvm::Value *getDirectValue() const {
assert(!isIndirect());
@@ -3271,8 +3369,7 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, ElementType, CharUnits::fromQuantity(Alignment),
- KnownNonNull);
+ return Addr;
}
};
@@ -4183,6 +4280,9 @@ public:
llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const Twine &name = "");
llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<Address> args,
+ const Twine &name = "");
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
ArrayRef<llvm::Value *> args,
const Twine &name = "");
@@ -4208,6 +4308,12 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
+ llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
+ return Addr.getBasePointer();
+ }
+
+ bool isPointerKnownNonNull(const Expr *E);
+
// Return the copy constructor name with the prefix "__copy_constructor_"
// removed.
static std::string getNonTrivialCopyConstructorStr(QualType QT,
@@ -4780,6 +4886,11 @@ public:
SourceLocation Loc,
const Twine &Name = "");
+ Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *elementType, bool SignedIndices,
+ bool IsSubtraction, SourceLocation Loc,
+ CharUnits Align, const Twine &Name = "");
+
/// Specifies which type of sanitizer check to apply when handling a
/// particular builtin.
enum BuiltinCheckKind {
@@ -4842,6 +4953,10 @@ public:
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
AbstractCallee AC, unsigned ParmNum);
+ void EmitNonNullArgCheck(Address Addr, QualType ArgType,
+ SourceLocation ArgLoc, AbstractCallee AC,
+ unsigned ParmNum);
+
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
@@ -4870,6 +4985,25 @@ public:
llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
unsigned NumElementsDst,
const llvm::Twine &Name = "");
+ // Adds a convergence_ctrl token to |Input| and emits the required parent
+ // convergence instructions.
+ llvm::CallBase *addControlledConvergenceToken(llvm::CallBase *Input);
+
+private:
+ // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
+ // as its parent convergence instr.
+ llvm::IntrinsicInst *emitConvergenceLoopToken(llvm::BasicBlock *BB,
+ llvm::Value *ParentToken);
+ // Adds a convergence_ctrl token with |ParentToken| as parent convergence
+ // instr to the call |Input|.
+ llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input,
+ llvm::Value *ParentToken);
+ // Find the convergence_entry instruction of |F|, or emit one if none exists.
+ // Returns the convergence instruction.
+ llvm::IntrinsicInst *getOrEmitConvergenceEntryToken(llvm::Function *F);
+ // Find the convergence_loop instruction for the loop defined by |LI|, or
+ // emits one if none exists. Returns the convergence instruction.
+ llvm::IntrinsicInst *getOrEmitConvergenceLoopToken(const LoopInfo *LI);
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
@@ -5050,7 +5184,7 @@ DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
- return saved_type(alloca.getPointer(), true);
+ return saved_type(alloca.emitRawPointer(CGF), true);
}
inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index cb153066b28d..00b3bfcaa0bc 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -3711,7 +3711,9 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Forward declarations are emitted lazily on first use.
if (!FD->doesThisDeclarationHaveABody()) {
- if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+ if (!FD->doesDeclarationForceExternallyVisibleDefinition() &&
+ (!FD->isMultiVersion() ||
+ !FD->getASTContext().getTargetInfo().getTriple().isAArch64()))
return;
StringRef MangledName = getMangledName(GD);
@@ -3993,10 +3995,11 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
auto *Spec = FD->getAttr<CPUSpecificAttr>();
for (unsigned I = 0; I < Spec->cpus_size(); ++I)
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
- } else if (FD->isTargetClonesMultiVersion()) {
- auto *Clone = FD->getAttr<TargetClonesAttr>();
- for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
- if (Clone->isFirstOfVersion(I))
+ } else if (auto *TC = FD->getAttr<TargetClonesAttr>()) {
+ for (unsigned I = 0; I < TC->featuresStrs_size(); ++I)
+ // AArch64 favors the default target version over the clone if any.
+ if ((!TC->isDefaultVersion(I) || !getTarget().getTriple().isAArch64()) &&
+ TC->isFirstOfVersion(I))
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
// Ensure that the resolver function is also emitted.
GetOrCreateMultiVersionResolver(GD);
@@ -4092,6 +4095,23 @@ llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::WeakODRLinkage;
}
+static FunctionDecl *createDefaultTargetVersionFrom(const FunctionDecl *FD) {
+ DeclContext *DeclCtx = FD->getASTContext().getTranslationUnitDecl();
+ TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
+ StorageClass SC = FD->getStorageClass();
+ DeclarationName Name = FD->getNameInfo().getName();
+
+ FunctionDecl *NewDecl =
+ FunctionDecl::Create(FD->getASTContext(), DeclCtx, FD->getBeginLoc(),
+ FD->getEndLoc(), Name, TInfo->getType(), TInfo, SC);
+
+ NewDecl->setIsMultiVersion();
+ NewDecl->addAttr(TargetVersionAttr::CreateImplicit(
+ NewDecl->getASTContext(), "default", NewDecl->getSourceRange()));
+
+ return NewDecl;
+}
+
void CodeGenModule::emitMultiVersionFunctions() {
std::vector<GlobalDecl> MVFuncsToEmit;
MultiVersionFuncs.swap(MVFuncsToEmit);
@@ -4099,96 +4119,79 @@ void CodeGenModule::emitMultiVersionFunctions() {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Expected a FunctionDecl");
- bool EmitResolver = !FD->isTargetVersionMultiVersion();
- SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- if (FD->isTargetMultiVersion()) {
- getContext().forEachMultiversionedFunctionVersion(
- FD, [this, &GD, &Options, &EmitResolver](const FunctionDecl *CurFD) {
- GlobalDecl CurGD{
- (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
- StringRef MangledName = getMangledName(CurGD);
- llvm::Constant *Func = GetGlobalValue(MangledName);
- if (!Func) {
- if (CurFD->isDefined()) {
- EmitGlobalFunctionDefinition(CurGD, nullptr);
- Func = GetGlobalValue(MangledName);
- } else {
- const CGFunctionInfo &FI =
- getTypes().arrangeGlobalDeclaration(GD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
- Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
- /*DontDefer=*/false, ForDefinition);
- }
- assert(Func && "This should have just been created");
- }
- if (CurFD->getMultiVersionKind() == MultiVersionKind::Target) {
- const auto *TA = CurFD->getAttr<TargetAttr>();
- llvm::SmallVector<StringRef, 8> Feats;
- TA->getAddedFeatures(Feats);
- Options.emplace_back(cast<llvm::Function>(Func),
- TA->getArchitecture(), Feats);
- } else {
- const auto *TVA = CurFD->getAttr<TargetVersionAttr>();
- if (CurFD->isUsed() || (TVA->isDefaultVersion() &&
- CurFD->doesThisDeclarationHaveABody()))
- EmitResolver = true;
- llvm::SmallVector<StringRef, 8> Feats;
- TVA->getFeatures(Feats);
- Options.emplace_back(cast<llvm::Function>(Func),
- /*Architecture*/ "", Feats);
- }
- });
- } else if (FD->isTargetClonesMultiVersion()) {
- const auto *TC = FD->getAttr<TargetClonesAttr>();
- for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
- ++VersionIndex) {
- if (!TC->isFirstOfVersion(VersionIndex))
- continue;
- GlobalDecl CurGD{(FD->isDefined() ? FD->getDefinition() : FD),
- VersionIndex};
- StringRef Version = TC->getFeatureStr(VersionIndex);
- StringRef MangledName = getMangledName(CurGD);
- llvm::Constant *Func = GetGlobalValue(MangledName);
- if (!Func) {
- if (FD->isDefined()) {
- EmitGlobalFunctionDefinition(CurGD, nullptr);
- Func = GetGlobalValue(MangledName);
- } else {
- const CGFunctionInfo &FI =
- getTypes().arrangeGlobalDeclaration(CurGD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
- Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
- /*DontDefer=*/false, ForDefinition);
- }
- assert(Func && "This should have just been created");
+ auto createFunction = [&](const FunctionDecl *Decl, unsigned MVIdx = 0) {
+ GlobalDecl CurGD{Decl->isDefined() ? Decl->getDefinition() : Decl, MVIdx};
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (Decl->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(CurGD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
}
+ assert(Func && "This should have just been created");
+ }
+ return cast<llvm::Function>(Func);
+ };
- StringRef Architecture;
- llvm::SmallVector<StringRef, 1> Feature;
+ bool HasDefaultDecl = !FD->isTargetVersionMultiVersion();
+ bool ShouldEmitResolver =
+ !getContext().getTargetInfo().getTriple().isAArch64();
+ SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- if (getTarget().getTriple().isAArch64()) {
- if (Version != "default") {
- llvm::SmallVector<StringRef, 8> VerFeats;
- Version.split(VerFeats, "+");
- for (auto &CurFeat : VerFeats)
- Feature.push_back(CurFeat.trim());
- }
- } else {
- if (Version.starts_with("arch="))
- Architecture = Version.drop_front(sizeof("arch=") - 1);
- else if (Version != "default")
- Feature.push_back(Version);
- }
+ getContext().forEachMultiversionedFunctionVersion(
+ FD, [&](const FunctionDecl *CurFD) {
+ llvm::SmallVector<StringRef, 8> Feats;
+
+ if (const auto *TA = CurFD->getAttr<TargetAttr>()) {
+ TA->getAddedFeatures(Feats);
+ llvm::Function *Func = createFunction(CurFD);
+ Options.emplace_back(Func, TA->getArchitecture(), Feats);
+ } else if (const auto *TVA = CurFD->getAttr<TargetVersionAttr>()) {
+ bool HasDefaultDef = TVA->isDefaultVersion() &&
+ CurFD->doesThisDeclarationHaveABody();
+ HasDefaultDecl |= TVA->isDefaultVersion();
+ ShouldEmitResolver |= (CurFD->isUsed() || HasDefaultDef);
+ TVA->getFeatures(Feats);
+ llvm::Function *Func = createFunction(CurFD);
+ Options.emplace_back(Func, /*Architecture*/ "", Feats);
+ } else if (const auto *TC = CurFD->getAttr<TargetClonesAttr>()) {
+ ShouldEmitResolver |= CurFD->doesThisDeclarationHaveABody();
+ for (unsigned I = 0; I < TC->featuresStrs_size(); ++I) {
+ if (!TC->isFirstOfVersion(I))
+ continue;
+
+ llvm::Function *Func = createFunction(CurFD, I);
+ StringRef Architecture;
+ Feats.clear();
+ if (getTarget().getTriple().isAArch64())
+ TC->getFeatures(Feats, I);
+ else {
+ StringRef Version = TC->getFeatureStr(I);
+ if (Version.starts_with("arch="))
+ Architecture = Version.drop_front(sizeof("arch=") - 1);
+ else if (Version != "default")
+ Feats.push_back(Version);
+ }
+ Options.emplace_back(Func, Architecture, Feats);
+ }
+ } else
+ llvm_unreachable("unexpected MultiVersionKind");
+ });
- Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
- }
- } else {
- assert(0 && "Expected a target or target_clones multiversion function");
+ if (!ShouldEmitResolver)
continue;
- }
- if (!EmitResolver)
- continue;
+ if (!HasDefaultDecl) {
+ FunctionDecl *NewFD = createDefaultTargetVersionFrom(FD);
+ llvm::Function *Func = createFunction(NewFD);
+ llvm::SmallVector<StringRef, 1> Feats;
+ Options.emplace_back(Func, /*Architecture*/ "", Feats);
+ }
llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant)) {
@@ -4369,7 +4372,7 @@ void CodeGenModule::AddDeferredMultiVersionResolverToEmit(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
- if (FD->isTargetVersionMultiVersion()) {
+ if (FD->isTargetVersionMultiVersion() || FD->isTargetClonesMultiVersion()) {
std::string MangledName =
getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
if (!DeferredResolversToEmit.insert(MangledName).second)
@@ -4480,7 +4483,10 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (FD->isMultiVersion()) {
UpdateMultiVersionNames(GD, FD, MangledName);
- if (!IsForDefinition)
+ if (FD->getASTContext().getTargetInfo().getTriple().isAArch64() &&
+ !FD->isUsed())
+ AddDeferredMultiVersionResolverToEmit(GD);
+ else if (!IsForDefinition)
return GetOrCreateMultiVersionResolver(GD);
}
}
@@ -6275,7 +6281,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
// Resize the string to the right size, which is indicated by its type.
const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
assert(CAT && "String literal not of constant array type!");
- Str.resize(CAT->getSize().getZExtValue());
+ Str.resize(CAT->getZExtSize());
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
@@ -7261,7 +7267,7 @@ void CodeGenFunction::EmitDeclMetadata() {
for (auto &I : LocalDeclMap) {
const Decl *D = I.first;
- llvm::Value *Addr = I.second.getPointer();
+ llvm::Value *Addr = I.second.emitRawPointer(*this);
if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
Alloca->setMetadata(
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index 2619edfeb7dc..76704c4d7be4 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -1239,7 +1239,8 @@ void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
const Expr *S,
- Address MCDCCondBitmapAddr) {
+ Address MCDCCondBitmapAddr,
+ CodeGenFunction &CGF) {
if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
@@ -1262,7 +1263,7 @@ void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
Builder.getInt64(FunctionHash),
Builder.getInt32(RegionMCDCState->BitmapBytes),
Builder.getInt32(MCDCTestVectorBitmapOffset),
- MCDCCondBitmapAddr.getPointer()};
+ MCDCCondBitmapAddr.emitRawPointer(CGF)};
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_tvbitmap_update), Args);
}
@@ -1283,7 +1284,8 @@ void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr,
- llvm::Value *Val) {
+ llvm::Value *Val,
+ CodeGenFunction &CGF) {
if (!canEmitMCDCCoverage(Builder) || !RegionMCDCState)
return;
@@ -1312,7 +1314,7 @@ void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
Builder.getInt32(Branch.ID),
- MCDCCondBitmapAddr.getPointer(), Val};
+ MCDCCondBitmapAddr.emitRawPointer(CGF), Val};
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_condbitmap_update),
Args);
diff --git a/clang/lib/CodeGen/CodeGenPGO.h b/clang/lib/CodeGen/CodeGenPGO.h
index 036fbf6815a4..9d66ffad6f43 100644
--- a/clang/lib/CodeGen/CodeGenPGO.h
+++ b/clang/lib/CodeGen/CodeGenPGO.h
@@ -113,12 +113,14 @@ public:
void emitCounterSetOrIncrement(CGBuilderTy &Builder, const Stmt *S,
llvm::Value *StepV);
void emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
- Address MCDCCondBitmapAddr);
+ Address MCDCCondBitmapAddr,
+ CodeGenFunction &CGF);
void emitMCDCParameters(CGBuilderTy &Builder);
void emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
Address MCDCCondBitmapAddr);
void emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
- Address MCDCCondBitmapAddr, llvm::Value *Val);
+ Address MCDCCondBitmapAddr, llvm::Value *Val,
+ CodeGenFunction &CGF);
/// Return the region count for the counter at the given index.
uint64_t getRegionCount(const Stmt *S) {
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index a6b51bfef876..afadc29ab1b0 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -601,7 +601,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
EltTy = llvm::Type::getInt8Ty(getLLVMContext());
}
- ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ ResultType = llvm::ArrayType::get(EltTy, A->getZExtSize());
break;
}
case Type::ExtVector:
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index bdd53a192f82..fd71317572f0 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -307,10 +307,6 @@ public:
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase);
- llvm::Constant *
- getVTableAddressPointForConstExpr(BaseSubobject Base,
- const CXXRecordDecl *VTableClass) override;
-
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
@@ -646,7 +642,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Apply the adjustment and cast back to the original struct type
// for consistency.
- llvm::Value *This = ThisAddr.getPointer();
+ llvm::Value *This = ThisAddr.emitRawPointer(CGF);
This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
ThisPtrForCall = This;
@@ -850,7 +846,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
CGBuilderTy &Builder = CGF.Builder;
// Apply the offset, which we assume is non-null.
- return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
+ return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr,
"memptr.offset");
}
@@ -1245,7 +1241,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
CGF.getPointerAlign());
// Apply the offset.
- llvm::Value *CompletePtr = Ptr.getPointer();
+ llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF);
CompletePtr =
CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
@@ -1482,7 +1478,8 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
- llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
+ llvm::Value *Args[] = {ThisAddr.emitRawPointer(CGF), SrcRTTI, DestRTTI,
+ OffsetHint};
llvm::Value *Value =
CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args);
@@ -1571,7 +1568,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
llvm::Value *Success = CGF.Builder.CreateICmpEQ(
VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
- llvm::Value *Result = ThisAddr.getPointer();
+ llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
if (!Offset->isZero())
Result = CGF.Builder.CreateInBoundsGEP(
CGF.CharTy, Result,
@@ -1611,7 +1608,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
}
// Finally, add the offset to the pointer.
- return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
+ return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF),
OffsetToTop);
}
@@ -1792,8 +1789,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
else
Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
- CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
- nullptr);
+ CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
+ ThisTy, VTT, VTTTy, nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1952,11 +1949,6 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
CGF.getPointerAlign());
}
-llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
- BaseSubobject Base, const CXXRecordDecl *VTableClass) {
- return getVTableAddressPoint(Base, VTableClass);
-}
-
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) {
assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
@@ -2088,8 +2080,8 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
ThisTy = D->getDestroyedType();
}
- CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
- QualType(), nullptr);
+ CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
+ nullptr, QualType(), nullptr);
return nullptr;
}
@@ -2162,7 +2154,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
int64_t VirtualAdjustment,
bool IsReturnAdjustment) {
if (!NonVirtualAdjustment && !VirtualAdjustment)
- return InitialPtr.getPointer();
+ return InitialPtr.emitRawPointer(CGF);
Address V = InitialPtr.withElementType(CGF.Int8Ty);
@@ -2195,10 +2187,10 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
CGF.getPointerAlign());
}
// Adjust our pointer.
- ResultPtr = CGF.Builder.CreateInBoundsGEP(
- V.getElementType(), V.getPointer(), Offset);
+ ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
+ V.emitRawPointer(CGF), Offset);
} else {
- ResultPtr = V.getPointer();
+ ResultPtr = V.emitRawPointer(CGF);
}
// In a derived-to-base conversion, the non-virtual adjustment is
@@ -2284,7 +2276,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
- CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
+ CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
}
// Finally, compute a pointer to the actual data buffer by skipping
@@ -2315,7 +2307,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
- return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
+ return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
}
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
@@ -2627,7 +2619,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Call __cxa_guard_release. This cannot throw.
CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
- guardAddr.getPointer());
+ guardAddr.emitRawPointer(CGF));
} else if (D.isLocalVarDecl()) {
// For local variables, store 1 into the first byte of the guard variable
// after the object initialization completes so that initialization is
@@ -3120,10 +3112,10 @@ LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
LValue LV;
if (VD->getType()->isReferenceType())
- LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
+ LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType);
else
- LV = CGF.MakeAddrLValue(CallVal, LValType,
- CGF.getContext().getDeclAlign(VD));
+ LV = CGF.MakeRawAddrLValue(CallVal, LValType,
+ CGF.getContext().getDeclAlign(VD));
// FIXME: need setObjCGCLValueClass?
return LV;
}
@@ -4604,7 +4596,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Casted, ExnPtrTmp);
// Bind the reference to the temporary.
- AdjustedExn = ExnPtrTmp.getPointer();
+ AdjustedExn = ExnPtrTmp.emitRawPointer(CGF);
}
}
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 172c4c937b97..d38a26940a3c 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -327,10 +327,6 @@ public:
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
- llvm::Constant *
- getVTableAddressPointForConstExpr(BaseSubobject Base,
- const CXXRecordDecl *VTableClass) override;
-
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
@@ -937,7 +933,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
}
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
- CPI->setArgOperand(2, var.getObjectAddress(CGF).getPointer());
+ CPI->setArgOperand(2, var.getObjectAddress(CGF).emitRawPointer(CGF));
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.EmitAutoVarCleanups(var);
}
@@ -974,7 +970,7 @@ MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
llvm::Value *Offset =
GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
- Value.getElementType(), Value.getPointer(), Offset);
+ Value.getElementType(), Value.emitRawPointer(CGF), Offset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
return std::make_tuple(Address(Ptr, CGF.Int8Ty, VBaseAlign), Offset,
@@ -1011,7 +1007,7 @@ llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Type *StdTypeInfoPtrTy) {
std::tie(ThisPtr, std::ignore, std::ignore) =
performBaseAdjustment(CGF, ThisPtr, SrcRecordTy);
- llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.getPointer());
+ llvm::CallBase *Typeid = emitRTtypeidCall(CGF, ThisPtr.emitRawPointer(CGF));
return CGF.Builder.CreateBitCast(Typeid, StdTypeInfoPtrTy);
}
@@ -1033,7 +1029,7 @@ llvm::Value *MicrosoftCXXABI::emitDynamicCastCall(
llvm::Value *Offset;
std::tie(This, Offset, std::ignore) =
performBaseAdjustment(CGF, This, SrcRecordTy);
- llvm::Value *ThisPtr = This.getPointer();
+ llvm::Value *ThisPtr = This.emitRawPointer(CGF);
Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty);
// PVOID __RTDynamicCast(
@@ -1065,7 +1061,7 @@ llvm::Value *MicrosoftCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
llvm::FunctionCallee Function = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false),
"__RTCastToVoid");
- llvm::Value *Args[] = {Value.getPointer()};
+ llvm::Value *Args[] = {Value.emitRawPointer(CGF)};
return CGF.EmitRuntimeCall(Function, Args);
}
@@ -1493,7 +1489,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
llvm::Value *VBaseOffset =
GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
llvm::Value *VBasePtr = CGF.Builder.CreateInBoundsGEP(
- Result.getElementType(), Result.getPointer(), VBaseOffset);
+ Result.getElementType(), Result.emitRawPointer(CGF), VBaseOffset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
Result = Address(VBasePtr, CGF.Int8Ty, VBaseAlign);
@@ -1660,7 +1656,8 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
llvm::Value *Implicit =
getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase,
Delegating); // = nullptr
- CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
+ CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
+ ThisTy,
/*ImplicitParam=*/Implicit,
/*ImplicitParamTy=*/QualType(), nullptr);
if (BaseDtorEndBB) {
@@ -1791,13 +1788,6 @@ MicrosoftCXXABI::getVTableAddressPoint(BaseSubobject Base,
return VFTablesMap[ID];
}
-llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr(
- BaseSubobject Base, const CXXRecordDecl *VTableClass) {
- llvm::Constant *VFTable = getVTableAddressPoint(Base, VTableClass);
- assert(VFTable && "Couldn't find a vftable for the given base?");
- return VFTable;
-}
-
llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) {
// getAddrOfVTable may return 0 if asked to get an address of a vtable which
@@ -2013,8 +2003,9 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
}
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
- RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
- ImplicitParam, Context.IntTy, CE);
+ RValue RV =
+ CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
+ ImplicitParam, Context.IntTy, CE);
return RV.getScalarVal();
}
@@ -2212,13 +2203,13 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
Address This,
const ThisAdjustment &TA) {
if (TA.isEmpty())
- return This.getPointer();
+ return This.emitRawPointer(CGF);
This = This.withElementType(CGF.Int8Ty);
llvm::Value *V;
if (TA.Virtual.isEmpty()) {
- V = This.getPointer();
+ V = This.emitRawPointer(CGF);
} else {
assert(TA.Virtual.Microsoft.VtordispOffset < 0);
// Adjust the this argument based on the vtordisp value.
@@ -2227,7 +2218,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
VtorDispPtr = VtorDispPtr.withElementType(CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
- V = CGF.Builder.CreateGEP(This.getElementType(), This.getPointer(),
+ V = CGF.Builder.CreateGEP(This.getElementType(), This.emitRawPointer(CGF),
CGF.Builder.CreateNeg(VtorDisp));
// Unfortunately, having applied the vtordisp means that we no
@@ -2264,11 +2255,11 @@ llvm::Value *
MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
const ReturnAdjustment &RA) {
if (RA.isEmpty())
- return Ret.getPointer();
+ return Ret.emitRawPointer(CGF);
Ret = Ret.withElementType(CGF.Int8Ty);
- llvm::Value *V = Ret.getPointer();
+ llvm::Value *V = Ret.emitRawPointer(CGF);
if (RA.Virtual.Microsoft.VBIndex) {
assert(RA.Virtual.Microsoft.VBIndex > 0);
int32_t IntSize = CGF.getIntSize().getQuantity();
@@ -2583,7 +2574,7 @@ struct ResetGuardBit final : EHScopeStack::Cleanup {
struct CallInitThreadAbort final : EHScopeStack::Cleanup {
llvm::Value *Guard;
- CallInitThreadAbort(Address Guard) : Guard(Guard.getPointer()) {}
+ CallInitThreadAbort(RawAddress Guard) : Guard(Guard.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Calling _Init_thread_abort will reset the guard's state.
@@ -3123,8 +3114,8 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
- llvm::Value *VBPtr = Builder.CreateInBoundsGEP(CGM.Int8Ty, This.getPointer(),
- VBPtrOffset, "vbptr");
+ llvm::Value *VBPtr = Builder.CreateInBoundsGEP(
+ CGM.Int8Ty, This.emitRawPointer(CGF), VBPtrOffset, "vbptr");
if (VBPtrOut)
*VBPtrOut = VBPtr;
@@ -3203,7 +3194,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
Builder.CreateBr(SkipAdjustBB);
CGF.EmitBlock(SkipAdjustBB);
llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base");
- Phi->addIncoming(Base.getPointer(), OriginalBB);
+ Phi->addIncoming(Base.emitRawPointer(CGF), OriginalBB);
Phi->addIncoming(AdjustedBase, VBaseAdjustBB);
return Phi;
}
@@ -3238,7 +3229,7 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
Addr = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset,
VBPtrOffset);
} else {
- Addr = Base.getPointer();
+ Addr = Base.emitRawPointer(CGF);
}
// Apply the offset, which we assume is non-null.
@@ -3526,7 +3517,7 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
ThisPtrForCall = AdjustVirtualBase(CGF, E, RD, This,
VirtualBaseAdjustmentOffset, VBPtrOffset);
} else {
- ThisPtrForCall = This.getPointer();
+ ThisPtrForCall = This.emitRawPointer(CGF);
}
if (NonVirtualBaseAdjustment)
@@ -4445,10 +4436,7 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
llvm::GlobalVariable *TI = getThrowInfo(ThrowType);
// Call into the runtime to throw the exception.
- llvm::Value *Args[] = {
- AI.getPointer(),
- TI
- };
+ llvm::Value *Args[] = {AI.emitRawPointer(CGF), TI};
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
}
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
index 16fbf52a517d..ab2e2bd0b306 100644
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -78,7 +78,7 @@ void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
QualType eltType = arrayType->getElementType();
auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
- for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
+ for (uint64_t i = 0, e = arrayType->getZExtSize(); i != e; ++i) {
addTypedData(eltType, begin + i * eltSize);
}
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 6893b50a3cfe..b1dfe5bf8f27 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -295,6 +295,11 @@ public:
/// Get the AST address space for alloca.
virtual LangAS getASTAllocaAddressSpace() const { return LangAS::Default; }
+ Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr,
+ LangAS SrcAddr, LangAS DestAddr,
+ llvm::Type *DestTy,
+ bool IsNonNull = false) const;
+
/// Perform address space cast of an expression of pointer type.
/// \param V is the LLVM value to be casted to another address space.
/// \param SrcAddr is the language address space of \p V.
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
index 5d42e6286e52..885d9c77d0e7 100644
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -671,7 +671,7 @@ bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
+ uint64_t NElements = AT->getZExtSize();
if (NElements == 0)
return false;
return containsAnyFP16Vectors(AT->getElementType());
diff --git a/clang/lib/CodeGen/Targets/LoongArch.cpp b/clang/lib/CodeGen/Targets/LoongArch.cpp
index 63b9a1fdb988..3f01d9ad90f1 100644
--- a/clang/lib/CodeGen/Targets/LoongArch.cpp
+++ b/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -146,7 +146,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
}
if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
+ uint64_t ArraySize = ATy->getZExtSize();
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible to be
// passed via FARs in C++.
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 8718f1ecf3a7..7dce5042c3dc 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -85,7 +85,7 @@ private:
LValue Src) {
llvm::Value *Handle = nullptr;
llvm::Constant *C =
- llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).emitRawPointer(CGF));
// Lookup `addrspacecast` through the constant pointer if any.
if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 00b04723f17d..174fddabbbdb 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -274,7 +274,7 @@ void AIXTargetCodeGenInfo::setTargetAttributes(
if (!isa<llvm::GlobalVariable>(GV))
return;
- auto *GVar = dyn_cast<llvm::GlobalVariable>(GV);
+ auto *GVar = cast<llvm::GlobalVariable>(GV);
auto GVId = GV->getName();
// Is this a global variable specified by the user as toc-data?
@@ -513,9 +513,10 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
llvm::Value *RegOffset =
Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
- RegAddr = Address(
- Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
- DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+ RegAddr = Address(Builder.CreateInBoundsGEP(
+ CGF.Int8Ty, RegAddr.emitRawPointer(CGF), RegOffset),
+ DirectTy,
+ RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
// Increase the used-register count.
NumRegs =
@@ -551,7 +552,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// Round up address of argument to alignment
CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
if (Align > OverflowAreaAlign) {
- llvm::Value *Ptr = OverflowArea.getPointer();
+ llvm::Value *Ptr = OverflowArea.emitRawPointer(CGF);
OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
OverflowArea.getElementType(), Align);
}
@@ -560,7 +561,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// Increase the overflow area.
OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
- Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ Builder.CreateStore(OverflowArea.emitRawPointer(CGF), OverflowAreaAddr);
CGF.EmitBranch(Cont);
}
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index 9a79424c4612..7b32c7972356 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -152,7 +152,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
}
if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
+ uint64_t ArraySize = ATy->getZExtSize();
QualType EltTy = ATy->getElementType();
// Non-zero-length arrays of empty records make the struct ineligible for
// the FP calling convention in C++.
diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp
index a337a52a94ec..9025a633f328 100644
--- a/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -326,7 +326,7 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Update VAList.
Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
- Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+ Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);
return ArgAddr.withElementType(ArgTy);
}
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
index 6eb0c6ef2f7d..deaafc85a315 100644
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -306,7 +306,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
- OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
+ OverflowArgArea.getElementType(), OverflowArgArea.emitRawPointer(CGF),
PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
@@ -382,10 +382,9 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address MemAddr = RawMemAddr.withElementType(DirectTy);
// Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
- OverflowArgArea.getPointer(), PaddedSizeV,
- "overflow_arg_area");
+ llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
+ OverflowArgArea.getElementType(), OverflowArgArea.emitRawPointer(CGF),
+ PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
CGF.EmitBranch(ContBlock);
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index 1ec0f159ebcb..1146a851a771 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -1993,7 +1993,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
// this, but it isn't worth it and would be harder to verify.
Current = NoClass;
uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
- uint64_t ArraySize = AT->getSize().getZExtValue();
+ uint64_t ArraySize = AT->getZExtSize();
// The only case a 256-bit wide vector could be used is when the array
// contains a single 256-bit element. Since Lo and Hi logic isn't extended
@@ -2295,7 +2295,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
- unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+ unsigned NumElts = (unsigned)AT->getZExtSize();
// Check each element to see if the element overlaps with the queried range.
for (unsigned i = 0; i != NumElts; ++i) {
@@ -2788,12 +2788,11 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
// memory), except in situations involving unions.
case X87Up:
case SSE:
+ ++neededSSE;
HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
-
- ++neededSSE;
break;
// AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp
index aeb48f851e16..88edb781a947 100644
--- a/clang/lib/CodeGen/Targets/XCore.cpp
+++ b/clang/lib/CodeGen/Targets/XCore.cpp
@@ -180,7 +180,7 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Increment the VAList.
if (!ArgSize.isZero()) {
Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
- Builder.CreateStore(APN.getPointer(), VAListAddr);
+ Builder.CreateStore(APN.emitRawPointer(CGF), VAListAddr);
}
return Val;
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 767c1cd47e8c..7a53764364ce 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -6203,28 +6203,35 @@ std::string Driver::GetStdModuleManifestPath(const Compilation &C,
switch (TC.GetCXXStdlibType(C.getArgs())) {
case ToolChain::CST_Libcxx: {
- std::string lib = GetFilePath("libc++.so", TC);
-
- // Note when there are multiple flavours of libc++ the module json needs to
- // look at the command-line arguments for the proper json.
- // These flavours do not exist at the moment, but there are plans to
- // provide a variant that is built with sanitizer instrumentation enabled.
-
- // For example
- // StringRef modules = [&] {
- // const SanitizerArgs &Sanitize = TC.getSanitizerArgs(C.getArgs());
- // if (Sanitize.needsAsanRt())
- // return "modules-asan.json";
- // return "modules.json";
- // }();
-
- SmallString<128> path(lib.begin(), lib.end());
- llvm::sys::path::remove_filename(path);
- llvm::sys::path::append(path, "modules.json");
- if (TC.getVFS().exists(path))
- return static_cast<std::string>(path);
+ auto evaluate = [&](const char *library) -> std::optional<std::string> {
+ std::string lib = GetFilePath(library, TC);
+
+ // Note when there are multiple flavours of libc++ the module json needs
+ // to look at the command-line arguments for the proper json. These
+ // flavours do not exist at the moment, but there are plans to provide a
+ // variant that is built with sanitizer instrumentation enabled.
+
+ // For example
+ // StringRef modules = [&] {
+ // const SanitizerArgs &Sanitize = TC.getSanitizerArgs(C.getArgs());
+ // if (Sanitize.needsAsanRt())
+ // return "libc++.modules-asan.json";
+ // return "libc++.modules.json";
+ // }();
+
+ SmallString<128> path(lib.begin(), lib.end());
+ llvm::sys::path::remove_filename(path);
+ llvm::sys::path::append(path, "libc++.modules.json");
+ if (TC.getVFS().exists(path))
+ return static_cast<std::string>(path);
+
+ return {};
+ };
- return error;
+ if (std::optional<std::string> result = evaluate("libc++.so"); result)
+ return *result;
+
+ return evaluate("libc++.a").value_or(error);
}
case ToolChain::CST_Libstdcxx:
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index 6e089903a315..7a62b0f9aec4 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -471,7 +471,7 @@ static void addTocDataOptions(const llvm::opt::ArgList &Args,
// the global setting of tocdata in TOCDataGloballyinEffect.
// Those that have the opposite setting to TOCDataGloballyinEffect, are added
// to ExplicitlySpecifiedGlobals.
- llvm::StringSet<> ExplicitlySpecifiedGlobals;
+ std::set<llvm::StringRef> ExplicitlySpecifiedGlobals;
for (const auto Arg :
Args.filtered(options::OPT_mtocdata_EQ, options::OPT_mno_tocdata_EQ)) {
TOCDataSetting ArgTocDataSetting =
@@ -486,7 +486,7 @@ static void addTocDataOptions(const llvm::opt::ArgList &Args,
ExplicitlySpecifiedGlobals.erase(Val);
}
- auto buildExceptionList = [](const llvm::StringSet<> &ExplicitValues,
+ auto buildExceptionList = [](const std::set<llvm::StringRef> &ExplicitValues,
const char *OptionSpelling) {
std::string Option(OptionSpelling);
bool IsFirst = true;
@@ -495,7 +495,7 @@ static void addTocDataOptions(const llvm::opt::ArgList &Args,
Option += ",";
IsFirst = false;
- Option += E.first();
+ Option += E.str();
}
return Option;
};
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index bc9cc8ce6cf5..3bcacff7724c 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1776,6 +1776,9 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
}
AddUnalignedAccessWarning(CmdArgs);
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fptrauth_intrinsics,
+ options::OPT_fno_ptrauth_intrinsics);
}
void Clang::AddLoongArchTargetArgs(const ArgList &Args,
@@ -5863,8 +5866,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else if (Triple.getArch() == llvm::Triple::x86_64) {
Ok = llvm::is_contained({"small", "kernel", "medium", "large", "tiny"},
CM);
- } else if (Triple.isNVPTX() || Triple.isAMDGPU()) {
- // NVPTX/AMDGPU does not care about the code model and will accept
+ } else if (Triple.isNVPTX() || Triple.isAMDGPU() || Triple.isSPIRV()) {
+ // NVPTX/AMDGPU/SPIRV does not care about the code model and will accept
// whatever works for the host.
Ok = true;
} else if (Triple.isSPARC64()) {
@@ -7258,10 +7261,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fno-common is the default, set -fcommon only when that flag is set.
Args.addOptInFlag(CmdArgs, options::OPT_fcommon, options::OPT_fno_common);
- if (Args.hasFlag(options::OPT_fptrauth_intrinsics,
- options::OPT_fno_ptrauth_intrinsics, false))
- CmdArgs.push_back("-fptrauth-intrinsics");
-
// -fsigned-bitfields is default, and clang doesn't yet support
// -funsigned-bitfields.
if (!Args.hasFlag(options::OPT_fsigned_bitfields,
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 447886531363..ace4fb99581e 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -758,15 +758,15 @@ bool tools::isTLSDESCEnabled(const ToolChain &TC,
void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO) {
- const bool IsOSAIX = ToolChain.getTriple().isOSAIX();
- const bool IsAMDGCN = ToolChain.getTriple().isAMDGCN();
+ const llvm::Triple &Triple = ToolChain.getTriple();
+ const bool IsOSAIX = Triple.isOSAIX();
+ const bool IsAMDGCN = Triple.isAMDGCN();
const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
const Driver &D = ToolChain.getDriver();
const bool IsFatLTO = Args.hasArg(options::OPT_ffat_lto_objects);
const bool IsUnifiedLTO = Args.hasArg(options::OPT_funified_lto);
if (llvm::sys::path::filename(Linker) != "ld.lld" &&
- llvm::sys::path::stem(Linker) != "ld.lld" &&
- !ToolChain.getTriple().isOSOpenBSD()) {
+ llvm::sys::path::stem(Linker) != "ld.lld" && !Triple.isOSOpenBSD()) {
// Tell the linker to load the plugin. This has to come before
// AddLinkerInputs as gold requires -plugin and AIX ld requires -bplugin to
// come before any -plugin-opt/-bplugin_opt that -Wl might forward.
@@ -835,7 +835,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
// the plugin.
// Handle flags for selecting CPU variants.
- std::string CPU = getCPUName(D, Args, ToolChain.getTriple());
+ std::string CPU = getCPUName(D, Args, Triple);
if (!CPU.empty())
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash + "mcpu=" + CPU));
@@ -966,10 +966,9 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
bool HasRoptr = Args.hasFlag(options::OPT_mxcoff_roptr,
options::OPT_mno_xcoff_roptr, false);
StringRef OptStr = HasRoptr ? "-mxcoff-roptr" : "-mno-xcoff-roptr";
-
if (!IsOSAIX)
D.Diag(diag::err_drv_unsupported_opt_for_target)
- << OptStr << ToolChain.getTriple().str();
+ << OptStr << Triple.str();
if (HasRoptr) {
// The data sections option is on by default on AIX. We only need to error
@@ -1032,7 +1031,7 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
}
if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
- ToolChain.getTriple().hasDefaultEmulatedTLS())) {
+ Triple.hasDefaultEmulatedTLS())) {
CmdArgs.push_back(
Args.MakeArgString(Twine(PluginOptPrefix) + "-emulated-tls"));
}
@@ -1142,7 +1141,11 @@ void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
options::OPT_fno_rtlib_add_rpath, false))
return;
- for (const auto &CandidateRPath : TC.getArchSpecificLibPaths()) {
+ SmallVector<std::string> CandidateRPaths(TC.getArchSpecificLibPaths());
+ if (const auto CandidateRPath = TC.getStdlibPath())
+ CandidateRPaths.emplace_back(*CandidateRPath);
+
+ for (const auto &CandidateRPath : CandidateRPaths) {
if (TC.getVFS().exists(CandidateRPath)) {
CmdArgs.push_back("-rpath");
CmdArgs.push_back(Args.MakeArgString(CandidateRPath));
diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp
index 05aac9caa7fb..1169b5d8c92d 100644
--- a/clang/lib/Driver/ToolChains/HLSL.cpp
+++ b/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -255,9 +255,7 @@ HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
if (!DAL->hasArg(options::OPT_O_Group)) {
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_O), "3");
}
- // FIXME: add validation for enable_16bit_types should be after HLSL 2018 and
- // shader model 6.2.
- // See: https://github.com/llvm/llvm-project/issues/57876
+
return DAL;
}
diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
index dc534a33e6d0..fbf2f45b5438 100644
--- a/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -79,6 +79,11 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(std::string("-out:") + Output.getFilename()));
+ if (Args.hasArg(options::OPT_marm64x))
+ CmdArgs.push_back("-machine:arm64x");
+ else if (TC.getTriple().isWindowsArm64EC())
+ CmdArgs.push_back("-machine:arm64ec");
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
!C.getDriver().IsCLMode() && !C.getDriver().IsFlangMode()) {
CmdArgs.push_back("-defaultlib:libcmt");
@@ -1017,4 +1022,7 @@ void MSVCToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fno_rtti, options::OPT_frtti,
/*Default=*/false))
CC1Args.push_back("-D_HAS_STATIC_RTTI=0");
+
+ if (Arg *A = DriverArgs.getLastArgNoClaim(options::OPT_marm64x))
+ A->ignoreTargetSpecific();
}
diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
index 349b93e2a232..545860acb7db 100644
--- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
+++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -208,6 +208,7 @@ StringRef getLanguageName(Language Lang) {
case Language::Unknown:
case Language::Asm:
case Language::LLVM_IR:
+ case Language::CIR:
llvm_unreachable("Unsupported language kind");
}
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 63ec3a88978d..46ed5baaeace 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -895,6 +895,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveShortCaseStatements",
Style.AlignConsecutiveShortCaseStatements);
+ IO.mapOptional("AlignConsecutiveTableGenBreakingDAGArgColons",
+ Style.AlignConsecutiveTableGenBreakingDAGArgColons);
IO.mapOptional("AlignConsecutiveTableGenCondOperatorColons",
Style.AlignConsecutiveTableGenCondOperatorColons);
IO.mapOptional("AlignConsecutiveTableGenDefinitionColons",
@@ -1408,6 +1410,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlignConsecutiveDeclarations = {};
LLVMStyle.AlignConsecutiveMacros = {};
LLVMStyle.AlignConsecutiveShortCaseStatements = {};
+ LLVMStyle.AlignConsecutiveTableGenBreakingDAGArgColons = {};
LLVMStyle.AlignConsecutiveTableGenCondOperatorColons = {};
LLVMStyle.AlignConsecutiveTableGenDefinitionColons = {};
LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index 06f567059c35..2ddcd5259446 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -152,6 +152,7 @@ namespace format {
TYPE(TableGenCondOperatorComma) \
TYPE(TableGenDAGArgCloser) \
TYPE(TableGenDAGArgListColon) \
+ TYPE(TableGenDAGArgListColonToAlign) \
TYPE(TableGenDAGArgListComma) \
TYPE(TableGenDAGArgListCommaToBreak) \
TYPE(TableGenDAGArgOpener) \
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 94d2266555f6..b9144cf55452 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -975,12 +975,15 @@ private:
// DagArg ::= Value [":" TokVarName] | TokVarName
// Appears as a part of SimpleValue6.
- bool parseTableGenDAGArg() {
+ bool parseTableGenDAGArg(bool AlignColon = false) {
if (tryToParseTableGenTokVar())
return true;
if (parseTableGenValue()) {
if (CurrentToken && CurrentToken->is(tok::colon)) {
- CurrentToken->setType(TT_TableGenDAGArgListColon);
+ if (AlignColon)
+ CurrentToken->setType(TT_TableGenDAGArgListColonToAlign);
+ else
+ CurrentToken->setType(TT_TableGenDAGArgListColon);
skipToNextNonComment();
return tryToParseTableGenTokVar();
}
@@ -1051,8 +1054,11 @@ private:
skipToNextNonComment();
return true;
}
- if (!parseTableGenDAGArg())
+ if (!parseTableGenDAGArg(
+ BreakInside &&
+ Style.AlignConsecutiveTableGenBreakingDAGArgColons.Enabled)) {
return false;
+ }
FirstDAGArgListElm = false;
}
return false;
@@ -2747,10 +2753,9 @@ private:
}
// Heuristically try to determine whether the parentheses contain a type.
- auto IsQualifiedPointerOrReference = [this](FormatToken *T) {
+ auto IsQualifiedPointerOrReference = [](FormatToken *T, bool IsCpp) {
// This is used to handle cases such as x = (foo *const)&y;
assert(!T->isTypeName(IsCpp) && "Should have already been checked");
- (void)IsCpp; // Avoid -Wunused-lambda-capture when assertion is disabled.
// Strip trailing qualifiers such as const or volatile when checking
// whether the parens could be a cast to a pointer/reference type.
while (T) {
@@ -2783,7 +2788,7 @@ private:
!Tok.Previous ||
Tok.Previous->isOneOf(TT_TemplateCloser, TT_TypeDeclarationParen) ||
Tok.Previous->isTypeName(IsCpp) ||
- IsQualifiedPointerOrReference(Tok.Previous);
+ IsQualifiedPointerOrReference(Tok.Previous, IsCpp);
bool ParensCouldEndDecl =
Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace, tok::greater);
if (ParensAreType && !ParensCouldEndDecl)
@@ -4351,9 +4356,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::kw_auto) && Right.isOneOf(tok::l_paren, tok::l_brace))
return false;
+ const auto *BeforeLeft = Left.Previous;
+
// operator co_await(x)
- if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && Left.Previous &&
- Left.Previous->is(tok::kw_operator)) {
+ if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && BeforeLeft &&
+ BeforeLeft->is(tok::kw_operator)) {
return false;
}
// co_await (x), co_yield (x), co_return (x)
@@ -4388,8 +4395,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
}
if (Left.is(tok::colon))
return Left.isNot(TT_ObjCMethodExpr);
- if (Left.is(tok::coloncolon))
- return false;
+ if (Left.is(tok::coloncolon)) {
+ return Right.is(tok::star) && Right.is(TT_PointerOrReference) &&
+ Style.PointerAlignment != FormatStyle::PAS_Left;
+ }
if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less)) {
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto &&
@@ -4404,8 +4413,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
}
if (Right.is(tok::ellipsis)) {
- return Left.Tok.isLiteral() || (Left.is(tok::identifier) && Left.Previous &&
- Left.Previous->is(tok::kw_case));
+ return Left.Tok.isLiteral() || (Left.is(tok::identifier) && BeforeLeft &&
+ BeforeLeft->is(tok::kw_case));
}
if (Left.is(tok::l_square) && Right.is(tok::amp))
return Style.SpacesInSquareBrackets;
@@ -4473,8 +4482,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(tok::l_brace) && Right.is(BK_Block))
return true;
// for (auto a = 0, b = 0; const auto& c : {1, 2, 3})
- if (Left.Previous && Left.Previous->isTypeOrIdentifier(IsCpp) &&
- Right.Next && Right.Next->is(TT_RangeBasedForLoopColon)) {
+ if (BeforeLeft && BeforeLeft->isTypeOrIdentifier(IsCpp) && Right.Next &&
+ Right.Next->is(TT_RangeBasedForLoopColon)) {
return getTokenPointerOrReferenceAlignment(Left) !=
FormatStyle::PAS_Right;
}
@@ -4496,12 +4505,17 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
startsWithInitStatement(Line)))) {
return false;
}
- return Left.Previous && !Left.Previous->isOneOf(
- tok::l_paren, tok::coloncolon, tok::l_square);
+ if (!BeforeLeft)
+ return false;
+ if (BeforeLeft->is(tok::coloncolon)) {
+ return Left.is(tok::star) &&
+ Style.PointerAlignment != FormatStyle::PAS_Right;
+ }
+ return !BeforeLeft->isOneOf(tok::l_paren, tok::l_square);
}
// Ensure right pointer alignment with ellipsis e.g. int *...P
- if (Left.is(tok::ellipsis) && Left.Previous &&
- Left.Previous->isPointerOrReference()) {
+ if (Left.is(tok::ellipsis) && BeforeLeft &&
+ BeforeLeft->isPointerOrReference()) {
return Style.PointerAlignment != FormatStyle::PAS_Right;
}
@@ -4663,13 +4677,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
spaceRequiredBeforeParens(Right);
}
- if (!Left.Previous || !Left.Previous->isOneOf(tok::period, tok::arrow)) {
+ if (!BeforeLeft || !BeforeLeft->isOneOf(tok::period, tok::arrow)) {
if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
}
if (Left.isOneOf(tok::kw_new, tok::kw_delete)) {
- return ((!Line.MightBeFunctionDecl || !Left.Previous) &&
+ return ((!Line.MightBeFunctionDecl || !BeforeLeft) &&
Style.SpaceBeforeParens != FormatStyle::SBPO_Never) ||
spaceRequiredBeforeParens(Right);
}
@@ -4813,6 +4827,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Right.is(TT_TemplateOpener)) {
return true;
}
+ if (Left.is(tok::identifier) && Right.is(tok::numeric_constant) &&
+ Right.TokenText[0] == '.') {
+ return false;
+ }
} else if (Style.isProto()) {
if (Right.is(tok::period) &&
Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
@@ -5130,8 +5148,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::r_brace) && Right.is(tok::r_square))
return true;
// Do not insert around colon in DAGArg and cond operator.
- if (Right.is(TT_TableGenDAGArgListColon) ||
- Left.is(TT_TableGenDAGArgListColon)) {
+ if (Right.isOneOf(TT_TableGenDAGArgListColon,
+ TT_TableGenDAGArgListColonToAlign) ||
+ Left.isOneOf(TT_TableGenDAGArgListColon,
+ TT_TableGenDAGArgListColonToAlign)) {
return false;
}
if (Right.is(TT_TableGenCondOperatorColon))
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 753be25bfd67..d06c42d5f4c5 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -112,6 +112,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
alignConsecutiveBitFields();
alignConsecutiveAssignments();
if (Style.isTableGen()) {
+ alignConsecutiveTableGenBreakingDAGArgColons();
alignConsecutiveTableGenCondOperatorColons();
alignConsecutiveTableGenDefinitions();
}
@@ -463,10 +464,11 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
- // If PointerAlignment is PAS_Right, keep *s or &s next to the token
+ // If PointerAlignment is PAS_Right, keep *s or &s next to the token,
+ // except if the token is equal, then a space is needed.
if ((Style.PointerAlignment == FormatStyle::PAS_Right ||
Style.ReferenceAlignment == FormatStyle::RAS_Right) &&
- CurrentChange.Spaces != 0) {
+ CurrentChange.Spaces != 0 && CurrentChange.Tok->isNot(tok::equal)) {
const bool ReferenceNotRightAligned =
Style.ReferenceAlignment != FormatStyle::RAS_Right &&
Style.ReferenceAlignment != FormatStyle::RAS_Pointer;
@@ -981,6 +983,11 @@ void WhitespaceManager::alignConsecutiveShortCaseStatements() {
Changes);
}
+void WhitespaceManager::alignConsecutiveTableGenBreakingDAGArgColons() {
+ alignConsecutiveColons(Style.AlignConsecutiveTableGenBreakingDAGArgColons,
+ TT_TableGenDAGArgListColonToAlign);
+}
+
void WhitespaceManager::alignConsecutiveTableGenCondOperatorColons() {
alignConsecutiveColons(Style.AlignConsecutiveTableGenCondOperatorColons,
TT_TableGenCondOperatorColon);
@@ -1485,7 +1492,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
: Cell);
// Go to the next non-comment and ensure there is a break in front
const auto *NextNonComment = C.Tok->getNextNonComment();
- while (NextNonComment->is(tok::comma))
+ while (NextNonComment && NextNonComment->is(tok::comma))
NextNonComment = NextNonComment->getNextNonComment();
auto j = i;
while (j < End && Changes[j].Tok != NextNonComment)
diff --git a/clang/lib/Format/WhitespaceManager.h b/clang/lib/Format/WhitespaceManager.h
index 0ebc6cf8377c..98cf4a260cc4 100644
--- a/clang/lib/Format/WhitespaceManager.h
+++ b/clang/lib/Format/WhitespaceManager.h
@@ -235,6 +235,9 @@ private:
/// Align consecutive short case statements over all \c Changes.
void alignConsecutiveShortCaseStatements();
+ /// Align consecutive TableGen DAGArg colon over all \c Changes.
+ void alignConsecutiveTableGenBreakingDAGArgColons();
+
/// Align consecutive TableGen cond operator colon over all \c Changes.
void alignConsecutiveTableGenCondOperatorColons();
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 019f847ccbaa..6e3baf838644 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -1206,16 +1206,6 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// Note the name of the module we're building.
Invocation->getLangOpts().CurrentModule = std::string(ModuleName);
- // Make sure that the failed-module structure has been allocated in
- // the importing instance, and propagate the pointer to the newly-created
- // instance.
- PreprocessorOptions &ImportingPPOpts
- = ImportingInstance.getInvocation().getPreprocessorOpts();
- if (!ImportingPPOpts.FailedModules)
- ImportingPPOpts.FailedModules =
- std::make_shared<PreprocessorOptions::FailedModulesSet>();
- PPOpts.FailedModules = ImportingPPOpts.FailedModules;
-
// If there is a module map file, build the module using the module map.
// Set up the inputs/outputs so that we build the module from its umbrella
// header.
@@ -1269,6 +1259,13 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
SourceMgr.pushModuleBuildStack(ModuleName,
FullSourceLoc(ImportLoc, ImportingInstance.getSourceManager()));
+ // Make sure that the failed-module structure has been allocated in
+ // the importing instance, and propagate the pointer to the newly-created
+ // instance.
+ if (!ImportingInstance.hasFailedModulesSet())
+ ImportingInstance.createFailedModulesSet();
+ Instance.setFailedModulesSet(ImportingInstance.getFailedModulesSetPtr());
+
// If we're collecting module dependencies, we need to share a collector
// between all of the module CompilerInstances. Other than that, we don't
// want to produce any dependency output from the module build.
@@ -1337,9 +1334,24 @@ static bool compileModule(CompilerInstance &ImportingInstance,
// Get or create the module map that we'll use to build this module.
ModuleMap &ModMap
= ImportingInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+ SourceManager &SourceMgr = ImportingInstance.getSourceManager();
bool Result;
- if (OptionalFileEntryRef ModuleMapFile =
- ModMap.getContainingModuleMapFile(Module)) {
+ if (FileID ModuleMapFID = ModMap.getContainingModuleMapFileID(Module);
+ ModuleMapFID.isValid()) {
+ // We want to use the top-level module map. If we don't, the compiling
+ // instance may think the containing module map is a top-level one, while
+ // the importing instance knows it's included from a parent module map via
+ // the extern directive. This mismatch could bite us later.
+ SourceLocation Loc = SourceMgr.getIncludeLoc(ModuleMapFID);
+ while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) {
+ ModuleMapFID = SourceMgr.getFileID(Loc);
+ Loc = SourceMgr.getIncludeLoc(ModuleMapFID);
+ }
+
+ OptionalFileEntryRef ModuleMapFile =
+ SourceMgr.getFileEntryRefForID(ModuleMapFID);
+ assert(ModuleMapFile && "Top-level module map with no FileID");
+
// Canonicalize compilation to start with the public module map. This is
// vital for submodules declarations in the private module maps to be
// correctly parsed when depending on a top level module in the public one.
@@ -1977,10 +1989,8 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
return nullptr;
}
- // Check whether we have already attempted to build this module (but
- // failed).
- if (getPreprocessorOpts().FailedModules &&
- getPreprocessorOpts().FailedModules->hasAlreadyFailed(ModuleName)) {
+ // Check whether we have already attempted to build this module (but failed).
+ if (FailedModules && FailedModules->hasAlreadyFailed(ModuleName)) {
getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_built)
<< ModuleName << SourceRange(ImportLoc, ModuleNameLoc);
return nullptr;
@@ -1991,8 +2001,8 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
ModuleFilename)) {
assert(getDiagnostics().hasErrorOccurred() &&
"undiagnosed error in compileModuleAndReadAST");
- if (getPreprocessorOpts().FailedModules)
- getPreprocessorOpts().FailedModules->addFailed(ModuleName);
+ if (FailedModules)
+ FailedModules->addFailed(ModuleName);
return nullptr;
}
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index 0df6a82ccd89..f1bd3cd66e97 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -2757,6 +2757,9 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
case Language::HLSL:
Lang = "hlsl";
break;
+ case Language::CIR:
+ Lang = "cir";
+ break;
}
GenerateArg(Consumer, OPT_x,
@@ -2958,6 +2961,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Cases("ast", "pcm", "precompiled-header",
InputKind(Language::Unknown, InputKind::Precompiled))
.Case("ir", Language::LLVM_IR)
+ .Case("cir", Language::CIR)
.Default(Language::Unknown);
if (DashX.isUnknown())
@@ -3323,6 +3327,7 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
switch (IK.getLanguage()) {
case Language::Unknown:
case Language::LLVM_IR:
+ case Language::CIR:
llvm_unreachable("should not parse language flags for this input");
case Language::C:
@@ -3388,6 +3393,8 @@ static StringRef GetInputKindName(InputKind IK) {
return "Asm";
case Language::LLVM_IR:
return "LLVM IR";
+ case Language::CIR:
+ return "Clang IR";
case Language::HLSL:
return "HLSL";
@@ -3403,7 +3410,8 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
const llvm::Triple &T,
InputKind IK) {
if (IK.getFormat() == InputKind::Precompiled ||
- IK.getLanguage() == Language::LLVM_IR) {
+ IK.getLanguage() == Language::LLVM_IR ||
+ IK.getLanguage() == Language::CIR) {
if (Opts.ObjCAutoRefCount)
GenerateArg(Consumer, OPT_fobjc_arc);
if (Opts.PICLevel != 0)
@@ -3689,7 +3697,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
unsigned NumErrorsBefore = Diags.getNumErrors();
if (IK.getFormat() == InputKind::Precompiled ||
- IK.getLanguage() == Language::LLVM_IR) {
+ IK.getLanguage() == Language::LLVM_IR ||
+ IK.getLanguage() == Language::CIR) {
// ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
// PassManager in BackendUtil.cpp. They need to be initialized no matter
// what the input type is.
@@ -4275,11 +4284,30 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
<< ShaderModel << T.getOSName() << T.str();
}
+ // Validate that if fnative-half-type is given, that
+ // the language standard is at least hlsl2018, and that
+ // the target shader model is at least 6.2.
+ if (Args.getLastArg(OPT_fnative_half_type)) {
+ const LangStandard &Std =
+ LangStandard::getLangStandardForKind(Opts.LangStd);
+ if (!(Opts.LangStd >= LangStandard::lang_hlsl2018 &&
+ T.getOSVersion() >= VersionTuple(6, 2)))
+ Diags.Report(diag::err_drv_hlsl_16bit_types_unsupported)
+ << "-enable-16bit-types" << true << Std.getName()
+ << T.getOSVersion().getAsString();
+ }
} else if (T.isSPIRVLogical()) {
if (!T.isVulkanOS() || T.getVulkanVersion() == VersionTuple(0)) {
Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
<< VulkanEnv << T.getOSName() << T.str();
}
+ if (Args.getLastArg(OPT_fnative_half_type)) {
+ const LangStandard &Std =
+ LangStandard::getLangStandardForKind(Opts.LangStd);
+ if (!(Opts.LangStd >= LangStandard::lang_hlsl2018))
+ Diags.Report(diag::err_drv_hlsl_16bit_types_unsupported)
+ << "-fnative-half-type" << false << Std.getName();
+ }
} else {
llvm_unreachable("expected DXIL or SPIR-V target");
}
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index b9fd9b8897b7..b7c9967316f0 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -535,8 +535,14 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
if (*OriginalModuleMap != CI.getSourceManager().getFileEntryRefForID(
CI.getSourceManager().getMainFileID())) {
M->IsInferred = true;
- CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()
- .setInferredModuleAllowedBy(M, *OriginalModuleMap);
+ auto FileCharacter =
+ M->IsSystem ? SrcMgr::C_System_ModuleMap : SrcMgr::C_User_ModuleMap;
+ FileID OriginalModuleMapFID = CI.getSourceManager().getOrCreateFileID(
+ *OriginalModuleMap, FileCharacter);
+ CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .setInferredModuleAllowedBy(M, OriginalModuleMapFID);
}
}
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 81fcd8d5ae9b..0bc26b694cfc 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -69,7 +69,10 @@ void InitOnlyAction::ExecuteAction() {
// Basically PreprocessOnlyAction::ExecuteAction.
void ReadPCHAndPreprocessAction::ExecuteAction() {
- Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ CompilerInstance &CI = getCompilerInstance();
+ AdjustCI(CI);
+
+ Preprocessor &PP = CI.getPreprocessor();
// Ignore unknown pragmas.
PP.IgnorePragmas();
@@ -1083,6 +1086,7 @@ void PrintPreambleAction::ExecuteAction() {
case Language::CUDA:
case Language::HIP:
case Language::HLSL:
+ case Language::CIR:
break;
case Language::Unknown:
@@ -1187,6 +1191,8 @@ void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
void GetDependenciesByModuleNameAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
+ AdjustCI(CI);
+
Preprocessor &PP = CI.getPreprocessor();
SourceManager &SM = PP.getSourceManager();
FileID MainFileID = SM.getMainFileID();
diff --git a/clang/lib/Frontend/FrontendOptions.cpp b/clang/lib/Frontend/FrontendOptions.cpp
index bf83b27c1367..32ed99571e85 100644
--- a/clang/lib/Frontend/FrontendOptions.cpp
+++ b/clang/lib/Frontend/FrontendOptions.cpp
@@ -34,5 +34,6 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
.Case("hlsl", Language::HLSL)
+ .Case("cir", Language::CIR)
.Default(Language::Unknown);
}
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index a8882e82e171..be7a0b247e03 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -207,6 +207,8 @@ _mm256_div_ps(__m256 __a, __m256 __b)
/// Compares two 256-bit vectors of [4 x double] and returns the greater
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPD </c> instruction.
@@ -226,6 +228,8 @@ _mm256_max_pd(__m256d __a, __m256d __b)
/// Compares two 256-bit vectors of [8 x float] and returns the greater
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPS </c> instruction.
@@ -245,6 +249,8 @@ _mm256_max_ps(__m256 __a, __m256 __b)
/// Compares two 256-bit vectors of [4 x double] and returns the lesser
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPD </c> instruction.
@@ -264,6 +270,8 @@ _mm256_min_pd(__m256d __a, __m256d __b)
/// Compares two 256-bit vectors of [8 x float] and returns the lesser
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPS </c> instruction.
@@ -1604,9 +1612,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
-/// Returns a [2 x double] vector consisting of two doubles corresponding to
-/// the two comparison results: zero if the comparison is false, and all 1's
-/// if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1663,9 +1671,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// [4 x float], using the operation specified by the immediate integer
/// operand.
///
-/// Returns a [4 x float] vector consisting of four floats corresponding to
-/// the four comparison results: zero if the comparison is false, and all 1's
-/// if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1721,9 +1729,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 256-bit vectors of [4 x double], using the operation specified by the
/// immediate integer operand.
///
-/// Returns a [4 x double] vector consisting of four doubles corresponding to
-/// the four comparison results: zero if the comparison is false, and all 1's
-/// if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1781,9 +1789,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// [8 x float], using the operation specified by the immediate integer
/// operand.
///
-/// Returns a [8 x float] vector consisting of eight floats corresponding to
-/// the eight comparison results: zero if the comparison is false, and all
-/// 1's if the comparison is true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1842,8 +1850,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// two 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
-/// If the result is true, all 64 bits of the destination vector are set;
-/// otherwise they are cleared.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -1900,8 +1909,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// vectors of [4 x float], using the operation specified by the immediate
/// integer operand.
///
-/// If the result is true, all 32 bits of the destination vector are set;
-/// otherwise they are cleared.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index f0c2db752195..e85bfc47aa5c 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -259,6 +259,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {
/// result. The upper 64 bits of the result are copied from the upper
/// double-precision value of the first operand.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
@@ -278,9 +280,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,
}
/// Performs element-by-element comparison of the two 128-bit vectors of
-/// [2 x double] and returns the vector containing the lesser of each pair of
+/// [2 x double] and returns a vector containing the lesser of each pair of
/// values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
@@ -301,6 +305,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,
/// result. The upper 64 bits of the result are copied from the upper
/// double-precision value of the first operand.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
@@ -320,9 +326,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,
}
/// Performs element-by-element comparison of the two 128-bit vectors of
-/// [2 x double] and returns the vector containing the greater of each pair
+/// [2 x double] and returns a vector containing the greater of each pair
/// of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
@@ -412,7 +420,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,
/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] for equality.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -432,7 +441,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -452,7 +462,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are less than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -472,7 +483,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -492,7 +504,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are greater than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -512,8 +525,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are ordered with respect to those in the second operand.
///
-/// A pair of double-precision values are "ordered" with respect to each
-/// other if neither value is a NaN. Each comparison yields 0x0 for false,
+/// A pair of double-precision values are ordered with respect to each
+/// other if neither value is a NaN. Each comparison returns 0x0 for false,
/// 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
@@ -534,8 +547,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are unordered with respect to those in the second operand.
///
-/// A pair of double-precision values are "unordered" with respect to each
-/// other if one or both values are NaN. Each comparison yields 0x0 for
+/// A pair of double-precision values are unordered with respect to each
+/// other if one or both values are NaN. Each comparison returns 0x0 for
/// false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
@@ -557,7 +570,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are unequal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -577,7 +591,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -597,7 +612,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not less than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -617,7 +633,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -637,7 +654,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not greater than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -656,7 +674,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -680,7 +699,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -704,7 +724,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -728,7 +749,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -753,7 +775,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -775,11 +798,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
-/// the value in the first parameter is "ordered" with respect to the
+/// the value in the first parameter is ordered with respect to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-/// of double-precision values are "ordered" with respect to each other if
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
+/// of double-precision values are ordered with respect to each other if
/// neither value is a NaN.
///
/// \headerfile <x86intrin.h>
@@ -801,11 +824,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
-/// the value in the first parameter is "unordered" with respect to the
+/// the value in the first parameter is unordered with respect to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-/// of double-precision values are "unordered" with respect to each other if
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
+/// of double-precision values are unordered with respect to each other if
/// one or both values are NaN.
///
/// \headerfile <x86intrin.h>
@@ -831,7 +854,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -855,7 +879,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,
/// the value in the first parameter is not less than the corresponding
/// value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -879,7 +904,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,
/// the value in the first parameter is not less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -903,7 +929,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,
/// the value in the first parameter is not greater than the corresponding
/// value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -928,7 +955,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,
/// the value in the first parameter is not greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -951,8 +979,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -975,8 +1003,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -999,8 +1027,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1023,8 +1051,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1047,8 +1075,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1071,8 +1099,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -1093,8 +1121,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1117,8 +1145,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1141,8 +1169,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1165,8 +1193,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1189,8 +1217,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1213,8 +1241,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -3033,7 +3061,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
/// Compares each of the corresponding 8-bit values of the 128-bit
/// integer vectors for equality.
///
-/// Each comparison yields 0x0 for false, 0xFF for true.
+/// Each comparison returns 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3052,7 +3080,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
/// Compares each of the corresponding 16-bit values of the 128-bit
/// integer vectors for equality.
///
-/// Each comparison yields 0x0 for false, 0xFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3071,7 +3099,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
/// Compares each of the corresponding 32-bit values of the 128-bit
/// integer vectors for equality.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3091,7 +3119,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
/// integer vectors to determine if the values in the first operand are
/// greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFF for true.
+/// Each comparison returns 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3113,7 +3141,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3133,7 +3161,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3153,7 +3181,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
/// integer vectors to determine if the values in the first operand are less
/// than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFF for true.
+/// Each comparison returns 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3173,7 +3201,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3193,7 +3221,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
/// 128-bit integer vectors to determine if the values in the first operand
/// are less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -4777,7 +4805,9 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
/// 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -4811,7 +4841,9 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
/// two 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 5e703772b7ee..9fb6204f90c9 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -243,15 +243,6 @@ float3 ceil(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
float4 ceil(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double ceil(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double2 ceil(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double3 ceil(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
-double4 ceil(double4);
-
//===----------------------------------------------------------------------===//
// clamp builtins
//===----------------------------------------------------------------------===//
@@ -392,15 +383,6 @@ float3 cos(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
float4 cos(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double cos(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double2 cos(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double3 cos(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
-double4 cos(double4);
-
//===----------------------------------------------------------------------===//
// dot product builtins
//===----------------------------------------------------------------------===//
@@ -594,15 +576,6 @@ float3 floor(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
float4 floor(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double floor(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double2 floor(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double3 floor(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
-double4 floor(double4);
-
//===----------------------------------------------------------------------===//
// frac builtins
//===----------------------------------------------------------------------===//
@@ -737,15 +710,6 @@ float3 log(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
float4 log(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double log(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double2 log(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double3 log(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
-double4 log(double4);
-
//===----------------------------------------------------------------------===//
// log10 builtins
//===----------------------------------------------------------------------===//
@@ -779,15 +743,6 @@ float3 log10(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
float4 log10(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double log10(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double2 log10(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double3 log10(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
-double4 log10(double4);
-
//===----------------------------------------------------------------------===//
// log2 builtins
//===----------------------------------------------------------------------===//
@@ -821,15 +776,6 @@ float3 log2(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
float4 log2(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double log2(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double2 log2(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double3 log2(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
-double4 log2(double4);
-
//===----------------------------------------------------------------------===//
// mad builtins
//===----------------------------------------------------------------------===//
@@ -1174,15 +1120,6 @@ float3 pow(float3, float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
float4 pow(float4, float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double pow(double, double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double2 pow(double2, double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double3 pow(double3, double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
-double4 pow(double4, double4);
-
//===----------------------------------------------------------------------===//
// reversebits builtins
//===----------------------------------------------------------------------===//
@@ -1194,19 +1131,6 @@ double4 pow(double4, double4);
#ifdef __HLSL_ENABLE_16_BIT
_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t reversebits(int16_t);
-_HLSL_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t2 reversebits(int16_t2);
-_HLSL_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t3 reversebits(int16_t3);
-_HLSL_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int16_t4 reversebits(int16_t4);
-
-_HLSL_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint16_t reversebits(uint16_t);
_HLSL_AVAILABILITY(shadermodel, 6.2)
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
@@ -1220,15 +1144,6 @@ uint16_t4 reversebits(uint16_t4);
#endif
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int reversebits(int);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int2 reversebits(int2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int3 reversebits(int3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int4 reversebits(int4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint reversebits(uint);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint2 reversebits(uint2);
@@ -1238,15 +1153,6 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint4 reversebits(uint4);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t reversebits(int64_t);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t2 reversebits(int64_t2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t3 reversebits(int64_t3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
-int64_t4 reversebits(int64_t4);
-
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint64_t reversebits(uint64_t);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
uint64_t2 reversebits(uint64_t2);
@@ -1342,25 +1248,25 @@ float4 rsqrt(float4);
/// rounded to the nearest even value.
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
half round(half);
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
half2 round(half2);
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
half3 round(half3);
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
half4 round(half4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
float round(float);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
float2 round(float2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
float3 round(float3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_round)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_roundeven)
float4 round(float4);
//===----------------------------------------------------------------------===//
@@ -1393,15 +1299,6 @@ float3 sin(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
float4 sin(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double sin(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double2 sin(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double3 sin(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
-double4 sin(double4);
-
//===----------------------------------------------------------------------===//
// sqrt builtins
//===----------------------------------------------------------------------===//
@@ -1411,14 +1308,26 @@ double4 sin(double4);
/// \param Val The input value.
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
-_HLSL_BUILTIN_ALIAS(__builtin_sqrtf16)
-half sqrt(half In);
-
-_HLSL_BUILTIN_ALIAS(__builtin_sqrtf)
-float sqrt(float In);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half sqrt(half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half2 sqrt(half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half3 sqrt(half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+half4 sqrt(half4);
-_HLSL_BUILTIN_ALIAS(__builtin_sqrt)
-double sqrt(double In);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float sqrt(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float2 sqrt(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float3 sqrt(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
+float4 sqrt(float4);
//===----------------------------------------------------------------------===//
// trunc builtins
@@ -1450,15 +1359,6 @@ float3 trunc(float3);
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
float4 trunc(float4);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double trunc(double);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double2 trunc(double2);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double3 trunc(double3);
-_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
-double4 trunc(double4);
-
//===----------------------------------------------------------------------===//
// Wave* builtins
//===----------------------------------------------------------------------===//
@@ -1471,7 +1371,12 @@ double4 trunc(double4);
/// true, across all active lanes in the current wave.
_HLSL_AVAILABILITY(shadermodel, 6.0)
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_count_bits)
-uint WaveActiveCountBits(bool Val);
+__attribute__((convergent)) uint WaveActiveCountBits(bool Val);
+
+/// \brief Returns the index of the current lane within the current wave.
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_get_lane_index)
+__attribute__((convergent)) uint WaveGetLaneIndex();
} // namespace hlsl
#endif //_HLSL_HLSL_INTRINSICS_H_
diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h
index 962d24738e7a..4e154e2d8593 100644
--- a/clang/lib/Headers/mmintrin.h
+++ b/clang/lib/Headers/mmintrin.h
@@ -1141,7 +1141,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
/// [8 x i8] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFF for true.
+/// Each comparison returns 0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1163,7 +1163,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
/// [4 x i16] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1185,7 +1185,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
/// [2 x i32] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1207,7 +1207,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
/// [8 x i8] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFF for true.
+/// Each comparison returns 0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1229,7 +1229,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
/// [4 x i16] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1251,7 +1251,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
/// [2 x i32] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
-/// The comparison yields 0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 9fb9cc9b0134..b3fec474e35a 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -1188,7 +1188,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
/// Compares each of the corresponding 64-bit values of the 128-bit
/// integer vectors for equality.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -2303,7 +2303,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
/// integer vectors to determine if the values in the first operand are
/// greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index b2c68c3b7be9..1ef89de9c9f5 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -316,6 +316,8 @@ _mm_rsqrt_ps(__m128 __a)
/// operands and returns the lesser value in the low-order bits of the
/// vector of [4 x float].
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINSS / MINSS </c> instructions.
@@ -338,6 +340,8 @@ _mm_min_ss(__m128 __a, __m128 __b)
/// Compares two 128-bit vectors of [4 x float] and returns the lesser
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMINPS / MINPS </c> instructions.
@@ -358,6 +362,8 @@ _mm_min_ps(__m128 __a, __m128 __b)
/// operands and returns the greater value in the low-order bits of a 128-bit
/// vector of [4 x float].
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXSS / MAXSS </c> instructions.
@@ -380,6 +386,8 @@ _mm_max_ss(__m128 __a, __m128 __b)
/// Compares two 128-bit vectors of [4 x float] and returns the greater
/// of each pair of values.
///
+/// If either value in a comparison is NaN, returns the value from \a __b.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VMAXPS / MAXPS </c> instructions.
@@ -476,8 +484,9 @@ _mm_xor_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands for equality.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -500,7 +509,8 @@ _mm_cmpeq_ss(__m128 __a, __m128 __b)
/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] for equality.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -521,8 +531,9 @@ _mm_cmpeq_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is less than the
/// corresponding value in the second operand.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -546,7 +557,8 @@ _mm_cmplt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -567,8 +579,9 @@ _mm_cmplt_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is less than or
/// equal to the corresponding value in the second operand.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true, in
+/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true, in
/// the low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -592,7 +605,8 @@ _mm_cmple_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are less than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -613,8 +627,9 @@ _mm_cmple_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is greater than
/// the corresponding value in the second operand.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -640,7 +655,8 @@ _mm_cmpgt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -661,8 +677,9 @@ _mm_cmpgt_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is greater than
/// or equal to the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -688,7 +705,8 @@ _mm_cmpge_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are greater than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns false.
///
/// \headerfile <x86intrin.h>
///
@@ -708,8 +726,9 @@ _mm_cmpge_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both operands
/// for inequality.
///
-/// The comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -733,7 +752,8 @@ _mm_cmpneq_ss(__m128 __a, __m128 __b)
/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] for inequality.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -755,8 +775,9 @@ _mm_cmpneq_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is not less than
/// the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -781,7 +802,8 @@ _mm_cmpnlt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not less than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -803,8 +825,9 @@ _mm_cmpnlt_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is not less than
/// or equal to the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -829,7 +852,8 @@ _mm_cmpnle_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not less than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -851,8 +875,9 @@ _mm_cmpnle_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is not greater
/// than the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -879,7 +904,8 @@ _mm_cmpngt_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not greater than those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -901,8 +927,9 @@ _mm_cmpngt_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is not greater
/// than or equal to the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the
/// low-order bits of a vector of [4 x float].
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -929,7 +956,8 @@ _mm_cmpnge_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not greater than or equal to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, returns true.
///
/// \headerfile <x86intrin.h>
///
@@ -951,8 +979,9 @@ _mm_cmpnge_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is ordered with
/// respect to the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
-/// low-order bits of a vector of [4 x float].
+/// A pair of floating-point values are ordered with respect to each
+/// other if neither value is a NaN. Each comparison returns 0x0 for false,
+/// 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -977,7 +1006,9 @@ _mm_cmpord_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are ordered with respect to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// A pair of floating-point values are ordered with respect to each
+/// other if neither value is a NaN. Each comparison returns 0x0 for false,
+/// 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -999,8 +1030,9 @@ _mm_cmpord_ps(__m128 __a, __m128 __b)
/// operands to determine if the value in the first operand is unordered
/// with respect to the corresponding value in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true, in the
-/// low-order bits of a vector of [4 x float].
+/// A pair of double-precision values are unordered with respect to each
+/// other if one or both values are NaN. Each comparison returns 0x0 for
+/// false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1025,7 +1057,9 @@ _mm_cmpunord_ss(__m128 __a, __m128 __b)
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are unordered with respect to those in the second operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// A pair of double-precision values are unordered with respect to each
+/// other if one or both values are NaN. Each comparison returns 0x0 for
+/// false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -1046,8 +1080,8 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b)
/// Compares two 32-bit float values in the low-order bits of both
/// operands for equality.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1071,8 +1105,8 @@ _mm_comieq_ss(__m128 __a, __m128 __b)
/// operands to determine if the first operand is less than the second
/// operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1096,8 +1130,8 @@ _mm_comilt_ss(__m128 __a, __m128 __b)
/// operands to determine if the first operand is less than or equal to the
/// second operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1120,8 +1154,8 @@ _mm_comile_ss(__m128 __a, __m128 __b)
/// operands to determine if the first operand is greater than the second
/// operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1144,8 +1178,8 @@ _mm_comigt_ss(__m128 __a, __m128 __b)
/// operands to determine if the first operand is greater than or equal to
/// the second operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1168,8 +1202,8 @@ _mm_comige_ss(__m128 __a, __m128 __b)
/// operands to determine if the first operand is not equal to the second
/// operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 1.
///
/// \headerfile <x86intrin.h>
///
@@ -1191,8 +1225,8 @@ _mm_comineq_ss(__m128 __a, __m128 __b)
/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine equality.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1215,8 +1249,8 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b)
/// the low-order bits of both operands to determine if the first operand is
/// less than the second operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1239,8 +1273,8 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b)
/// the low-order bits of both operands to determine if the first operand is
/// less than or equal to the second operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1263,8 +1297,8 @@ _mm_ucomile_ss(__m128 __a, __m128 __b)
/// the low-order bits of both operands to determine if the first operand is
/// greater than the second operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1287,8 +1321,8 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b)
/// the low-order bits of both operands to determine if the first operand is
/// greater than or equal to the second operand.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -1310,8 +1344,8 @@ _mm_ucomige_ss(__m128 __a, __m128 __b)
/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine inequality.
///
-/// The comparison returns 0 for false, 1 for true. If either of the two
-/// lower floating-point values is NaN, returns 0.
+/// The comparison returns 0 for false, 1 for true. If either value in a
+/// comparison is NaN, returns 0.
///
/// \headerfile <x86intrin.h>
///
@@ -3027,7 +3061,9 @@ _mm_movemask_ps(__m128 __a)
/// [4 x float], using the operation specified by the immediate integer
/// operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
@@ -3060,7 +3096,9 @@ _mm_movemask_ps(__m128 __a)
/// vectors of [4 x float], using the operation specified by the immediate
/// integer operand.
///
-/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
+/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true.
+/// If either value in a comparison is NaN, comparisons that are ordered
+/// return false, and comparisons that are unordered return true.
///
/// \headerfile <x86intrin.h>
///
diff --git a/clang/lib/InstallAPI/CMakeLists.txt b/clang/lib/InstallAPI/CMakeLists.txt
index 894db699578f..e0bc8d969ecb 100644
--- a/clang/lib/InstallAPI/CMakeLists.txt
+++ b/clang/lib/InstallAPI/CMakeLists.txt
@@ -1,6 +1,7 @@
set(LLVM_LINK_COMPONENTS
Support
TextAPI
+ TextAPIBinaryReader
Demangle
Core
)
diff --git a/clang/lib/InstallAPI/DylibVerifier.cpp b/clang/lib/InstallAPI/DylibVerifier.cpp
index 24e0d0addf2f..c0eda1d81b9b 100644
--- a/clang/lib/InstallAPI/DylibVerifier.cpp
+++ b/clang/lib/InstallAPI/DylibVerifier.cpp
@@ -1,7 +1,16 @@
+//===- DylibVerifier.cpp ----------------------------------------*- C++--*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#include "clang/InstallAPI/DylibVerifier.h"
#include "clang/InstallAPI/FrontendRecords.h"
#include "clang/InstallAPI/InstallAPIDiagnostic.h"
#include "llvm/Demangle/Demangle.h"
+#include "llvm/TextAPI/DylibReader.h"
using namespace llvm::MachO;
@@ -27,6 +36,14 @@ struct DylibVerifier::SymbolContext {
bool Inlined = false;
};
+struct DylibVerifier::DWARFContext {
+ // Track whether DSYM parsing has already been attempted to avoid re-parsing.
+ bool ParsedDSYM{false};
+
+ // Lookup table for source locations by symbol name.
+ DylibReader::SymbolToSourceLocMap SourceLocs{};
+};
+
static bool isCppMangled(StringRef Name) {
// InstallAPI currently only supports itanium manglings.
return (Name.starts_with("_Z") || Name.starts_with("__Z") ||
@@ -66,17 +83,15 @@ std::string DylibVerifier::getAnnotatedName(const Record *R,
Annotation += "(tlv) ";
// Check if symbol represents only part of a @interface declaration.
- const bool IsAnnotatedObjCClass =
- ((SymCtx.ObjCIFKind != ObjCIFSymbolKind::None) &&
- (SymCtx.ObjCIFKind <= ObjCIFSymbolKind::EHType));
-
- if (IsAnnotatedObjCClass) {
- if (SymCtx.ObjCIFKind == ObjCIFSymbolKind::EHType)
- Annotation += "Exception Type of ";
- if (SymCtx.ObjCIFKind == ObjCIFSymbolKind::MetaClass)
- Annotation += "Metaclass of ";
- if (SymCtx.ObjCIFKind == ObjCIFSymbolKind::Class)
- Annotation += "Class of ";
+ switch (SymCtx.ObjCIFKind) {
+ default:
+ break;
+ case ObjCIFSymbolKind::EHType:
+ return Annotation + "Exception Type of " + PrettyName;
+ case ObjCIFSymbolKind::MetaClass:
+ return Annotation + "Metaclass of " + PrettyName;
+ case ObjCIFSymbolKind::Class:
+ return Annotation + "Class of " + PrettyName;
}
// Only print symbol type prefix or leading "_" if there is no source location
@@ -90,9 +105,6 @@ std::string DylibVerifier::getAnnotatedName(const Record *R,
return Annotation + PrettyName;
}
- if (IsAnnotatedObjCClass)
- return Annotation + PrettyName;
-
switch (SymCtx.Kind) {
case EncodeKind::GlobalSymbol:
return Annotation + PrettyName;
@@ -332,9 +344,9 @@ bool DylibVerifier::compareSymbolFlags(const Record *R, SymbolContext &SymCtx,
}
if (!DR->isThreadLocalValue() && R->isThreadLocalValue()) {
Ctx.emitDiag([&]() {
- SymCtx.FA->D->getLocation(),
- Ctx.Diag->Report(diag::err_header_symbol_flags_mismatch)
- << getAnnotatedName(DR, SymCtx) << R->isThreadLocalValue();
+ Ctx.Diag->Report(SymCtx.FA->D->getLocation(),
+ diag::err_header_symbol_flags_mismatch)
+ << getAnnotatedName(R, SymCtx) << R->isThreadLocalValue();
});
return false;
}
@@ -508,17 +520,187 @@ DylibVerifier::Result DylibVerifier::verify(GlobalRecord *R,
return verifyImpl(R, SymCtx);
}
-void DylibVerifier::VerifierContext::emitDiag(
- llvm::function_ref<void()> Report) {
+void DylibVerifier::VerifierContext::emitDiag(llvm::function_ref<void()> Report,
+ RecordLoc *Loc) {
if (!DiscoveredFirstError) {
Diag->Report(diag::warn_target)
<< (PrintArch ? getArchitectureName(Target.Arch)
: getTargetTripleName(Target));
DiscoveredFirstError = true;
}
+ if (Loc && Loc->isValid())
+ llvm::errs() << Loc->File << ":" << Loc->Line << ":" << 0 << ": ";
Report();
}
+// The existence of weak-defined RTTI can not always be inferred from the
+// header files because they can be generated as part of an implementation
+// file.
+// InstallAPI doesn't warn about weak-defined RTTI, because this doesn't affect
+// static linking and so can be ignored for text-api files.
+static bool shouldIgnoreCpp(StringRef Name, bool IsWeakDef) {
+ return (IsWeakDef &&
+ (Name.starts_with("__ZTI") || Name.starts_with("__ZTS")));
+}
+void DylibVerifier::visitSymbolInDylib(const Record &R, SymbolContext &SymCtx) {
+ // Undefined symbols should not be in InstallAPI generated text-api files.
+ if (R.isUndefined()) {
+ updateState(Result::Valid);
+ return;
+ }
+
+ // Internal symbols should not be in InstallAPI generated text-api files.
+ if (R.isInternal()) {
+ updateState(Result::Valid);
+ return;
+ }
+
+ // Allow zippered symbols with potentially mismatching availability
+ // between macOS and macCatalyst in the final text-api file.
+ const StringRef SymbolName(SymCtx.SymbolName);
+ if (const Symbol *Sym = Exports->findSymbol(SymCtx.Kind, SymCtx.SymbolName,
+ SymCtx.ObjCIFKind)) {
+ if (Sym->hasArchitecture(Ctx.Target.Arch)) {
+ updateState(Result::Ignore);
+ return;
+ }
+ }
+
+ if (shouldIgnoreCpp(SymbolName, R.isWeakDefined())) {
+ updateState(Result::Valid);
+ return;
+ }
+
+ const bool IsLinkerSymbol = SymbolName.starts_with("$ld$");
+
+ // All checks at this point classify as some kind of violation.
+ // The different verification modes dictate whether they are reported to the
+ // user.
+ if (IsLinkerSymbol || (Mode > VerificationMode::ErrorsOnly))
+ accumulateSrcLocForDylibSymbols();
+ RecordLoc Loc = DWARFCtx->SourceLocs.lookup(SymCtx.SymbolName);
+
+ // Regardless of verification mode, error out on mismatched special linker
+ // symbols.
+ if (IsLinkerSymbol) {
+ Ctx.emitDiag(
+ [&]() {
+ Ctx.Diag->Report(diag::err_header_symbol_missing)
+ << getAnnotatedName(&R, SymCtx, Loc.isValid());
+ },
+ &Loc);
+ updateState(Result::Invalid);
+ return;
+ }
+
+ // Missing declarations for exported symbols are hard errors on Pedantic mode.
+ if (Mode == VerificationMode::Pedantic) {
+ Ctx.emitDiag(
+ [&]() {
+ Ctx.Diag->Report(diag::err_header_symbol_missing)
+ << getAnnotatedName(&R, SymCtx, Loc.isValid());
+ },
+ &Loc);
+ updateState(Result::Invalid);
+ return;
+ }
+
+ // Missing declarations for exported symbols are warnings on ErrorsAndWarnings
+ // mode.
+ if (Mode == VerificationMode::ErrorsAndWarnings) {
+ Ctx.emitDiag(
+ [&]() {
+ Ctx.Diag->Report(diag::warn_header_symbol_missing)
+ << getAnnotatedName(&R, SymCtx, Loc.isValid());
+ },
+ &Loc);
+ updateState(Result::Ignore);
+ return;
+ }
+
+ // Missing declarations are dropped for ErrorsOnly mode. It is the last
+ // remaining mode.
+ updateState(Result::Ignore);
+ return;
+}
+
+void DylibVerifier::visitGlobal(const GlobalRecord &R) {
+ if (R.isVerified())
+ return;
+ SymbolContext SymCtx;
+ SimpleSymbol Sym = parseSymbol(R.getName());
+ SymCtx.SymbolName = Sym.Name;
+ SymCtx.Kind = Sym.Kind;
+ visitSymbolInDylib(R, SymCtx);
+}
+
+void DylibVerifier::visitObjCIVar(const ObjCIVarRecord &R,
+ const StringRef Super) {
+ if (R.isVerified())
+ return;
+ SymbolContext SymCtx;
+ SymCtx.SymbolName = ObjCIVarRecord::createScopedName(Super, R.getName());
+ SymCtx.Kind = EncodeKind::ObjectiveCInstanceVariable;
+ visitSymbolInDylib(R, SymCtx);
+}
+
+void DylibVerifier::accumulateSrcLocForDylibSymbols() {
+ if (DSYMPath.empty())
+ return;
+
+ assert(DWARFCtx != nullptr && "Expected an initialized DWARFContext");
+ if (DWARFCtx->ParsedDSYM)
+ return;
+ DWARFCtx->ParsedDSYM = true;
+ DWARFCtx->SourceLocs =
+ DylibReader::accumulateSourceLocFromDSYM(DSYMPath, Ctx.Target);
+}
+
+void DylibVerifier::visitObjCInterface(const ObjCInterfaceRecord &R) {
+ if (R.isVerified())
+ return;
+ SymbolContext SymCtx;
+ SymCtx.SymbolName = R.getName();
+ SymCtx.ObjCIFKind = assignObjCIFSymbolKind(&R);
+ if (SymCtx.ObjCIFKind > ObjCIFSymbolKind::EHType) {
+ if (R.hasExceptionAttribute()) {
+ SymCtx.Kind = EncodeKind::ObjectiveCClassEHType;
+ visitSymbolInDylib(R, SymCtx);
+ }
+ SymCtx.Kind = EncodeKind::ObjectiveCClass;
+ visitSymbolInDylib(R, SymCtx);
+ } else {
+ SymCtx.Kind = R.hasExceptionAttribute() ? EncodeKind::ObjectiveCClassEHType
+ : EncodeKind::ObjectiveCClass;
+ visitSymbolInDylib(R, SymCtx);
+ }
+
+ for (const ObjCIVarRecord *IV : R.getObjCIVars())
+ visitObjCIVar(*IV, R.getName());
+}
+
+void DylibVerifier::visitObjCCategory(const ObjCCategoryRecord &R) {
+ for (const ObjCIVarRecord *IV : R.getObjCIVars())
+ visitObjCIVar(*IV, R.getSuperClassName());
+}
+
+DylibVerifier::Result DylibVerifier::verifyRemainingSymbols() {
+ if (getState() == Result::NoVerify)
+ return Result::NoVerify;
+ assert(!Dylib.empty() && "No binary to verify against");
+
+ DWARFContext DWARFInfo;
+ DWARFCtx = &DWARFInfo;
+ Ctx.DiscoveredFirstError = false;
+ Ctx.PrintArch = true;
+ for (std::shared_ptr<RecordsSlice> Slice : Dylib) {
+ Ctx.Target = Slice->getTarget();
+ Ctx.DylibSlice = Slice.get();
+ Slice->visit(*this);
+ }
+ return getState();
+}
+
} // namespace installapi
} // namespace clang
diff --git a/clang/lib/InstallAPI/Frontend.cpp b/clang/lib/InstallAPI/Frontend.cpp
index 12cd5fcbc22b..e07ccb14e0b8 100644
--- a/clang/lib/InstallAPI/Frontend.cpp
+++ b/clang/lib/InstallAPI/Frontend.cpp
@@ -138,6 +138,8 @@ std::unique_ptr<MemoryBuffer> createInputBuffer(InstallAPIContext &Ctx) {
SmallString<4096> Contents;
raw_svector_ostream OS(Contents);
for (const HeaderFile &H : Ctx.InputHeaders) {
+ if (H.isExcluded())
+ continue;
if (H.getType() != Ctx.Type)
continue;
if (Ctx.LangMode == Language::C || Ctx.LangMode == Language::CXX)
diff --git a/clang/lib/InstallAPI/HeaderFile.cpp b/clang/lib/InstallAPI/HeaderFile.cpp
index c2d8372741ee..0b7041ec8147 100644
--- a/clang/lib/InstallAPI/HeaderFile.cpp
+++ b/clang/lib/InstallAPI/HeaderFile.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/InstallAPI/HeaderFile.h"
+#include "llvm/TextAPI/Utils.h"
using namespace llvm;
namespace clang::installapi {
@@ -34,4 +35,54 @@ std::optional<std::string> createIncludeHeaderName(const StringRef FullPath) {
return Matches[1].drop_front(Matches[1].rfind('/') + 1).str() + "/" +
Matches[3].str();
}
+
+bool isHeaderFile(StringRef Path) {
+ return StringSwitch<bool>(sys::path::extension(Path))
+ .Cases(".h", ".H", ".hh", ".hpp", ".hxx", true)
+ .Default(false);
+}
+
+llvm::Expected<PathSeq> enumerateFiles(FileManager &FM, StringRef Directory) {
+ PathSeq Files;
+ std::error_code EC;
+ auto &FS = FM.getVirtualFileSystem();
+ for (llvm::vfs::recursive_directory_iterator i(FS, Directory, EC), ie;
+ i != ie; i.increment(EC)) {
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Skip files that do not exist. This usually happens for broken symlinks.
+ if (FS.status(i->path()) == std::errc::no_such_file_or_directory)
+ continue;
+
+ StringRef Path = i->path();
+ if (isHeaderFile(Path))
+ Files.emplace_back(Path);
+ }
+
+ return Files;
+}
+
+HeaderGlob::HeaderGlob(StringRef GlobString, Regex &&Rule, HeaderType Type)
+ : GlobString(GlobString), Rule(std::move(Rule)), Type(Type) {}
+
+bool HeaderGlob::match(const HeaderFile &Header) {
+ if (Header.getType() != Type)
+ return false;
+
+ bool Match = Rule.match(Header.getPath());
+ if (Match)
+ FoundMatch = true;
+ return Match;
+}
+
+Expected<std::unique_ptr<HeaderGlob>> HeaderGlob::create(StringRef GlobString,
+ HeaderType Type) {
+ auto Rule = MachO::createRegexFromGlob(GlobString);
+ if (!Rule)
+ return Rule.takeError();
+
+ return std::make_unique<HeaderGlob>(GlobString, std::move(*Rule), Type);
+}
+
} // namespace clang::installapi
diff --git a/clang/lib/InstallAPI/Visitor.cpp b/clang/lib/InstallAPI/Visitor.cpp
index 452c8f2fb1e4..6476c5107cb5 100644
--- a/clang/lib/InstallAPI/Visitor.cpp
+++ b/clang/lib/InstallAPI/Visitor.cpp
@@ -205,9 +205,10 @@ bool InstallAPIVisitor::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
const ObjCInterfaceDecl *InterfaceD = D->getClassInterface();
const StringRef InterfaceName = InterfaceD->getName();
- auto [Category, FA] = Ctx.Slice->addObjCCategory(InterfaceName, CategoryName,
- Avail, D, *Access);
- recordObjCInstanceVariables(D->getASTContext(), Category, InterfaceName,
+ std::pair<ObjCCategoryRecord *, FrontendAttrs *> Category =
+ Ctx.Slice->addObjCCategory(InterfaceName, CategoryName, Avail, D,
+ *Access);
+ recordObjCInstanceVariables(D->getASTContext(), Category.first, InterfaceName,
D->ivars());
return true;
}
@@ -254,7 +255,7 @@ bool InstallAPIVisitor::VisitFunctionDecl(const FunctionDecl *D) {
return true;
// Skip methods in CXX RecordDecls.
- for (auto P : D->getASTContext().getParents(*M)) {
+ for (const DynTypedNode &P : D->getASTContext().getParents(*M)) {
if (P.get<CXXRecordDecl>())
return true;
}
diff --git a/clang/lib/Interpreter/IncrementalExecutor.cpp b/clang/lib/Interpreter/IncrementalExecutor.cpp
index 40bcef94797d..6f036107c14a 100644
--- a/clang/lib/Interpreter/IncrementalExecutor.cpp
+++ b/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -20,6 +20,7 @@
#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupport.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
@@ -36,26 +37,28 @@ LLVM_ATTRIBUTE_USED void linkComponents() {
namespace clang {
+llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+IncrementalExecutor::createDefaultJITBuilder(
+ llvm::orc::JITTargetMachineBuilder JTMB) {
+ auto JITBuilder = std::make_unique<llvm::orc::LLJITBuilder>();
+ JITBuilder->setJITTargetMachineBuilder(std::move(JTMB));
+ JITBuilder->setPrePlatformSetup([](llvm::orc::LLJIT &J) {
+ // Try to enable debugging of JIT'd code (only works with JITLink for
+ // ELF and MachO).
+ consumeError(llvm::orc::enableDebuggerSupport(J));
+ return llvm::Error::success();
+ });
+ return std::move(JITBuilder);
+}
+
IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
- llvm::Error &Err,
- const clang::TargetInfo &TI)
+ llvm::orc::LLJITBuilder &JITBuilder,
+ llvm::Error &Err)
: TSCtx(TSC) {
using namespace llvm::orc;
llvm::ErrorAsOutParameter EAO(&Err);
- auto JTMB = JITTargetMachineBuilder(TI.getTriple());
- JTMB.addFeatures(TI.getTargetOpts().Features);
- LLJITBuilder Builder;
- Builder.setJITTargetMachineBuilder(JTMB);
- Builder.setPrePlatformSetup(
- [](LLJIT &J) {
- // Try to enable debugging of JIT'd code (only works with JITLink for
- // ELF and MachO).
- consumeError(enableDebuggerSupport(J));
- return llvm::Error::success();
- });
-
- if (auto JitOrErr = Builder.create())
+ if (auto JitOrErr = JITBuilder.create())
Jit = std::move(*JitOrErr);
else {
Err = JitOrErr.takeError();
diff --git a/clang/lib/Interpreter/IncrementalExecutor.h b/clang/lib/Interpreter/IncrementalExecutor.h
index dd0a210a0614..b4347209e14f 100644
--- a/clang/lib/Interpreter/IncrementalExecutor.h
+++ b/clang/lib/Interpreter/IncrementalExecutor.h
@@ -23,7 +23,9 @@
namespace llvm {
class Error;
namespace orc {
+class JITTargetMachineBuilder;
class LLJIT;
+class LLJITBuilder;
class ThreadSafeContext;
} // namespace orc
} // namespace llvm
@@ -44,8 +46,8 @@ class IncrementalExecutor {
public:
enum SymbolNameKind { IRName, LinkerName };
- IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC, llvm::Error &Err,
- const clang::TargetInfo &TI);
+ IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
+ llvm::orc::LLJITBuilder &JITBuilder, llvm::Error &Err);
~IncrementalExecutor();
llvm::Error addModule(PartialTranslationUnit &PTU);
@@ -56,6 +58,9 @@ public:
getSymbolAddress(llvm::StringRef Name, SymbolNameKind NameKind) const;
llvm::orc::LLJIT &GetExecutionEngine() { return *Jit; }
+
+ static llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+ createDefaultJITBuilder(llvm::orc::JITTargetMachineBuilder JTMB);
};
} // end namespace clang
diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp
index 370bcbfee8b0..5eec2a2fd6d1 100644
--- a/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/clang/lib/Interpreter/IncrementalParser.cpp
@@ -375,16 +375,22 @@ void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
TranslationUnitDecl *MostRecentTU = PTU.TUPart;
TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
if (StoredDeclsMap *Map = FirstTU->getPrimaryContext()->getLookupPtr()) {
- for (auto I = Map->begin(); I != Map->end(); ++I) {
- StoredDeclsList &List = I->second;
+ for (auto &&[Key, List] : *Map) {
DeclContextLookupResult R = List.getLookupResult();
+ std::vector<NamedDecl *> NamedDeclsToRemove;
+ bool RemoveAll = true;
for (NamedDecl *D : R) {
- if (D->getTranslationUnitDecl() == MostRecentTU) {
+ if (D->getTranslationUnitDecl() == MostRecentTU)
+ NamedDeclsToRemove.push_back(D);
+ else
+ RemoveAll = false;
+ }
+ if (LLVM_LIKELY(RemoveAll)) {
+ Map->erase(Key);
+ } else {
+ for (NamedDecl *D : NamedDeclsToRemove)
List.remove(D);
- }
}
- if (List.isNull())
- Map->erase(I);
}
}
}
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
index 7fa52f2f15fc..cf31456b6950 100644
--- a/clang/lib/Interpreter/Interpreter.cpp
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -372,15 +372,35 @@ Interpreter::Parse(llvm::StringRef Code) {
return IncrParser->Parse(Code);
}
+static llvm::Expected<llvm::orc::JITTargetMachineBuilder>
+createJITTargetMachineBuilder(const std::string &TT) {
+ if (TT == llvm::sys::getProcessTriple())
+ // This fails immediately if the target backend is not registered
+ return llvm::orc::JITTargetMachineBuilder::detectHost();
+
+ // If the target backend is not registered, LLJITBuilder::create() will fail
+ return llvm::orc::JITTargetMachineBuilder(llvm::Triple(TT));
+}
+
+llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+Interpreter::CreateJITBuilder(CompilerInstance &CI) {
+ auto JTMB = createJITTargetMachineBuilder(CI.getTargetOpts().Triple);
+ if (!JTMB)
+ return JTMB.takeError();
+ return IncrementalExecutor::createDefaultJITBuilder(std::move(*JTMB));
+}
+
llvm::Error Interpreter::CreateExecutor() {
- const clang::TargetInfo &TI =
- getCompilerInstance()->getASTContext().getTargetInfo();
if (IncrExecutor)
return llvm::make_error<llvm::StringError>("Operation failed. "
"Execution engine exists",
std::error_code());
+ llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>> JB =
+ CreateJITBuilder(*getCompilerInstance());
+ if (!JB)
+ return JB.takeError();
llvm::Error Err = llvm::Error::success();
- auto Executor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, TI);
+ auto Executor = std::make_unique<IncrementalExecutor>(*TSCtx, **JB, Err);
if (!Err)
IncrExecutor = std::move(Executor);
diff --git a/clang/lib/Interpreter/Value.cpp b/clang/lib/Interpreter/Value.cpp
index 1d6b2da087e9..eb2ce9c9fd33 100644
--- a/clang/lib/Interpreter/Value.cpp
+++ b/clang/lib/Interpreter/Value.cpp
@@ -1,4 +1,4 @@
-//===--- Interpreter.h - Incremental Compiation and Execution---*- C++ -*-===//
+//===------------ Value.cpp - Definition of interpreter value -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -22,8 +22,6 @@
#include <cstdint>
#include <utility>
-using namespace clang;
-
namespace {
// This is internal buffer maintained by Value, used to hold temporaries.
@@ -61,7 +59,7 @@ public:
void Release() {
assert(RefCnt > 0 && "Can't release if reference count is already zero");
if (--RefCnt == 0) {
- // We hace a non-trivial dtor.
+ // We have a non-trivial dtor.
if (Dtor && IsAlive()) {
assert(Elements && "We at least should have 1 element in Value");
size_t Stride = AllocSize / Elements;
@@ -97,6 +95,8 @@ private:
};
} // namespace
+namespace clang {
+
static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) {
if (Ctx.hasSameType(QT, Ctx.VoidTy))
return Value::K_Void;
@@ -265,3 +265,5 @@ void Value::print(llvm::raw_ostream &Out) const {
assert(OpaqueType != nullptr && "Can't print default Value");
Out << "Not implement yet.\n";
}
+
+} // namespace clang
diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp
index 10c475f617d4..eed7eca2e735 100644
--- a/clang/lib/Lex/ModuleMap.cpp
+++ b/clang/lib/Lex/ModuleMap.cpp
@@ -648,8 +648,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(FileEntryRef File) {
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
- OptionalFileEntryRef UmbrellaModuleMap =
- getModuleMapFileForUniquing(UmbrellaModule);
+ FileID UmbrellaModuleMap = getModuleMapFileIDForUniquing(UmbrellaModule);
// Infer submodules for each of the directories we found between
// the directory of the umbrella header and the directory where
@@ -1021,7 +1020,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
// If the framework has a parent path from which we're allowed to infer
// a framework module, do so.
- OptionalFileEntryRef ModuleMapFile;
+ FileID ModuleMapFID;
if (!Parent) {
// Determine whether we're allowed to infer a module map.
bool canInfer = false;
@@ -1060,7 +1059,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
Attrs.IsExhaustive |= inferred->second.Attrs.IsExhaustive;
Attrs.NoUndeclaredIncludes |=
inferred->second.Attrs.NoUndeclaredIncludes;
- ModuleMapFile = inferred->second.ModuleMapFile;
+ ModuleMapFID = inferred->second.ModuleMapFID;
}
}
}
@@ -1069,7 +1068,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
if (!canInfer)
return nullptr;
} else {
- ModuleMapFile = getModuleMapFileForUniquing(Parent);
+ ModuleMapFID = getModuleMapFileIDForUniquing(Parent);
}
// Look for an umbrella header.
@@ -1086,7 +1085,7 @@ Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
Module *Result = new Module(ModuleName, SourceLocation(), Parent,
/*IsFramework=*/true, /*IsExplicit=*/false,
NumCreatedModules++);
- InferredModuleAllowedBy[Result] = ModuleMapFile;
+ InferredModuleAllowedBy[Result] = ModuleMapFID;
Result->IsInferred = true;
if (!Parent) {
if (LangOpts.CurrentModule == ModuleName)
@@ -1307,28 +1306,34 @@ void ModuleMap::addHeader(Module *Mod, Module::Header Header,
Cb->moduleMapAddHeader(Header.Entry.getName());
}
-OptionalFileEntryRef
-ModuleMap::getContainingModuleMapFile(const Module *Module) const {
+FileID ModuleMap::getContainingModuleMapFileID(const Module *Module) const {
if (Module->DefinitionLoc.isInvalid())
- return std::nullopt;
+ return {};
- return SourceMgr.getFileEntryRefForID(
- SourceMgr.getFileID(Module->DefinitionLoc));
+ return SourceMgr.getFileID(Module->DefinitionLoc);
}
OptionalFileEntryRef
-ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+ModuleMap::getContainingModuleMapFile(const Module *Module) const {
+ return SourceMgr.getFileEntryRefForID(getContainingModuleMapFileID(Module));
+}
+
+FileID ModuleMap::getModuleMapFileIDForUniquing(const Module *M) const {
if (M->IsInferred) {
assert(InferredModuleAllowedBy.count(M) && "missing inferred module map");
return InferredModuleAllowedBy.find(M)->second;
}
- return getContainingModuleMapFile(M);
+ return getContainingModuleMapFileID(M);
+}
+
+OptionalFileEntryRef
+ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+ return SourceMgr.getFileEntryRefForID(getModuleMapFileIDForUniquing(M));
}
-void ModuleMap::setInferredModuleAllowedBy(Module *M,
- OptionalFileEntryRef ModMap) {
+void ModuleMap::setInferredModuleAllowedBy(Module *M, FileID ModMapFID) {
assert(M->IsInferred && "module not inferred");
- InferredModuleAllowedBy[M] = ModMap;
+ InferredModuleAllowedBy[M] = ModMapFID;
}
std::error_code
@@ -1517,7 +1522,7 @@ namespace clang {
ModuleMap &Map;
/// The current module map file.
- FileEntryRef ModuleMapFile;
+ FileID ModuleMapFID;
/// Source location of most recent parsed module declaration
SourceLocation CurrModuleDeclLoc;
@@ -1585,13 +1590,12 @@ namespace clang {
bool parseOptionalAttributes(Attributes &Attrs);
public:
- explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
- const TargetInfo *Target, DiagnosticsEngine &Diags,
- ModuleMap &Map, FileEntryRef ModuleMapFile,
- DirectoryEntryRef Directory, bool IsSystem)
+ ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ const TargetInfo *Target, DiagnosticsEngine &Diags,
+ ModuleMap &Map, FileID ModuleMapFID,
+ DirectoryEntryRef Directory, bool IsSystem)
: L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
- ModuleMapFile(ModuleMapFile), Directory(Directory),
- IsSystem(IsSystem) {
+ ModuleMapFID(ModuleMapFID), Directory(Directory), IsSystem(IsSystem) {
Tok.clear();
consumeToken();
}
@@ -2011,11 +2015,13 @@ void ModuleMapParser::parseModuleDecl() {
}
if (TopLevelModule &&
- ModuleMapFile != Map.getContainingModuleMapFile(TopLevelModule)) {
- assert(ModuleMapFile != Map.getModuleMapFileForUniquing(TopLevelModule) &&
+ ModuleMapFID != Map.getContainingModuleMapFileID(TopLevelModule)) {
+ assert(ModuleMapFID !=
+ Map.getModuleMapFileIDForUniquing(TopLevelModule) &&
"submodule defined in same file as 'module *' that allowed its "
"top-level module");
- Map.addAdditionalModuleMapFile(TopLevelModule, ModuleMapFile);
+ Map.addAdditionalModuleMapFile(
+ TopLevelModule, *SourceMgr.getFileEntryRefForID(ModuleMapFID));
}
}
@@ -2120,7 +2126,8 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule->NoUndeclaredIncludes = true;
ActiveModule->Directory = Directory;
- StringRef MapFileName(ModuleMapFile.getName());
+ StringRef MapFileName(
+ SourceMgr.getFileEntryRefForID(ModuleMapFID)->getName());
if (MapFileName.ends_with("module.private.modulemap") ||
MapFileName.ends_with("module_private.map")) {
ActiveModule->ModuleMapIsPrivate = true;
@@ -2906,7 +2913,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
// We'll be inferring framework modules for this directory.
Map.InferredDirectories[Directory].InferModules = true;
Map.InferredDirectories[Directory].Attrs = Attrs;
- Map.InferredDirectories[Directory].ModuleMapFile = ModuleMapFile;
+ Map.InferredDirectories[Directory].ModuleMapFID = ModuleMapFID;
// FIXME: Handle the 'framework' keyword.
}
@@ -3139,8 +3146,7 @@ bool ModuleMap::parseModuleMapFile(FileEntryRef File, bool IsSystem,
Buffer->getBufferStart() + (Offset ? *Offset : 0),
Buffer->getBufferEnd());
SourceLocation Start = L.getSourceLocation();
- ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, File, Dir,
- IsSystem);
+ ModuleMapParser Parser(L, SourceMgr, Target, Diags, *this, ID, Dir, IsSystem);
bool Result = Parser.parseModuleMapFile();
ParsedModuleMap[File] = Result;
diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp
index 3b1b6df1dbae..a0cc2b516574 100644
--- a/clang/lib/Lex/PPLexerChange.cpp
+++ b/clang/lib/Lex/PPLexerChange.cpp
@@ -93,16 +93,10 @@ bool Preprocessor::EnterSourceFile(FileID FID, ConstSearchDirIterator CurDir,
}
Lexer *TheLexer = new Lexer(FID, *InputFile, *this, IsFirstIncludeOfFile);
- if (getPreprocessorOpts().DependencyDirectivesForFile &&
- FID != PredefinesFileID) {
- if (OptionalFileEntryRef File = SourceMgr.getFileEntryRefForID(FID)) {
- if (std::optional<ArrayRef<dependency_directives_scan::Directive>>
- DepDirectives =
- getPreprocessorOpts().DependencyDirectivesForFile(*File)) {
+ if (DependencyDirectivesForFile && FID != PredefinesFileID)
+ if (OptionalFileEntryRef File = SourceMgr.getFileEntryRefForID(FID))
+ if (auto DepDirectives = DependencyDirectivesForFile(*File))
TheLexer->DepDirectives = *DepDirectives;
- }
- }
- }
EnterSourceFileWithLexer(TheLexer, CurDir);
return false;
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 77d2382ea6d9..63fe678cbb29 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -140,6 +140,14 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SkipUntil(tok::semi);
return nullptr;
}
+ if (!ExtraNSs.empty()) {
+ Diag(ExtraNSs.front().NamespaceLoc,
+ diag::err_unexpected_qualified_namespace_alias)
+ << SourceRange(ExtraNSs.front().NamespaceLoc,
+ ExtraNSs.back().IdentLoc);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
if (attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_namespace_attributes_alias);
if (InlineLoc.isValid())
diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp
index 88c3a1469e8e..ae23cb432c43 100644
--- a/clang/lib/Parse/ParseExpr.cpp
+++ b/clang/lib/Parse/ParseExpr.cpp
@@ -1823,7 +1823,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
}
goto ExpectedExpression;
case tok::l_square:
- if (getLangOpts().CPlusPlus11) {
+ if (getLangOpts().CPlusPlus) {
if (getLangOpts().ObjC) {
// C++11 lambda expressions and Objective-C message sends both start with a
// square bracket. There are three possibilities here:
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index 9471f6f725ef..73c85c585baa 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -806,9 +806,8 @@ ExprResult Parser::ParseLambdaExpression() {
///
/// If we are not looking at a lambda expression, returns ExprError().
ExprResult Parser::TryParseLambdaExpression() {
- assert(getLangOpts().CPlusPlus11
- && Tok.is(tok::l_square)
- && "Not at the start of a possible lambda expression.");
+ assert(getLangOpts().CPlusPlus && Tok.is(tok::l_square) &&
+ "Not at the start of a possible lambda expression.");
const Token Next = NextToken();
if (Next.is(tok::eof)) // Nothing else to lookup here...
@@ -1326,7 +1325,9 @@ static void DiagnoseStaticSpecifierRestrictions(Parser &P,
ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro) {
SourceLocation LambdaBeginLoc = Intro.Range.getBegin();
- Diag(LambdaBeginLoc, diag::warn_cxx98_compat_lambda);
+ Diag(LambdaBeginLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_lambda
+ : diag::ext_lambda);
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
"lambda expression parsing");
diff --git a/clang/lib/Parse/ParseInit.cpp b/clang/lib/Parse/ParseInit.cpp
index 637f21176792..423497bfcb66 100644
--- a/clang/lib/Parse/ParseInit.cpp
+++ b/clang/lib/Parse/ParseInit.cpp
@@ -35,7 +35,7 @@ bool Parser::MayBeDesignationStart() {
return true;
case tok::l_square: { // designator: array-designator
- if (!PP.getLangOpts().CPlusPlus11)
+ if (!PP.getLangOpts().CPlusPlus)
return true;
// C++11 lambda expressions and C99 designators can be ambiguous all the
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index cd0c42d5ffba..72393bea6205 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -135,6 +135,7 @@ namespace sema {
class SemaPPCallbacks : public PPCallbacks {
Sema *S = nullptr;
llvm::SmallVector<SourceLocation, 8> IncludeStack;
+ llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;
public:
void set(Sema &S) { this->S = &S; }
@@ -153,8 +154,8 @@ public:
if (IncludeLoc.isValid()) {
if (llvm::timeTraceProfilerEnabled()) {
OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
- llvm::timeTraceProfilerBegin("Source", FE ? FE->getName()
- : StringRef("<unknown>"));
+ ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin(
+ "Source", FE ? FE->getName() : StringRef("<unknown>")));
}
IncludeStack.push_back(IncludeLoc);
@@ -167,7 +168,7 @@ public:
case ExitFile:
if (!IncludeStack.empty()) {
if (llvm::timeTraceProfilerEnabled())
- llvm::timeTraceProfilerEnd();
+ llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val());
S->DiagnoseNonDefaultPragmaAlignPack(
Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
@@ -2064,8 +2065,11 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
}
- if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType())
- checkRVVTypeSupport(Ty, Loc, D);
+ if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
+ }
// Don't allow SVE types in functions without a SVE target.
if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
@@ -2206,7 +2210,7 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) {
// block copy/destroy functions. Resolve it here.
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
if (CXXDestructorDecl *DD = RD->getDestructor()) {
- auto *FPT = DD->getType()->getAs<FunctionProtoType>();
+ auto *FPT = DD->getType()->castAs<FunctionProtoType>();
S.ResolveExceptionSpec(Loc, FPT);
}
}
diff --git a/clang/lib/Sema/SemaAPINotes.cpp b/clang/lib/Sema/SemaAPINotes.cpp
index 836c633e9d20..a3128306c664 100644
--- a/clang/lib/Sema/SemaAPINotes.cpp
+++ b/clang/lib/Sema/SemaAPINotes.cpp
@@ -52,49 +52,54 @@ static void applyNullability(Sema &S, Decl *D, NullabilityKind Nullability,
if (!Metadata.IsActive)
return;
- auto IsModified = [&](Decl *D, QualType QT,
- NullabilityKind Nullability) -> bool {
+ auto GetModified =
+ [&](Decl *D, QualType QT,
+ NullabilityKind Nullability) -> std::optional<QualType> {
QualType Original = QT;
S.CheckImplicitNullabilityTypeSpecifier(QT, Nullability, D->getLocation(),
isa<ParmVarDecl>(D),
/*OverrideExisting=*/true);
- return QT.getTypePtr() != Original.getTypePtr();
+ return (QT.getTypePtr() != Original.getTypePtr()) ? std::optional(QT)
+ : std::nullopt;
};
if (auto Function = dyn_cast<FunctionDecl>(D)) {
- if (IsModified(D, Function->getReturnType(), Nullability)) {
- QualType FnType = Function->getType();
- Function->setType(FnType);
+ if (auto Modified =
+ GetModified(D, Function->getReturnType(), Nullability)) {
+ const FunctionType *FnType = Function->getType()->castAs<FunctionType>();
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(FnType))
+ Function->setType(S.Context.getFunctionType(
+ *Modified, proto->getParamTypes(), proto->getExtProtoInfo()));
+ else
+ Function->setType(
+ S.Context.getFunctionNoProtoType(*Modified, FnType->getExtInfo()));
}
} else if (auto Method = dyn_cast<ObjCMethodDecl>(D)) {
- QualType Type = Method->getReturnType();
- if (IsModified(D, Type, Nullability)) {
- Method->setReturnType(Type);
+ if (auto Modified = GetModified(D, Method->getReturnType(), Nullability)) {
+ Method->setReturnType(*Modified);
// Make it a context-sensitive keyword if we can.
- if (!isIndirectPointerType(Type))
+ if (!isIndirectPointerType(*Modified))
Method->setObjCDeclQualifier(Decl::ObjCDeclQualifier(
Method->getObjCDeclQualifier() | Decl::OBJC_TQ_CSNullability));
}
} else if (auto Value = dyn_cast<ValueDecl>(D)) {
- QualType Type = Value->getType();
- if (IsModified(D, Type, Nullability)) {
- Value->setType(Type);
+ if (auto Modified = GetModified(D, Value->getType(), Nullability)) {
+ Value->setType(*Modified);
// Make it a context-sensitive keyword if we can.
if (auto Parm = dyn_cast<ParmVarDecl>(D)) {
- if (Parm->isObjCMethodParameter() && !isIndirectPointerType(Type))
+ if (Parm->isObjCMethodParameter() && !isIndirectPointerType(*Modified))
Parm->setObjCDeclQualifier(Decl::ObjCDeclQualifier(
Parm->getObjCDeclQualifier() | Decl::OBJC_TQ_CSNullability));
}
}
} else if (auto Property = dyn_cast<ObjCPropertyDecl>(D)) {
- QualType Type = Property->getType();
- if (IsModified(D, Type, Nullability)) {
- Property->setType(Type, Property->getTypeSourceInfo());
+ if (auto Modified = GetModified(D, Property->getType(), Nullability)) {
+ Property->setType(*Modified, Property->getTypeSourceInfo());
// Make it a property attribute if we can.
- if (!isIndirectPointerType(Type))
+ if (!isIndirectPointerType(*Modified))
Property->setPropertyAttributes(
ObjCPropertyAttribute::kind_null_resettable);
}
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index ef3ab16ba29b..11401b6f56c0 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -1098,7 +1098,7 @@ static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
const ConstantArrayType *T =
Context.getAsConstantArrayType(Format->getType());
assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
+ size_t TypeSize = T->getZExtSize();
// In case there's a null byte somewhere.
StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
return true;
@@ -2399,6 +2399,48 @@ static bool SemaBuiltinPopcountg(Sema &S, CallExpr *TheCall) {
return false;
}
+/// Checks that __builtin_{clzg,ctzg} was called with a first argument, which is
+/// an unsigned integer, and an optional second argument, which is promoted to
+/// an 'int'.
+static bool SemaBuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) {
+ if (checkArgCountRange(S, TheCall, 1, 2))
+ return true;
+
+ ExprResult Arg0Res = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (Arg0Res.isInvalid())
+ return true;
+
+ Expr *Arg0 = Arg0Res.get();
+ TheCall->setArg(0, Arg0);
+
+ QualType Arg0Ty = Arg0->getType();
+
+ if (!Arg0Ty->isUnsignedIntegerType()) {
+ S.Diag(Arg0->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /*unsigned integer ty*/ 7 << Arg0Ty;
+ return true;
+ }
+
+ if (TheCall->getNumArgs() > 1) {
+ ExprResult Arg1Res = S.UsualUnaryConversions(TheCall->getArg(1));
+ if (Arg1Res.isInvalid())
+ return true;
+
+ Expr *Arg1 = Arg1Res.get();
+ TheCall->setArg(1, Arg1);
+
+ QualType Arg1Ty = Arg1->getType();
+
+ if (!Arg1Ty->isSpecificBuiltinType(BuiltinType::Int)) {
+ S.Diag(Arg1->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 2 << /*'int' ty*/ 8 << Arg1Ty;
+ return true;
+ }
+ }
+
+ return false;
+}
+
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
@@ -3187,6 +3229,11 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinPopcountg(*this, TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_clzg:
+ case Builtin::BI__builtin_ctzg:
+ if (SemaBuiltinCountZeroBitsGeneric(*this, TheCall))
+ return ExprError();
+ break;
}
if (getLangOpts().HLSL && CheckHLSLBuiltinFunctionCall(BuiltinID, TheCall))
@@ -5494,6 +5541,14 @@ bool CheckNoDoubleVectors(Sema *S, CallExpr *TheCall) {
checkDoubleVector);
}
+bool CheckUnsignedIntRepresentation(Sema *S, CallExpr *TheCall) {
+ auto checkAllUnsignedTypes = [](clang::QualType PassedType) -> bool {
+ return !PassedType->hasUnsignedIntegerRepresentation();
+ };
+ return CheckArgsTypesAreCorrect(S, TheCall, S->Context.UnsignedIntTy,
+ checkAllUnsignedTypes);
+}
+
void SetElementTypeAsReturnType(Sema *S, CallExpr *TheCall,
QualType ReturnType) {
auto *VecTyA = TheCall->getArg(0)->getType()->getAs<VectorType>();
@@ -5577,6 +5632,31 @@ bool Sema::CheckHLSLBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
TheCall, /*CheckForFloatArgs*/
TheCall->getArg(0)->getType()->hasFloatingRepresentation()))
return true;
+ break;
+ }
+ // Note these are llvm builtins that we want to catch invalid intrinsic
+ // generation. Normal handling of these builitns will occur elsewhere.
+ case Builtin::BI__builtin_elementwise_bitreverse: {
+ if (CheckUnsignedIntRepresentation(this, TheCall))
+ return true;
+ break;
+ }
+ case Builtin::BI__builtin_elementwise_ceil:
+ case Builtin::BI__builtin_elementwise_cos:
+ case Builtin::BI__builtin_elementwise_exp:
+ case Builtin::BI__builtin_elementwise_exp2:
+ case Builtin::BI__builtin_elementwise_floor:
+ case Builtin::BI__builtin_elementwise_log:
+ case Builtin::BI__builtin_elementwise_log2:
+ case Builtin::BI__builtin_elementwise_log10:
+ case Builtin::BI__builtin_elementwise_pow:
+ case Builtin::BI__builtin_elementwise_roundeven:
+ case Builtin::BI__builtin_elementwise_sin:
+ case Builtin::BI__builtin_elementwise_sqrt:
+ case Builtin::BI__builtin_elementwise_trunc: {
+ if (CheckFloatOrHalfRepresentations(this, TheCall))
+ return true;
+ break;
}
}
return false;
@@ -5698,57 +5778,6 @@ static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID,
CallExpr *TheCall) {
- // CodeGenFunction can also detect this, but this gives a better error
- // message.
- bool FeatureMissing = false;
- SmallVector<StringRef> ReqFeatures;
- StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
- Features.split(ReqFeatures, ',', -1, false);
-
- // Check if each required feature is included
- for (StringRef F : ReqFeatures) {
- SmallVector<StringRef> ReqOpFeatures;
- F.split(ReqOpFeatures, '|');
-
- if (llvm::none_of(ReqOpFeatures,
- [&TI](StringRef OF) { return TI.hasFeature(OF); })) {
- std::string FeatureStrs;
- bool IsExtension = true;
- for (StringRef OF : ReqOpFeatures) {
- // If the feature is 64bit, alter the string so it will print better in
- // the diagnostic.
- if (OF == "64bit") {
- assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone");
- OF = "RV64";
- IsExtension = false;
- }
- if (OF == "32bit") {
- assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone");
- OF = "RV32";
- IsExtension = false;
- }
-
- // Convert features like "zbr" and "experimental-zbr" to "Zbr".
- OF.consume_front("experimental-");
- std::string FeatureStr = OF.str();
- FeatureStr[0] = std::toupper(FeatureStr[0]);
- // Combine strings.
- FeatureStrs += FeatureStrs.empty() ? "" : ", ";
- FeatureStrs += "'";
- FeatureStrs += FeatureStr;
- FeatureStrs += "'";
- }
- // Error message
- FeatureMissing = true;
- Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
- << IsExtension
- << TheCall->getSourceRange() << StringRef(FeatureStrs);
- }
- }
-
- if (FeatureMissing)
- return true;
-
// vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
// vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
switch (BuiltinID) {
@@ -6652,36 +6681,35 @@ bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
return false;
}
-void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D) {
- const TargetInfo &TI = Context.getTargetInfo();
-
+void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
+ const llvm::StringMap<bool> &FeatureMap) {
ASTContext::BuiltinVectorTypeInfo Info =
Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
unsigned EltSize = Context.getTypeSize(Info.ElementType);
unsigned MinElts = Info.EC.getKnownMinValue();
if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
- !TI.hasFeature("zve64d"))
+ !FeatureMap.lookup("zve64d"))
Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
// (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
// least zve64x
else if (((EltSize == 64 && Info.ElementType->isIntegerType()) ||
MinElts == 1) &&
- !TI.hasFeature("zve64x"))
+ !FeatureMap.lookup("zve64x"))
Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
- else if (Info.ElementType->isFloat16Type() && !TI.hasFeature("zvfh") &&
- !TI.hasFeature("zvfhmin"))
+ else if (Info.ElementType->isFloat16Type() && !FeatureMap.lookup("zvfh") &&
+ !FeatureMap.lookup("zvfhmin"))
Diag(Loc, diag::err_riscv_type_requires_extension, D)
<< Ty << "zvfh or zvfhmin";
else if (Info.ElementType->isBFloat16Type() &&
- !TI.hasFeature("experimental-zvfbfmin"))
+ !FeatureMap.lookup("experimental-zvfbfmin"))
Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
- !TI.hasFeature("zve32f"))
+ !FeatureMap.lookup("zve32f"))
Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
// Given that caller already checked isRVVType() before calling this function,
// if we don't have at least zve32x supported, then we need to emit error.
- else if (!TI.hasFeature("zve32x"))
+ else if (!FeatureMap.lookup("zve32x"))
Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
}
@@ -9647,7 +9675,7 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
// vector argument can be supported in all of them.
if (ElementTy->isVectorType() && IsFPClass) {
VectorResultTy = GetSignedVectorType(ElementTy);
- ElementTy = ElementTy->getAs<VectorType>()->getElementType();
+ ElementTy = ElementTy->castAs<VectorType>()->getElementType();
}
// This operation requires a non-_Complex floating-point number.
@@ -12418,6 +12446,19 @@ isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
S.Context.getFloatingTypeOrder(From, To) < 0;
}
+static analyze_format_string::ArgType::MatchKind
+handleFormatSignedness(analyze_format_string::ArgType::MatchKind Match,
+ DiagnosticsEngine &Diags, SourceLocation Loc) {
+ if (Match == analyze_format_string::ArgType::NoMatchSignedness) {
+ Match =
+ Diags.isIgnored(
+ diag::warn_format_conversion_argument_type_mismatch_signedness, Loc)
+ ? analyze_format_string::ArgType::Match
+ : analyze_format_string::ArgType::NoMatch;
+ }
+ return Match;
+}
+
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
@@ -12461,6 +12502,9 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
+ ArgType::MatchKind OrigMatch = Match;
+
+ Match = handleFormatSignedness(Match, S.getDiagnostics(), E->getExprLoc());
if (Match == ArgType::Match)
return true;
@@ -12484,6 +12528,14 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
ICE->getType() == S.Context.UnsignedIntTy) {
// All further checking is done on the subexpression
ImplicitMatch = AT.matchesType(S.Context, ExprTy);
+ if (OrigMatch == ArgType::NoMatchSignedness &&
+ ImplicitMatch != ArgType::NoMatchSignedness)
+        // If the original match was a signedness match, then this match on
+        // the implicit cast type also needs to be a signedness match;
+        // otherwise we might introduce new warnings from -Wformat-signedness.
+ return true;
+ ImplicitMatch = handleFormatSignedness(
+ ImplicitMatch, S.getDiagnostics(), E->getExprLoc());
if (ImplicitMatch == ArgType::Match)
return true;
}
@@ -12605,6 +12657,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
case ArgType::Match:
case ArgType::MatchPromotion:
case ArgType::NoMatchPromotionTypeConfusion:
+ case ArgType::NoMatchSignedness:
llvm_unreachable("expected non-matching");
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
@@ -12640,8 +12693,10 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
SmallVector<FixItHint,4> Hints;
- if (AT.matchesType(S.Context, IntendedTy) != ArgType::Match ||
- ShouldNotPrintDirectly)
+ ArgType::MatchKind IntendedMatch = AT.matchesType(S.Context, IntendedTy);
+ IntendedMatch = handleFormatSignedness(IntendedMatch, S.getDiagnostics(),
+ E->getExprLoc());
+ if ((IntendedMatch != ArgType::Match) || ShouldNotPrintDirectly)
Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
@@ -12710,6 +12765,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
case ArgType::Match:
case ArgType::MatchPromotion:
case ArgType::NoMatchPromotionTypeConfusion:
+ case ArgType::NoMatchSignedness:
llvm_unreachable("expected non-matching");
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
@@ -12921,6 +12977,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
analyze_format_string::ArgType::MatchKind Match =
AT.matchesType(S.Context, Ex->getType());
+ Match = handleFormatSignedness(Match, S.getDiagnostics(), Ex->getExprLoc());
bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
if (Match == analyze_format_string::ArgType::Match)
return true;
@@ -12982,7 +13039,7 @@ static void CheckFormatString(
const ConstantArrayType *T =
S.Context.getAsConstantArrayType(FExpr->getType());
assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
+ size_t TypeSize = T->getZExtSize();
size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
const unsigned numDataArgs = Args.size() - firstDataArg;
@@ -13042,7 +13099,7 @@ bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
// Account for cases where the string literal is truncated in a declaration.
const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
+ size_t TypeSize = T->getZExtSize();
size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
getLangOpts(),
@@ -14005,7 +14062,7 @@ static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
// Only handle constant-sized or VLAs, but not flexible members.
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
// Only issue the FIXIT for arrays of size > 1.
- if (CAT->getSize().getSExtValue() <= 1)
+ if (CAT->getZExtSize() <= 1)
return false;
} else if (!Ty->isVariableArrayType()) {
return false;
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index 73e6baa52782..83ebcaf9e765 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/QualTypeNames.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
@@ -5678,6 +5679,10 @@ QualType getApproximateType(const Expr *E) {
return getApproximateType(VD->getInit());
}
}
+ if (const auto *UO = llvm::dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UnaryOperatorKind::UO_Deref)
+ return UO->getSubExpr()->getType()->getPointeeType();
+ }
return Unresolved;
}
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 1c546e9f5894..b2986c5012ea 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -1269,10 +1269,20 @@ substituteParameterMappings(Sema &S, NormalizedConstraint &N,
: SourceLocation()));
Atomic.ParameterMapping.emplace(TempArgs, OccurringIndices.count());
}
+ SourceLocation InstLocBegin =
+ ArgsAsWritten->arguments().empty()
+ ? ArgsAsWritten->getLAngleLoc()
+ : ArgsAsWritten->arguments().front().getSourceRange().getBegin();
+ SourceLocation InstLocEnd =
+ ArgsAsWritten->arguments().empty()
+ ? ArgsAsWritten->getRAngleLoc()
+ : ArgsAsWritten->arguments().front().getSourceRange().getEnd();
Sema::InstantiatingTemplate Inst(
- S, ArgsAsWritten->arguments().front().getSourceRange().getBegin(),
+ S, InstLocBegin,
Sema::InstantiatingTemplate::ParameterMappingSubstitution{}, Concept,
- ArgsAsWritten->arguments().front().getSourceRange());
+ {InstLocBegin, InstLocEnd});
+ if (Inst.isInvalid())
+ return true;
if (S.SubstTemplateArguments(*Atomic.ParameterMapping, MLTAL, SubstArgs))
return true;
@@ -1346,6 +1356,8 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
S, CSE->getExprLoc(),
Sema::InstantiatingTemplate::ConstraintNormalization{}, D,
CSE->getSourceRange());
+ if (Inst.isInvalid())
+ return std::nullopt;
// C++ [temp.constr.normal]p1.1
// [...]
// The normal form of an id-expression of the form C<A1, A2, ..., AN>,
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 8ceb79555fb5..5027deda0d7e 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -8837,7 +8837,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
const auto *ATy = dyn_cast<ConstantArrayType>(T.getTypePtr());
- if (!ATy || ATy->getSize().getSExtValue() != 0) {
+ if (!ATy || ATy->getZExtSize() != 0) {
Diag(NewVD->getLocation(),
diag::err_typecheck_wasm_table_must_have_zero_length);
NewVD->setInvalidDecl();
@@ -8962,8 +8962,13 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
}
}
- if (T->isRVVSizelessBuiltinType())
- checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext));
+ if (T->isRVVSizelessBuiltinType() && isa<FunctionDecl>(CurContext)) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurContext);
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext),
+ CallerFeatureMap);
+ }
}
/// Perform semantic checking on a newly-created variable
@@ -9910,7 +9915,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// FIXME: We need a better way to separate C++ standard and clang modules.
bool ImplicitInlineCXX20 = !getLangOpts().CPlusPlusModules ||
!NewFD->getOwningModule() ||
- NewFD->getOwningModule()->isGlobalModule() ||
+ NewFD->isFromExplicitGlobalModule() ||
NewFD->getOwningModule()->isHeaderLikeModule();
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
@@ -11260,11 +11265,13 @@ static bool checkNonMultiVersionCompatAttributes(Sema &S,
return Diagnose(S, A);
break;
case attr::TargetVersion:
- if (MVKind != MultiVersionKind::TargetVersion)
+ if (MVKind != MultiVersionKind::TargetVersion &&
+ MVKind != MultiVersionKind::TargetClones)
return Diagnose(S, A);
break;
case attr::TargetClones:
- if (MVKind != MultiVersionKind::TargetClones)
+ if (MVKind != MultiVersionKind::TargetClones &&
+ MVKind != MultiVersionKind::TargetVersion)
return Diagnose(S, A);
break;
default:
@@ -11441,9 +11448,9 @@ static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD) {
"Function lacks multiversion attribute");
const auto *TA = FD->getAttr<TargetAttr>();
const auto *TVA = FD->getAttr<TargetVersionAttr>();
- // Target and target_version only causes MV if it is default, otherwise this
- // is a normal function.
- if ((TA && !TA->isDefaultVersion()) || (TVA && !TVA->isDefaultVersion()))
+ // The target attribute only causes MV if this declaration is the default,
+ // otherwise it is treated as a normal function.
+ if (TA && !TA->isDefaultVersion())
return false;
if ((TA || TVA) && CheckMultiVersionValue(S, FD)) {
@@ -11469,6 +11476,22 @@ static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
return false;
}
+static void patchDefaultTargetVersion(FunctionDecl *From, FunctionDecl *To) {
+ if (!From->getASTContext().getTargetInfo().getTriple().isAArch64())
+ return;
+
+ MultiVersionKind MVKindFrom = From->getMultiVersionKind();
+ MultiVersionKind MVKindTo = To->getMultiVersionKind();
+
+ if (MVKindTo == MultiVersionKind::None &&
+ (MVKindFrom == MultiVersionKind::TargetVersion ||
+ MVKindFrom == MultiVersionKind::TargetClones)) {
+ To->setIsMultiVersion();
+ To->addAttr(TargetVersionAttr::CreateImplicit(
+ To->getASTContext(), "default", To->getSourceRange()));
+ }
+}
+
static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
FunctionDecl *NewFD,
bool &Redeclaration,
@@ -11479,10 +11502,7 @@ static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
// The definitions should be allowed in any order. If we have discovered
// a new target version and the preceeding was the default, then add the
// corresponding attribute to it.
- if (OldFD->getMultiVersionKind() == MultiVersionKind::None &&
- NewFD->getMultiVersionKind() == MultiVersionKind::TargetVersion)
- OldFD->addAttr(TargetVersionAttr::CreateImplicit(S.Context, "default",
- OldFD->getSourceRange()));
+ patchDefaultTargetVersion(NewFD, OldFD);
const auto *NewTA = NewFD->getAttr<TargetAttr>();
const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
@@ -11583,36 +11603,60 @@ static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
return false;
}
-static bool MultiVersionTypesCompatible(MultiVersionKind Old,
- MultiVersionKind New) {
- if (Old == New || Old == MultiVersionKind::None ||
- New == MultiVersionKind::None)
+static bool MultiVersionTypesCompatible(FunctionDecl *Old, FunctionDecl *New) {
+ MultiVersionKind OldKind = Old->getMultiVersionKind();
+ MultiVersionKind NewKind = New->getMultiVersionKind();
+
+ if (OldKind == NewKind || OldKind == MultiVersionKind::None ||
+ NewKind == MultiVersionKind::None)
return true;
- return (Old == MultiVersionKind::CPUDispatch &&
- New == MultiVersionKind::CPUSpecific) ||
- (Old == MultiVersionKind::CPUSpecific &&
- New == MultiVersionKind::CPUDispatch);
+ if (Old->getASTContext().getTargetInfo().getTriple().isAArch64()) {
+ switch (OldKind) {
+ case MultiVersionKind::TargetVersion:
+ return NewKind == MultiVersionKind::TargetClones;
+ case MultiVersionKind::TargetClones:
+ return NewKind == MultiVersionKind::TargetVersion;
+ default:
+ return false;
+ }
+ } else {
+ switch (OldKind) {
+ case MultiVersionKind::CPUDispatch:
+ return NewKind == MultiVersionKind::CPUSpecific;
+ case MultiVersionKind::CPUSpecific:
+ return NewKind == MultiVersionKind::CPUDispatch;
+ default:
+ return false;
+ }
+ }
}
/// Check the validity of a new function declaration being added to an existing
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
- MultiVersionKind NewMVKind, const CPUDispatchAttr *NewCPUDisp,
- const CPUSpecificAttr *NewCPUSpec, const TargetClonesAttr *NewClones,
- bool &Redeclaration, NamedDecl *&OldDecl, LookupResult &Previous) {
- const auto *NewTA = NewFD->getAttr<TargetAttr>();
- const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
- MultiVersionKind OldMVKind = OldFD->getMultiVersionKind();
+ const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
+ const TargetClonesAttr *NewClones, bool &Redeclaration, NamedDecl *&OldDecl,
+ LookupResult &Previous) {
+
// Disallow mixing of multiversioning types.
- if (!MultiVersionTypesCompatible(OldMVKind, NewMVKind)) {
+ if (!MultiVersionTypesCompatible(OldFD, NewFD)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
+ // Add the default target_version attribute if it's missing.
+ patchDefaultTargetVersion(OldFD, NewFD);
+ patchDefaultTargetVersion(NewFD, OldFD);
+
+ const auto *NewTA = NewFD->getAttr<TargetAttr>();
+ const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
+ MultiVersionKind NewMVKind = NewFD->getMultiVersionKind();
+ [[maybe_unused]] MultiVersionKind OldMVKind = OldFD->getMultiVersionKind();
+
ParsedTargetAttr NewParsed;
if (NewTA) {
NewParsed = S.getASTContext().getTargetInfo().parseTargetAttr(
@@ -11641,19 +11685,6 @@ static bool CheckMultiVersionAdditionalDecl(
S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
- if (NewMVKind == MultiVersionKind::None &&
- OldMVKind == MultiVersionKind::TargetVersion) {
- NewFD->addAttr(TargetVersionAttr::CreateImplicit(
- S.Context, "default", NewFD->getSourceRange()));
- NewFD->setIsMultiVersion();
- NewMVKind = MultiVersionKind::TargetVersion;
- if (!NewTVA) {
- NewTVA = NewFD->getAttr<TargetVersionAttr>();
- NewTVA->getFeatures(NewFeats);
- llvm::sort(NewFeats);
- }
- }
-
switch (NewMVKind) {
case MultiVersionKind::None:
assert(OldMVKind == MultiVersionKind::TargetClones &&
@@ -11681,43 +11712,81 @@ static bool CheckMultiVersionAdditionalDecl(
break;
}
case MultiVersionKind::TargetVersion: {
- const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>();
- if (CurTVA->getName() == NewTVA->getName()) {
- NewFD->setIsMultiVersion();
- Redeclaration = true;
- OldDecl = ND;
- return false;
- }
- llvm::SmallVector<StringRef, 8> CurFeats;
- if (CurTVA) {
+ if (const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>()) {
+ if (CurTVA->getName() == NewTVA->getName()) {
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = ND;
+ return false;
+ }
+ llvm::SmallVector<StringRef, 8> CurFeats;
CurTVA->getFeatures(CurFeats);
llvm::sort(CurFeats);
- }
- if (CurFeats == NewFeats) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
- S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
+
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ } else if (const auto *CurClones = CurFD->getAttr<TargetClonesAttr>()) {
+ // Default
+ if (NewFeats.empty())
+ break;
+
+ for (unsigned I = 0; I < CurClones->featuresStrs_size(); ++I) {
+ llvm::SmallVector<StringRef, 8> CurFeats;
+ CurClones->getFeatures(CurFeats, I);
+ llvm::sort(CurFeats);
+
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
}
break;
}
case MultiVersionKind::TargetClones: {
- const auto *CurClones = CurFD->getAttr<TargetClonesAttr>();
+ assert(NewClones && "MultiVersionKind does not match attribute type");
+ if (const auto *CurClones = CurFD->getAttr<TargetClonesAttr>()) {
+ if (CurClones->featuresStrs_size() != NewClones->featuresStrs_size() ||
+ !std::equal(CurClones->featuresStrs_begin(),
+ CurClones->featuresStrs_end(),
+ NewClones->featuresStrs_begin())) {
+ S.Diag(NewFD->getLocation(), diag::err_target_clone_doesnt_match);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ } else if (const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>()) {
+ llvm::SmallVector<StringRef, 8> CurFeats;
+ CurTVA->getFeatures(CurFeats);
+ llvm::sort(CurFeats);
+
+ // Default
+ if (CurFeats.empty())
+ break;
+
+ for (unsigned I = 0; I < NewClones->featuresStrs_size(); ++I) {
+ NewFeats.clear();
+ NewClones->getFeatures(NewFeats, I);
+ llvm::sort(NewFeats);
+
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
+ break;
+ }
Redeclaration = true;
OldDecl = CurFD;
NewFD->setIsMultiVersion();
-
- if (CurClones && NewClones &&
- (CurClones->featuresStrs_size() != NewClones->featuresStrs_size() ||
- !std::equal(CurClones->featuresStrs_begin(),
- CurClones->featuresStrs_end(),
- NewClones->featuresStrs_begin()))) {
- S.Diag(NewFD->getLocation(), diag::err_target_clone_doesnt_match);
- S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
- }
-
return false;
}
case MultiVersionKind::CPUSpecific:
@@ -11913,7 +11982,7 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Resolve that these are
// still compatible with previous declarations.
- return CheckMultiVersionAdditionalDecl(S, OldFD, NewFD, MVKind, NewCPUDisp,
+ return CheckMultiVersionAdditionalDecl(S, OldFD, NewFD, NewCPUDisp,
NewCPUSpec, NewClones, Redeclaration,
OldDecl, Previous);
}
@@ -16109,7 +16178,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
FD->setInvalidDecl();
}
}
- } else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
+ } else if (getLangOpts().CPlusPlus && isLambdaCallOperator(FD)) {
// In C++11, we don't use 'auto' deduction rules for lambda call
// operators because we don't support return type deduction.
auto *LSI = getCurLambda();
@@ -18101,7 +18170,9 @@ CreateNewDecl:
cast_or_null<RecordDecl>(PrevDecl));
}
- if (OOK != OOK_Outside && TUK == TUK_Definition && !getLangOpts().CPlusPlus)
+ // Only C23 and later allow defining new types in 'offsetof()'.
+ if (OOK != OOK_Outside && TUK == TUK_Definition && !getLangOpts().CPlusPlus &&
+ !getLangOpts().C23)
Diag(New->getLocation(), diag::ext_type_defined_in_offsetof)
<< (OOK == OOK_Macro) << New->getSourceRange();
@@ -19360,15 +19431,11 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
} else if (Record->isUnion())
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_union_ms
- : getLangOpts().CPlusPlus
- ? diag::ext_flexible_array_union_gnu
- : diag::err_flexible_array_union;
+ : diag::ext_flexible_array_union_gnu;
else if (NumNamedMembers < 1)
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_empty_aggregate_ms
- : getLangOpts().CPlusPlus
- ? diag::ext_flexible_array_empty_aggregate_gnu
- : diag::err_flexible_array_empty_aggregate;
+ : diag::ext_flexible_array_empty_aggregate_gnu;
if (DiagID)
Diag(FD->getLocation(), DiagID)
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 0a62c656d824..f25f3afd0f4a 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -5271,6 +5271,9 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_PreserveNone:
D->addAttr(::new (S.Context) PreserveNoneAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_RISCVVectorCC:
+ D->addAttr(::new (S.Context) RISCVVectorCCAttr(S.Context, AL));
+ return;
default:
llvm_unreachable("unexpected attribute kind");
}
@@ -5475,6 +5478,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_PreserveNone:
CC = CC_PreserveNone;
break;
+ case ParsedAttr::AT_RISCVVectorCC:
+ CC = CC_RISCVVectorCall;
+ break;
default: llvm_unreachable("unexpected attribute kind");
}
@@ -9637,6 +9643,7 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_AMDGPUKernelCall:
case ParsedAttr::AT_M68kRTD:
case ParsedAttr::AT_PreserveNone:
+ case ParsedAttr::AT_RISCVVectorCC:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index e258a4f7c894..f32ff396f8a5 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -5282,7 +5282,7 @@ static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
return true;
while (const ConstantArrayType *ArrayT = Context.getAsConstantArrayType(T)) {
- if (!ArrayT->getSize())
+ if (ArrayT->isZeroSize())
return true;
T = ArrayT->getElementType();
@@ -9738,7 +9738,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
return false;
CXXRecordDecl *RD = MD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus11 || RD->isInvalidDecl())
+ if (!LangOpts.CPlusPlus || (!LangOpts.CPlusPlus11 && !RD->isLambda()) ||
+ RD->isInvalidDecl())
return false;
// C++11 [expr.lambda.prim]p19:
@@ -11352,7 +11353,7 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
if (ConvType->isUndeducedAutoType()) {
Diag(Conversion->getTypeSpecStartLoc(), diag::err_auto_not_allowed)
<< getReturnTypeLoc(Conversion).getSourceRange()
- << llvm::to_underlying(ConvType->getAs<AutoType>()->getKeyword())
+ << llvm::to_underlying(ConvType->castAs<AutoType>()->getKeyword())
<< /* in declaration of conversion function template= */ 24;
}
@@ -13588,6 +13589,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
Diag(UsingLoc, diag::err_alias_template_extra_headers)
<< SourceRange(TemplateParamLists[1]->getTemplateLoc(),
TemplateParamLists[TemplateParamLists.size()-1]->getRAngleLoc());
+ Invalid = true;
}
TemplateParameterList *TemplateParams = TemplateParamLists[0];
@@ -16201,7 +16203,8 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
// Emit warning for non-trivial dtor in global scope (a real global,
// class-static, function-static).
- Diag(VD->getLocation(), diag::warn_exit_time_destructor);
+ if (!VD->hasAttr<AlwaysDestroyAttr>())
+ Diag(VD->getLocation(), diag::warn_exit_time_destructor);
// TODO: this should be re-enabled for static locals by !CXAAtExit
if (!VD->isStaticLocal())
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 5f03b9814282..091fc3e4836b 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -6857,9 +6857,8 @@ Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
ArgCAT->getElementType())) {
if (ArgCAT->getSize().ult(CAT->getSize())) {
Diag(CallLoc, diag::warn_static_array_too_small)
- << ArgExpr->getSourceRange()
- << (unsigned)ArgCAT->getSize().getZExtValue()
- << (unsigned)CAT->getSize().getZExtValue() << 0;
+ << ArgExpr->getSourceRange() << (unsigned)ArgCAT->getZExtSize()
+ << (unsigned)CAT->getZExtSize() << 0;
DiagnoseCalleeStaticArrayParam(*this, Param);
}
return;
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index c34a40fa7c81..51c8e04bee8c 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -6083,7 +6083,7 @@ static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
if (Matched && T->isArrayType()) {
if (const ConstantArrayType *CAT = Self.Context.getAsConstantArrayType(T))
- return CAT->getSize().getLimitedValue();
+ return CAT->getLimitedSize();
}
}
return 0;
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index aa470adb30b4..dce225a7204d 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -213,7 +213,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
// Get the length of the string as parsed.
auto *ConstantArrayTy =
cast<ConstantArrayType>(Str->getType()->getAsArrayTypeUnsafe());
- uint64_t StrLength = ConstantArrayTy->getSize().getZExtValue();
+ uint64_t StrLength = ConstantArrayTy->getZExtSize();
if (CheckC23ConstexprInit)
if (const StringLiteral *SL = dyn_cast<StringLiteral>(Str->IgnoreParens()))
@@ -246,14 +246,13 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
}
// [dcl.init.string]p2
- if (StrLength > CAT->getSize().getZExtValue())
+ if (StrLength > CAT->getZExtSize())
S.Diag(Str->getBeginLoc(),
diag::err_initializer_string_for_char_array_too_long)
- << CAT->getSize().getZExtValue() << StrLength
- << Str->getSourceRange();
+ << CAT->getZExtSize() << StrLength << Str->getSourceRange();
} else {
// C99 6.7.8p14.
- if (StrLength-1 > CAT->getSize().getZExtValue())
+ if (StrLength - 1 > CAT->getZExtSize())
S.Diag(Str->getBeginLoc(),
diag::ext_initializer_string_for_char_array_too_long)
<< Str->getSourceRange();
@@ -879,7 +878,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) {
ElementType = AType->getElementType();
if (const auto *CAType = dyn_cast<ConstantArrayType>(AType))
- NumElements = CAType->getSize().getZExtValue();
+ NumElements = CAType->getZExtSize();
// For an array new with an unknown bound, ask for one additional element
// in order to populate the array filler.
if (Entity.isVariableLengthArrayNew())
@@ -1016,7 +1015,7 @@ int InitListChecker::numArrayElements(QualType DeclType) {
int maxElements = 0x7FFFFFFF;
if (const ConstantArrayType *CAT =
SemaRef.Context.getAsConstantArrayType(DeclType)) {
- maxElements = static_cast<int>(CAT->getSize().getZExtValue());
+ maxElements = static_cast<int>(CAT->getZExtSize());
}
return maxElements;
}
@@ -2330,11 +2329,11 @@ void InitListChecker::CheckStructUnionTypes(
break;
}
- // We've already initialized a member of a union. We're done.
+ // We've already initialized a member of a union. We can stop entirely.
if (InitializedSomething && RD->isUnion())
- break;
+ return;
- // If we've hit the flexible array member at the end, we're done.
+ // Stop if we've hit a flexible array member.
if (Field->getType()->isIncompleteArrayType())
break;
@@ -2457,6 +2456,11 @@ void InitListChecker::CheckStructUnionTypes(
else
CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
+
+ if (RD->isUnion() && StructuredList) {
+ // Initialize the first field within the union.
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
}
/// Expand a field designator that refers to a member of an
@@ -3101,7 +3105,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Get the length of the string.
uint64_t StrLen = SL->getLength();
if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
- StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
+ StrLen = cast<ConstantArrayType>(AT)->getZExtSize();
StructuredList->resizeInits(Context, StrLen);
// Build a literal for each character in the string, and put them into
@@ -3124,7 +3128,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Get the length of the string.
uint64_t StrLen = Str.size();
if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
- StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
+ StrLen = cast<ConstantArrayType>(AT)->getZExtSize();
StructuredList->resizeInits(Context, StrLen);
// Build a literal for each character in the string, and put them into
@@ -3283,7 +3287,7 @@ InitListChecker::createInitListExpr(QualType CurrentObjectType,
if (const ArrayType *AType
= SemaRef.Context.getAsArrayType(CurrentObjectType)) {
if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) {
- NumElements = CAType->getSize().getZExtValue();
+ NumElements = CAType->getZExtSize();
// Simple heuristic so that we don't allocate a very large
// initializer with many empty entries at the end.
if (NumElements > ExpectedNumInits)
@@ -5492,7 +5496,7 @@ static void TryOrBuildParenListInitialization(
// having k elements.
if (const ConstantArrayType *CAT =
S.getASTContext().getAsConstantArrayType(Entity.getType())) {
- ArrayLength = CAT->getSize().getZExtValue();
+ ArrayLength = CAT->getZExtSize();
ResultType = Entity.getType();
} else if (const VariableArrayType *VAT =
S.getASTContext().getAsVariableArrayType(Entity.getType())) {
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index 4636d89ebf2b..f9e1ad0121e2 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -638,8 +638,6 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setInvalidDecl();
}
- ProcessDeclAttributes(S, PDecl, FD.D);
-
// Regardless of setter/getter attribute, we save the default getter/setter
// selector names in anticipation of declaration of setter/getter methods.
PDecl->setGetterName(GetterSel, GetterNameLoc);
@@ -647,6 +645,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setPropertyAttributesAsWritten(
makePropertyAttributesAsWritten(AttributesAsWritten));
+ ProcessDeclAttributes(S, PDecl, FD.D);
+
if (Attributes & ObjCPropertyAttribute::kind_readonly)
PDecl->setPropertyAttributes(ObjCPropertyAttribute::kind_readonly);
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index e9ad7bbde0f9..0ba54a3a9cae 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -21284,7 +21284,7 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
if (isa<ArraySubscriptExpr>(E) ||
(OASE && OASE->getColonLocFirst().isInvalid())) {
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
+ return ATy->getSExtSize() != 1;
// Size can't be evaluated statically.
return false;
}
@@ -21325,7 +21325,7 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
return false; // Can't get the integer value as a constant.
llvm::APSInt ConstLength = Result.Val.getInt();
- return CATy->getSize().getSExtValue() != ConstLength.getSExtValue();
+ return CATy->getSExtSize() != ConstLength.getSExtValue();
}
// Return true if it can be proven that the provided array expression (array
@@ -21350,7 +21350,7 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
// is pointer.
if (!Length) {
if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
- return ATy->getSize().getSExtValue() != 1;
+ return ATy->getSExtSize() != 1;
// We cannot assume anything.
return false;
}
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index f6bd85bdc646..51450e486eae 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -6865,6 +6865,32 @@ static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
return false;
}
+static bool isNonViableMultiVersionOverload(FunctionDecl *FD) {
+ if (FD->isTargetMultiVersionDefault())
+ return false;
+
+ if (!FD->getASTContext().getTargetInfo().getTriple().isAArch64())
+ return FD->isTargetMultiVersion();
+
+ if (!FD->isMultiVersion())
+ return false;
+
+ // Among multiple target versions, consider either the default,
+ + // or the first non-default in the absence of a default version.
+ unsigned SeenAt = 0;
+ unsigned I = 0;
+ bool HasDefault = false;
+ FD->getASTContext().forEachMultiversionedFunctionVersion(
+ FD, [&](const FunctionDecl *CurFD) {
+ if (FD == CurFD)
+ SeenAt = I;
+ else if (CurFD->isTargetMultiVersionDefault())
+ HasDefault = true;
+ ++I;
+ });
+ return HasDefault || SeenAt != 0;
+}
+
/// AddOverloadCandidate - Adds the given function to the set of
/// candidate functions, using the given function call arguments. If
/// @p SuppressUserConversions, then don't allow user-defined
@@ -6970,11 +6996,7 @@ void Sema::AddOverloadCandidate(
}
}
- if (Function->isMultiVersion() &&
- ((Function->hasAttr<TargetAttr>() &&
- !Function->getAttr<TargetAttr>()->isDefaultVersion()) ||
- (Function->hasAttr<TargetVersionAttr>() &&
- !Function->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
+ if (isNonViableMultiVersionOverload(Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
return;
@@ -7637,11 +7659,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
return;
}
- if (Method->isMultiVersion() &&
- ((Method->hasAttr<TargetAttr>() &&
- !Method->getAttr<TargetAttr>()->isDefaultVersion()) ||
- (Method->hasAttr<TargetVersionAttr>() &&
- !Method->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
+ if (isNonViableMultiVersionOverload(Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
@@ -8127,11 +8145,7 @@ void Sema::AddConversionCandidate(
return;
}
- if (Conversion->isMultiVersion() &&
- ((Conversion->hasAttr<TargetAttr>() &&
- !Conversion->getAttr<TargetAttr>()->isDefaultVersion()) ||
- (Conversion->hasAttr<TargetVersionAttr>() &&
- !Conversion->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
+ if (isNonViableMultiVersionOverload(Conversion)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
index ca0254d29e7f..18ebaa13346a 100644
--- a/clang/lib/Sema/SemaSYCL.cpp
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -35,7 +35,7 @@ Sema::SemaDiagnosticBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
static bool isZeroSizedArray(Sema &SemaRef, QualType Ty) {
if (const auto *CAT = SemaRef.getASTContext().getAsConstantArrayType(Ty))
- return CAT->getSize() == 0;
+ return CAT->isZeroSize();
return false;
}
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 005529a53270..9cd19d711af4 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -1836,7 +1836,27 @@ static TemplateParameterList *GetTemplateParameterList(TemplateDecl *TD) {
// Make sure we get the template parameter list from the most
// recent declaration, since that is the only one that is guaranteed to
// have all the default template argument information.
- return cast<TemplateDecl>(TD->getMostRecentDecl())->getTemplateParameters();
+ Decl *D = TD->getMostRecentDecl();
+ // C++11 N3337 [temp.param]p12:
+ // A default template argument shall not be specified in a friend class
+ // template declaration.
+ //
+ // Skip past friend *declarations* because they are not supposed to contain
+ // default template arguments. Moreover, these declarations may introduce
+ // template parameters living in different template depths than the
+ // corresponding template parameters in TD, causing unmatched constraint
+ // substitution.
+ //
+ // FIXME: Diagnose such cases within a class template:
+ // template <class T>
+ // struct S {
+ // template <class = void> friend struct C;
+ // };
+ // template struct S<int>;
+ while (D->getFriendObjectKind() != Decl::FriendObjectKind::FOK_None &&
+ D->getPreviousDecl())
+ D = D->getPreviousDecl();
+ return cast<TemplateDecl>(D)->getTemplateParameters();
}
DeclResult Sema::CheckClassTemplate(
@@ -2731,6 +2751,8 @@ bool hasDeclaredDeductionGuides(DeclarationName Name, DeclContext *DC) {
// Build deduction guides for a type alias template.
void DeclareImplicitDeductionGuidesForTypeAlias(
Sema &SemaRef, TypeAliasTemplateDecl *AliasTemplate, SourceLocation Loc) {
+ if (AliasTemplate->isInvalidDecl())
+ return;
auto &Context = SemaRef.Context;
// FIXME: if there is an explicit deduction guide after the first use of the
// type alias usage, we will not cover this explicit deduction guide. fix this
@@ -2974,7 +2996,7 @@ void DeclareImplicitDeductionGuidesForTypeAlias(
if (auto *FPrime = SemaRef.InstantiateFunctionDeclaration(
F, TemplateArgListForBuildingFPrime, AliasTemplate->getLocation(),
Sema::CodeSynthesisContext::BuildingDeductionGuides)) {
- auto *GG = dyn_cast<CXXDeductionGuideDecl>(FPrime);
+ auto *GG = cast<CXXDeductionGuideDecl>(FPrime);
buildDeductionGuide(SemaRef, AliasTemplate, FPrimeTemplateParamList,
GG->getCorrespondingConstructor(),
GG->getExplicitSpecifier(), GG->getTypeSourceInfo(),
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 97f8445bf819..9a55881f6442 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -5514,9 +5514,9 @@ FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
QualType Obj2Ty;
if (TPOC == TPOC_Call) {
const FunctionProtoType *Proto1 =
- FD1->getType()->getAs<FunctionProtoType>();
+ FD1->getType()->castAs<FunctionProtoType>();
const FunctionProtoType *Proto2 =
- FD2->getType()->getAs<FunctionProtoType>();
+ FD2->getType()->castAs<FunctionProtoType>();
// - In the context of a function call, the function parameter types are
// used.
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 7b14323b0674..fd94caa4e1d4 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -138,7 +138,8 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_PreserveMost: \
case ParsedAttr::AT_PreserveAll: \
case ParsedAttr::AT_M68kRTD: \
- case ParsedAttr::AT_PreserveNone
+ case ParsedAttr::AT_PreserveNone: \
+ case ParsedAttr::AT_RISCVVectorCC
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
@@ -1018,6 +1019,11 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
return type;
}
+ // Types that have __attribute__((NSObject)) are permitted.
+ if (typeArg->isObjCNSObjectType()) {
+ continue;
+ }
+
// Dependent types will be checked at instantiation time.
if (typeArg->isDependentType()) {
continue;
@@ -7934,6 +7940,8 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<M68kRTDAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveNone:
return createSimpleAttr<PreserveNoneAttr>(Ctx, Attr);
+ case ParsedAttr::AT_RISCVVectorCC:
+ return createSimpleAttr<RISCVVectorCCAttr>(Ctx, Attr);
}
llvm_unreachable("unexpected attribute kind!");
}
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 221409d011a3..1e5734c9c834 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -193,17 +193,17 @@ std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
const ModuleMap &MM = HS.getModuleMap();
SourceManager &SourceMgr = PP.getSourceManager();
- std::set<const FileEntry *> ModuleMaps{};
- auto CollectIncludingModuleMaps = [&](FileEntryRef F) {
+ std::set<const FileEntry *> ModuleMaps;
+ auto CollectIncludingModuleMaps = [&](FileID FID, FileEntryRef F) {
if (!ModuleMaps.insert(F).second)
return;
- FileID FID = SourceMgr.translateFile(F);
SourceLocation Loc = SourceMgr.getIncludeLoc(FID);
// The include location of inferred module maps can point into the header
// file that triggered the inferring. Cut off the walk if that's the case.
while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) {
FID = SourceMgr.getFileID(Loc);
- if (!ModuleMaps.insert(*SourceMgr.getFileEntryRefForID(FID)).second)
+ F = *SourceMgr.getFileEntryRefForID(FID);
+ if (!ModuleMaps.insert(F).second)
break;
Loc = SourceMgr.getIncludeLoc(FID);
}
@@ -216,13 +216,13 @@ std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
break;
// The containing module map is affecting, because it's being pointed
// into by Module::DefinitionLoc.
- if (auto ModuleMapFile = MM.getContainingModuleMapFile(Mod))
- CollectIncludingModuleMaps(*ModuleMapFile);
+ if (FileID FID = MM.getContainingModuleMapFileID(Mod); FID.isValid())
+ CollectIncludingModuleMaps(FID, *SourceMgr.getFileEntryRefForID(FID));
// For inferred modules, the module map that allowed inferring is not in
// the include chain of the virtual containing module map file. It did
// affect the compilation, though.
- if (auto ModuleMapFile = MM.getModuleMapFileForUniquing(Mod))
- CollectIncludingModuleMaps(*ModuleMapFile);
+ if (FileID FID = MM.getModuleMapFileIDForUniquing(Mod); FID.isValid())
+ CollectIncludingModuleMaps(FID, *SourceMgr.getFileEntryRefForID(FID));
}
};
@@ -3195,6 +3195,10 @@ uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
if (DC->decls_empty())
return 0;
+ // In reduced BMI, we don't care about the declarations in functions.
+ if (GeneratingReducedBMI && DC->isFunctionOrMethod())
+ return 0;
+
uint64_t Offset = Stream.GetCurrentBitNo();
SmallVector<uint32_t, 128> KindDeclPairs;
for (const auto *D : DC->decls()) {
@@ -4728,7 +4732,6 @@ void ASTWriter::computeNonAffectingInputFiles() {
continue;
if (!isModuleMap(File.getFileCharacteristic()) ||
- AffectingModuleMaps.empty() ||
llvm::is_contained(AffectingModuleMaps, *Cache->OrigEntry))
continue;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
index b4dee1e300e8..1b1226a7f1a7 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
@@ -220,11 +220,11 @@ CXXDeleteChecker::PtrCastVisitor::VisitNode(const ExplodedNode *N,
/*addPosRange=*/true);
}
-void ento::registerCXXArrayDeleteChecker(CheckerManager &mgr) {
+void ento::registerArrayDeleteChecker(CheckerManager &mgr) {
mgr.registerChecker<CXXArrayDeleteChecker>();
}
-bool ento::shouldRegisterCXXArrayDeleteChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterArrayDeleteChecker(const CheckerManager &mgr) {
return true;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index a50772f881f7..2cff97a591b8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -68,7 +68,7 @@ static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
FlexSize = Ctx.getTypeSizeInChars(ElemType);
if (ArrayTy->getSize() == 1 && TypeSize > FlexSize)
TypeSize -= FlexSize;
- else if (ArrayTy->getSize() != 0)
+ else if (!ArrayTy->isZeroSize())
return false;
} else if (RD->hasFlexibleArrayMember()) {
FlexSize = Ctx.getTypeSizeInChars(ElemType);
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 03cb7696707f..88fb42b6625a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -394,8 +394,10 @@ private:
const CallEvent &Call, CheckerContext &C)>;
const CallDescriptionMap<CheckFn> PreFnMap{
- {{{"getline"}, 3}, &MallocChecker::preGetdelim},
- {{{"getdelim"}, 4}, &MallocChecker::preGetdelim},
+ // NOTE: the following CallDescription also matches the C++ standard
+ // library function std::getline(); the callback will filter it out.
+ {{CDM::CLibrary, {"getline"}, 3}, &MallocChecker::preGetdelim},
+ {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::preGetdelim},
};
const CallDescriptionMap<CheckFn> FreeingMemFnMap{
@@ -446,8 +448,11 @@ private:
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
{{{"g_realloc_n"}, 3}, &MallocChecker::checkReallocN},
{{{"g_try_realloc_n"}, 3}, &MallocChecker::checkReallocN},
- {{{"getline"}, 3}, &MallocChecker::checkGetdelim},
- {{{"getdelim"}, 4}, &MallocChecker::checkGetdelim},
+
+ // NOTE: the following CallDescription also matches the C++ standard
+ // library function std::getline(); the callback will filter it out.
+ {{CDM::CLibrary, {"getline"}, 3}, &MallocChecker::checkGetdelim},
+ {{CDM::CLibrary, {"getdelim"}, 4}, &MallocChecker::checkGetdelim},
};
bool isMemCall(const CallEvent &Call) const;
@@ -1435,13 +1440,21 @@ void MallocChecker::checkGMallocN0(const CallEvent &Call,
C.addTransition(State);
}
+static bool isFromStdNamespace(const CallEvent &Call) {
+ const Decl *FD = Call.getDecl();
+ assert(FD && "a CallDescription cannot match a call without a Decl");
+ return FD->isInStdNamespace();
+}
+
void MallocChecker::preGetdelim(const CallEvent &Call,
CheckerContext &C) const {
- if (!Call.isGlobalCFunction())
+ // Discard calls to the C++ standard library function std::getline(), which
+ // is completely unrelated to the POSIX getline() that we're checking.
+ if (isFromStdNamespace(Call))
return;
ProgramStateRef State = C.getState();
- const auto LinePtr = getPointeeDefVal(Call.getArgSVal(0), State);
+ const auto LinePtr = getPointeeVal(Call.getArgSVal(0), State);
if (!LinePtr)
return;
@@ -1458,7 +1471,9 @@ void MallocChecker::preGetdelim(const CallEvent &Call,
void MallocChecker::checkGetdelim(const CallEvent &Call,
CheckerContext &C) const {
- if (!Call.isGlobalCFunction())
+ // Discard calls to the C++ standard library function std::getline(), which
+ // is completely unrelated to the POSIX getline() that we're checking.
+ if (isFromStdNamespace(Call))
return;
ProgramStateRef State = C.getState();
@@ -1470,8 +1485,10 @@ void MallocChecker::checkGetdelim(const CallEvent &Call,
SValBuilder &SVB = C.getSValBuilder();
- const auto LinePtr = getPointeeDefVal(Call.getArgSVal(0), State);
- const auto Size = getPointeeDefVal(Call.getArgSVal(1), State);
+ const auto LinePtr =
+ getPointeeVal(Call.getArgSVal(0), State)->getAs<DefinedSVal>();
+ const auto Size =
+ getPointeeVal(Call.getArgSVal(1), State)->getAs<DefinedSVal>();
if (!LinePtr || !Size || !LinePtr->getAsRegion())
return;
diff --git a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index eee9449f3180..4f35d9442ad9 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -117,7 +117,7 @@ public:
return;
uint64_t Elts = 0;
if (const ConstantArrayType *CArrTy = dyn_cast<ConstantArrayType>(ArrTy))
- Elts = CArrTy->getSize().getZExtValue();
+ Elts = CArrTy->getZExtSize();
if (Elts == 0)
return;
const RecordType *RT = ArrTy->getElementType()->getAs<RecordType>();
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 10972158f398..902c42a2799b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -1200,10 +1200,25 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
// Add transition for the successful state.
NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
- ProgramStateRef StateNotFailed =
- State->BindExpr(E.CE, C.getLocationContext(), RetVal);
+ ProgramStateRef StateNotFailed = E.bindReturnValue(State, C, RetVal);
StateNotFailed =
E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
+
+ // On success, a buffer is allocated.
+ auto NewLinePtr = getPointeeVal(Call.getArgSVal(0), State);
+ if (NewLinePtr && isa<DefinedOrUnknownSVal>(*NewLinePtr))
+ StateNotFailed = StateNotFailed->assume(
+ NewLinePtr->castAs<DefinedOrUnknownSVal>(), true);
+
+ // The buffer size `*n` must be enough to hold the whole line, and
+ // greater than the return value, since it has to account for '\0'.
+ SVal SizePtrSval = Call.getArgSVal(1);
+ auto NVal = getPointeeVal(SizePtrSval, State);
+ if (NVal && isa<NonLoc>(*NVal)) {
+ StateNotFailed = E.assumeBinOpNN(StateNotFailed, BO_GT,
+ NVal->castAs<NonLoc>(), RetVal);
+ StateNotFailed = E.bindReturnValue(StateNotFailed, C, RetVal);
+ }
if (!StateNotFailed)
return;
C.addTransition(StateNotFailed);
@@ -1217,6 +1232,10 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
E.isStreamEof() ? ErrorFEof : ErrorFEof | ErrorFError;
StateFailed = E.setStreamState(
StateFailed, StreamState::getOpened(Desc, NewES, !NewES.isFEof()));
+ // On failure, the content of the buffer is undefined.
+ if (auto NewLinePtr = getPointeeVal(Call.getArgSVal(0), State))
+ StateFailed = StateFailed->bindLoc(*NewLinePtr, UndefinedVal(),
+ C.getLocationContext());
C.addTransition(StateFailed, E.getFailureNoteTag(this, C));
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 19f1ca2dc824..da2d16ca9b5d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -17,8 +17,10 @@
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -41,25 +43,38 @@ enum class OpenVariant {
namespace {
class UnixAPIMisuseChecker
- : public Checker<check::PreStmt<CallExpr>,
- check::ASTDecl<TranslationUnitDecl>> {
+ : public Checker<check::PreCall, check::ASTDecl<TranslationUnitDecl>> {
const BugType BT_open{this, "Improper use of 'open'", categories::UnixAPI};
+ const BugType BT_getline{this, "Improper use of getdelim",
+ categories::UnixAPI};
const BugType BT_pthreadOnce{this, "Improper use of 'pthread_once'",
categories::UnixAPI};
+ const BugType BT_ArgumentNull{this, "NULL pointer", categories::UnixAPI};
mutable std::optional<uint64_t> Val_O_CREAT;
+ ProgramStateRef
+ EnsurePtrNotNull(SVal PtrVal, const Expr *PtrExpr, CheckerContext &C,
+ ProgramStateRef State, const StringRef PtrDescr,
+ std::optional<std::reference_wrapper<const BugType>> BT =
+ std::nullopt) const;
+
+ ProgramStateRef EnsureGetdelimBufferAndSizeCorrect(
+ SVal LinePtrPtrSVal, SVal SizePtrSVal, const Expr *LinePtrPtrExpr,
+ const Expr *SizePtrExpr, CheckerContext &C, ProgramStateRef State) const;
+
public:
void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager &Mgr,
BugReporter &BR) const;
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
- void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
- void CheckOpenAt(CheckerContext &C, const CallExpr *CE) const;
- void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+ void CheckOpen(CheckerContext &C, const CallEvent &Call) const;
+ void CheckOpenAt(CheckerContext &C, const CallEvent &Call) const;
+ void CheckGetDelim(CheckerContext &C, const CallEvent &Call) const;
+ void CheckPthreadOnce(CheckerContext &C, const CallEvent &Call) const;
- void CheckOpenVariant(CheckerContext &C,
- const CallExpr *CE, OpenVariant Variant) const;
+ void CheckOpenVariant(CheckerContext &C, const CallEvent &Call,
+ OpenVariant Variant) const;
void ReportOpenBug(CheckerContext &C, ProgramStateRef State, const char *Msg,
SourceRange SR) const;
@@ -95,6 +110,30 @@ private:
} // end anonymous namespace
+ProgramStateRef UnixAPIMisuseChecker::EnsurePtrNotNull(
+ SVal PtrVal, const Expr *PtrExpr, CheckerContext &C, ProgramStateRef State,
+ const StringRef PtrDescr,
+ std::optional<std::reference_wrapper<const BugType>> BT) const {
+ const auto Ptr = PtrVal.getAs<DefinedSVal>();
+ if (!Ptr)
+ return State;
+
+ const auto [PtrNotNull, PtrNull] = State->assume(*Ptr);
+ if (!PtrNotNull && PtrNull) {
+ if (ExplodedNode *N = C.generateErrorNode(PtrNull)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT.value_or(std::cref(BT_ArgumentNull)),
+ (PtrDescr + " pointer might be NULL.").str(), N);
+ if (PtrExpr)
+ bugreporter::trackExpressionValue(N, PtrExpr, *R);
+ C.emitReport(std::move(R));
+ }
+ return nullptr;
+ }
+
+ return PtrNotNull;
+}
+
void UnixAPIMisuseChecker::checkASTDecl(const TranslationUnitDecl *TU,
AnalysisManager &Mgr,
BugReporter &) const {
@@ -113,9 +152,9 @@ void UnixAPIMisuseChecker::checkASTDecl(const TranslationUnitDecl *TU,
// "open" (man 2 open)
//===----------------------------------------------------------------------===/
-void UnixAPIMisuseChecker::checkPreStmt(const CallExpr *CE,
+void UnixAPIMisuseChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- const FunctionDecl *FD = C.getCalleeDecl(CE);
+ const FunctionDecl *FD = dyn_cast_if_present<FunctionDecl>(Call.getDecl());
if (!FD || FD->getKind() != Decl::Function)
return;
@@ -130,13 +169,16 @@ void UnixAPIMisuseChecker::checkPreStmt(const CallExpr *CE,
return;
if (FName == "open")
- CheckOpen(C, CE);
+ CheckOpen(C, Call);
else if (FName == "openat")
- CheckOpenAt(C, CE);
+ CheckOpenAt(C, Call);
else if (FName == "pthread_once")
- CheckPthreadOnce(C, CE);
+ CheckPthreadOnce(C, Call);
+
+ else if (is_contained({"getdelim", "getline"}, FName))
+ CheckGetDelim(C, Call);
}
void UnixAPIMisuseChecker::ReportOpenBug(CheckerContext &C,
ProgramStateRef State,
@@ -152,17 +194,17 @@ void UnixAPIMisuseChecker::ReportOpenBug(CheckerContext &C,
}
void UnixAPIMisuseChecker::CheckOpen(CheckerContext &C,
- const CallExpr *CE) const {
- CheckOpenVariant(C, CE, OpenVariant::Open);
+ const CallEvent &Call) const {
+ CheckOpenVariant(C, Call, OpenVariant::Open);
}
void UnixAPIMisuseChecker::CheckOpenAt(CheckerContext &C,
- const CallExpr *CE) const {
- CheckOpenVariant(C, CE, OpenVariant::OpenAt);
+ const CallEvent &Call) const {
+ CheckOpenVariant(C, Call, OpenVariant::OpenAt);
}
void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
- const CallExpr *CE,
+ const CallEvent &Call,
OpenVariant Variant) const {
// The index of the argument taking the flags open flags (O_RDONLY,
// O_WRONLY, O_CREAT, etc.),
@@ -191,11 +233,11 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
ProgramStateRef state = C.getState();
- if (CE->getNumArgs() < MinArgCount) {
+ if (Call.getNumArgs() < MinArgCount) {
// The frontend should issue a warning for this case. Just return.
return;
- } else if (CE->getNumArgs() == MaxArgCount) {
- const Expr *Arg = CE->getArg(CreateModeArgIndex);
+ } else if (Call.getNumArgs() == MaxArgCount) {
+ const Expr *Arg = Call.getArgExpr(CreateModeArgIndex);
QualType QT = Arg->getType();
if (!QT->isIntegerType()) {
SmallString<256> SBuf;
@@ -209,15 +251,14 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
Arg->getSourceRange());
return;
}
- } else if (CE->getNumArgs() > MaxArgCount) {
+ } else if (Call.getNumArgs() > MaxArgCount) {
SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
OS << "Call to '" << VariantName << "' with more than " << MaxArgCount
<< " arguments";
- ReportOpenBug(C, state,
- SBuf.c_str(),
- CE->getArg(MaxArgCount)->getSourceRange());
+ ReportOpenBug(C, state, SBuf.c_str(),
+ Call.getArgExpr(MaxArgCount)->getSourceRange());
return;
}
@@ -226,8 +267,8 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
}
// Now check if oflags has O_CREAT set.
- const Expr *oflagsEx = CE->getArg(FlagsArgIndex);
- const SVal V = C.getSVal(oflagsEx);
+ const Expr *oflagsEx = Call.getArgExpr(FlagsArgIndex);
+ const SVal V = Call.getArgSVal(FlagsArgIndex);
if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
@@ -253,7 +294,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
if (!(trueState && !falseState))
return;
- if (CE->getNumArgs() < MaxArgCount) {
+ if (Call.getNumArgs() < MaxArgCount) {
SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
OS << "Call to '" << VariantName << "' requires a "
@@ -267,22 +308,109 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
}
//===----------------------------------------------------------------------===//
+// getdelim and getline
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef UnixAPIMisuseChecker::EnsureGetdelimBufferAndSizeCorrect(
+ SVal LinePtrPtrSVal, SVal SizePtrSVal, const Expr *LinePtrPtrExpr,
+ const Expr *SizePtrExpr, CheckerContext &C, ProgramStateRef State) const {
+ static constexpr llvm::StringLiteral SizeGreaterThanBufferSize =
+ "The buffer from the first argument is smaller than the size "
+ "specified by the second parameter";
+ static constexpr llvm::StringLiteral SizeUndef =
+ "The buffer from the first argument is not NULL, but the size specified "
+ "by the second parameter is undefined.";
+
+ auto EmitBugReport = [this, &C, SizePtrExpr, LinePtrPtrExpr](
+ ProgramStateRef BugState, StringRef ErrMsg) {
+ if (ExplodedNode *N = C.generateErrorNode(BugState)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(BT_getline, ErrMsg, N);
+ bugreporter::trackExpressionValue(N, SizePtrExpr, *R);
+ bugreporter::trackExpressionValue(N, LinePtrPtrExpr, *R);
+ C.emitReport(std::move(R));
+ }
+ };
+
+ // We have a pointer to a pointer to the buffer, and a pointer to the size.
+ // We want what they point at.
+ auto LinePtrSVal = getPointeeVal(LinePtrPtrSVal, State)->getAs<DefinedSVal>();
+ auto NSVal = getPointeeVal(SizePtrSVal, State);
+ if (!LinePtrSVal || !NSVal || NSVal->isUnknown())
+ return nullptr;
+
+ assert(LinePtrPtrExpr && SizePtrExpr);
+
+ const auto [LinePtrNotNull, LinePtrNull] = State->assume(*LinePtrSVal);
+ if (LinePtrNotNull && !LinePtrNull) {
+ // If `*lineptr` is not null, but `*n` is undefined, there is UB.
+ if (NSVal->isUndef()) {
+ EmitBugReport(LinePtrNotNull, SizeUndef);
+ return nullptr;
+ }
+
+ // If it is defined, and known, its size must be less than or equal to
+ // the buffer size.
+ auto NDefSVal = NSVal->getAs<DefinedSVal>();
+ auto &SVB = C.getSValBuilder();
+ auto LineBufSize =
+ getDynamicExtent(LinePtrNotNull, LinePtrSVal->getAsRegion(), SVB);
+ auto LineBufSizeGtN = SVB.evalBinOp(LinePtrNotNull, BO_GE, LineBufSize,
+ *NDefSVal, SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!LineBufSizeGtN)
+ return LinePtrNotNull;
+ if (auto LineBufSizeOk = LinePtrNotNull->assume(*LineBufSizeGtN, true))
+ return LineBufSizeOk;
+
+ EmitBugReport(LinePtrNotNull, SizeGreaterThanBufferSize);
+ return nullptr;
+ }
+ return State;
+}
+
+void UnixAPIMisuseChecker::CheckGetDelim(CheckerContext &C,
+ const CallEvent &Call) const {
+ ProgramStateRef State = C.getState();
+
+ // The parameter `n` must not be NULL.
+ SVal SizePtrSval = Call.getArgSVal(1);
+ State = EnsurePtrNotNull(SizePtrSval, Call.getArgExpr(1), C, State, "Size");
+ if (!State)
+ return;
+
+ // The parameter `lineptr` must not be NULL.
+ SVal LinePtrPtrSVal = Call.getArgSVal(0);
+ State =
+ EnsurePtrNotNull(LinePtrPtrSVal, Call.getArgExpr(0), C, State, "Line");
+ if (!State)
+ return;
+
+ State = EnsureGetdelimBufferAndSizeCorrect(LinePtrPtrSVal, SizePtrSval,
+ Call.getArgExpr(0),
+ Call.getArgExpr(1), C, State);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+//===----------------------------------------------------------------------===//
// pthread_once
//===----------------------------------------------------------------------===//
void UnixAPIMisuseChecker::CheckPthreadOnce(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
// This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
// They can possibly be refactored.
- if (CE->getNumArgs() < 1)
+ if (Call.getNumArgs() < 1)
return;
// Check if the first argument is stack allocated. If so, issue a warning
// because that's likely to be bad news.
ProgramStateRef state = C.getState();
- const MemRegion *R = C.getSVal(CE->getArg(0)).getAsRegion();
+ const MemRegion *R = Call.getArgSVal(0).getAsRegion();
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
@@ -304,7 +432,7 @@ void UnixAPIMisuseChecker::CheckPthreadOnce(CheckerContext &C,
auto report =
std::make_unique<PathSensitiveBugReport>(BT_pthreadOnce, os.str(), N);
- report->addRange(CE->getArg(0)->getSourceRange());
+ report->addRange(Call.getArgExpr(0)->getSourceRange());
C.emitReport(std::move(report));
}
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index 3617fdd778e3..14ca507a16d5 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -138,7 +138,8 @@ public:
public:
PathDiagnosticConstruct(const PathDiagnosticConsumer *PDC,
const ExplodedNode *ErrorNode,
- const PathSensitiveBugReport *R);
+ const PathSensitiveBugReport *R,
+ const Decl *AnalysisEntryPoint);
/// \returns the location context associated with the current position in the
/// bug path.
@@ -1323,24 +1324,26 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode(
}
static std::unique_ptr<PathDiagnostic>
-generateDiagnosticForBasicReport(const BasicBugReport *R) {
+generateDiagnosticForBasicReport(const BasicBugReport *R,
+ const Decl *AnalysisEntryPoint) {
const BugType &BT = R->getBugType();
return std::make_unique<PathDiagnostic>(
BT.getCheckerName(), R->getDeclWithIssue(), BT.getDescription(),
R->getDescription(), R->getShortDescription(/*UseFallback=*/false),
BT.getCategory(), R->getUniqueingLocation(), R->getUniqueingDecl(),
- std::make_unique<FilesToLineNumsMap>());
+ AnalysisEntryPoint, std::make_unique<FilesToLineNumsMap>());
}
static std::unique_ptr<PathDiagnostic>
generateEmptyDiagnosticForReport(const PathSensitiveBugReport *R,
- const SourceManager &SM) {
+ const SourceManager &SM,
+ const Decl *AnalysisEntryPoint) {
const BugType &BT = R->getBugType();
return std::make_unique<PathDiagnostic>(
BT.getCheckerName(), R->getDeclWithIssue(), BT.getDescription(),
R->getDescription(), R->getShortDescription(/*UseFallback=*/false),
BT.getCategory(), R->getUniqueingLocation(), R->getUniqueingDecl(),
- findExecutedLines(SM, R->getErrorNode()));
+ AnalysisEntryPoint, findExecutedLines(SM, R->getErrorNode()));
}
static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
@@ -1976,10 +1979,11 @@ static void updateExecutedLinesWithDiagnosticPieces(PathDiagnostic &PD) {
PathDiagnosticConstruct::PathDiagnosticConstruct(
const PathDiagnosticConsumer *PDC, const ExplodedNode *ErrorNode,
- const PathSensitiveBugReport *R)
+ const PathSensitiveBugReport *R, const Decl *AnalysisEntryPoint)
: Consumer(PDC), CurrentNode(ErrorNode),
SM(CurrentNode->getCodeDecl().getASTContext().getSourceManager()),
- PD(generateEmptyDiagnosticForReport(R, getSourceManager())) {
+ PD(generateEmptyDiagnosticForReport(R, getSourceManager(),
+ AnalysisEntryPoint)) {
LCM[&PD->getActivePath()] = ErrorNode->getLocationContext();
}
@@ -1993,13 +1997,14 @@ PathDiagnosticBuilder::PathDiagnosticBuilder(
std::unique_ptr<PathDiagnostic>
PathDiagnosticBuilder::generate(const PathDiagnosticConsumer *PDC) const {
- PathDiagnosticConstruct Construct(PDC, ErrorNode, R);
+ const Decl *EntryPoint = getBugReporter().getAnalysisEntryPoint();
+ PathDiagnosticConstruct Construct(PDC, ErrorNode, R, EntryPoint);
const SourceManager &SM = getSourceManager();
const AnalyzerOptions &Opts = getAnalyzerOptions();
if (!PDC->shouldGenerateDiagnostics())
- return generateEmptyDiagnosticForReport(R, getSourceManager());
+ return generateEmptyDiagnosticForReport(R, getSourceManager(), EntryPoint);
// Construct the final (warning) event for the bug report.
auto EndNotes = VisitorsDiagnostics->find(ErrorNode);
@@ -3123,6 +3128,16 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
Pieces.back()->addFixit(I);
updateExecutedLinesWithDiagnosticPieces(*PD);
+
+ // If we are debugging, let's have the entry point as the first note.
+ if (getAnalyzerOptions().AnalyzerDisplayProgress ||
+ getAnalyzerOptions().AnalyzerNoteAnalysisEntryPoints) {
+ const Decl *EntryPoint = getAnalysisEntryPoint();
+ Pieces.push_front(std::make_shared<PathDiagnosticEventPiece>(
+ PathDiagnosticLocation{EntryPoint->getLocation(), getSourceManager()},
+ "[debug] analyzing from " +
+ AnalysisDeclContext::getFunctionName(EntryPoint)));
+ }
Consumer->HandlePathDiagnostic(std::move(PD));
}
}
@@ -3211,7 +3226,8 @@ BugReporter::generateDiagnosticForConsumerMap(
auto *basicReport = cast<BasicBugReport>(exampleReport);
auto Out = std::make_unique<DiagnosticForConsumerMapTy>();
for (auto *Consumer : consumers)
- (*Out)[Consumer] = generateDiagnosticForBasicReport(basicReport);
+ (*Out)[Consumer] =
+ generateDiagnosticForBasicReport(basicReport, AnalysisEntryPoint);
return Out;
}
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index bc14aea27f67..0e317ec765ec 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -1408,9 +1408,12 @@ CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
if (const auto *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
- if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee)) {
if (MD->isImplicitObjectMemberFunction())
return create<CXXMemberOperatorCall>(OpCE, State, LCtx, ElemRef);
+ if (MD->isStatic())
+ return create<CXXStaticOperatorCall>(OpCE, State, LCtx, ElemRef);
+ }
} else if (CE->getCallee()->getType()->isBlockPointerType()) {
return create<BlockCall>(CE, State, LCtx, ElemRef);
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index d6d4cec9dd3d..1a9bff529e9b 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -87,9 +87,11 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (!II)
return false;
- // Look through 'extern "C"' and anything similar invented in the future.
- // If this function is not in TU directly, it is not a C library function.
- if (!FD->getDeclContext()->getRedeclContext()->isTranslationUnit())
+ // C library functions are either declared directly within a TU (the common
+ // case) or they are accessed through the namespace `std` (when they are used
+ // in C++ via headers like <cstdlib>).
+ const DeclContext *DC = FD->getDeclContext()->getRedeclContext();
+ if (!(DC->isTranslationUnit() || DC->isStdNamespace()))
return false;
// If this function is not externally visible, it is not a C library function.
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index 364c87e910b7..d7137a915b3d 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -183,10 +183,9 @@ OperatorKind operationKindFromOverloadedOperator(OverloadedOperatorKind OOK,
}
}
-std::optional<DefinedSVal> getPointeeDefVal(SVal PtrSVal,
- ProgramStateRef State) {
+std::optional<SVal> getPointeeVal(SVal PtrSVal, ProgramStateRef State) {
if (const auto *Ptr = PtrSVal.getAsRegion()) {
- return State->getSVal(Ptr).getAs<DefinedSVal>();
+ return State->getSVal(Ptr);
}
return std::nullopt;
}
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 4755b6bfa6dc..9d3e4fc944fb 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -846,6 +846,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
const StackFrameContext *CallerSFC = CurLC->getStackFrame();
switch (Call.getKind()) {
case CE_Function:
+ case CE_CXXStaticOperator:
case CE_Block:
break;
case CE_CXXMember:
diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 16db6b249dc9..d6e4f23cc353 100644
--- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -720,13 +720,21 @@ std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
CI->getValue().toString(Idx);
ArrayIndices = (llvm::Twine("[") + Idx.str() + "]" + ArrayIndices).str();
}
- // If not a ConcreteInt, try to obtain the variable
- // name by calling 'getDescriptiveName' recursively.
+ // Index is symbolic, but may have a descriptive name.
else {
- std::string Idx = ER->getDescriptiveName(false);
- if (!Idx.empty()) {
- ArrayIndices = (llvm::Twine("[") + Idx + "]" + ArrayIndices).str();
- }
+ auto SI = ER->getIndex().getAs<nonloc::SymbolVal>();
+ if (!SI)
+ return "";
+
+ const MemRegion *OR = SI->getAsSymbol()->getOriginRegion();
+ if (!OR)
+ return "";
+
+ std::string Idx = OR->getDescriptiveName(false);
+ if (Idx.empty())
+ return "";
+
+ ArrayIndices = (llvm::Twine("[") + Idx + "]" + ArrayIndices).str();
}
R = ER->getSuperRegion();
}
@@ -817,7 +825,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
};
auto IsArrayOfZero = [](const ArrayType *AT) {
const auto *CAT = dyn_cast<ConstantArrayType>(AT);
- return CAT && CAT->getSize() == 0;
+ return CAT && CAT->isZeroSize();
};
auto IsArrayOfOne = [](const ArrayType *AT) {
const auto *CAT = dyn_cast<ConstantArrayType>(AT);
diff --git a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index f12f1a5ac970..f82cd944750a 100644
--- a/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -226,6 +226,20 @@ ProgramStateRef ProgramState::killBinding(Loc LV) const {
return makeWithStore(newStore);
}
+/// SymbolicRegions are expected to be wrapped by an ElementRegion as a
+/// canonical representation. As a canonical representation, SymbolicRegions
+/// should be wrapped by ElementRegions before getting a FieldRegion.
+/// See f8643a9b31c4029942f67d4534c9139b45173504 why.
+SVal ProgramState::wrapSymbolicRegion(SVal Val) const {
+ const auto *BaseReg = dyn_cast_or_null<SymbolicRegion>(Val.getAsRegion());
+ if (!BaseReg)
+ return Val;
+
+ StoreManager &SM = getStateManager().getStoreManager();
+ QualType ElemTy = BaseReg->getPointeeStaticType();
+ return loc::MemRegionVal{SM.GetElementZeroRegion(BaseReg, ElemTy)};
+}
+
ProgramStateRef
ProgramState::enterStackFrame(const CallEvent &Call,
const StackFrameContext *CalleeCtx) const {
@@ -451,6 +465,24 @@ void ProgramState::setStore(const StoreRef &newStore) {
store = newStoreStore;
}
+SVal ProgramState::getLValue(const FieldDecl *D, SVal Base) const {
+ Base = wrapSymbolicRegion(Base);
+ return getStateManager().StoreMgr->getLValueField(D, Base);
+}
+
+SVal ProgramState::getLValue(const IndirectFieldDecl *D, SVal Base) const {
+ StoreManager &SM = *getStateManager().StoreMgr;
+ Base = wrapSymbolicRegion(Base);
+
+ // FIXME: This should work with `SM.getLValueField(D->getAnonField(), Base)`,
+ // but that would break some tests. There is probably a bug somewhere that it
+ // would expose.
+ for (const auto *I : D->chain()) {
+ Base = SM.getLValueField(cast<FieldDecl>(I), Base);
+ }
+ return Base;
+}
+
//===----------------------------------------------------------------------===//
// State pretty-printing.
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index da9a1a1a4d1f..755a8c4b22fd 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -1166,7 +1166,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// Compute lower and upper offsets for region within array.
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
- NumElements = CAT->getSize().getZExtValue();
+ NumElements = CAT->getZExtSize();
if (!NumElements) // We are not dealing with a constant size array
goto conjure_default;
QualType ElementTy = AT->getElementType();
@@ -1613,7 +1613,7 @@ getConstantArrayExtents(const ConstantArrayType *CAT) {
CAT = cast<ConstantArrayType>(CAT->getCanonicalTypeInternal());
SmallVector<uint64_t, 2> Extents;
do {
- Extents.push_back(CAT->getSize().getZExtValue());
+ Extents.push_back(CAT->getZExtSize());
} while ((CAT = dyn_cast<ConstantArrayType>(CAT->getElementType())));
return Extents;
}
@@ -2436,7 +2436,7 @@ std::optional<RegionBindingsRef> RegionStoreManager::tryBindSmallArray(
return std::nullopt;
// If the array is too big, create a LCV instead.
- uint64_t ArrSize = CAT->getSize().getLimitedValue();
+ uint64_t ArrSize = CAT->getLimitedSize();
if (ArrSize > SmallArrayLimit)
return std::nullopt;
@@ -2465,7 +2465,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
std::optional<uint64_t> Size;
if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
- Size = CAT->getSize().getZExtValue();
+ Size = CAT->getZExtSize();
// Check if the init expr is a literal. If so, bind the rvalue instead.
// FIXME: It's not responsibility of the Store to transform this lvalue
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index b6ef40595e3c..03bc40804d73 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -527,7 +527,8 @@ static void reportAnalyzerFunctionMisuse(const AnalyzerOptions &Opts,
void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
BugReporter BR(*Mgr);
- TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ const TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ BR.setAnalysisEntryPoint(TU);
if (SyntaxCheckTimer)
SyntaxCheckTimer->startTimer();
checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
@@ -675,6 +676,7 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
DisplayFunction(D, Mode, IMode);
BugReporter BR(*Mgr);
+ BR.setAnalysisEntryPoint(D);
if (Mode & AM_Syntax) {
llvm::TimeRecord CheckerStartTime;
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 1b750cec41e1..9b7812a1adb9 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -41,24 +41,25 @@ DependencyScanningWorkerFilesystem::readFile(StringRef Filename) {
return TentativeEntry(Stat, std::move(Buffer));
}
-EntryRef DependencyScanningWorkerFilesystem::scanForDirectivesIfNecessary(
- const CachedFileSystemEntry &Entry, StringRef Filename, bool Disable) {
- if (Entry.isError() || Entry.isDirectory() || Disable ||
- !shouldScanForDirectives(Filename))
- return EntryRef(Filename, Entry);
+bool DependencyScanningWorkerFilesystem::ensureDirectiveTokensArePopulated(
+ EntryRef Ref) {
+ auto &Entry = Ref.Entry;
+
+ if (Entry.isError() || Entry.isDirectory())
+ return false;
CachedFileContents *Contents = Entry.getCachedContents();
assert(Contents && "contents not initialized");
// Double-checked locking.
if (Contents->DepDirectives.load())
- return EntryRef(Filename, Entry);
+ return true;
std::lock_guard<std::mutex> GuardLock(Contents->ValueLock);
// Double-checked locking.
if (Contents->DepDirectives.load())
- return EntryRef(Filename, Entry);
+ return true;
SmallVector<dependency_directives_scan::Directive, 64> Directives;
// Scan the file for preprocessor directives that might affect the
@@ -69,16 +70,16 @@ EntryRef DependencyScanningWorkerFilesystem::scanForDirectivesIfNecessary(
Contents->DepDirectiveTokens.clear();
// FIXME: Propagate the diagnostic if desired by the client.
Contents->DepDirectives.store(new std::optional<DependencyDirectivesTy>());
- return EntryRef(Filename, Entry);
+ return false;
}
// This function performed double-checked locking using `DepDirectives`.
// Assigning it must be the last thing this function does, otherwise other
- // threads may skip the
- // critical section (`DepDirectives != nullptr`), leading to a data race.
+ // threads may skip the critical section (`DepDirectives != nullptr`), leading
+ // to a data race.
Contents->DepDirectives.store(
new std::optional<DependencyDirectivesTy>(std::move(Directives)));
- return EntryRef(Filename, Entry);
+ return true;
}
DependencyScanningFilesystemSharedCache::
@@ -161,34 +162,11 @@ DependencyScanningFilesystemSharedCache::CacheShard::
return *EntriesByFilename.insert({Filename, &Entry}).first->getValue();
}
-/// Whitelist file extensions that should be minimized, treating no extension as
-/// a source file that should be minimized.
-///
-/// This is kinda hacky, it would be better if we knew what kind of file Clang
-/// was expecting instead.
-static bool shouldScanForDirectivesBasedOnExtension(StringRef Filename) {
- StringRef Ext = llvm::sys::path::extension(Filename);
- if (Ext.empty())
- return true; // C++ standard library
- return llvm::StringSwitch<bool>(Ext)
- .CasesLower(".c", ".cc", ".cpp", ".c++", ".cxx", true)
- .CasesLower(".h", ".hh", ".hpp", ".h++", ".hxx", true)
- .CasesLower(".m", ".mm", true)
- .CasesLower(".i", ".ii", ".mi", ".mmi", true)
- .CasesLower(".def", ".inc", true)
- .Default(false);
-}
-
static bool shouldCacheStatFailures(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return false; // This may be the module cache directory.
- // Only cache stat failures on files that are not expected to change during
- // the build.
- StringRef FName = llvm::sys::path::filename(Filename);
- if (FName == "module.modulemap" || FName == "module.map")
- return true;
- return shouldScanForDirectivesBasedOnExtension(Filename);
+ return true;
}
DependencyScanningWorkerFilesystem::DependencyScanningWorkerFilesystem(
@@ -201,11 +179,6 @@ DependencyScanningWorkerFilesystem::DependencyScanningWorkerFilesystem(
updateWorkingDirForCacheLookup();
}
-bool DependencyScanningWorkerFilesystem::shouldScanForDirectives(
- StringRef Filename) {
- return shouldScanForDirectivesBasedOnExtension(Filename);
-}
-
const CachedFileSystemEntry &
DependencyScanningWorkerFilesystem::getOrEmplaceSharedEntryForUID(
TentativeEntry TEntry) {
@@ -259,7 +232,7 @@ DependencyScanningWorkerFilesystem::computeAndStoreResult(
llvm::ErrorOr<EntryRef>
DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
- StringRef OriginalFilename, bool DisableDirectivesScanning) {
+ StringRef OriginalFilename) {
StringRef FilenameForLookup;
SmallString<256> PathBuf;
if (llvm::sys::path::is_absolute_gnu(OriginalFilename)) {
@@ -276,15 +249,11 @@ DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
assert(llvm::sys::path::is_absolute_gnu(FilenameForLookup));
if (const auto *Entry =
findEntryByFilenameWithWriteThrough(FilenameForLookup))
- return scanForDirectivesIfNecessary(*Entry, OriginalFilename,
- DisableDirectivesScanning)
- .unwrapError();
+ return EntryRef(OriginalFilename, *Entry).unwrapError();
auto MaybeEntry = computeAndStoreResult(OriginalFilename, FilenameForLookup);
if (!MaybeEntry)
return MaybeEntry.getError();
- return scanForDirectivesIfNecessary(*MaybeEntry, OriginalFilename,
- DisableDirectivesScanning)
- .unwrapError();
+ return EntryRef(OriginalFilename, *MaybeEntry).unwrapError();
}
llvm::ErrorOr<llvm::vfs::Status>
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 76f3d950a13b..492b8f1e2b38 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -363,19 +363,22 @@ public:
PrebuiltModuleVFSMap, ScanInstance.getDiagnostics()))
return false;
- // Use the dependency scanning optimized file system if requested to do so.
- if (DepFS) {
- llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> LocalDepFS =
- DepFS;
- ScanInstance.getPreprocessorOpts().DependencyDirectivesForFile =
- [LocalDepFS = std::move(LocalDepFS)](FileEntryRef File)
- -> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
- if (llvm::ErrorOr<EntryRef> Entry =
- LocalDepFS->getOrCreateFileSystemEntry(File.getName()))
- return Entry->getDirectiveTokens();
- return std::nullopt;
- };
- }
+ auto AdjustCI = [&](CompilerInstance &CI) {
+ // Set up the dependency scanning file system callback if requested.
+ if (DepFS) {
+ auto GetDependencyDirectives = [LocalDepFS = DepFS](FileEntryRef File)
+ -> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
+ if (llvm::ErrorOr<EntryRef> Entry =
+ LocalDepFS->getOrCreateFileSystemEntry(File.getName()))
+ if (LocalDepFS->ensureDirectiveTokensArePopulated(*Entry))
+ return Entry->getDirectiveTokens();
+ return std::nullopt;
+ };
+
+ CI.getPreprocessor().setDependencyDirectivesFn(
+ std::move(GetDependencyDirectives));
+ }
+ };
// Create the dependency collector that will collect the produced
// dependencies.
@@ -427,9 +430,11 @@ public:
std::unique_ptr<FrontendAction> Action;
if (ModuleName)
- Action = std::make_unique<GetDependenciesByModuleNameAction>(*ModuleName);
+ Action = std::make_unique<GetDependenciesByModuleNameAction>(
+ *ModuleName, std::move(AdjustCI));
else
- Action = std::make_unique<ReadPCHAndPreprocessAction>();
+ Action =
+ std::make_unique<ReadPCHAndPreprocessAction>(std::move(AdjustCI));
if (ScanInstance.getDiagnostics().hasErrorOccurred())
return false;
diff --git a/clang/test/APINotes/Inputs/APINotes/SomeOtherKit.apinotes b/clang/test/APINotes/Inputs/APINotes/SomeOtherKit.apinotes
new file mode 100644
index 000000000000..ccdc4e15d34d
--- /dev/null
+++ b/clang/test/APINotes/Inputs/APINotes/SomeOtherKit.apinotes
@@ -0,0 +1,8 @@
+Name: SomeOtherKit
+Classes:
+ - Name: A
+ Methods:
+ - Selector: "methodB"
+ MethodKind: Instance
+ Availability: none
+ AvailabilityMsg: "anything but this"
diff --git a/clang/test/APINotes/Inputs/BrokenHeaders/APINotes.apinotes b/clang/test/APINotes/Inputs/BrokenHeaders/APINotes.apinotes
new file mode 100644
index 000000000000..cd5475b13423
--- /dev/null
+++ b/clang/test/APINotes/Inputs/BrokenHeaders/APINotes.apinotes
@@ -0,0 +1,5 @@
+Name: SomeBrokenLib
+Functions:
+ - Name: do_something_with_pointers
+ Nu llabilityOfRet: O
+ # the space is intentional, to make sure we don't crash on malformed API Notes
diff --git a/clang/test/APINotes/Inputs/BrokenHeaders/SomeBrokenLib.h b/clang/test/APINotes/Inputs/BrokenHeaders/SomeBrokenLib.h
new file mode 100644
index 000000000000..b09c6f63eae0
--- /dev/null
+++ b/clang/test/APINotes/Inputs/BrokenHeaders/SomeBrokenLib.h
@@ -0,0 +1,6 @@
+#ifndef SOME_BROKEN_LIB_H
+#define SOME_BROKEN_LIB_H
+
+void do_something_with_pointers(int *ptr1, int *ptr2);
+
+#endif // SOME_BROKEN_LIB_H
diff --git a/clang/test/APINotes/Inputs/BrokenHeaders2/APINotes.apinotes b/clang/test/APINotes/Inputs/BrokenHeaders2/APINotes.apinotes
new file mode 100644
index 000000000000..33eeaaada999
--- /dev/null
+++ b/clang/test/APINotes/Inputs/BrokenHeaders2/APINotes.apinotes
@@ -0,0 +1,7 @@
+Name: SomeBrokenLib
+Functions:
+ - Name: do_something_with_pointers
+ NullabilityOfRet: O
+ - Name: do_something_with_pointers
+ NullabilityOfRet: O
+
diff --git a/clang/test/APINotes/Inputs/BrokenHeaders2/SomeBrokenLib.h b/clang/test/APINotes/Inputs/BrokenHeaders2/SomeBrokenLib.h
new file mode 100644
index 000000000000..b09c6f63eae0
--- /dev/null
+++ b/clang/test/APINotes/Inputs/BrokenHeaders2/SomeBrokenLib.h
@@ -0,0 +1,6 @@
+#ifndef SOME_BROKEN_LIB_H
+#define SOME_BROKEN_LIB_H
+
+void do_something_with_pointers(int *ptr1, int *ptr2);
+
+#endif // SOME_BROKEN_LIB_H
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Headers/FrameworkWithActualPrivateModule.h b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Headers/FrameworkWithActualPrivateModule.h
new file mode 100644
index 000000000000..523de4f7ce08
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Headers/FrameworkWithActualPrivateModule.h
@@ -0,0 +1 @@
+extern int FrameworkWithActualPrivateModule;
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..859d723716be
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module FrameworkWithActualPrivateModule {
+ umbrella header "FrameworkWithActualPrivateModule.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.private.modulemap b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.private.modulemap
new file mode 100644
index 000000000000..e7fafe3bcbb1
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/Modules/module.private.modulemap
@@ -0,0 +1,5 @@
+framework module FrameworkWithActualPrivateModule_Private {
+ umbrella header "FrameworkWithActualPrivateModule_Private.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.apinotes b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.apinotes
new file mode 100644
index 000000000000..831cf1e93d35
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.apinotes
@@ -0,0 +1 @@
+Name: FrameworkWithActualPrivateModule_Private
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.h b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.h
new file mode 100644
index 000000000000..c07a3e95d740
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithActualPrivateModule.framework/PrivateHeaders/FrameworkWithActualPrivateModule_Private.h
@@ -0,0 +1,2 @@
+#include <FrameworkWithActualPrivateModule/FrameworkWithActualPrivateModule.h>
+extern int FrameworkWithActualPrivateModule_Private;
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Headers/FrameworkWithWrongCase.h b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Headers/FrameworkWithWrongCase.h
new file mode 100644
index 000000000000..4f3b631c27e3
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Headers/FrameworkWithWrongCase.h
@@ -0,0 +1 @@
+extern int FrameworkWithWrongCase;
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..e97d361039a1
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module FrameworkWithWrongCase {
+ umbrella header "FrameworkWithWrongCase.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/PrivateHeaders/FrameworkWithWrongCase_Private.apinotes b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/PrivateHeaders/FrameworkWithWrongCase_Private.apinotes
new file mode 100644
index 000000000000..ae5447c61e33
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCase.framework/PrivateHeaders/FrameworkWithWrongCase_Private.apinotes
@@ -0,0 +1 @@
+Name: FrameworkWithWrongCase
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Headers/FrameworkWithWrongCasePrivate.h b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Headers/FrameworkWithWrongCasePrivate.h
new file mode 100644
index 000000000000..d3d61483191c
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Headers/FrameworkWithWrongCasePrivate.h
@@ -0,0 +1 @@
+extern int FrameworkWithWrongCasePrivate;
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..04b96adbbfeb
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module FrameworkWithWrongCasePrivate {
+ umbrella header "FrameworkWithWrongCasePrivate.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.private.modulemap b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.private.modulemap
new file mode 100644
index 000000000000..d6ad53cdc717
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/Modules/module.private.modulemap
@@ -0,0 +1 @@
+module FrameworkWithWrongCasePrivate.Inner {}
diff --git a/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/PrivateHeaders/FrameworkWithWrongCasePrivate_Private.apinotes b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/PrivateHeaders/FrameworkWithWrongCasePrivate_Private.apinotes
new file mode 100644
index 000000000000..d7af293e8125
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/FrameworkWithWrongCasePrivate.framework/PrivateHeaders/FrameworkWithWrongCasePrivate_Private.apinotes
@@ -0,0 +1 @@
+Name: FrameworkWithWrongCasePrivate
diff --git a/clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Headers/LayeredKit.h b/clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Headers/LayeredKit.h
new file mode 100644
index 000000000000..a95d19ecbe9a
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Headers/LayeredKit.h
@@ -0,0 +1,11 @@
+@import LayeredKitImpl;
+
+// @interface declarations already don't inherit attributes from forward
+// declarations, so in order to test this properly we have to /not/ define
+// UpwardClass anywhere.
+
+// @interface UpwardClass
+// @end
+
+@protocol UpwardProto
+@end
diff --git a/clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..04bbe72a2b6e
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/LayeredKit.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module LayeredKit {
+ umbrella header "LayeredKit.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.apinotes b/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.apinotes
new file mode 100644
index 000000000000..bece28cfe605
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.apinotes
@@ -0,0 +1,9 @@
+Name: LayeredKitImpl
+Classes:
+- Name: PerfectlyNormalClass
+ Availability: none
+- Name: UpwardClass
+ Availability: none
+Protocols:
+- Name: UpwardProto
+ Availability: none
diff --git a/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.h b/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.h
new file mode 100644
index 000000000000..99591d35803a
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Headers/LayeredKitImpl.h
@@ -0,0 +1,7 @@
+@protocol UpwardProto;
+@class UpwardClass;
+
+@interface PerfectlyNormalClass
+@end
+
+void doImplementationThings(UpwardClass *first, id <UpwardProto> second) __attribute((unavailable));
diff --git a/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..58a6e55c1067
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/LayeredKitImpl.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module LayeredKitImpl {
+ umbrella header "LayeredKitImpl.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/SimpleKit.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/SimpleKit.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..2d07e76c0a14
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SimpleKit.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module SimpleKit {
+ umbrella header "SimpleKit.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit.apinotes b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit.apinotes
new file mode 100644
index 000000000000..817af123fc77
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit.apinotes
@@ -0,0 +1,74 @@
+Name: SomeKit
+Classes:
+ - Name: A
+ Methods:
+ - Selector: "transform:"
+ MethodKind: Instance
+ Availability: none
+ AvailabilityMsg: "anything but this"
+ - Selector: "transform:integer:"
+ MethodKind: Instance
+ NullabilityOfRet: N
+ Nullability: [ N, S ]
+ Properties:
+ - Name: intValue
+ PropertyKind: Instance
+ Availability: none
+ AvailabilityMsg: "wouldn't work anyway"
+ - Name: nonnullAInstance
+ PropertyKind: Instance
+ Nullability: N
+ - Name: nonnullAClass
+ PropertyKind: Class
+ Nullability: N
+ - Name: nonnullABoth
+ Nullability: N
+ - Name: B
+ Availability: none
+ AvailabilityMsg: "just don't"
+ - Name: C
+ Methods:
+ - Selector: "initWithA:"
+ MethodKind: Instance
+ DesignatedInit: true
+ - Name: OverriddenTypes
+ Methods:
+ - Selector: "methodToMangle:second:"
+ MethodKind: Instance
+ ResultType: 'char *'
+ Parameters:
+ - Position: 0
+ Type: 'SOMEKIT_DOUBLE *'
+ - Position: 1
+ Type: 'float *'
+ Properties:
+ - Name: intPropertyToMangle
+ PropertyKind: Instance
+ Type: 'double *'
+Functions:
+ - Name: global_int_fun
+ ResultType: 'char *'
+ Parameters:
+ - Position: 0
+ Type: 'double *'
+ - Position: 1
+ Type: 'float *'
+Globals:
+ - Name: global_int_ptr
+ Type: 'double *'
+SwiftVersions:
+ - Version: 3.0
+ Classes:
+ - Name: A
+ Methods:
+ - Selector: "transform:integer:"
+ MethodKind: Instance
+ NullabilityOfRet: O
+ Nullability: [ O, S ]
+ Properties:
+ - Name: explicitNonnullInstance
+ PropertyKind: Instance
+ Nullability: O
+ - Name: explicitNullableInstance
+ PropertyKind: Instance
+ Nullability: N
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit_private.apinotes b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit_private.apinotes
new file mode 100644
index 000000000000..28ede9dfa25c
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/APINotes/SomeKit_private.apinotes
@@ -0,0 +1,15 @@
+Name: SomeKit
+Classes:
+ - Name: A
+ Methods:
+ - Selector: "privateTransform:input:"
+ MethodKind: Instance
+ NullabilityOfRet: N
+ Nullability: [ N, S ]
+ Properties:
+ - Name: internalProperty
+ Nullability: N
+Protocols:
+ - Name: InternalProtocol
+ Availability: none
+ AvailabilityMsg: "not for you"
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Headers/SomeKitForNullAnnotation.h b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Headers/SomeKitForNullAnnotation.h
new file mode 100644
index 000000000000..bc0c5da8848e
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Headers/SomeKitForNullAnnotation.h
@@ -0,0 +1,55 @@
+#ifndef SOMEKIT_H
+#define SOMEKIT_H
+
+#define ROOT_CLASS __attribute__((objc_root_class))
+
+ROOT_CLASS
+@interface A
+-(A*)transform:(A*)input;
+-(A*)transform:(A*)input integer:(int)integer;
+
+@property (nonatomic, readonly, retain) A* someA;
+@property (nonatomic, retain) A* someOtherA;
+
+@property (nonatomic) int intValue;
+@end
+
+@interface B : A
+@end
+
+@interface C : A
+- (instancetype)init;
+- (instancetype)initWithA:(A*)a;
+@end
+
+
+@interface MyClass : A
+- Inst;
++ Clas;
+@end
+
+struct CGRect {
+ float origin;
+ float size;
+};
+typedef struct CGRect NSRect;
+
+@interface I
+- (void) Meth : (NSRect[4])exposedRects;
+- (void) Meth1 : (const I*)exposedRects;
+- (void) Meth2 : (const I*)exposedRects;
+- (void) Meth3 : (I*)exposedRects;
+- (const I*) Meth4;
+- (const I*) Meth5 : (int) Arg1 : (const I*)Arg2 : (double)Arg3 : (const I*) Arg4 :(const volatile id) Arg5;
+- (volatile const I*) Meth6 : (const char *)Arg1 : (const char *)Arg2 : (double)Arg3 : (const I*) Arg4 :(const volatile id) Arg5;
+@end
+
+@class NSURL, NSArray, NSError;
+@interface INTF_BLOCKS
+ + (void)getNonLocalVersionsOfItemAtURL:(NSURL *)url completionHandler:(void (^)(NSArray *nonLocalFileVersions, NSError *error))completionHandler;
+ + (void *)getNonLocalVersionsOfItemAtURL2:(NSURL *)url completionHandler:(void (^)(NSArray *nonLocalFileVersions, NSError *error))completionHandler;
+ + (NSError **)getNonLocalVersionsOfItemAtURL3:(int)url completionHandler:(void (^)(NSArray *nonLocalFileVersions, NSError *error))completionHandler;
+ + (id)getNonLocalVersionsOfItemAtURL4:(NSURL *)url completionHandler:(void (^)(int nonLocalFileVersions, NSError *error, NSURL*))completionHandler;
+@end
+
+#endif
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..3abee2df0be1
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module SomeKit {
+ umbrella header "SomeKit.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.private.modulemap b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.private.modulemap
new file mode 100644
index 000000000000..bbda9d08e399
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module.private.modulemap
@@ -0,0 +1,8 @@
+module SomeKit.Private {
+ header "SomeKit_Private.h"
+ export *
+
+ explicit module NullAnnotation {
+ header "SomeKit_PrivateForNullAnnotation.h"
+ }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module_private.modulemap b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module_private.modulemap
new file mode 100644
index 000000000000..e31034317cb8
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/Modules/module_private.modulemap
@@ -0,0 +1,8 @@
+explicit framework module SomeKit.Private {
+ header "SomeKit_Private.h"
+ explicit NullAnnotation { header "SomeKit_PrivateForNullAnnotation.h" }
+ export *
+ module * { export * }
+syntax error
+
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_Private.h b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_Private.h
new file mode 100644
index 000000000000..c7611123e4ad
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_Private.h
@@ -0,0 +1,16 @@
+#ifndef SOMEKIT_PRIVATE_H
+#define SOMEKIT_PRIVATE_H
+
+#import <SomeKit/SomeKit.h>
+
+@interface A(Private)
+-(A*)privateTransform:(A*)input;
+
+@property (nonatomic) A* internalProperty;
+@end
+
+@protocol InternalProtocol
+@end
+
+#endif
+
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_PrivateForNullAnnotation.h b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_PrivateForNullAnnotation.h
new file mode 100644
index 000000000000..bae4456b4080
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_PrivateForNullAnnotation.h
@@ -0,0 +1,17 @@
+#ifndef SOMEKIT_PRIVATE_H
+#define SOMEKIT_PRIVATE_H
+
+#import <SomeKit/SomeKitForNullAnnotation.h>
+
+@interface A(Private)
+-(A*)privateTransform:(A*)input;
+
+@property (nonatomic) A* internalProperty;
+@end
+
+@protocol InternalProtocol
+- (id) MomeMethod;
+@end
+
+#endif
+
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_private.apinotes b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_private.apinotes
new file mode 100644
index 000000000000..28ede9dfa25c
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeKit.framework/PrivateHeaders/SomeKit_private.apinotes
@@ -0,0 +1,15 @@
+Name: SomeKit
+Classes:
+ - Name: A
+ Methods:
+ - Selector: "privateTransform:input:"
+ MethodKind: Instance
+ NullabilityOfRet: N
+ Nullability: [ N, S ]
+ Properties:
+ - Name: internalProperty
+ Nullability: N
+Protocols:
+ - Name: InternalProtocol
+ Availability: none
+ AvailabilityMsg: "not for you"
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/APINotes/SomeOtherKit.apinotes b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/APINotes/SomeOtherKit.apinotes
new file mode 100644
index 000000000000..2ad546b8f8bc
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/APINotes/SomeOtherKit.apinotes
@@ -0,0 +1,8 @@
+Name: SomeOtherKit
+Classes:
+ - Name: A
+ Methods:
+ - Selector: "methodA"
+ MethodKind: Instance
+ Availability: none
+ AvailabilityMsg: "anything but this"
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.apinotes b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.apinotes
new file mode 100644
index 000000000000..2ad546b8f8bc
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.apinotes
@@ -0,0 +1,8 @@
+Name: SomeOtherKit
+Classes:
+ - Name: A
+ Methods:
+ - Selector: "methodA"
+ MethodKind: Instance
+ Availability: none
+ AvailabilityMsg: "anything but this"
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.h b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.h
new file mode 100644
index 000000000000..3911d765230c
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.h
@@ -0,0 +1,9 @@
+#ifndef SOME_OTHER_KIT_H
+
+__attribute__((objc_root_class))
+@interface A
+-(void)methodA;
+-(void)methodB;
+@end
+
+#endif
diff --git a/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..0aaad92e041c
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/SomeOtherKit.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module SomeOtherKit {
+ umbrella header "SomeOtherKit.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit.h b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit.h
new file mode 100644
index 000000000000..d3376f1dac5d
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit.h
@@ -0,0 +1 @@
+extern int TopLevelPrivateKit_Public;
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit_Private.apinotes b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit_Private.apinotes
new file mode 100644
index 000000000000..ece1dd220adf
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Headers/TopLevelPrivateKit_Private.apinotes
@@ -0,0 +1 @@
+garbage here because this file shouldn't get read \ No newline at end of file
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..70faa54e8347
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module TopLevelPrivateKit {
+ umbrella header "TopLevelPrivateKit.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.private.modulemap b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.private.modulemap
new file mode 100644
index 000000000000..0958a14d6710
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/Modules/module.private.modulemap
@@ -0,0 +1,5 @@
+framework module TopLevelPrivateKit_Private {
+ umbrella header "TopLevelPrivateKit_Private.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit.apinotes b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit.apinotes
new file mode 100644
index 000000000000..908dae0e3b0b
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit.apinotes
@@ -0,0 +1 @@
+garbage here because this file shouldn't get read
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.apinotes b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.apinotes
new file mode 100644
index 000000000000..43323621588b
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.apinotes
@@ -0,0 +1,4 @@
+Name: TopLevelPrivateKit_Private
+Globals:
+- Name: TopLevelPrivateKit_Private
+ Type: float
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.h b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.h
new file mode 100644
index 000000000000..39cbfe6e9918
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private.h
@@ -0,0 +1 @@
+extern int TopLevelPrivateKit_Private;
diff --git a/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private_private.apinotes b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private_private.apinotes
new file mode 100644
index 000000000000..ece1dd220adf
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/TopLevelPrivateKit.framework/PrivateHeaders/TopLevelPrivateKit_Private_private.apinotes
@@ -0,0 +1 @@
+garbage here because this file shouldn't get read \ No newline at end of file
diff --git a/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.apinotes b/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.apinotes
new file mode 100644
index 000000000000..572c714b3d61
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.apinotes
@@ -0,0 +1,156 @@
+Name: VersionedKit
+Classes:
+ - Name: TestProperties
+ SwiftObjCMembers: true
+ Properties:
+ - Name: accessorsOnly
+ PropertyKind: Instance
+ SwiftImportAsAccessors: true
+ - Name: accessorsOnlyForClass
+ PropertyKind: Class
+ SwiftImportAsAccessors: true
+ - Name: accessorsOnlyExceptInVersion3
+ PropertyKind: Instance
+ SwiftImportAsAccessors: true
+ - Name: accessorsOnlyForClassExceptInVersion3
+ PropertyKind: Class
+ SwiftImportAsAccessors: true
+Functions:
+ - Name: unversionedRenameDUMP
+ SwiftName: 'unversionedRename_NOTES()'
+Tags:
+ - Name: APINotedFlagEnum
+ FlagEnum: true
+ - Name: APINotedOpenEnum
+ EnumExtensibility: open
+ - Name: APINotedClosedEnum
+ EnumExtensibility: closed
+ - Name: SoonToBeCFEnum
+ EnumKind: CFEnum
+ - Name: SoonToBeNSEnum
+ EnumKind: NSEnum
+ - Name: SoonToBeCFOptions
+ EnumKind: CFOptions
+ - Name: SoonToBeNSOptions
+ EnumKind: NSOptions
+ - Name: SoonToBeCFClosedEnum
+ EnumKind: CFClosedEnum
+ - Name: SoonToBeNSClosedEnum
+ EnumKind: NSClosedEnum
+ - Name: UndoAllThatHasBeenDoneToMe
+ EnumKind: none
+Typedefs:
+ - Name: MultiVersionedTypedef34Notes
+ SwiftName: MultiVersionedTypedef34Notes_NEW
+ - Name: MultiVersionedTypedef345Notes
+ SwiftName: MultiVersionedTypedef345Notes_NEW
+ - Name: MultiVersionedTypedef4Notes
+ SwiftName: MultiVersionedTypedef4Notes_NEW
+ - Name: MultiVersionedTypedef45Notes
+ SwiftName: MultiVersionedTypedef45Notes_NEW
+SwiftVersions:
+ - Version: 3.0
+ Classes:
+ - Name: MyReferenceType
+ SwiftBridge: ''
+ - Name: TestGenericDUMP
+ SwiftImportAsNonGeneric: true
+ - Name: TestProperties
+ SwiftObjCMembers: false
+ Properties:
+ - Name: accessorsOnlyInVersion3
+ PropertyKind: Instance
+ SwiftImportAsAccessors: true
+ - Name: accessorsOnlyForClassInVersion3
+ PropertyKind: Class
+ SwiftImportAsAccessors: true
+ - Name: accessorsOnlyExceptInVersion3
+ PropertyKind: Instance
+ SwiftImportAsAccessors: false
+ - Name: accessorsOnlyForClassExceptInVersion3
+ PropertyKind: Class
+ SwiftImportAsAccessors: false
+ - Name: Swift3RenamedOnlyDUMP
+ SwiftName: SpecialSwift3Name
+ - Name: Swift3RenamedAlsoDUMP
+ SwiftName: SpecialSwift3Also
+ Functions:
+ - Name: moveToPointDUMP
+ SwiftName: 'moveTo(a:b:)'
+ - Name: acceptClosure
+ Parameters:
+ - Position: 0
+ NoEscape: false
+ - Name: privateFunc
+ SwiftPrivate: false
+ Tags:
+ - Name: MyErrorCode
+ NSErrorDomain: ''
+ - Name: NewlyFlagEnum
+ FlagEnum: false
+ - Name: OpenToClosedEnum
+ EnumExtensibility: open
+ - Name: ClosedToOpenEnum
+ EnumExtensibility: closed
+ - Name: NewlyClosedEnum
+ EnumExtensibility: none
+ - Name: NewlyOpenEnum
+ EnumExtensibility: none
+ Typedefs:
+ - Name: MyDoubleWrapper
+ SwiftWrapper: none
+ - Name: MultiVersionedTypedef34
+ SwiftName: MultiVersionedTypedef34_3
+ - Name: MultiVersionedTypedef34Header
+ SwiftName: MultiVersionedTypedef34Header_3
+ - Name: MultiVersionedTypedef34Notes
+ SwiftName: MultiVersionedTypedef34Notes_3
+ - Name: MultiVersionedTypedef345
+ SwiftName: MultiVersionedTypedef345_3
+ - Name: MultiVersionedTypedef345Header
+ SwiftName: MultiVersionedTypedef345Header_3
+ - Name: MultiVersionedTypedef345Notes
+ SwiftName: MultiVersionedTypedef345Notes_3
+ - Version: 5
+ Typedefs:
+ - Name: MultiVersionedTypedef345
+ SwiftName: MultiVersionedTypedef345_5
+ - Name: MultiVersionedTypedef345Header
+ SwiftName: MultiVersionedTypedef345Header_5
+ - Name: MultiVersionedTypedef345Notes
+ SwiftName: MultiVersionedTypedef345Notes_5
+ - Name: MultiVersionedTypedef45
+ SwiftName: MultiVersionedTypedef45_5
+ - Name: MultiVersionedTypedef45Header
+ SwiftName: MultiVersionedTypedef45Header_5
+ - Name: MultiVersionedTypedef45Notes
+ SwiftName: MultiVersionedTypedef45Notes_5
+ - Version: 4 # Versions are deliberately ordered as "3, 5, 4" to catch bugs.
+ Classes:
+ - Name: Swift4RenamedDUMP
+ SwiftName: SpecialSwift4Name
+ Typedefs:
+ - Name: MultiVersionedTypedef34
+ SwiftName: MultiVersionedTypedef34_4
+ - Name: MultiVersionedTypedef34Header
+ SwiftName: MultiVersionedTypedef34Header_4
+ - Name: MultiVersionedTypedef34Notes
+ SwiftName: MultiVersionedTypedef34Notes_4
+ - Name: MultiVersionedTypedef345
+ SwiftName: MultiVersionedTypedef345_4
+ - Name: MultiVersionedTypedef345Header
+ SwiftName: MultiVersionedTypedef345Header_4
+ - Name: MultiVersionedTypedef345Notes
+ SwiftName: MultiVersionedTypedef345Notes_4
+ - Name: MultiVersionedTypedef4
+ SwiftName: MultiVersionedTypedef4_4
+ - Name: MultiVersionedTypedef4Header
+ SwiftName: MultiVersionedTypedef4Header_4
+ - Name: MultiVersionedTypedef4Notes
+ SwiftName: MultiVersionedTypedef4Notes_4
+ - Name: MultiVersionedTypedef45
+ SwiftName: MultiVersionedTypedef45_4
+ - Name: MultiVersionedTypedef45Header
+ SwiftName: MultiVersionedTypedef45Header_4
+ - Name: MultiVersionedTypedef45Notes
+ SwiftName: MultiVersionedTypedef45Notes_4
diff --git a/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.h b/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.h
new file mode 100644
index 000000000000..9ce95633c523
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Headers/VersionedKit.h
@@ -0,0 +1,137 @@
+void moveToPointDUMP(double x, double y) __attribute__((swift_name("moveTo(x:y:)")));
+
+void unversionedRenameDUMP(void) __attribute__((swift_name("unversionedRename_HEADER()")));
+
+void acceptClosure(void (^ __attribute__((noescape)) block)(void));
+
+void privateFunc(void) __attribute__((swift_private));
+
+typedef double MyDoubleWrapper __attribute__((swift_wrapper(struct)));
+
+#if __OBJC__
+@class NSString;
+
+extern NSString *MyErrorDomain;
+
+enum __attribute__((ns_error_domain(MyErrorDomain))) MyErrorCode {
+ MyErrorCodeFailed = 1
+};
+
+__attribute__((swift_bridge("MyValueType")))
+@interface MyReferenceType
+@end
+
+@interface TestProperties
+@property (nonatomic, readwrite, retain) id accessorsOnly;
+@property (nonatomic, readwrite, retain, class) id accessorsOnlyForClass;
+
+@property (nonatomic, readwrite, retain) id accessorsOnlyInVersion3;
+@property (nonatomic, readwrite, retain, class) id accessorsOnlyForClassInVersion3;
+
+@property (nonatomic, readwrite, retain) id accessorsOnlyExceptInVersion3;
+@property (nonatomic, readwrite, retain, class) id accessorsOnlyForClassExceptInVersion3;
+@end
+
+@interface Base
+@end
+
+@interface TestGenericDUMP<Element> : Base
+- (Element)element;
+@end
+
+@interface Swift3RenamedOnlyDUMP
+@end
+
+__attribute__((swift_name("Swift4Name")))
+@interface Swift3RenamedAlsoDUMP
+@end
+
+@interface Swift4RenamedDUMP
+@end
+
+#endif
+
+
+enum __attribute__((flag_enum)) FlagEnum {
+ FlagEnumA = 1,
+ FlagEnumB = 2
+};
+
+enum __attribute__((flag_enum)) NewlyFlagEnum {
+ NewlyFlagEnumA = 1,
+ NewlyFlagEnumB = 2
+};
+
+enum APINotedFlagEnum {
+ APINotedFlagEnumA = 1,
+ APINotedFlagEnumB = 2
+};
+
+
+enum __attribute__((enum_extensibility(open))) OpenEnum {
+ OpenEnumA = 1,
+};
+
+enum __attribute__((enum_extensibility(open))) NewlyOpenEnum {
+ NewlyOpenEnumA = 1,
+};
+
+enum __attribute__((enum_extensibility(closed))) NewlyClosedEnum {
+ NewlyClosedEnumA = 1,
+};
+
+enum __attribute__((enum_extensibility(open))) ClosedToOpenEnum {
+ ClosedToOpenEnumA = 1,
+};
+
+enum __attribute__((enum_extensibility(closed))) OpenToClosedEnum {
+ OpenToClosedEnumA = 1,
+};
+
+enum APINotedOpenEnum {
+ APINotedOpenEnumA = 1,
+};
+
+enum APINotedClosedEnum {
+ APINotedClosedEnumA = 1,
+};
+
+
+enum SoonToBeCFEnum {
+ SoonToBeCFEnumA = 1
+};
+enum SoonToBeNSEnum {
+ SoonToBeNSEnumA = 1
+};
+enum SoonToBeCFOptions {
+ SoonToBeCFOptionsA = 1
+};
+enum SoonToBeNSOptions {
+ SoonToBeNSOptionsA = 1
+};
+enum SoonToBeCFClosedEnum {
+ SoonToBeCFClosedEnumA = 1
+};
+enum SoonToBeNSClosedEnum {
+ SoonToBeNSClosedEnumA = 1
+};
+enum UndoAllThatHasBeenDoneToMe {
+ UndoAllThatHasBeenDoneToMeA = 1
+} __attribute__((flag_enum)) __attribute__((enum_extensibility(closed)));
+
+
+typedef int MultiVersionedTypedef4;
+typedef int MultiVersionedTypedef4Notes;
+typedef int MultiVersionedTypedef4Header __attribute__((swift_name("MultiVersionedTypedef4Header_NEW")));
+
+typedef int MultiVersionedTypedef34;
+typedef int MultiVersionedTypedef34Notes;
+typedef int MultiVersionedTypedef34Header __attribute__((swift_name("MultiVersionedTypedef34Header_NEW")));
+
+typedef int MultiVersionedTypedef45;
+typedef int MultiVersionedTypedef45Notes;
+typedef int MultiVersionedTypedef45Header __attribute__((swift_name("MultiVersionedTypedef45Header_NEW")));
+
+typedef int MultiVersionedTypedef345;
+typedef int MultiVersionedTypedef345Notes;
+typedef int MultiVersionedTypedef345Header __attribute__((swift_name("MultiVersionedTypedef345Header_NEW")));
diff --git a/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Modules/module.modulemap b/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Modules/module.modulemap
new file mode 100644
index 000000000000..6d957fd68009
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Frameworks/VersionedKit.framework/Modules/module.modulemap
@@ -0,0 +1,5 @@
+framework module VersionedKit {
+ umbrella header "VersionedKit.h"
+ export *
+ module * { export * }
+}
diff --git a/clang/test/APINotes/Inputs/Headers/APINotes.apinotes b/clang/test/APINotes/Inputs/Headers/APINotes.apinotes
new file mode 100644
index 000000000000..08210fc70565
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/APINotes.apinotes
@@ -0,0 +1,18 @@
+Name: HeaderLib
+SwiftInferImportAsMember: true
+Functions:
+ - Name: custom_realloc
+ NullabilityOfRet: N
+ Nullability: [ N, S ]
+ - Name: unavailable_function
+ Availability: none
+ AvailabilityMsg: "I beg you not to use this"
+ - Name: do_something_with_pointers
+ NullabilityOfRet: O
+ Nullability: [ N, O ]
+
+Globals:
+ - Name: global_int
+ Nullability: N
+ - Name: unavailable_global_int
+ Availability: none
diff --git a/clang/test/APINotes/Inputs/Headers/BrokenTypes.apinotes b/clang/test/APINotes/Inputs/Headers/BrokenTypes.apinotes
new file mode 100644
index 000000000000..00f7b5074e98
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/BrokenTypes.apinotes
@@ -0,0 +1,10 @@
+Name: BrokenTypes
+Functions:
+ - Name: break_me_function
+ ResultType: 'int * with extra junk'
+ Parameters:
+ - Position: 0
+ Type: 'not_a_type'
+Globals:
+ - Name: break_me_variable
+ Type: 'double'
diff --git a/clang/test/APINotes/Inputs/Headers/BrokenTypes.h b/clang/test/APINotes/Inputs/Headers/BrokenTypes.h
new file mode 100644
index 000000000000..fee054b74cf7
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/BrokenTypes.h
@@ -0,0 +1,8 @@
+#ifndef BROKEN_TYPES_H
+#define BROKEN_TYPES_H
+
+char break_me_function(void *ptr);
+
+extern char break_me_variable;
+
+#endif // BROKEN_TYPES_H
diff --git a/clang/test/APINotes/Inputs/Headers/ExportAs.apinotes b/clang/test/APINotes/Inputs/Headers/ExportAs.apinotes
new file mode 100644
index 000000000000..14c77afd8c30
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ExportAs.apinotes
@@ -0,0 +1,5 @@
+Name: ExportAs
+Globals:
+ - Name: globalInt
+ Availability: none
+ AvailabilityMsg: "oh no"
diff --git a/clang/test/APINotes/Inputs/Headers/ExportAs.h b/clang/test/APINotes/Inputs/Headers/ExportAs.h
new file mode 100644
index 000000000000..ff490e096417
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ExportAs.h
@@ -0,0 +1 @@
+#include "ExportAsCore.h"
diff --git a/clang/test/APINotes/Inputs/Headers/ExportAsCore.h b/clang/test/APINotes/Inputs/Headers/ExportAsCore.h
new file mode 100644
index 000000000000..f7674c19935d
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ExportAsCore.h
@@ -0,0 +1 @@
+static int globalInt = 123;
diff --git a/clang/test/APINotes/Inputs/Headers/ExternCtx.apinotes b/clang/test/APINotes/Inputs/Headers/ExternCtx.apinotes
new file mode 100644
index 000000000000..0f47ac6deea8
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ExternCtx.apinotes
@@ -0,0 +1,15 @@
+Name: ExternCtx
+Globals:
+ - Name: globalInExternC
+ Availability: none
+ AvailabilityMsg: "oh no"
+ - Name: globalInExternCXX
+ Availability: none
+ AvailabilityMsg: "oh no #2"
+Functions:
+ - Name: globalFuncInExternC
+ Availability: none
+ AvailabilityMsg: "oh no #3"
+ - Name: globalFuncInExternCXX
+ Availability: none
+ AvailabilityMsg: "oh no #4"
diff --git a/clang/test/APINotes/Inputs/Headers/ExternCtx.h b/clang/test/APINotes/Inputs/Headers/ExternCtx.h
new file mode 100644
index 000000000000..669d443f60ec
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ExternCtx.h
@@ -0,0 +1,11 @@
+extern "C" {
+ static int globalInExternC = 1;
+
+ static void globalFuncInExternC() {}
+}
+
+extern "C++" {
+ static int globalInExternCXX = 2;
+
+ static void globalFuncInExternCXX() {}
+}
diff --git a/clang/test/APINotes/Inputs/Headers/HeaderLib.apinotes b/clang/test/APINotes/Inputs/Headers/HeaderLib.apinotes
new file mode 100644
index 000000000000..7dcb22476a1d
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/HeaderLib.apinotes
@@ -0,0 +1,37 @@
+Name: HeaderLib
+SwiftInferImportAsMember: true
+Functions:
+ - Name: custom_realloc
+ NullabilityOfRet: N
+ Nullability: [ N, S ]
+ - Name: unavailable_function
+ Availability: none
+ AvailabilityMsg: "I beg you not to use this"
+ - Name: do_something_with_pointers
+ NullabilityOfRet: O
+ Nullability: [ N, O ]
+ - Name: do_something_with_arrays
+ Parameters:
+ - Position: 0
+ Nullability: N
+ - Position: 1
+ Nullability: N
+ - Name: take_pointer_and_int
+ Parameters:
+ - Position: 0
+ Nullability: N
+ NoEscape: true
+ - Position: 1
+ NoEscape: true
+Globals:
+ - Name: global_int
+ Nullability: N
+ - Name: unavailable_global_int
+ Availability: none
+Tags:
+ - Name: unavailable_struct
+ Availability: none
+
+Typedefs:
+ - Name: unavailable_typedef
+ Availability: none
diff --git a/clang/test/APINotes/Inputs/Headers/HeaderLib.h b/clang/test/APINotes/Inputs/Headers/HeaderLib.h
new file mode 100644
index 000000000000..806524960785
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/HeaderLib.h
@@ -0,0 +1,19 @@
+#ifndef HEADER_LIB_H
+#define HEADER_LIB_H
+
+void *custom_realloc(void *member, unsigned size);
+
+int *global_int;
+
+int unavailable_function(void);
+int unavailable_global_int;
+
+void do_something_with_pointers(int *ptr1, int *ptr2);
+void do_something_with_arrays(int simple[], int nested[][2]);
+
+typedef int unavailable_typedef;
+struct unavailable_struct { int x, y, z; };
+
+void take_pointer_and_int(int *ptr1, int value);
+
+#endif
diff --git a/clang/test/APINotes/Inputs/Headers/InstancetypeModule.apinotes b/clang/test/APINotes/Inputs/Headers/InstancetypeModule.apinotes
new file mode 100644
index 000000000000..813eb506f39a
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/InstancetypeModule.apinotes
@@ -0,0 +1,10 @@
+Name: InstancetypeModule
+Classes:
+- Name: SomeBaseClass
+ Methods:
+ - Selector: instancetypeFactoryMethod
+ MethodKind: Class
+ ResultType: SomeBaseClass * _Nonnull
+ - Selector: staticFactoryMethod
+ MethodKind: Class
+ ResultType: SomeBaseClass * _Nonnull
diff --git a/clang/test/APINotes/Inputs/Headers/InstancetypeModule.h b/clang/test/APINotes/Inputs/Headers/InstancetypeModule.h
new file mode 100644
index 000000000000..767f201d9faf
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/InstancetypeModule.h
@@ -0,0 +1,10 @@
+@interface Object
+@end
+
+@interface SomeBaseClass : Object
++ (nullable instancetype)instancetypeFactoryMethod;
++ (nullable SomeBaseClass *)staticFactoryMethod;
+@end
+
+@interface SomeSubclass : SomeBaseClass
+@end
diff --git a/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase.h b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase.h
new file mode 100644
index 000000000000..867a15cae9a6
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase.h
@@ -0,0 +1 @@
+extern int ModuleWithWrongCase;
diff --git a/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate.h b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate.h
new file mode 100644
index 000000000000..aa014296ca7d
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate.h
@@ -0,0 +1 @@
+extern int ModuleWithWrongCasePrivate;
diff --git a/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate_Private.apinotes b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate_Private.apinotes
new file mode 100644
index 000000000000..dc6dc50bab6e
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCasePrivate_Private.apinotes
@@ -0,0 +1 @@
+Name: ModuleWithWrongCasePrivate
diff --git a/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase_Private.apinotes b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase_Private.apinotes
new file mode 100644
index 000000000000..dc6dc50bab6e
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/ModuleWithWrongCase_Private.apinotes
@@ -0,0 +1 @@
+Name: ModuleWithWrongCasePrivate
diff --git a/clang/test/APINotes/Inputs/Headers/Namespaces.apinotes b/clang/test/APINotes/Inputs/Headers/Namespaces.apinotes
new file mode 100644
index 000000000000..e9da36787b63
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/Namespaces.apinotes
@@ -0,0 +1,53 @@
+---
+Name: Namespaces
+Globals:
+ - Name: varInInlineNamespace
+ SwiftName: swiftVarInInlineNamespace
+Functions:
+ - Name: funcInNamespace
+ SwiftName: inWrongContext()
+ - Name: funcInInlineNamespace
+ SwiftName: swiftFuncInInlineNamespace()
+Tags:
+ - Name: char_box
+ SwiftName: InWrongContext
+Namespaces:
+ - Name: Namespace1
+ Typedefs:
+ - Name: my_typedef
+ SwiftName: SwiftTypedef
+ - Name: my_using_decl
+ SwiftName: SwiftUsingDecl
+ Globals:
+ - Name: varInNamespace
+ SwiftName: swiftVarInNamespace
+ Functions:
+ - Name: funcInNamespace
+ SwiftName: swiftFuncInNamespace()
+ Tags:
+ - Name: char_box
+ SwiftName: CharBox
+ Namespaces:
+ - Name: Nested1
+ Globals:
+ - Name: varInNestedNamespace
+ SwiftName: swiftVarInNestedNamespace
+ Functions:
+ - Name: funcInNestedNamespace
+ SwiftName: swiftFuncInNestedNamespace(_:)
+ Tags:
+ - Name: char_box
+ SwiftName: NestedCharBox
+ Namespaces:
+ - Name: Namespace1
+ Tags:
+ - Name: char_box
+ SwiftName: DeepNestedCharBox
+ - Name: Nested2
+ Globals:
+ - Name: varInNestedNamespace
+ SwiftName: swiftAnotherVarInNestedNamespace
+ - Name: InlineNamespace1
+ Functions:
+ - Name: funcInInlineNamespace
+ SwiftName: shouldNotSpellOutInlineNamespaces()
diff --git a/clang/test/APINotes/Inputs/Headers/Namespaces.h b/clang/test/APINotes/Inputs/Headers/Namespaces.h
new file mode 100644
index 000000000000..6a79e996be86
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/Namespaces.h
@@ -0,0 +1,39 @@
+namespace Namespace1 { namespace Nested1 {} }
+
+namespace Namespace1 {
+static int varInNamespace = 1;
+struct char_box { char c; };
+void funcInNamespace();
+
+namespace Nested1 {
+void funcInNestedNamespace(int i);
+struct char_box {
+ char c;
+};
+}
+
+namespace Nested1 {
+static int varInNestedNamespace = 1;
+void funcInNestedNamespace(int i);
+
+namespace Namespace1 {
+struct char_box { char c; };
+} // namespace Namespace1
+} // namespace Nested1
+
+namespace Nested2 {
+static int varInNestedNamespace = 2;
+} // namespace Nested2
+
+namespace Nested1 { namespace Namespace1 {} }
+} // namespace Namespace1
+
+namespace Namespace1 {
+typedef int my_typedef;
+using my_using_decl = int;
+}
+
+inline namespace InlineNamespace1 {
+static int varInInlineNamespace = 3;
+void funcInInlineNamespace();
+}
diff --git a/clang/test/APINotes/Inputs/Headers/PrivateLib.apinotes b/clang/test/APINotes/Inputs/Headers/PrivateLib.apinotes
new file mode 100644
index 000000000000..5f62284aadca
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/PrivateLib.apinotes
@@ -0,0 +1,4 @@
+Name: HeaderLib
+Globals:
+- Name: PrivateLib
+ Type: float
diff --git a/clang/test/APINotes/Inputs/Headers/PrivateLib.h b/clang/test/APINotes/Inputs/Headers/PrivateLib.h
new file mode 100644
index 000000000000..59aeef09bdd3
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/PrivateLib.h
@@ -0,0 +1 @@
+extern int PrivateLib;
diff --git a/clang/test/APINotes/Inputs/Headers/PrivateLib_private.apinotes b/clang/test/APINotes/Inputs/Headers/PrivateLib_private.apinotes
new file mode 100644
index 000000000000..908dae0e3b0b
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/PrivateLib_private.apinotes
@@ -0,0 +1 @@
+garbage here because this file shouldn't get read
diff --git a/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes
new file mode 100644
index 000000000000..5dbb83cab86b
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.apinotes
@@ -0,0 +1,9 @@
+---
+Name: SwiftImportAs
+Tags:
+- Name: ImmortalRefType
+ SwiftImportAs: reference
+- Name: RefCountedType
+ SwiftImportAs: reference
+ SwiftReleaseOp: RCRelease
+ SwiftRetainOp: RCRetain
diff --git a/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h
new file mode 100644
index 000000000000..82b8a6749c4f
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/SwiftImportAs.h
@@ -0,0 +1,6 @@
+struct ImmortalRefType {};
+
+struct RefCountedType { int value; };
+
+inline void RCRetain(RefCountedType *x) { x->value++; }
+inline void RCRelease(RefCountedType *x) { x->value--; }
diff --git a/clang/test/APINotes/Inputs/Headers/Templates.apinotes b/clang/test/APINotes/Inputs/Headers/Templates.apinotes
new file mode 100644
index 000000000000..b7336484da0c
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/Templates.apinotes
@@ -0,0 +1,5 @@
+---
+Name: Templates
+Tags:
+- Name: Box
+ SwiftImportAs: owned
diff --git a/clang/test/APINotes/Inputs/Headers/Templates.h b/clang/test/APINotes/Inputs/Headers/Templates.h
new file mode 100644
index 000000000000..862035fee363
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/Templates.h
@@ -0,0 +1,9 @@
+template <typename T>
+struct Box {
+ T value;
+
+ const T& get_value() const { return value; }
+ const T* get_ptr() const { return &value; }
+};
+
+using IntBox = Box<int>;
diff --git a/clang/test/APINotes/Inputs/Headers/module.modulemap b/clang/test/APINotes/Inputs/Headers/module.modulemap
new file mode 100644
index 000000000000..d515169184f4
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/module.modulemap
@@ -0,0 +1,45 @@
+module ExternCtx {
+ header "ExternCtx.h"
+}
+
+module ExportAsCore {
+ header "ExportAsCore.h"
+ export_as ExportAs
+}
+
+module ExportAs {
+ header "ExportAs.h"
+ export *
+}
+
+module HeaderLib {
+ header "HeaderLib.h"
+}
+
+module InstancetypeModule {
+ header "InstancetypeModule.h"
+}
+
+module BrokenTypes {
+ header "BrokenTypes.h"
+}
+
+module ModuleWithWrongCase {
+ header "ModuleWithWrongCase.h"
+}
+
+module ModuleWithWrongCasePrivate {
+ header "ModuleWithWrongCasePrivate.h"
+}
+
+module Namespaces {
+ header "Namespaces.h"
+}
+
+module Templates {
+ header "Templates.h"
+}
+
+module SwiftImportAs {
+ header "SwiftImportAs.h"
+}
diff --git a/clang/test/APINotes/Inputs/Headers/module.private.modulemap b/clang/test/APINotes/Inputs/Headers/module.private.modulemap
new file mode 100644
index 000000000000..2ecf322ed18d
--- /dev/null
+++ b/clang/test/APINotes/Inputs/Headers/module.private.modulemap
@@ -0,0 +1,5 @@
+module PrivateLib {
+ header "PrivateLib.h"
+}
+
+module ModuleWithWrongCasePrivate.Inner {}
diff --git a/clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.apinotes b/clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.apinotes
new file mode 100644
index 000000000000..77db84400899
--- /dev/null
+++ b/clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.apinotes
@@ -0,0 +1,65 @@
+---
+Name: UIKit
+Classes:
+ - Name: UIFont
+ Methods:
+ - Selector: 'fontWithName:size:'
+ MethodKind: Instance
+ Nullability: [ N ]
+ NullabilityOfRet: O
+ DesignatedInit: true
+# CHECK: duplicate definition of method '-[UIFont fontWithName:size:]'
+ - Selector: 'fontWithName:size:'
+ MethodKind: Instance
+ Nullability: [ N ]
+ NullabilityOfRet: O
+ DesignatedInit: true
+ Properties:
+ - Name: familyName
+ Nullability: N
+ - Name: fontName
+ Nullability: N
+# CHECK: duplicate definition of instance property 'UIFont.familyName'
+ - Name: familyName
+ Nullability: N
+# CHECK: multiple definitions of class 'UIFont'
+ - Name: UIFont
+Protocols:
+ - Name: MyProto
+ AuditedForNullability: true
+# CHECK: multiple definitions of protocol 'MyProto'
+ - Name: MyProto
+ AuditedForNullability: true
+Functions:
+ - Name: 'globalFoo'
+ Nullability: [ N, N, O, S ]
+ NullabilityOfRet: O
+ - Name: 'globalFoo2'
+ Nullability: [ N, N, O, S ]
+ NullabilityOfRet: O
+Globals:
+ - Name: globalVar
+ Nullability: O
+ - Name: globalVar2
+ Nullability: O
+Tags:
+# CHECK: cannot mix EnumKind and FlagEnum (for FlagAndEnumKind)
+ - Name: FlagAndEnumKind
+ FlagEnum: true
+ EnumKind: CFOptions
+# CHECK: cannot mix EnumKind and FlagEnum (for FlagAndEnumKind2)
+ - Name: FlagAndEnumKind2
+ EnumKind: CFOptions
+ FlagEnum: false
+# CHECK: cannot mix EnumKind and EnumExtensibility (for ExtensibilityAndEnumKind)
+ - Name: ExtensibilityAndEnumKind
+ EnumExtensibility: open
+ EnumKind: CFOptions
+# CHECK: cannot mix EnumKind and EnumExtensibility (for ExtensibilityAndEnumKind2)
+ - Name: ExtensibilityAndEnumKind2
+ EnumKind: CFOptions
+ EnumExtensibility: closed
+# CHECK: cannot mix EnumKind and EnumExtensibility (for ExtensibilityAndEnumKind3)
+ - Name: ExtensibilityAndEnumKind3
+ EnumKind: none
+ EnumExtensibility: none
diff --git a/clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.h b/clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.h
new file mode 100644
index 000000000000..55313ae260ae
--- /dev/null
+++ b/clang/test/APINotes/Inputs/yaml-reader-errors/UIKit.h
@@ -0,0 +1 @@
+extern int yesOfCourseThisIsWhatUIKitLooksLike;
diff --git a/clang/test/APINotes/Inputs/yaml-reader-errors/module.modulemap b/clang/test/APINotes/Inputs/yaml-reader-errors/module.modulemap
new file mode 100644
index 000000000000..3d683d705cac
--- /dev/null
+++ b/clang/test/APINotes/Inputs/yaml-reader-errors/module.modulemap
@@ -0,0 +1,3 @@
+module UIKit {
+ header "UIKit.h"
+}
diff --git a/clang/test/APINotes/availability.m b/clang/test/APINotes/availability.m
new file mode 100644
index 000000000000..2ddc2a73da80
--- /dev/null
+++ b/clang/test/APINotes/availability.m
@@ -0,0 +1,48 @@
+// RUN: rm -rf %t
+// RUN: %clang_cc1 -fmodules -Wno-private-module -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify
+
+#include "HeaderLib.h"
+#import <SomeKit/SomeKit.h>
+#import <SomeKit/SomeKit_Private.h>
+
+int main() {
+ int i;
+ i = unavailable_function(); // expected-error{{'unavailable_function' is unavailable: I beg you not to use this}}
+ // expected-note@HeaderLib.h:8{{'unavailable_function' has been explicitly marked unavailable here}}
+ i = unavailable_global_int; // expected-error{{'unavailable_global_int' is unavailable}}
+ // expected-note@HeaderLib.h:9{{'unavailable_global_int' has been explicitly marked unavailable here}}
+
+ unavailable_typedef t; // expected-error{{'unavailable_typedef' is unavailable}}
+ // expected-note@HeaderLib.h:14{{'unavailable_typedef' has been explicitly marked unavailable here}}
+
+ struct unavailable_struct s; // expected-error{{'unavailable_struct' is unavailable}}
+ // expected-note@HeaderLib.h:15{{'unavailable_struct' has been explicitly marked unavailable here}}
+
+ B *b = 0; // expected-error{{'B' is unavailable: just don't}}
+ // expected-note@SomeKit/SomeKit.h:15{{'B' has been explicitly marked unavailable here}}
+
+ id<InternalProtocol> proto = 0; // expected-error{{'InternalProtocol' is unavailable: not for you}}
+ // expected-note@SomeKit/SomeKit_Private.h:12{{'InternalProtocol' has been explicitly marked unavailable here}}
+
+ A *a = 0;
+ i = a.intValue; // expected-error{{intValue' is unavailable: wouldn't work anyway}}
+ // expected-note@SomeKit/SomeKit.h:12{{'intValue' has been explicitly marked unavailable here}}
+
+ [a transform:a]; // expected-error{{'transform:' is unavailable: anything but this}}
+ // expected-note@SomeKit/SomeKit.h:6{{'transform:' has been explicitly marked unavailable here}}
+
+ [a implicitGetOnlyInstance]; // expected-error{{'implicitGetOnlyInstance' is unavailable: getter gone}}
+ // expected-note@SomeKit/SomeKit.h:53{{'implicitGetOnlyInstance' has been explicitly marked unavailable here}}
+ [A implicitGetOnlyClass]; // expected-error{{'implicitGetOnlyClass' is unavailable: getter gone}}
+ // expected-note@SomeKit/SomeKit.h:54{{'implicitGetOnlyClass' has been explicitly marked unavailable here}}
+ [a implicitGetSetInstance]; // expected-error{{'implicitGetSetInstance' is unavailable: getter gone}}
+ // expected-note@SomeKit/SomeKit.h:56{{'implicitGetSetInstance' has been explicitly marked unavailable here}}
+ [a setImplicitGetSetInstance: a]; // expected-error{{'setImplicitGetSetInstance:' is unavailable: setter gone}}
+ // expected-note@SomeKit/SomeKit.h:56{{'setImplicitGetSetInstance:' has been explicitly marked unavailable here}}
+ [A implicitGetSetClass]; // expected-error{{'implicitGetSetClass' is unavailable: getter gone}}
+ // expected-note@SomeKit/SomeKit.h:57{{'implicitGetSetClass' has been explicitly marked unavailable here}}
+ [A setImplicitGetSetClass: a]; // expected-error{{'setImplicitGetSetClass:' is unavailable: setter gone}}
+ // expected-note@SomeKit/SomeKit.h:57{{'setImplicitGetSetClass:' has been explicitly marked unavailable here}}
+ return 0;
+}
+
diff --git a/clang/test/APINotes/broken_types.m b/clang/test/APINotes/broken_types.m
new file mode 100644
index 000000000000..ee33ff7c4b4b
--- /dev/null
+++ b/clang/test/APINotes/broken_types.m
@@ -0,0 +1,19 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s 2> %t.err
+// RUN: FileCheck %s < %t.err
+
+#include "BrokenTypes.h"
+
+// CHECK: <API Notes>:1:1: error: unknown type name 'not_a_type'
+// CHECK-NEXT: not_a_type
+// CHECK-NEXT: ^
+
+// CHECK: <API Notes>:1:7: error: unparsed tokens following type
+// CHECK-NEXT: int * with extra junk
+// CHECK-NEXT: ^
+
+// CHECK: BrokenTypes.h:4:6: error: API notes replacement type 'int *' has a different size from original type 'char'
+
+// CHECK: BrokenTypes.h:6:13: error: API notes replacement type 'double' has a different size from original type 'char'
+
+// CHECK: 5 errors generated.
diff --git a/clang/test/APINotes/case-for-private-apinotes-file.c b/clang/test/APINotes/case-for-private-apinotes-file.c
new file mode 100644
index 000000000000..6aff3db54918
--- /dev/null
+++ b/clang/test/APINotes/case-for-private-apinotes-file.c
@@ -0,0 +1,22 @@
+// REQUIRES: case-insensitive-filesystem
+
+// RUN: rm -rf %t
+// RUN: %clang_cc1 -fsyntax-only -fmodules -fapinotes-modules -fimplicit-module-maps -fmodules-cache-path=%t -F %S/Inputs/Frameworks -I %S/Inputs/Headers %s 2>&1 | FileCheck %s
+
+// RUN: rm -rf %t
+// RUN: %clang_cc1 -fsyntax-only -fmodules -fapinotes-modules -fimplicit-module-maps -fmodules-cache-path=%t -iframework %S/Inputs/Frameworks -isystem %S/Inputs/Headers %s -Werror
+
+// RUN: rm -rf %t
+// RUN: %clang_cc1 -fsyntax-only -fmodules -fapinotes-modules -fimplicit-module-maps -fmodules-cache-path=%t -iframework %S/Inputs/Frameworks -isystem %S/Inputs/Headers %s -Wnonportable-private-system-apinotes-path 2>&1 | FileCheck %s
+
+#include <ModuleWithWrongCase.h>
+#include <ModuleWithWrongCasePrivate.h>
+#include <FrameworkWithWrongCase/FrameworkWithWrongCase.h>
+#include <FrameworkWithWrongCasePrivate/FrameworkWithWrongCasePrivate.h>
+#include <FrameworkWithActualPrivateModule/FrameworkWithActualPrivateModule_Private.h>
+
+// CHECK-NOT: warning:
+// CHECK: warning: private API notes file for module 'ModuleWithWrongCasePrivate' should be named 'ModuleWithWrongCasePrivate_private.apinotes', not 'ModuleWithWrongCasePrivate_Private.apinotes'
+// CHECK-NOT: warning:
+// CHECK: warning: private API notes file for module 'FrameworkWithWrongCasePrivate' should be named 'FrameworkWithWrongCasePrivate_private.apinotes', not 'FrameworkWithWrongCasePrivate_Private.apinotes'
+// CHECK-NOT: warning:
diff --git a/clang/test/APINotes/export-as.c b/clang/test/APINotes/export-as.c
new file mode 100644
index 000000000000..7a8a652ab755
--- /dev/null
+++ b/clang/test/APINotes/export-as.c
@@ -0,0 +1,8 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -ast-dump -ast-dump-filter globalInt -x c | FileCheck %s
+
+#include "ExportAs.h"
+
+// CHECK: Dumping globalInt:
+// CHECK: VarDecl {{.+}} imported in ExportAsCore globalInt 'int'
+// CHECK: UnavailableAttr {{.+}} <<invalid sloc>> "oh no"
diff --git a/clang/test/APINotes/extern-context.cpp b/clang/test/APINotes/extern-context.cpp
new file mode 100644
index 000000000000..331dee002361
--- /dev/null
+++ b/clang/test/APINotes/extern-context.cpp
@@ -0,0 +1,23 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -ast-dump -ast-dump-filter globalInExternC -x c++ | FileCheck -check-prefix=CHECK-EXTERN-C %s
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -ast-dump -ast-dump-filter globalInExternCXX -x c++ | FileCheck -check-prefix=CHECK-EXTERN-CXX %s
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -ast-dump -ast-dump-filter globalFuncInExternC -x c++ | FileCheck -check-prefix=CHECK-FUNC-EXTERN-C %s
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -ast-dump -ast-dump-filter globalFuncInExternCXX -x c++ | FileCheck -check-prefix=CHECK-FUNC-EXTERN-CXX %s
+
+#include "ExternCtx.h"
+
+// CHECK-EXTERN-C: Dumping globalInExternC:
+// CHECK-EXTERN-C: VarDecl {{.+}} imported in ExternCtx globalInExternC 'int'
+// CHECK-EXTERN-C: UnavailableAttr {{.+}} <<invalid sloc>> "oh no"
+
+// CHECK-EXTERN-CXX: Dumping globalInExternCXX:
+// CHECK-EXTERN-CXX: VarDecl {{.+}} imported in ExternCtx globalInExternCXX 'int'
+// CHECK-EXTERN-CXX: UnavailableAttr {{.+}} <<invalid sloc>> "oh no #2"
+
+// CHECK-FUNC-EXTERN-C: Dumping globalFuncInExternC:
+// CHECK-FUNC-EXTERN-C: FunctionDecl {{.+}} imported in ExternCtx globalFuncInExternC 'void ()'
+// CHECK-FUNC-EXTERN-C: UnavailableAttr {{.+}} <<invalid sloc>> "oh no #3"
+
+// CHECK-FUNC-EXTERN-CXX: Dumping globalFuncInExternCXX:
+// CHECK-FUNC-EXTERN-CXX: FunctionDecl {{.+}} imported in ExternCtx globalFuncInExternCXX 'void ()'
+// CHECK-FUNC-EXTERN-CXX: UnavailableAttr {{.+}} <<invalid sloc>> "oh no #4"
diff --git a/clang/test/APINotes/instancetype.m b/clang/test/APINotes/instancetype.m
new file mode 100644
index 000000000000..30339e5386f6
--- /dev/null
+++ b/clang/test/APINotes/instancetype.m
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -verify %s
+
+@import InstancetypeModule;
+
+void test() {
+ // The nullability is here to verify that the API notes were applied.
+ int good = [SomeSubclass instancetypeFactoryMethod]; // expected-error {{initializing 'int' with an expression of type 'SomeSubclass * _Nonnull'}}
+ int bad = [SomeSubclass staticFactoryMethod]; // expected-error {{initializing 'int' with an expression of type 'SomeBaseClass * _Nonnull'}}
+}
diff --git a/clang/test/APINotes/module-cache.m b/clang/test/APINotes/module-cache.m
new file mode 100644
index 000000000000..e5920884ad86
--- /dev/null
+++ b/clang/test/APINotes/module-cache.m
@@ -0,0 +1,66 @@
+// RUN: rm -rf %t
+
+// Set up directories
+// RUN: mkdir -p %t/APINotes
+// RUN: cp %S/Inputs/APINotes/SomeOtherKit.apinotes %t/APINotes/SomeOtherKit.apinotes
+// RUN: mkdir -p %t/Frameworks
+// RUN: cp -r %S/Inputs/Frameworks/SomeOtherKit.framework %t/Frameworks
+
+// First build: check that 'methodB' is unavailable but 'methodA' is available.
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -Rmodule-build -fmodules-cache-path=%t/ModulesCache -iapinotes-modules %t/APINotes -F %t/Frameworks %s > %t/before.log 2>&1
+// RUN: FileCheck -check-prefix=CHECK-METHODB %s < %t/before.log
+// RUN: FileCheck -check-prefix=CHECK-REBUILD %s < %t/before.log
+// RUN: FileCheck -check-prefix=CHECK-ONE-ERROR %s < %t/before.log
+
+// Do it again; now we're using caches.
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -Rmodule-build -fmodules-cache-path=%t/ModulesCache -iapinotes-modules %t/APINotes -F %t/Frameworks %s > %t/before.log 2>&1
+// RUN: FileCheck -check-prefix=CHECK-METHODB %s < %t/before.log
+// RUN: FileCheck -check-prefix=CHECK-WITHOUT-REBUILD %s < %t/before.log
+// RUN: FileCheck -check-prefix=CHECK-ONE-ERROR %s < %t/before.log
+
+// Add a blank line to the header to force the module to rebuild, without
+// (yet) changing API notes.
+// RUN: echo >> %t/Frameworks/SomeOtherKit.framework/Headers/SomeOtherKit.h
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -Rmodule-build -fmodules-cache-path=%t/ModulesCache -iapinotes-modules %t/APINotes -F %t/Frameworks %s > %t/before.log 2>&1
+// RUN: FileCheck -check-prefix=CHECK-METHODB %s < %t/before.log
+// RUN: FileCheck -check-prefix=CHECK-REBUILD %s < %t/before.log
+// RUN: FileCheck -check-prefix=CHECK-ONE-ERROR %s < %t/before.log
+
+// Change the API notes file, after the module has rebuilt once.
+// RUN: chmod u+w %t/APINotes/SomeOtherKit.apinotes
+// RUN: echo ' - Selector: "methodA"' >> %t/APINotes/SomeOtherKit.apinotes
+// RUN: echo ' MethodKind: Instance' >> %t/APINotes/SomeOtherKit.apinotes
+// RUN: echo ' Availability: none' >> %t/APINotes/SomeOtherKit.apinotes
+// RUN: echo ' AvailabilityMsg: "not here either"' >> %t/APINotes/SomeOtherKit.apinotes
+
+// Build again: check that both methods are now unavailable and that the module rebuilt.
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -Rmodule-build -fmodules-cache-path=%t/ModulesCache -iapinotes-modules %t/APINotes -F %t/Frameworks %s > %t/after.log 2>&1
+// RUN: FileCheck -check-prefix=CHECK-METHODA %s < %t/after.log
+// RUN: FileCheck -check-prefix=CHECK-METHODB %s < %t/after.log
+// RUN: FileCheck -check-prefix=CHECK-REBUILD %s < %t/after.log
+// RUN: FileCheck -check-prefix=CHECK-TWO-ERRORS %s < %t/after.log
+
+// Run the build again: check that both methods are now unavailable
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -Rmodule-build -fmodules-cache-path=%t/ModulesCache -iapinotes-modules %t/APINotes -F %t/Frameworks %s > %t/after.log 2>&1
+// RUN: FileCheck -check-prefix=CHECK-METHODA %s < %t/after.log
+// RUN: FileCheck -check-prefix=CHECK-METHODB %s < %t/after.log
+// RUN: FileCheck -check-prefix=CHECK-WITHOUT-REBUILD %s < %t/after.log
+// RUN: FileCheck -check-prefix=CHECK-TWO-ERRORS %s < %t/after.log
+
+@import SomeOtherKit;
+
+void test(A *a) {
+ // CHECK-METHODA: error: 'methodA' is unavailable: not here either
+ [a methodA];
+
+ // CHECK-METHODB: error: 'methodB' is unavailable: anything but this
+ [a methodB];
+}
+
+// CHECK-REBUILD: remark: building module{{.*}}SomeOtherKit
+
+// CHECK-WITHOUT-REBUILD-NOT: remark: building module{{.*}}SomeOtherKit
+
+// CHECK-ONE-ERROR: 1 error generated.
+// CHECK-TWO-ERRORS: 2 errors generated.
+
diff --git a/clang/test/APINotes/namespaces.cpp b/clang/test/APINotes/namespaces.cpp
new file mode 100644
index 000000000000..2f9d93c2ea0e
--- /dev/null
+++ b/clang/test/APINotes/namespaces.cpp
@@ -0,0 +1,69 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -x objective-c++
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::my_typedef -x objective-c++ | FileCheck -check-prefix=CHECK-TYPEDEF-IN-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::my_using_decl -x objective-c++ | FileCheck -check-prefix=CHECK-USING-DECL-IN-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::varInNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-GLOBAL-IN-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::funcInNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-FUNC-IN-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::char_box -x objective-c++ | FileCheck -check-prefix=CHECK-STRUCT-IN-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::Nested1::varInNestedNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-GLOBAL-IN-NESTED-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::Nested2::varInNestedNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-ANOTHER-GLOBAL-IN-NESTED-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::Nested1::char_box -x objective-c++ | FileCheck -check-prefix=CHECK-STRUCT-IN-NESTED-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::Nested1::funcInNestedNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-FUNC-IN-NESTED-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Namespace1::Nested1::Namespace1::char_box -x objective-c++ | FileCheck -check-prefix=CHECK-STRUCT-IN-DEEP-NESTED-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter varInInlineNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-GLOBAL-IN-INLINE-NAMESPACE %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/CxxInterop -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter funcInInlineNamespace -x objective-c++ | FileCheck -check-prefix=CHECK-FUNC-IN-INLINE-NAMESPACE %s
+
+#import <Namespaces.h>
+
+// CHECK-TYPEDEF-IN-NAMESPACE: Dumping Namespace1::my_typedef:
+// CHECK-TYPEDEF-IN-NAMESPACE-NEXT: TypedefDecl {{.+}} imported in Namespaces my_typedef 'int'
+// CHECK-TYPEDEF-IN-NAMESPACE: SwiftNameAttr {{.+}} <<invalid sloc>> "SwiftTypedef"
+
+// CHECK-USING-DECL-IN-NAMESPACE: Dumping Namespace1::my_using_decl:
+// CHECK-USING-DECL-IN-NAMESPACE-NEXT: TypeAliasDecl {{.+}} imported in Namespaces my_using_decl 'int'
+// CHECK-USING-DECL-IN-NAMESPACE: SwiftNameAttr {{.+}} <<invalid sloc>> "SwiftUsingDecl"
+
+// CHECK-GLOBAL-IN-NAMESPACE: Dumping Namespace1::varInNamespace:
+// CHECK-GLOBAL-IN-NAMESPACE-NEXT: VarDecl {{.+}} imported in Namespaces varInNamespace 'int' static cinit
+// CHECK-GLOBAL-IN-NAMESPACE-NEXT: IntegerLiteral {{.+}} 'int' 1
+// CHECK-GLOBAL-IN-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftVarInNamespace"
+
+// CHECK-FUNC-IN-NAMESPACE: Dumping Namespace1::funcInNamespace:
+// CHECK-FUNC-IN-NAMESPACE-NEXT: FunctionDecl {{.+}} imported in Namespaces funcInNamespace 'void ()'
+// CHECK-FUNC-IN-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftFuncInNamespace()"
+
+// CHECK-STRUCT-IN-NAMESPACE: Dumping Namespace1::char_box:
+// CHECK-STRUCT-IN-NAMESPACE-NEXT: CXXRecordDecl {{.+}} imported in Namespaces <undeserialized declarations> struct char_box
+// CHECK-STRUCT-IN-NAMESPACE: SwiftNameAttr {{.+}} <<invalid sloc>> "CharBox"
+
+// CHECK-GLOBAL-IN-NESTED-NAMESPACE: Dumping Namespace1::Nested1::varInNestedNamespace:
+// CHECK-GLOBAL-IN-NESTED-NAMESPACE-NEXT: VarDecl {{.+}} imported in Namespaces varInNestedNamespace 'int' static cinit
+// CHECK-GLOBAL-IN-NESTED-NAMESPACE-NEXT: IntegerLiteral {{.+}} 'int' 1
+// CHECK-GLOBAL-IN-NESTED-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftVarInNestedNamespace"
+
+// CHECK-ANOTHER-GLOBAL-IN-NESTED-NAMESPACE: Dumping Namespace1::Nested2::varInNestedNamespace:
+// CHECK-ANOTHER-GLOBAL-IN-NESTED-NAMESPACE-NEXT: VarDecl {{.+}} imported in Namespaces varInNestedNamespace 'int' static cinit
+// CHECK-ANOTHER-GLOBAL-IN-NESTED-NAMESPACE-NEXT: IntegerLiteral {{.+}} 'int' 2
+// CHECK-ANOTHER-GLOBAL-IN-NESTED-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftAnotherVarInNestedNamespace"
+
+// CHECK-FUNC-IN-NESTED-NAMESPACE: Dumping Namespace1::Nested1::funcInNestedNamespace:
+// CHECK-FUNC-IN-NESTED-NAMESPACE-NEXT: FunctionDecl {{.+}} imported in Namespaces funcInNestedNamespace 'void (int)'
+// CHECK-FUNC-IN-NESTED-NAMESPACE-NEXT: ParmVarDecl {{.+}} i 'int'
+// CHECK-FUNC-IN-NESTED-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftFuncInNestedNamespace(_:)"
+
+// CHECK-STRUCT-IN-NESTED-NAMESPACE: Dumping Namespace1::Nested1::char_box:
+// CHECK-STRUCT-IN-NESTED-NAMESPACE-NEXT: CXXRecordDecl {{.+}} imported in Namespaces <undeserialized declarations> struct char_box
+// CHECK-STRUCT-IN-NESTED-NAMESPACE: SwiftNameAttr {{.+}} <<invalid sloc>> "NestedCharBox"
+
+// CHECK-STRUCT-IN-DEEP-NESTED-NAMESPACE: Dumping Namespace1::Nested1::Namespace1::char_box:
+// CHECK-STRUCT-IN-DEEP-NESTED-NAMESPACE-NEXT: CXXRecordDecl {{.+}} imported in Namespaces <undeserialized declarations> struct char_box
+// CHECK-STRUCT-IN-DEEP-NESTED-NAMESPACE: SwiftNameAttr {{.+}} <<invalid sloc>> "DeepNestedCharBox"
+
+// CHECK-GLOBAL-IN-INLINE-NAMESPACE: Dumping varInInlineNamespace:
+// CHECK-GLOBAL-IN-INLINE-NAMESPACE-NEXT: VarDecl {{.+}} imported in Namespaces varInInlineNamespace 'int' static cinit
+// CHECK-GLOBAL-IN-INLINE-NAMESPACE-NEXT: IntegerLiteral {{.+}} 'int' 3
+// CHECK-GLOBAL-IN-INLINE-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftVarInInlineNamespace"
+
+// CHECK-FUNC-IN-INLINE-NAMESPACE: Dumping funcInInlineNamespace:
+// CHECK-FUNC-IN-INLINE-NAMESPACE-NEXT: FunctionDecl {{.+}} imported in Namespaces funcInInlineNamespace 'void ()'
+// CHECK-FUNC-IN-INLINE-NAMESPACE-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "swiftFuncInInlineNamespace()"
diff --git a/clang/test/APINotes/nullability.c b/clang/test/APINotes/nullability.c
new file mode 100644
index 000000000000..e07fc2e5c117
--- /dev/null
+++ b/clang/test/APINotes/nullability.c
@@ -0,0 +1,21 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify
+
+#include "HeaderLib.h"
+
+int main() {
+ custom_realloc(0, 0); // expected-warning{{null passed to a callee that requires a non-null argument}}
+ int i = 0;
+ do_something_with_pointers(&i, 0);
+ do_something_with_pointers(0, &i); // expected-warning{{null passed to a callee that requires a non-null argument}}
+
+ extern void *p;
+ do_something_with_arrays(0, p); // expected-warning{{null passed to a callee that requires a non-null argument}}
+ do_something_with_arrays(p, 0); // expected-warning{{null passed to a callee that requires a non-null argument}}
+
+ take_pointer_and_int(0, 0); // expected-warning{{null passed to a callee that requires a non-null argument}}
+
+ float *fp = global_int; // expected-warning{{incompatible pointer types initializing 'float *' with an expression of type 'int * _Nonnull'}}
+ return 0;
+}
+
diff --git a/clang/test/APINotes/nullability.m b/clang/test/APINotes/nullability.m
new file mode 100644
index 000000000000..21ec6680fa71
--- /dev/null
+++ b/clang/test/APINotes/nullability.m
@@ -0,0 +1,46 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -Wno-private-module -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify
+
+// Test with Swift version 3.0. This should only affect the few APIs that have an entry in the 3.0 tables.
+
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -Wno-private-module -fapinotes-swift-version=3.0 -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify -DSWIFT_VERSION_3_0 -fmodules-ignore-macro=SWIFT_VERSION_3_0
+
+#import <SomeKit/SomeKit.h>
+
+int main() {
+ A *a;
+
+#if SWIFT_VERSION_3_0
+ float *fp = // expected-warning{{incompatible pointer types initializing 'float *' with an expression of type 'A * _Nullable'}}
+ [a transform: 0 integer: 0];
+#else
+ float *fp = // expected-warning{{incompatible pointer types initializing 'float *' with an expression of type 'A *'}}
+ [a transform: 0 integer: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+#endif
+
+ [a setNonnullAInstance: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+ [A setNonnullAInstance: 0]; // no warning
+ a.nonnullAInstance = 0; // expected-warning{{null passed to a callee that requires a non-null argument}}
+ A* _Nonnull aPtr = a.nonnullAInstance; // no warning
+
+ [a setNonnullAClass: 0]; // no warning
+ [A setNonnullAClass: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+
+ [a setNonnullABoth: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+ [A setNonnullABoth: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+
+ [a setInternalProperty: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+
+#if SWIFT_VERSION_3_0
+ // Version 3 information overrides header information.
+ [a setExplicitNonnullInstance: 0]; // okay
+ [a setExplicitNullableInstance: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+#else
+ // Header information overrides unversioned information.
+ [a setExplicitNonnullInstance: 0]; // expected-warning{{null passed to a callee that requires a non-null argument}}
+ [a setExplicitNullableInstance: 0]; // okay
+#endif
+
+ return 0;
+}
+
diff --git a/clang/test/APINotes/objc-forward-declarations.m b/clang/test/APINotes/objc-forward-declarations.m
new file mode 100644
index 000000000000..e82bed205550
--- /dev/null
+++ b/clang/test/APINotes/objc-forward-declarations.m
@@ -0,0 +1,12 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fsyntax-only -F %S/Inputs/Frameworks %s -verify
+
+@import LayeredKit;
+
+void test(
+ UpwardClass *okayClass,
+ id <UpwardProto> okayProto,
+ PerfectlyNormalClass *badClass // expected-error {{'PerfectlyNormalClass' is unavailable}}
+) {
+ // expected-note@LayeredKitImpl/LayeredKitImpl.h:4 {{'PerfectlyNormalClass' has been explicitly marked unavailable here}}
+}
diff --git a/clang/test/APINotes/objc_designated_inits.m b/clang/test/APINotes/objc_designated_inits.m
new file mode 100644
index 000000000000..1f2b8ed534b7
--- /dev/null
+++ b/clang/test/APINotes/objc_designated_inits.m
@@ -0,0 +1,17 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -Wno-private-module -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify
+
+#include "HeaderLib.h"
+#import <SomeKit/SomeKit.h>
+
+@interface CSub : C
+-(instancetype)initWithA:(A*)a;
+@end
+
+@implementation CSub
+-(instancetype)initWithA:(A*)a { // expected-warning{{designated initializer missing a 'super' call to a designated initializer of the super class}}
+ // expected-note@SomeKit/SomeKit.h:20 2{{method marked as designated initializer of the class here}}
+ self = [super init]; // expected-warning{{designated initializer invoked a non-designated initializer}}
+ return self;
+}
+@end
diff --git a/clang/test/APINotes/properties.m b/clang/test/APINotes/properties.m
new file mode 100644
index 000000000000..f218092a66e1
--- /dev/null
+++ b/clang/test/APINotes/properties.m
@@ -0,0 +1,42 @@
+// RUN: rm -rf %t && mkdir -p %t
+
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fblocks -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter 'TestProperties::' | FileCheck -check-prefix=CHECK -check-prefix=CHECK-4 %s
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fblocks -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter 'TestProperties::' -fapinotes-swift-version=3 | FileCheck -check-prefix=CHECK -check-prefix=CHECK-3 %s
+
+@import VersionedKit;
+
+// CHECK-LABEL: ObjCPropertyDecl {{.+}} accessorsOnly 'id'
+// CHECK-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-NOT: Attr
+
+// CHECK-LABEL: ObjCPropertyDecl {{.+}} accessorsOnlyForClass 'id'
+// CHECK-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-NOT: Attr
+
+// CHECK-LABEL: ObjCPropertyDecl {{.+}} accessorsOnlyInVersion3 'id'
+// CHECK-3-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-4-NEXT: SwiftVersionedAdditionAttr {{.+}} 3.0{{$}}
+// CHECK-4-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-NOT: Attr
+
+// CHECK-LABEL: ObjCPropertyDecl {{.+}} accessorsOnlyForClassInVersion3 'id'
+// CHECK-3-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-4-NEXT: SwiftVersionedAdditionAttr {{.+}} 3.0{{$}}
+// CHECK-4-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-NOT: Attr
+
+// CHECK-LABEL: ObjCPropertyDecl {{.+}} accessorsOnlyExceptInVersion3 'id'
+// CHECK-3-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0 IsReplacedByActive{{$}}
+// CHECK-3-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-4-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-4-NEXT: SwiftVersionedRemovalAttr {{.+}} Implicit 3.0 {{[0-9]+}}
+// CHECK-NOT: Attr
+
+// CHECK-LABEL: ObjCPropertyDecl {{.+}} accessorsOnlyForClassExceptInVersion3 'id'
+// CHECK-3-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0 IsReplacedByActive{{$}}
+// CHECK-3-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-4-NEXT: SwiftImportPropertyAsAccessorsAttr {{.+}} <<invalid sloc>>
+// CHECK-4-NEXT: SwiftVersionedRemovalAttr {{.+}} Implicit 3.0 {{[0-9]+}}
+// CHECK-NOT: Attr
+
+// CHECK-LABEL: Decl
diff --git a/clang/test/APINotes/retain-count-convention.m b/clang/test/APINotes/retain-count-convention.m
new file mode 100644
index 000000000000..4bf9610a352a
--- /dev/null
+++ b/clang/test/APINotes/retain-count-convention.m
@@ -0,0 +1,38 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fdisable-module-hash -fsyntax-only -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/SimpleKit.pcm | FileCheck %s
+// RUN: %clang_cc1 -ast-dump -ast-dump-filter 'DUMP' %t/ModulesCache/SimpleKit.pcm | FileCheck -check-prefix CHECK-DUMP %s
+
+#import <SimpleKit/SimpleKit.h>
+
+// CHECK: void *getCFOwnedToUnowned(void) __attribute__((cf_returns_not_retained));
+// CHECK: void *getCFUnownedToOwned(void) __attribute__((cf_returns_retained));
+// CHECK: void *getCFOwnedToNone(void) __attribute__((cf_unknown_transfer));
+// CHECK: id getObjCOwnedToUnowned(void) __attribute__((ns_returns_not_retained));
+// CHECK: id getObjCUnownedToOwned(void) __attribute__((ns_returns_retained));
+// CHECK: int indirectGetCFOwnedToUnowned(void * _Nullable *out __attribute__((cf_returns_not_retained)));
+// CHECK: int indirectGetCFUnownedToOwned(void * _Nullable *out __attribute__((cf_returns_retained)));
+// CHECK: int indirectGetCFOwnedToNone(void * _Nullable *out);
+// CHECK: int indirectGetCFNoneToOwned(void **out __attribute__((cf_returns_not_retained)));
+
+// CHECK-LABEL: @interface MethodTest
+// CHECK: - (id)getOwnedToUnowned __attribute__((ns_returns_not_retained));
+// CHECK: - (id)getUnownedToOwned __attribute__((ns_returns_retained));
+// CHECK: @end
+
+// CHECK-DUMP-LABEL: Dumping getCFAuditedToUnowned_DUMP:
+// CHECK-DUMP-NEXT: FunctionDecl
+// CHECK-DUMP-NEXT: CFReturnsNotRetainedAttr
+// CHECK-DUMP-NEXT: CFAuditedTransferAttr
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping getCFAuditedToOwned_DUMP:
+// CHECK-DUMP-NEXT: FunctionDecl
+// CHECK-DUMP-NEXT: CFReturnsRetainedAttr
+// CHECK-DUMP-NEXT: CFAuditedTransferAttr
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping getCFAuditedToNone_DUMP:
+// CHECK-DUMP-NEXT: FunctionDecl
+// CHECK-DUMP-NEXT: CFUnknownTransferAttr
+// CHECK-DUMP-NOT: Attr
diff --git a/clang/test/APINotes/search-order.m b/clang/test/APINotes/search-order.m
new file mode 100644
index 000000000000..17e81d5eb2d6
--- /dev/null
+++ b/clang/test/APINotes/search-order.m
@@ -0,0 +1,25 @@
+// RUN: rm -rf %t && mkdir -p %t
+
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -DFROM_FRAMEWORK=1 -verify
+
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -iapinotes-modules %S/Inputs/APINotes -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -DFROM_SEARCH_PATH=1 -verify
+
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -iapinotes-modules %S/Inputs/APINotes -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -DFROM_FRAMEWORK=1 -verify
+
+@import SomeOtherKit;
+
+void test(A *a) {
+#if FROM_FRAMEWORK
+ [a methodA]; // expected-error{{unavailable}}
+ [a methodB];
+
+ // expected-note@SomeOtherKit/SomeOtherKit.h:5{{'methodA' has been explicitly marked unavailable here}}
+#elif FROM_SEARCH_PATH
+ [a methodA];
+ [a methodB]; // expected-error{{unavailable}}
+
+ // expected-note@SomeOtherKit/SomeOtherKit.h:6{{'methodB' has been explicitly marked unavailable here}}
+#else
+# error Not something we need to test
+#endif
+}
diff --git a/clang/test/APINotes/swift-import-as.cpp b/clang/test/APINotes/swift-import-as.cpp
new file mode 100644
index 000000000000..904857e58593
--- /dev/null
+++ b/clang/test/APINotes/swift-import-as.cpp
@@ -0,0 +1,16 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -x c++
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter ImmortalRefType | FileCheck -check-prefix=CHECK-IMMORTAL %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers %s -x c++ -ast-dump -ast-dump-filter RefCountedType | FileCheck -check-prefix=CHECK-REF-COUNTED %s
+
+#include <SwiftImportAs.h>
+
+// CHECK-IMMORTAL: Dumping ImmortalRefType:
+// CHECK-IMMORTAL-NEXT: CXXRecordDecl {{.+}} imported in SwiftImportAs {{.+}} struct ImmortalRefType
+// CHECK-IMMORTAL: SwiftAttrAttr {{.+}} <<invalid sloc>> "import_reference"
+
+// CHECK-REF-COUNTED: Dumping RefCountedType:
+// CHECK-REF-COUNTED-NEXT: CXXRecordDecl {{.+}} imported in SwiftImportAs {{.+}} struct RefCountedType
+// CHECK-REF-COUNTED: SwiftAttrAttr {{.+}} <<invalid sloc>> "import_reference"
+// CHECK-REF-COUNTED: SwiftAttrAttr {{.+}} <<invalid sloc>> "retain:RCRetain"
+// CHECK-REF-COUNTED: SwiftAttrAttr {{.+}} <<invalid sloc>> "release:RCRelease"
diff --git a/clang/test/APINotes/templates.cpp b/clang/test/APINotes/templates.cpp
new file mode 100644
index 000000000000..d4dce291615e
--- /dev/null
+++ b/clang/test/APINotes/templates.cpp
@@ -0,0 +1,9 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Tmpl -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -x c++
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Tmpl -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter Box -x c++ | FileCheck -check-prefix=CHECK-BOX %s
+
+#include "Templates.h"
+
+// CHECK-BOX: Dumping Box:
+// CHECK-BOX-NEXT: ClassTemplateDecl {{.+}} imported in Templates Box
+// CHECK-BOX: SwiftAttrAttr {{.+}} <<invalid sloc>> "import_owned"
diff --git a/clang/test/APINotes/top-level-private-modules.c b/clang/test/APINotes/top-level-private-modules.c
new file mode 100644
index 000000000000..0da72b2e36f4
--- /dev/null
+++ b/clang/test/APINotes/top-level-private-modules.c
@@ -0,0 +1,8 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -Wno-private-module -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify
+
+#include <PrivateLib.h>
+#include <TopLevelPrivateKit/TopLevelPrivateKit_Private.h>
+
+void *testPlain = PrivateLib; // expected-error {{initializing 'void *' with an expression of incompatible type 'float'}}
+void *testFramework = TopLevelPrivateKit_Private; // expected-error {{initializing 'void *' with an expression of incompatible type 'float'}}
diff --git a/clang/test/APINotes/types.m b/clang/test/APINotes/types.m
new file mode 100644
index 000000000000..133d504713d7
--- /dev/null
+++ b/clang/test/APINotes/types.m
@@ -0,0 +1,28 @@
+// RUN: rm -rf %t && mkdir -p %t
+// RUN: %clang_cc1 -fmodules -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache -fapinotes-modules -Wno-private-module -fdisable-module-hash -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -verify
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/SimpleKit.pcm | FileCheck %s
+
+#import <SomeKit/SomeKit.h>
+#import <SimpleKit/SimpleKit.h>
+
+// CHECK: struct __attribute__((swift_name("SuccessfullyRenamedA"))) RenamedAgainInAPINotesA {
+// CHECK: struct __attribute__((swift_name("SuccessfullyRenamedB"))) RenamedAgainInAPINotesB {
+
+void test(OverriddenTypes *overridden) {
+ int *ip1 = global_int_ptr; // expected-warning{{incompatible pointer types initializing 'int *' with an expression of type 'double (*)(int, int)'}}
+
+ int *ip2 = global_int_fun( // expected-warning{{incompatible pointer types initializing 'int *' with an expression of type 'char *'}}
+ ip2, // expected-warning{{incompatible pointer types passing 'int *' to parameter of type 'double *'}}
+ ip2); // expected-warning{{incompatible pointer types passing 'int *' to parameter of type 'float *'}}
+
+ int *ip3 = [overridden // expected-warning{{incompatible pointer types initializing 'int *' with an expression of type 'char *'}}
+ methodToMangle: ip3 // expected-warning{{incompatible pointer types sending 'int *' to parameter of type 'double *'}}
+ second: ip3]; // expected-warning{{incompatible pointer types sending 'int *' to parameter of type 'float *'}}
+
+ int *ip4 = overridden.intPropertyToMangle; // expected-warning{{incompatible pointer types initializing 'int *' with an expression of type 'double *'}}
+}
+
+// expected-note@SomeKit/SomeKit.h:42{{passing argument to parameter 'ptr' here}}
+// expected-note@SomeKit/SomeKit.h:42{{passing argument to parameter 'ptr2' here}}
+// expected-note@SomeKit/SomeKit.h:48{{passing argument to parameter 'ptr1' here}}
+// expected-note@SomeKit/SomeKit.h:48{{passing argument to parameter 'ptr2' here}}
diff --git a/clang/test/APINotes/versioned-multi.c b/clang/test/APINotes/versioned-multi.c
new file mode 100644
index 000000000000..48c51fd932e1
--- /dev/null
+++ b/clang/test/APINotes/versioned-multi.c
@@ -0,0 +1,69 @@
+// RUN: rm -rf %t && mkdir -p %t
+
+// Build and check the unversioned module file.
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Unversioned -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/Unversioned/VersionedKit.pcm | FileCheck -check-prefix=CHECK-UNVERSIONED %s
+
+// Build and check the various versions.
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Versioned3 -fdisable-module-hash -fapinotes-modules -fapinotes-swift-version=3 -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/Versioned3/VersionedKit.pcm | FileCheck -check-prefix=CHECK-VERSIONED-3 %s
+
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Versioned4 -fdisable-module-hash -fapinotes-modules -fapinotes-swift-version=4 -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/Versioned4/VersionedKit.pcm | FileCheck -check-prefix=CHECK-VERSIONED-4 %s
+
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Versioned5 -fdisable-module-hash -fapinotes-modules -fapinotes-swift-version=5 -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/Versioned5/VersionedKit.pcm | FileCheck -check-prefix=CHECK-VERSIONED-5 %s
+
+#import <VersionedKit/VersionedKit.h>
+
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef4;
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef4Notes __attribute__((swift_name("MultiVersionedTypedef4Notes_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef4Header __attribute__((swift_name("MultiVersionedTypedef4Header_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef34;
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef34Notes __attribute__((swift_name("MultiVersionedTypedef34Notes_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef34Header __attribute__((swift_name("MultiVersionedTypedef34Header_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef45;
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef45Notes __attribute__((swift_name("MultiVersionedTypedef45Notes_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef45Header __attribute__((swift_name("MultiVersionedTypedef45Header_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef345;
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef345Notes __attribute__((swift_name("MultiVersionedTypedef345Notes_NEW")));
+// CHECK-UNVERSIONED: typedef int MultiVersionedTypedef345Header __attribute__((swift_name("MultiVersionedTypedef345Header_NEW")));
+
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef4 __attribute__((swift_name("MultiVersionedTypedef4_4")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef4Notes __attribute__((swift_name("MultiVersionedTypedef4Notes_4")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef4Header __attribute__((swift_name("MultiVersionedTypedef4Header_4")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef34 __attribute__((swift_name("MultiVersionedTypedef34_3")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef34Notes __attribute__((swift_name("MultiVersionedTypedef34Notes_3")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef34Header __attribute__((swift_name("MultiVersionedTypedef34Header_3")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef45 __attribute__((swift_name("MultiVersionedTypedef45_4")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef45Notes __attribute__((swift_name("MultiVersionedTypedef45Notes_4")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef45Header __attribute__((swift_name("MultiVersionedTypedef45Header_4")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef345 __attribute__((swift_name("MultiVersionedTypedef345_3")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef345Notes __attribute__((swift_name("MultiVersionedTypedef345Notes_3")));
+// CHECK-VERSIONED-3: typedef int MultiVersionedTypedef345Header __attribute__((swift_name("MultiVersionedTypedef345Header_3")));
+
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef4 __attribute__((swift_name("MultiVersionedTypedef4_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef4Notes __attribute__((swift_name("MultiVersionedTypedef4Notes_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef4Header __attribute__((swift_name("MultiVersionedTypedef4Header_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef34 __attribute__((swift_name("MultiVersionedTypedef34_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef34Notes __attribute__((swift_name("MultiVersionedTypedef34Notes_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef34Header __attribute__((swift_name("MultiVersionedTypedef34Header_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef45 __attribute__((swift_name("MultiVersionedTypedef45_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef45Notes __attribute__((swift_name("MultiVersionedTypedef45Notes_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef45Header __attribute__((swift_name("MultiVersionedTypedef45Header_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef345 __attribute__((swift_name("MultiVersionedTypedef345_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef345Notes __attribute__((swift_name("MultiVersionedTypedef345Notes_4")));
+// CHECK-VERSIONED-4: typedef int MultiVersionedTypedef345Header __attribute__((swift_name("MultiVersionedTypedef345Header_4")));
+
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef4;
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef4Notes __attribute__((swift_name("MultiVersionedTypedef4Notes_NEW")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef4Header __attribute__((swift_name("MultiVersionedTypedef4Header_NEW")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef34;
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef34Notes __attribute__((swift_name("MultiVersionedTypedef34Notes_NEW")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef34Header __attribute__((swift_name("MultiVersionedTypedef34Header_NEW")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef45 __attribute__((swift_name("MultiVersionedTypedef45_5")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef45Notes __attribute__((swift_name("MultiVersionedTypedef45Notes_5")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef45Header __attribute__((swift_name("MultiVersionedTypedef45Header_5")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef345 __attribute__((swift_name("MultiVersionedTypedef345_5")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef345Notes __attribute__((swift_name("MultiVersionedTypedef345Notes_5")));
+// CHECK-VERSIONED-5: typedef int MultiVersionedTypedef345Header __attribute__((swift_name("MultiVersionedTypedef345Header_5")));
diff --git a/clang/test/APINotes/versioned.m b/clang/test/APINotes/versioned.m
new file mode 100644
index 000000000000..61cc8c3f7c4d
--- /dev/null
+++ b/clang/test/APINotes/versioned.m
@@ -0,0 +1,187 @@
+// RUN: rm -rf %t && mkdir -p %t
+
+// Build and check the unversioned module file.
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Unversioned -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/Unversioned/VersionedKit.pcm | FileCheck -check-prefix=CHECK-UNVERSIONED %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Unversioned -fdisable-module-hash -fapinotes-modules -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter 'DUMP' | FileCheck -check-prefix=CHECK-DUMP -check-prefix=CHECK-UNVERSIONED-DUMP %s
+
+// Build and check the versioned module file.
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Versioned -fdisable-module-hash -fapinotes-modules -fapinotes-swift-version=3 -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s
+// RUN: %clang_cc1 -ast-print %t/ModulesCache/Versioned/VersionedKit.pcm | FileCheck -check-prefix=CHECK-VERSIONED %s
+// RUN: %clang_cc1 -fmodules -fblocks -fimplicit-module-maps -fmodules-cache-path=%t/ModulesCache/Versioned -fdisable-module-hash -fapinotes-modules -fapinotes-swift-version=3 -fsyntax-only -I %S/Inputs/Headers -F %S/Inputs/Frameworks %s -ast-dump -ast-dump-filter 'DUMP' | FileCheck -check-prefix=CHECK-DUMP -check-prefix=CHECK-VERSIONED-DUMP %s
+
+#import <VersionedKit/VersionedKit.h>
+
+// CHECK-UNVERSIONED: void moveToPointDUMP(double x, double y) __attribute__((swift_name("moveTo(x:y:)")));
+// CHECK-VERSIONED: void moveToPointDUMP(double x, double y) __attribute__((swift_name("moveTo(a:b:)")));
+
+// CHECK-DUMP-LABEL: Dumping moveToPointDUMP
+// CHECK-VERSIONED-DUMP: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0 IsReplacedByActive{{$}}
+// CHECK-VERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} "moveTo(x:y:)"
+// CHECK-VERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "moveTo(a:b:)"
+// CHECK-UNVERSIONED-DUMP: SwiftNameAttr {{.+}} "moveTo(x:y:)"
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0{{$}}
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "moveTo(a:b:)"
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping unversionedRenameDUMP
+// CHECK-DUMP: in VersionedKit unversionedRenameDUMP
+// CHECK-DUMP-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 0 IsReplacedByActive{{$}}
+// CHECK-DUMP-NEXT: SwiftNameAttr {{.+}} "unversionedRename_HEADER()"
+// CHECK-DUMP-NEXT: SwiftNameAttr {{.+}} "unversionedRename_NOTES()"
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping TestGenericDUMP
+// CHECK-VERSIONED-DUMP: SwiftImportAsNonGenericAttr {{.+}} <<invalid sloc>>
+// CHECK-UNVERSIONED-DUMP: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0{{$}}
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftImportAsNonGenericAttr {{.+}} <<invalid sloc>>
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping Swift3RenamedOnlyDUMP
+// CHECK-DUMP: in VersionedKit Swift3RenamedOnlyDUMP
+// CHECK-VERSIONED-DUMP-NEXT: SwiftVersionedRemovalAttr {{.+}} Implicit 3.0 {{[0-9]+}} IsReplacedByActive{{$}}
+// CHECK-VERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} "SpecialSwift3Name"
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0{{$}}
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "SpecialSwift3Name"
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping Swift3RenamedAlsoDUMP
+// CHECK-DUMP: in VersionedKit Swift3RenamedAlsoDUMP
+// CHECK-VERSIONED-DUMP-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0 IsReplacedByActive{{$}}
+// CHECK-VERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <line:{{.+}}, col:{{.+}}> "Swift4Name"
+// CHECK-VERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} "SpecialSwift3Also"
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <line:{{.+}}, col:{{.+}}> "Swift4Name"
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 3.0{{$}}
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "SpecialSwift3Also"
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-LABEL: Dumping Swift4RenamedDUMP
+// CHECK-DUMP: in VersionedKit Swift4RenamedDUMP
+// CHECK-VERSIONED-DUMP-NEXT: SwiftVersionedRemovalAttr {{.+}} Implicit 4 {{[0-9]+}} IsReplacedByActive{{$}}
+// CHECK-VERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} "SpecialSwift4Name"
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftVersionedAdditionAttr {{.+}} Implicit 4{{$}}
+// CHECK-UNVERSIONED-DUMP-NEXT: SwiftNameAttr {{.+}} <<invalid sloc>> "SpecialSwift4Name"
+// CHECK-DUMP-NOT: Attr
+
+// CHECK-DUMP-NOT: Dumping
+
+// CHECK-UNVERSIONED: void acceptClosure(void (^block)(void) __attribute__((noescape)));
+// CHECK-VERSIONED: void acceptClosure(void (^block)(void));
+
+// CHECK-UNVERSIONED: void privateFunc(void) __attribute__((swift_private));
+
+// CHECK-UNVERSIONED: typedef double MyDoubleWrapper __attribute__((swift_wrapper("struct")));
+
+// CHECK-UNVERSIONED: enum __attribute__((ns_error_domain(MyErrorDomain))) MyErrorCode {
+// CHECK-UNVERSIONED-NEXT: MyErrorCodeFailed = 1
+// CHECK-UNVERSIONED-NEXT: };
+
+// CHECK-UNVERSIONED: __attribute__((swift_bridge("MyValueType")))
+// CHECK-UNVERSIONED: @interface MyReferenceType
+
+// CHECK-VERSIONED: void privateFunc(void);
+
+// CHECK-VERSIONED: typedef double MyDoubleWrapper;
+
+// CHECK-VERSIONED: enum MyErrorCode {
+// CHECK-VERSIONED-NEXT: MyErrorCodeFailed = 1
+// CHECK-VERSIONED-NEXT: };
+
+// CHECK-VERSIONED-NOT: __attribute__((swift_bridge("MyValueType")))
+// CHECK-VERSIONED: @interface MyReferenceType
+
+// CHECK-UNVERSIONED: __attribute__((swift_objc_members)
+// CHECK-UNVERSIONED-NEXT: @interface TestProperties
+// CHECK-VERSIONED-NOT: __attribute__((swift_objc_members)
+// CHECK-VERSIONED: @interface TestProperties
+
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((flag_enum)) FlagEnum {
+// CHECK-UNVERSIONED-NEXT: FlagEnumA = 1,
+// CHECK-UNVERSIONED-NEXT: FlagEnumB = 2
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((flag_enum)) NewlyFlagEnum {
+// CHECK-UNVERSIONED-NEXT: NewlyFlagEnumA = 1,
+// CHECK-UNVERSIONED-NEXT: NewlyFlagEnumB = 2
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((flag_enum)) APINotedFlagEnum {
+// CHECK-UNVERSIONED-NEXT: APINotedFlagEnumA = 1,
+// CHECK-UNVERSIONED-NEXT: APINotedFlagEnumB = 2
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) OpenEnum {
+// CHECK-UNVERSIONED-NEXT: OpenEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) NewlyOpenEnum {
+// CHECK-UNVERSIONED-NEXT: NewlyOpenEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) NewlyClosedEnum {
+// CHECK-UNVERSIONED-NEXT: NewlyClosedEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) ClosedToOpenEnum {
+// CHECK-UNVERSIONED-NEXT: ClosedToOpenEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) OpenToClosedEnum {
+// CHECK-UNVERSIONED-NEXT: OpenToClosedEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) APINotedOpenEnum {
+// CHECK-UNVERSIONED-NEXT: APINotedOpenEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) APINotedClosedEnum {
+// CHECK-UNVERSIONED-NEXT: APINotedClosedEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+
+// CHECK-VERSIONED-LABEL: enum __attribute__((flag_enum)) FlagEnum {
+// CHECK-VERSIONED-NEXT: FlagEnumA = 1,
+// CHECK-VERSIONED-NEXT: FlagEnumB = 2
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum NewlyFlagEnum {
+// CHECK-VERSIONED-NEXT: NewlyFlagEnumA = 1,
+// CHECK-VERSIONED-NEXT: NewlyFlagEnumB = 2
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum __attribute__((flag_enum)) APINotedFlagEnum {
+// CHECK-VERSIONED-NEXT: APINotedFlagEnumA = 1,
+// CHECK-VERSIONED-NEXT: APINotedFlagEnumB = 2
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) OpenEnum {
+// CHECK-VERSIONED-NEXT: OpenEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum NewlyOpenEnum {
+// CHECK-VERSIONED-NEXT: NewlyOpenEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum NewlyClosedEnum {
+// CHECK-VERSIONED-NEXT: NewlyClosedEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) ClosedToOpenEnum {
+// CHECK-VERSIONED-NEXT: ClosedToOpenEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) OpenToClosedEnum {
+// CHECK-VERSIONED-NEXT: OpenToClosedEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) APINotedOpenEnum {
+// CHECK-VERSIONED-NEXT: APINotedOpenEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+// CHECK-VERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) APINotedClosedEnum {
+// CHECK-VERSIONED-NEXT: APINotedClosedEnumA = 1
+// CHECK-VERSIONED-NEXT: };
+
+// These don't actually have versioned information, so we just check them once.
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) SoonToBeCFEnum {
+// CHECK-UNVERSIONED-NEXT: SoonToBeCFEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) SoonToBeNSEnum {
+// CHECK-UNVERSIONED-NEXT: SoonToBeNSEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) __attribute__((flag_enum)) SoonToBeCFOptions {
+// CHECK-UNVERSIONED-NEXT: SoonToBeCFOptionsA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("open"))) __attribute__((flag_enum)) SoonToBeNSOptions {
+// CHECK-UNVERSIONED-NEXT: SoonToBeNSOptionsA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) SoonToBeCFClosedEnum {
+// CHECK-UNVERSIONED-NEXT: SoonToBeCFClosedEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum __attribute__((enum_extensibility("closed"))) SoonToBeNSClosedEnum {
+// CHECK-UNVERSIONED-NEXT: SoonToBeNSClosedEnumA = 1
+// CHECK-UNVERSIONED-NEXT: };
+// CHECK-UNVERSIONED-LABEL: enum UndoAllThatHasBeenDoneToMe {
+// CHECK-UNVERSIONED-NEXT: UndoAllThatHasBeenDoneToMeA = 1
+// CHECK-UNVERSIONED-NEXT: };
diff --git a/clang/test/APINotes/yaml-convert-diags.c b/clang/test/APINotes/yaml-convert-diags.c
new file mode 100644
index 000000000000..1d352dc2c523
--- /dev/null
+++ b/clang/test/APINotes/yaml-convert-diags.c
@@ -0,0 +1,6 @@
+// RUN: rm -rf %t
+// RUN: not %clang_cc1 -fsyntax-only -fapinotes %s -I %S/Inputs/BrokenHeaders2 2>&1 | FileCheck %s
+
+#include "SomeBrokenLib.h"
+
+// CHECK: error: multiple definitions of global function 'do_something_with_pointers'
diff --git a/clang/test/APINotes/yaml-parse-diags.c b/clang/test/APINotes/yaml-parse-diags.c
new file mode 100644
index 000000000000..3ae39ccb301d
--- /dev/null
+++ b/clang/test/APINotes/yaml-parse-diags.c
@@ -0,0 +1,6 @@
+// RUN: rm -rf %t
+// RUN: %clang_cc1 -fsyntax-only -fapinotes %s -I %S/Inputs/BrokenHeaders -verify
+
+#include "SomeBrokenLib.h"
+
+// expected-error@APINotes.apinotes:4{{unknown key 'Nu llabilityOfRet'}}
diff --git a/clang/test/APINotes/yaml-reader-errors.m b/clang/test/APINotes/yaml-reader-errors.m
new file mode 100644
index 000000000000..9e5ee34c3e41
--- /dev/null
+++ b/clang/test/APINotes/yaml-reader-errors.m
@@ -0,0 +1,5 @@
+// RUN: rm -rf %t
+// RUN: not %clang_cc1 -fmodules -fimplicit-module-maps -fapinotes -fapinotes-modules -fmodules-cache-path=%t -I %S/Inputs/yaml-reader-errors/ -fsyntax-only %s > %t.err 2>&1
+// RUN: FileCheck %S/Inputs/yaml-reader-errors/UIKit.apinotes < %t.err
+
+@import UIKit;
diff --git a/clang/test/AST/ast-dump-invalid.cpp b/clang/test/AST/ast-dump-invalid.cpp
index 0a301dba51d2..5b6d74194b98 100644
--- a/clang/test/AST/ast-dump-invalid.cpp
+++ b/clang/test/AST/ast-dump-invalid.cpp
@@ -60,3 +60,12 @@ double Str::foo1(double, invalid_type)
// CHECK-NEXT: `-ReturnStmt {{.*}} <col:3, col:10>
// CHECK-NEXT: `-ImplicitCastExpr {{.*}} <col:10> 'double' <IntegralToFloating>
// CHECK-NEXT: `-IntegerLiteral {{.*}} <col:10> 'int' 45
+
+namespace TestAliasTemplateDecl {
+template<typename T> class A;
+
+template<typename T>
+template<typename U> using InvalidAlias = A<U>;
+// CHECK: TypeAliasTemplateDecl {{.*}} invalid InvalidAlias
+// CHECK-NEXT: |-TemplateTypeParmDecl {{.*}} typename depth 0 index 0 T
+}
diff --git a/clang/test/Analysis/ArrayDelete.cpp b/clang/test/Analysis/ArrayDelete.cpp
index 3b8d49552376..6887e0a35fb8 100644
--- a/clang/test/Analysis/ArrayDelete.cpp
+++ b/clang/test/Analysis/ArrayDelete.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.cplusplus.ArrayDelete -std=c++11 -verify -analyzer-output=text %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=cplusplus.ArrayDelete -std=c++11 -verify -analyzer-output=text %s
struct Base {
virtual ~Base() = default;
diff --git a/clang/test/Analysis/Inputs/system-header-simulator-cxx.h b/clang/test/Analysis/Inputs/system-header-simulator-cxx.h
index 3ef7af2ea6c6..85db68d41a6c 100644
--- a/clang/test/Analysis/Inputs/system-header-simulator-cxx.h
+++ b/clang/test/Analysis/Inputs/system-header-simulator-cxx.h
@@ -1106,11 +1106,20 @@ using ostream = basic_ostream<char>;
extern std::ostream cout;
ostream &operator<<(ostream &, const string &);
-
#if __cplusplus >= 202002L
template <class T>
ostream &operator<<(ostream &, const std::unique_ptr<T> &);
#endif
+
+template <class CharT>
+class basic_istream;
+
+using istream = basic_istream<char>;
+
+extern std::istream cin;
+
+istream &getline(istream &, string &, char);
+istream &getline(istream &, string &);
} // namespace std
#ifdef TEST_INLINABLE_ALLOCATORS
diff --git a/clang/test/Analysis/analyzer-display-progress.cpp b/clang/test/Analysis/analyzer-display-progress.cpp
index dc8e27a8c3b4..fa1860004d03 100644
--- a/clang/test/Analysis/analyzer-display-progress.cpp
+++ b/clang/test/Analysis/analyzer-display-progress.cpp
@@ -1,22 +1,46 @@
-// RUN: %clang_analyze_cc1 -analyzer-display-progress %s 2>&1 | FileCheck %s
+// RUN: %clang_analyze_cc1 -verify %s 2>&1 \
+// RUN: -analyzer-display-progress \
+// RUN: -analyzer-checker=debug.ExprInspection \
+// RUN: -analyzer-output=text \
+// RUN: | FileCheck %s
-void f() {};
-void g() {};
-void h() {}
+void clang_analyzer_warnIfReached();
+
+// expected-note@+2 {{[debug] analyzing from f()}}
+// expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+void f() { clang_analyzer_warnIfReached(); }
+
+// expected-note@+2 {{[debug] analyzing from g()}}
+// expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+void g() { clang_analyzer_warnIfReached(); }
+
+// expected-note@+2 {{[debug] analyzing from h()}}
+// expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+void h() { clang_analyzer_warnIfReached(); }
struct SomeStruct {
- void f() {}
+ // expected-note@+2 {{[debug] analyzing from SomeStruct::f()}}
+ // expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+ void f() { clang_analyzer_warnIfReached(); }
};
struct SomeOtherStruct {
- void f() {}
+ // expected-note@+2 {{[debug] analyzing from SomeOtherStruct::f()}}
+ // expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+ void f() { clang_analyzer_warnIfReached(); }
};
namespace ns {
struct SomeStruct {
- void f(int) {}
- void f(float, ::SomeStruct) {}
- void f(float, SomeStruct) {}
+ // expected-note@+2 {{[debug] analyzing from ns::SomeStruct::f(int)}}
+ // expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+ void f(int) { clang_analyzer_warnIfReached(); }
+ // expected-note@+2 {{[debug] analyzing from ns::SomeStruct::f(float, ::SomeStruct)}}
+ // expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+ void f(float, ::SomeStruct) { clang_analyzer_warnIfReached(); }
+ // expected-note@+2 {{[debug] analyzing from ns::SomeStruct::f(float, SomeStruct)}}
+ // expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+ void f(float, SomeStruct) { clang_analyzer_warnIfReached(); }
};
}
diff --git a/clang/test/Analysis/analyzer-display-progress.m b/clang/test/Analysis/analyzer-display-progress.m
index 24414f659c39..90f223b34861 100644
--- a/clang/test/Analysis/analyzer-display-progress.m
+++ b/clang/test/Analysis/analyzer-display-progress.m
@@ -1,8 +1,16 @@
-// RUN: %clang_analyze_cc1 -fblocks -analyzer-display-progress %s 2>&1 | FileCheck %s
+// RUN: %clang_analyze_cc1 -fblocks -verify %s 2>&1 \
+// RUN: -analyzer-display-progress \
+// RUN: -analyzer-checker=debug.ExprInspection \
+// RUN: -analyzer-output=text \
+// RUN: | FileCheck %s
#include "Inputs/system-header-simulator-objc.h"
-static void f(void) {}
+void clang_analyzer_warnIfReached();
+
+// expected-note@+2 {{[debug] analyzing from f}}
+// expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+static void f(void) { clang_analyzer_warnIfReached(); }
@interface I: NSObject
-(void)instanceMethod:(int)arg1 with:(int)arg2;
@@ -10,21 +18,26 @@ static void f(void) {}
@end
@implementation I
--(void)instanceMethod:(int)arg1 with:(int)arg2 {}
-+(void)classMethod {}
+// expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+-(void)instanceMethod:(int)arg1 with:(int)arg2 { clang_analyzer_warnIfReached(); }
+
+// expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
++(void)classMethod { clang_analyzer_warnIfReached(); }
@end
+// expected-note@+1 3 {{[debug] analyzing from g}}
void g(I *i, int x, int y) {
- [I classMethod];
- [i instanceMethod: x with: y];
+ [I classMethod]; // expected-note {{Calling 'classMethod'}}
+ [i instanceMethod: x with: y]; // expected-note {{Calling 'instanceMethod:with:'}}
void (^block)(void);
- block = ^{};
- block();
+ // expected-warning@+1 {{REACHABLE}} expected-note@+1 {{REACHABLE}}
+ block = ^{ clang_analyzer_warnIfReached(); };
+ block(); // expected-note {{Calling anonymous block}}
}
// CHECK: analyzer-display-progress.m f
// CHECK: analyzer-display-progress.m -[I instanceMethod:with:]
// CHECK: analyzer-display-progress.m +[I classMethod]
// CHECK: analyzer-display-progress.m g
-// CHECK: analyzer-display-progress.m block (line: 22, col: 11)
+// CHECK: analyzer-display-progress.m block (line: 35, col: 11)
diff --git a/clang/test/Analysis/analyzer-note-analysis-entry-points.cpp b/clang/test/Analysis/analyzer-note-analysis-entry-points.cpp
new file mode 100644
index 000000000000..7d321bfae61c
--- /dev/null
+++ b/clang/test/Analysis/analyzer-note-analysis-entry-points.cpp
@@ -0,0 +1,75 @@
+// RUN: %clang_analyze_cc1 -verify=common %s \
+// RUN: -analyzer-checker=deadcode.DeadStores,debug.ExprInspection \
+// RUN: -analyzer-note-analysis-entry-points
+
+// RUN: %clang_analyze_cc1 -verify=common,textout %s \
+// RUN: -analyzer-checker=deadcode.DeadStores,debug.ExprInspection \
+// RUN: -analyzer-note-analysis-entry-points \
+// RUN: -analyzer-output=text
+
+// Test the actual source locations/ranges of entry point notes.
+// RUN: %clang_analyze_cc1 %s \
+// RUN: -analyzer-checker=deadcode.DeadStores,debug.ExprInspection \
+// RUN: -analyzer-note-analysis-entry-points \
+// RUN: -analyzer-output=text 2>&1 \
+// RUN: | FileCheck --strict-whitespace %s
+
+
+void clang_analyzer_warnIfReached();
+
+void other() {
+ // common-warning@+1 {{REACHABLE}} textout-note@+1 {{REACHABLE}}
+ clang_analyzer_warnIfReached();
+}
+
+struct SomeOtherStruct {
+ // CHECK: note: [debug] analyzing from SomeOtherStruct::f()
+ // CHECK-NEXT: | void f() {
+ // CHECK-NEXT: | ^
+ // textout-note@+1 {{[debug] analyzing from SomeOtherStruct::f()}}
+ void f() {
+ other(); // textout-note {{Calling 'other'}}
+ }
+};
+
+// CHECK: note: [debug] analyzing from operator""_w(const char *)
+// CHECK-NEXT: | unsigned operator ""_w(const char*) {
+// CHECK-NEXT: | ^
+// textout-note@+1 {{[debug] analyzing from operator""_w(const char *)}}
+unsigned operator ""_w(const char*) {
+ // common-warning@+1 {{REACHABLE}} textout-note@+1 {{REACHABLE}}
+ clang_analyzer_warnIfReached();
+ return 404;
+}
+
+// textout-note@+1 {{[debug] analyzing from checkASTCodeBodyHasAnalysisEntryPoints()}}
+void checkASTCodeBodyHasAnalysisEntryPoints() {
+ int z = 1;
+ z = 2;
+ // common-warning@-1 {{Value stored to 'z' is never read}}
+ // textout-note@-2 {{Value stored to 'z' is never read}}
+}
+
+void notInvokedLambdaScope() {
+ // CHECK: note: [debug] analyzing from notInvokedLambdaScope()::(anonymous class)::operator()()
+ // CHECK-NEXT: | auto notInvokedLambda = []() {
+ // CHECK-NEXT: | ^
+ // textout-note@+1 {{[debug] analyzing from notInvokedLambdaScope()::(anonymous class)::operator()()}}
+ auto notInvokedLambda = []() {
+ // common-warning@+1 {{REACHABLE}} textout-note@+1 {{REACHABLE}}
+ clang_analyzer_warnIfReached();
+ };
+ (void)notInvokedLambda; // Not invoking the lambda.
+}
+
+// CHECK: note: [debug] analyzing from invokedLambdaScope()
+// CHECK-NEXT: | void invokedLambdaScope() {
+// CHECK-NEXT: | ^
+// textout-note@+1 {{[debug] analyzing from invokedLambdaScope()}}
+void invokedLambdaScope() {
+ auto invokedLambda = []() {
+ // common-warning@+1 {{REACHABLE}} textout-note@+1 {{REACHABLE}}
+ clang_analyzer_warnIfReached();
+ };
+ invokedLambda(); // textout-note {{Calling 'operator()'}}
+} \ No newline at end of file
diff --git a/clang/test/Analysis/cxx23-static-operator.cpp b/clang/test/Analysis/cxx23-static-operator.cpp
new file mode 100644
index 000000000000..f380bd0dfa42
--- /dev/null
+++ b/clang/test/Analysis/cxx23-static-operator.cpp
@@ -0,0 +1,38 @@
+// RUN: %clang_analyze_cc1 -std=c++2b -verify %s \
+// RUN: -analyzer-checker=core,debug.ExprInspection
+
+template <typename T> void clang_analyzer_dump(T);
+
+struct Adder {
+ int data;
+ static int operator()(int x, int y) {
+ clang_analyzer_dump(x); // expected-warning {{1}}
+ clang_analyzer_dump(y); // expected-warning {{2}}
+ return x + y;
+ }
+};
+
+void static_operator_call_inlines() {
+ Adder s{10};
+ clang_analyzer_dump(s(1, 2)); // expected-warning {{3}}
+}
+
+struct DataWithCtor {
+ int x;
+ int y;
+ DataWithCtor(int parm) : x(parm + 10), y(parm + 20) {
+ clang_analyzer_dump(this); // expected-warning {{&v}}
+ }
+};
+
+struct StaticSubscript {
+ static void operator[](DataWithCtor v) {
+ clang_analyzer_dump(v.x); // expected-warning {{20}}
+ clang_analyzer_dump(v.y); // expected-warning {{30}}
+ }
+};
+
+void top() {
+ StaticSubscript s;
+ s[DataWithCtor{10}];
+}
diff --git a/clang/test/Analysis/getline-cpp.cpp b/clang/test/Analysis/getline-cpp.cpp
new file mode 100644
index 000000000000..ef9d3186009c
--- /dev/null
+++ b/clang/test/Analysis/getline-cpp.cpp
@@ -0,0 +1,15 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix,debug.ExprInspection -verify %s
+
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix,alpha.unix,debug.ExprInspection -verify %s
+//
+// expected-no-diagnostics
+
+#include "Inputs/system-header-simulator-cxx.h"
+
+void test_std_getline() {
+ std::string userid, comment;
+ // MallocChecker should not confuse the POSIX function getline() and the
+ // unrelated C++ standard library function std::getline.
+ std::getline(std::cin, userid, ' '); // no-crash
+ std::getline(std::cin, comment); // no-crash
+}
diff --git a/clang/test/Analysis/getline-unixapi.c b/clang/test/Analysis/getline-unixapi.c
new file mode 100644
index 000000000000..86635ed84997
--- /dev/null
+++ b/clang/test/Analysis/getline-unixapi.c
@@ -0,0 +1,322 @@
+// RUN: %clang_analyze_cc1 -analyzer-checker=core,unix,debug.ExprInspection -verify %s
+
+#include "Inputs/system-header-simulator.h"
+#include "Inputs/system-header-simulator-for-malloc.h"
+#include "Inputs/system-header-simulator-for-valist.h"
+
+void clang_analyzer_eval(int);
+void clang_analyzer_dump_int(int);
+void clang_analyzer_dump_ptr(void*);
+void clang_analyzer_warnIfReached();
+
+void test_getline_null_lineptr() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+
+ char **buffer = NULL;
+ size_t n = 0;
+ getline(buffer, &n, F1); // expected-warning {{Line pointer might be NULL}}
+ fclose(F1);
+}
+
+void test_getline_null_size() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ getline(&buffer, NULL, F1); // expected-warning {{Size pointer might be NULL}}
+ fclose(F1);
+}
+
+void test_getline_null_buffer_size_gt0() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ size_t n = 8;
+ getline(&buffer, &n, F1); // ok since posix 2018
+ free(buffer);
+ fclose(F1);
+}
+
+void test_getline_null_buffer_size_gt0_2(size_t n) {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ if (n > 0) {
+ getline(&buffer, &n, F1); // ok since posix 2018
+ }
+ free(buffer);
+ fclose(F1);
+}
+
+void test_getline_null_buffer_unknown_size(size_t n) {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+
+ getline(&buffer, &n, F1); // ok
+ fclose(F1);
+ free(buffer);
+}
+
+void test_getline_null_buffer_undef_size() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+
+ char *buffer = NULL;
+ size_t n;
+
+ getline(&buffer, &n, F1); // ok since posix 2018
+ fclose(F1);
+ free(buffer);
+}
+
+void test_getline_buffer_size_0() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+
+ char *buffer = malloc(10);
+ size_t n = 0;
+ if (buffer != NULL)
+ getline(&buffer, &n, F1); // ok, the buffer is enough for 0 character
+ fclose(F1);
+ free(buffer);
+}
+
+void test_getline_buffer_bad_size() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+
+ char *buffer = malloc(10);
+ size_t n = 100;
+ if (buffer != NULL)
+ getline(&buffer, &n, F1); // expected-warning {{The buffer from the first argument is smaller than the size specified by the second parameter}}
+ fclose(F1);
+ free(buffer);
+}
+
+void test_getline_buffer_smaller_size() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+
+ char *buffer = malloc(100);
+ size_t n = 10;
+ if (buffer != NULL)
+ getline(&buffer, &n, F1); // ok, there is enough space for 10 characters
+ fclose(F1);
+ free(buffer);
+}
+
+void test_getline_buffer_undef_size() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+
+ char *buffer = malloc(100);
+ size_t n;
+ if (buffer != NULL)
+ getline(&buffer, &n, F1); // expected-warning {{The buffer from the first argument is not NULL, but the size specified by the second parameter is undefined}}
+ fclose(F1);
+ free(buffer);
+}
+
+
+void test_getline_null_buffer() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ size_t n = 0;
+ ssize_t r = getline(&buffer, &n, F1);
+ // getline returns -1 on failure, number of char reads on success (>= 0)
+ if (r < -1) {
+ clang_analyzer_warnIfReached(); // must not happen
+ } else {
+ // The buffer could be allocated both on failure and success
+ clang_analyzer_dump_int(n); // expected-warning {{conj_$}}
+ clang_analyzer_dump_ptr(buffer); // expected-warning {{conj_$}}
+ }
+ free(buffer);
+ fclose(F1);
+}
+
+void test_getdelim_null_size() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ getdelim(&buffer, NULL, ',', F1); // expected-warning {{Size pointer might be NULL}}
+ fclose(F1);
+}
+
+void test_getdelim_null_buffer_size_gt0() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ size_t n = 8;
+ getdelim(&buffer, &n, ';', F1); // ok since posix 2018
+ free(buffer);
+ fclose(F1);
+}
+
+void test_getdelim_null_buffer_size_gt0_2(size_t n) {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ if (n > 0) {
+ getdelim(&buffer, &n, ' ', F1); // ok since posix 2018
+ }
+ free(buffer);
+ fclose(F1);
+}
+
+void test_getdelim_null_buffer_unknown_size(size_t n) {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ getdelim(&buffer, &n, '-', F1); // ok
+ fclose(F1);
+ free(buffer);
+}
+
+void test_getdelim_null_buffer() {
+ FILE *F1 = tmpfile();
+ if (!F1)
+ return;
+ char *buffer = NULL;
+ size_t n = 0;
+ ssize_t r = getdelim(&buffer, &n, '\r', F1);
+ // getdelim returns -1 on failure, number of char reads on success (>= 0)
+ if (r < -1) {
+ clang_analyzer_warnIfReached(); // must not happen
+ }
+ else {
+ // The buffer could be allocated both on failure and success
+ clang_analyzer_dump_int(n); // expected-warning {{conj_$}}
+ clang_analyzer_dump_ptr(buffer); // expected-warning {{conj_$}}
+ }
+ free(buffer);
+ fclose(F1);
+}
+
+void test_getline_while() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ char *line = NULL;
+ size_t len = 0;
+ ssize_t read;
+
+ while ((read = getline(&line, &len, file)) != -1) {
+ printf("%s\n", line);
+ }
+
+ free(line);
+ fclose(file);
+}
+
+void test_getline_return_check() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ char *line = NULL;
+ size_t len = 0;
+ ssize_t r = getline(&line, &len, file);
+
+ if (r != -1) {
+ if (line[0] == '\0') {} // ok
+ }
+ free(line);
+ fclose(file);
+}
+
+void test_getline_clear_eof() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ size_t n = 10;
+ char *buffer = malloc(n);
+ ssize_t read = fread(buffer, n, 1, file);
+ if (feof(file)) {
+ clearerr(file);
+ getline(&buffer, &n, file); // ok
+ }
+ fclose(file);
+ free(buffer);
+}
+
+void test_getline_not_null(char **buffer, size_t *size) {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ getline(buffer, size, file);
+ fclose(file);
+
+ if (size == NULL || buffer == NULL) {
+ clang_analyzer_warnIfReached(); // must not happen
+ }
+}
+
+void test_getline_size_constraint(size_t size) {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ size_t old_size = size;
+ char *buffer = malloc(10);
+ if (buffer != NULL) {
+ ssize_t r = getline(&buffer, &size, file);
+ if (r >= 0) {
+ // Since buffer has a size of 10, old_size must be less than or equal to 10.
+ // Otherwise, there would be UB.
+ clang_analyzer_eval(old_size <= 10); // expected-warning{{TRUE}}
+ }
+ }
+ fclose(file);
+ free(buffer);
+}
+
+void test_getline_negative_buffer() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ char *buffer = NULL;
+ size_t n = -1;
+ getline(&buffer, &n, file); // ok since posix 2018
+ free(buffer);
+ fclose(file);
+}
+
+void test_getline_negative_buffer_2(char *buffer) {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ size_t n = -1;
+ (void)getline(&buffer, &n, file); // ok
+ free(buffer);
+ fclose(file);
+}
diff --git a/clang/test/Analysis/inlining/false-positive-suppression.cpp b/clang/test/Analysis/inlining/false-positive-suppression.cpp
index 56659b4a1941..2f9ed7f78b3f 100644
--- a/clang/test/Analysis/inlining/false-positive-suppression.cpp
+++ b/clang/test/Analysis/inlining/false-positive-suppression.cpp
@@ -210,3 +210,20 @@ namespace Cleanups {
testArgumentHelper(NonTrivial().getNull());
}
}
+
+class Bear *getNullBear() { return nullptr; }
+class Bear {
+public:
+ void brum() const;
+};
+class Door {
+public:
+ Door() : ptr(getNullBear()) {
+ ptr->brum();
+#ifndef SUPPRESSED
+ // expected-warning@-2 {{Called C++ object pointer is null}}
+#endif
+ }
+private:
+ Bear* ptr;
+};
diff --git a/clang/test/Analysis/stream.c b/clang/test/Analysis/stream.c
index 7ba27740a937..ba5e66a4102e 100644
--- a/clang/test/Analysis/stream.c
+++ b/clang/test/Analysis/stream.c
@@ -4,6 +4,7 @@
// RUN: %clang_analyze_cc1 -triple=hexagon -analyzer-checker=core,alpha.unix.Stream,debug.ExprInspection -verify %s
#include "Inputs/system-header-simulator.h"
+#include "Inputs/system-header-simulator-for-malloc.h"
#include "Inputs/system-header-simulator-for-valist.h"
void clang_analyzer_eval(int);
@@ -376,3 +377,75 @@ void fflush_on_open_failed_stream(void) {
}
fclose(F);
}
+
+void getline_null_file() {
+ char *buffer = NULL;
+ size_t n = 0;
+ getline(&buffer, &n, NULL); // expected-warning {{Stream pointer might be NULL}}
+}
+
+void getdelim_null_file() {
+ char *buffer = NULL;
+ size_t n = 0;
+ getdelim(&buffer, &n, '\n', NULL); // expected-warning {{Stream pointer might be NULL}}
+}
+
+void getline_buffer_on_error() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ char *line = NULL;
+ size_t len = 0;
+ if (getline(&line, &len, file) == -1) {
+ if (line[0] == '\0') {} // expected-warning {{The left operand of '==' is a garbage value}}
+ } else {
+ if (line[0] == '\0') {} // no warning
+ }
+
+ free(line);
+ fclose(file);
+}
+
+void getline_ret_value() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ size_t n = 0;
+ char *buffer = NULL;
+ ssize_t r = getline(&buffer, &n, file);
+
+ if (r > -1) {
+ // The return value does *not* include the terminating null byte.
+ // The buffer must be large enough to include it.
+ clang_analyzer_eval(n > r); // expected-warning{{TRUE}}
+ clang_analyzer_eval(buffer != NULL); // expected-warning{{TRUE}}
+ }
+
+ fclose(file);
+ free(buffer);
+}
+
+
+void getline_buffer_size_negative() {
+ FILE *file = fopen("file.txt", "r");
+ if (file == NULL) {
+ return;
+ }
+
+ size_t n = -1;
+ clang_analyzer_eval((ssize_t)n >= 0); // expected-warning{{FALSE}}
+ char *buffer = NULL;
+ ssize_t r = getline(&buffer, &n, file);
+
+ if (r > -1) {
+ clang_analyzer_eval((ssize_t)n > r); // expected-warning{{TRUE}}
+ clang_analyzer_eval(buffer != NULL); // expected-warning{{TRUE}}
+ }
+
+ free(buffer);
+ fclose(file);
+}
diff --git a/clang/test/C/C11/n1282.c b/clang/test/C/C11/n1282.c
new file mode 100644
index 000000000000..ed952790c883
--- /dev/null
+++ b/clang/test/C/C11/n1282.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -verify -Wunsequenced -Wno-unused-value %s
+
+/* WG14 N1282: Yes
+ * Clarification of Expressions
+ */
+
+int g;
+
+int f(int i) {
+ g = i;
+ return 0;
+}
+
+int main(void) {
+ int x;
+ x = (10, g = 1, 20) + (30, g = 2, 40); /* Line A */ // expected-warning {{multiple unsequenced modifications to 'g'}}
+ x = (10, f(1), 20) + (30, f(2), 40); /* Line B */
+ x = (g = 1) + (g = 2); /* Line C */ // expected-warning {{multiple unsequenced modifications to 'g'}}
+ return 0;
+}
diff --git a/clang/test/C/C11/n1365.c b/clang/test/C/C11/n1365.c
new file mode 100644
index 000000000000..d60bb546b29a
--- /dev/null
+++ b/clang/test/C/C11/n1365.c
@@ -0,0 +1,60 @@
+// RUN: %clang_cc1 -ast-dump %s | FileCheck %s
+
+/* WG14 N1365: Clang 16
+ * Constant expressions
+ */
+
+// Note: we don't allow you to expand __FLT_EVAL_METHOD__ in the presence of a
+// pragma that changes its value. However, we can test that we have the correct
+// constant expression behavior by testing that the AST has the correct implicit
+// casts, which also specify that the cast was inserted due to an evaluation
+// method requirement.
+void func(void) {
+ {
+ #pragma clang fp eval_method(double)
+ _Static_assert(123.0F * 2.0F == 246.0F, "");
+ // CHECK: StaticAssertDecl
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} '_Bool' <IntegralToBoolean>
+ // CHECK-NEXT: BinaryOperator {{.*}} 'int' '=='
+ // CHECK-NEXT: BinaryOperator {{.*}} 'double' '*' FPEvalMethod=1
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} 'double' <FloatingCast> FPEvalMethod=1
+ // CHECK-NEXT: FloatingLiteral
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} 'double' <FloatingCast> FPEvalMethod=1
+ // CHECK-NEXT: FloatingLiteral
+
+ // Ensure that a cast removes the extra precision.
+ _Static_assert(123.0F * 2.0F == 246.0F, "");
+ // CHECK: StaticAssertDecl
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} '_Bool' <IntegralToBoolean>
+ // CHECK-NEXT: BinaryOperator {{.*}} 'int' '=='
+ // CHECK-NEXT: BinaryOperator {{.*}} 'double' '*' FPEvalMethod=1
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} 'double' <FloatingCast> FPEvalMethod=1
+ // CHECK-NEXT: FloatingLiteral
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} 'double' <FloatingCast> FPEvalMethod=1
+ // CHECK-NEXT: FloatingLiteral
+ }
+
+ {
+ #pragma clang fp eval_method(extended)
+ _Static_assert(123.0F * 2.0F == 246.0F, "");
+ // CHECK: StaticAssertDecl
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} '_Bool' <IntegralToBoolean>
+ // CHECK-NEXT: BinaryOperator {{.*}} 'int' '=='
+ // CHECK-NEXT: BinaryOperator {{.*}} 'long double' '*' FPEvalMethod=2
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} 'long double' <FloatingCast> FPEvalMethod=2
+ // CHECK-NEXT: FloatingLiteral
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} 'long double' <FloatingCast> FPEvalMethod=2
+ // CHECK-NEXT: FloatingLiteral
+ }
+
+ {
+ #pragma clang fp eval_method(source)
+ _Static_assert(123.0F * 2.0F == 246.0F, "");
+ // CHECK: StaticAssertDecl
+ // CHECK-NEXT: ImplicitCastExpr {{.*}} '_Bool' <IntegralToBoolean>
+ // CHECK-NEXT: BinaryOperator {{.*}} 'int' '=='
+ // CHECK-NEXT: BinaryOperator {{.*}} 'float' '*' FPEvalMethod=0
+ // CHECK-NEXT: FloatingLiteral
+ // CHECK-NEXT: FloatingLiteral
+ }
+}
diff --git a/clang/test/C/C2x/n2350.c b/clang/test/C/C2x/n2350.c
index 2f738488a374..af0ca6d79be5 100644
--- a/clang/test/C/C2x/n2350.c
+++ b/clang/test/C/C2x/n2350.c
@@ -5,7 +5,7 @@
// RUN: %clang_cc1 -fsyntax-only -pedantic -Wno-comment -std=c99 -verify %s
// RUN: %clang_cc1 -fsyntax-only -pedantic -Wno-comment -std=c11 -verify %s
// RUN: %clang_cc1 -fsyntax-only -pedantic -Wno-comment -std=c17 -verify %s
-// RUN: %clang_cc1 -fsyntax-only -pedantic -Wno-comment -std=c2x -verify %s
+// RUN: %clang_cc1 -fsyntax-only -pedantic -Wno-comment -std=c2x -verify=silent %s
// silent-no-diagnostics
@@ -13,10 +13,10 @@
// https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2350.htm
int simple(void) {
return __builtin_offsetof(struct A // cpp-error {{'A' cannot be defined in a type specifier}} \
- expected-warning {{defining a type within '__builtin_offsetof' is a Clang extension}}
+ expected-warning {{defining a type within '__builtin_offsetof' is a C23 extension}}
{
int a;
- struct B // expected-warning {{defining a type within '__builtin_offsetof' is a Clang extension}}
+ struct B // expected-warning {{defining a type within '__builtin_offsetof' is a C23 extension}}
{
int c;
int d;
@@ -26,7 +26,7 @@ int simple(void) {
int anonymous_struct(void) {
return __builtin_offsetof(struct // cpp-error-re {{'(unnamed struct at {{.*}})' cannot be defined in a type specifier}} \
- expected-warning {{defining a type within '__builtin_offsetof' is a Clang extension}}
+ expected-warning {{defining a type within '__builtin_offsetof' is a C23 extension}}
{
int a;
int b;
@@ -47,7 +47,7 @@ int struct_in_second_param(void) {
int macro(void) {
return offsetof(struct A // cpp-error {{'A' cannot be defined in a type specifier}} \
- expected-warning 2 {{defining a type within 'offsetof' is a Clang extension}}
+ expected-warning 2 {{defining a type within 'offsetof' is a C23 extension}}
{
int a;
struct B // verifier seems to think the error is emitted by the macro
diff --git a/clang/test/C/C99/Inputs/nested-include.h b/clang/test/C/C99/Inputs/nested-include.h
new file mode 100644
index 000000000000..0f64473c0fae
--- /dev/null
+++ b/clang/test/C/C99/Inputs/nested-include.h
@@ -0,0 +1,3 @@
+#if __COUNTER__ < 15
+#include __FILE__
+#endif
diff --git a/clang/test/C/C99/block-scopes.c b/clang/test/C/C99/block-scopes.c
new file mode 100644
index 000000000000..589047df3e52
--- /dev/null
+++ b/clang/test/C/C99/block-scopes.c
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -std=c89 -verify %s
+// RUN: %clang_cc1 -std=c99 -verify %s
+// RUN: %clang_cc1 -std=c11 -verify %s
+// RUN: %clang_cc1 -std=c17 -verify %s
+// RUN: %clang_cc1 -std=c23 -verify %s
+
+// expected-no-diagnostics
+
+/* WG14 ???: yes
+ * new block scopes for selection and iteration statements
+ *
+ * This is referenced in the C99 front matter as new changes to C99, but it is
+ * not clear which document number introduced the changes. It's possible this
+ * is WG14 N759, based on discussion in the C99 rationale document that claims
+ * these changes were made in response to surprising issues with the lifetime
+ * of compound literals in compound statements vs non-compound statements.
+ */
+
+enum {a, b};
+void different(void) {
+ if (sizeof(enum {b, a}) != sizeof(int))
+ _Static_assert(a == 1, "");
+ /* In C89, the 'b' found here would have been from the enum declaration in
+ * the controlling expression of the selection statement, not from the global
+ * declaration. In C99 and later, that enumeration is scoped to the 'if'
+ * statement and the global declaration is what's found.
+ */
+ #if __STDC_VERSION__ >= 199901L
+ _Static_assert(b == 1, "");
+ #else
+ _Static_assert(b == 0, "");
+ #endif
+}
+
diff --git a/clang/test/C/C99/digraphs.c b/clang/test/C/C99/digraphs.c
new file mode 100644
index 000000000000..870a44111816
--- /dev/null
+++ b/clang/test/C/C99/digraphs.c
@@ -0,0 +1,90 @@
+// RUN: %clang_cc1 -verify -ffreestanding %s
+
+/* WG14 ???: yes
+ * restricted character set support via digraphs and <iso646.h>
+ *
+ * NB: I cannot find a definitive document number associated with the feature,
+ * which was pulled from the editor's report in the C99 front matter. However,
+ * based on discussion in the C99 rationale document, I believe this is
+ * referring to features added by AMD1 to support ISO 646 and digraphs.
+ */
+
+// Validate that we provide iso646.h in freestanding mode.
+#include <iso646.h>
+
+// Validate that we define all the expected macros and their expected
+// expansions (when suitable for a constant expression) as well.
+#ifndef and
+#error "missing and"
+#else
+_Static_assert((1 and 1) == (1 && 1), "");
+#endif
+
+#ifndef and_eq
+#error "missing and_eq"
+#endif
+
+#ifndef bitand
+#error "missing bitand"
+#else
+_Static_assert((1 bitand 3) == (1 & 3), "");
+#endif
+
+#ifndef bitor
+#error "missing bitor"
+#else
+_Static_assert((1 bitor 2) == (1 | 2), "");
+#endif
+
+#ifndef compl
+#error "missing compl"
+#else
+_Static_assert((compl 0) == (~0), "");
+#endif
+
+#ifndef not
+#error "missing not"
+#else
+_Static_assert((not 12) == (!12), "");
+#endif
+
+#ifndef not_eq
+#error "missing not_eq"
+#else
+_Static_assert((0 not_eq 12) == (0 != 12), "");
+#endif
+
+#ifndef or
+#error "missing or"
+#else
+// This intentionally diagnoses use of '||' only, because the user likely did
+// not confuse the operator when using 'or' instead.
+_Static_assert((0 or 12) == (0 || 12), ""); // expected-warning {{use of logical '||' with constant operand}} \
+ expected-note {{use '|' for a bitwise operation}}
+#endif
+
+#ifndef or_eq
+#error "missing or_eq"
+#endif
+
+#ifndef xor
+#error "missing xor"
+#else
+_Static_assert((1 xor 3) == (1 ^ 3), "");
+#endif
+
+#ifndef xor_eq
+#error "missing xor_eq"
+#endif
+
+// Validate that digraphs behave the same as their expected counterparts. The
+// definition should match the declaration in every way except spelling.
+#define DI_NAME(f, b) f %:%: b
+#define STD_NAME(f, b) f ## b
+void DI_NAME(foo, bar)(int (*array)<: 0 :>);
+void STD_NAME(foo, bar)(int (*array)[0]) {}
+
+#define DI_STR(f) %:f
+#define STD_STR(f) #f
+_Static_assert(__builtin_strcmp(DI_STR(testing), STD_STR(testing)) == 0, "");
+
diff --git a/clang/test/C/C99/n590.c b/clang/test/C/C99/n590.c
new file mode 100644
index 000000000000..7302e3c5ceba
--- /dev/null
+++ b/clang/test/C/C99/n590.c
@@ -0,0 +1,390 @@
+// RUN: %clang_cc1 -verify -Wno-unused -I %S/Inputs %s
+
+/* WG14 N590: Clang 3.2
+ * Increase minimum translation limits
+ *
+ * NB: the content of this document is not available, so this is testing
+ * against the implementation limits in C23, which are the most aggressive
+ * translation limits we can test against.
+ */
+// expected-no-diagnostics
+
+// 15 nesting levels for #included files
+// NOTE: this relies on the value of __COUNTER__, so be very careful about
+// adding code above this line of the test.
+#include "nested-include.h"
+
+// Helpers for declaring unique variables.
+#define CAT_IMPL(x, y) x ## y
+#define CAT(x, y) CAT_IMPL(x, y)
+#define ONCE(x) CAT(x, __COUNTER__) SEPARATOR
+#define REPEAT10(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x)
+#define REPEAT63(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) ONCE(x) ONCE(x) ONCE(x)
+#define REPEAT100(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x)
+#define REPEAT127(x) REPEAT100(x) REPEAT10(x) REPEAT10(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x)
+#define REPEAT500(x) REPEAT100(x) REPEAT100(x) REPEAT100(x) REPEAT100(x) REPEAT100(x)
+#define REPEAT1000(x) REPEAT500(x) REPEAT500(x)
+#define REPEAT1023(x) REPEAT1000(x) REPEAT10(x) REPEAT10(x) ONCE(x) ONCE(x) ONCE(x)
+#define REPEAT4095(x) REPEAT1000(x) REPEAT1000(x) REPEAT1000(x) REPEAT1000(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) REPEAT10(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x) ONCE(x)
+
+// Limits taken from C23 5.2.5.2p1.
+void func(void) {
+ // 127 nesting levels of blocks
+ {{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+
+ // 12 pointer, array, and function declarators (in any combination) modifying
+ // an arithmetic, structure, union, or void type in a declaration.
+ void (************fp)(int foo[1][2][3][4][5][6][7][8][9][10][11][12]);
+
+ // 63 nesting levels of parenthesized declarators within a full declarator.
+ int (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((decl)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))));
+
+ // 63 significant initial characters in an internal identifier or a macro
+ // name.
+ static int aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffggg;
+
+ // 31 significant initial characters in an external identifier.
+ extern int gggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjjkkkkkkkkkkllllllllllmmm;
+
+ // 511 identifiers with block scope declared in one block.
+ {
+ #define SEPARATOR ;
+ REPEAT500(int a)
+ REPEAT10(int a)
+ ONCE(int a)
+ #undef SEPARATOR
+ }
+
+ // 4095 characters in a logical source line.
+ (void)0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4;
+
+ // 4095 characters in a string literal (after concatenation)
+ (void)"0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,\
+ 9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,\
+ 7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,\
+ 5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,\
+ 3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,\
+ 1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4";
+
+ // 1023 case labels for a switch statement.
+ switch (aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffggg) {
+ #define SEPARATOR :
+ REPEAT1023(case 0x)
+ #undef SEPARATOR
+ break;
+ }
+}
+
+// 32767 bytes in an object.
+struct S {
+ unsigned char mem[32767];
+};
+_Static_assert(sizeof(struct S) >= 32767, "");
+
+// 1023 members in a single structure or union
+struct T {
+#define SEPARATOR ;
+ REPEAT1023(int mem)
+#undef SEPARATOR
+};
+
+// 1023 enumeration constants in a single enumeration
+enum E {
+#define SEPARATOR ,
+REPEAT1023(e)
+#undef SEPARATOR
+};
+
+// 127 parameters in one function definition.
+#define SEPARATOR ,
+// Because the parameters are all separated by commas, we have to add a final
+// parameter (without a preceeding comma) to form a valid declaration.
+void lots_of_params(REPEAT127(int a) int last) {
+ // 127 arguments in one function call
+ lots_of_params(REPEAT127(0x0) 0);
+}
+#undef SEPARATOR
+
+// 63 levels of nested structure or union definitions in a single member
+// declaration list.
+struct U {
+#define SEPARATOR ;
+REPEAT63(struct { int a)
+REPEAT63(} mem)
+#undef SEPARATOR
+};
+
+// 63 nesting levels of conditional inclusion
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+#if 1
+
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+// 63 initial characters in a macro name.
+#define AAAAAAAAAABBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEEEEEEEEEFFFFFFFFFFGGG
+
+// 4095 external identifiers in one translation unit.
+#define SEPARATOR ;
+REPEAT4095(extern int val)
+#undef SEPARATOR
+
+// 127 parameters in one macro definition
+#define BIG_UN(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, \
+ a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, \
+ a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, \
+ a30, a31, a32, a33, a34, a35, a36, a37, a38, a39, \
+ a40, a41, a42, a43, a44, a45, a46, a47, a48, a49, \
+ a50, a51, a52, a53, a54, a55, a56, a57, a58, a59, \
+ a60, a61, a62, a63, a64, a65, a66, a67, a68, a69, \
+ a70, a71, a72, a73, a74, a75, a76, a77, a78, a79, \
+ a80, a81, a82, a83, a84, a85, a86, a87, a88, a89, \
+ a90, a91, a92, a93, a94, a95, a96, a97, a98, a99, \
+ a100, a101, a102, a103, a104, a105, a106, a107, a108, a109, \
+ a110, a111, a112, a113, a114, a115, a116, a117, a118, a119, \
+ a120, a121, a122, a123, a124, a125, a126) 0
+
+// 127 arguments in one macro invocation
+int val = BIG_UN(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, \
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, \
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, \
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, \
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, \
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, \
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, \
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, \
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, \
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, \
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, \
+ 120, 121, 122, 123, 124, 125, 126);
+
+// 4095 macro identifiers simultaneously defined in one preprocessing
+// translation unit. Rather than write out 4095 macro definitions, we'll use a
+// cheap python script to generate the contents and test that.
+// RUN: %python -c "print('\n'.join(['// expected-no-diagnostics'] + [f'#define M{i}\n' for i in range(4096)]))" >%t.inc
+// RUN: %clang_cc1 -verify %t.inc
diff --git a/clang/test/C/C99/n696.c b/clang/test/C/C99/n696.c
new file mode 100644
index 000000000000..4499c6e42226
--- /dev/null
+++ b/clang/test/C/C99/n696.c
@@ -0,0 +1,22 @@
+// RUN: %clang_cc1 -triple x86_64 -verify %s
+
+/* WG14 N696: yes
+ * Standard pragmas - improved wording
+ *
+ * NB: this also covers N631 which changed these features into pragmas rather
+ * than macros.
+ */
+
+// Verify that we do not expand macros in STDC pragmas. If we expanded them,
+// this code would issue diagnostics.
+#define ON 12
+#pragma STDC FENV_ACCESS ON
+#pragma STDC CX_LIMITED_RANGE ON
+#pragma STDC FP_CONTRACT ON
+
+// If we expanded macros, this code would not issue diagnostics.
+#define BLERP OFF
+#pragma STDC FENV_ACCESS BLERP // expected-warning {{expected 'ON' or 'OFF' or 'DEFAULT' in pragma}}
+#pragma STDC CX_LIMITED_RANGE BLERP // expected-warning {{expected 'ON' or 'OFF' or 'DEFAULT' in pragma}}
+#pragma STDC FP_CONTRACT BLERP // expected-warning {{expected 'ON' or 'OFF' or 'DEFAULT' in pragma}}
+
diff --git a/clang/test/C/drs/dr0xx.c b/clang/test/C/drs/dr0xx.c
index c93cfb63d604..36de32a93da9 100644
--- a/clang/test/C/drs/dr0xx.c
+++ b/clang/test/C/drs/dr0xx.c
@@ -73,6 +73,10 @@
* WG14 DR085: yes
* Returning from main
*
+ * WG14 DR087: yes
+ * Order of evaluation
+ * Note: this DR is covered by C/C11/n1282.c
+ *
* WG14 DR086: yes
* Object-like macros in system headers
*
diff --git a/clang/test/C/drs/dr290.c b/clang/test/C/drs/dr290.c
new file mode 100644
index 000000000000..3a6fd1d0dab6
--- /dev/null
+++ b/clang/test/C/drs/dr290.c
@@ -0,0 +1,20 @@
+/* RUN: %clang_cc1 -fsyntax-only -ast-dump %s | FileCheck %s
+ */
+
+/* WG14 DR290: no
+ * FLT_EVAL_METHOD and extra precision and/or range
+ *
+ * We retain an implicit conversion based on the float eval method being used
+ * instead of dropping it due to the explicit cast. See GH86304 and C23 6.5.5p7.
+ */
+
+#pragma clang fp eval_method(double)
+_Static_assert((float)(123.0F * 2.0F) == (float)246.0F, "");
+
+// CHECK: StaticAssertDecl
+// CHECK-NEXT: ImplicitCastExpr {{.*}} '_Bool' <IntegralToBoolean>
+// CHECK-NEXT: BinaryOperator {{.*}} 'int' '=='
+// NB: the following implicit cast is incorrect.
+// CHECK-NEXT: ImplicitCastExpr {{.*}} 'double' <FloatingCast> FPEvalMethod=1
+// CHECK-NEXT: CStyleCastExpr {{.*}} 'float' <FloatingCast> FPEvalMethod=1
+
diff --git a/clang/test/C/drs/dr4xx.c b/clang/test/C/drs/dr4xx.c
index 30145dcfeef1..83d7b94cd679 100644
--- a/clang/test/C/drs/dr4xx.c
+++ b/clang/test/C/drs/dr4xx.c
@@ -1,7 +1,7 @@
-/* RUN: %clang_cc1 -std=c89 -verify=expected,c89only -pedantic -Wno-c11-extensions %s
- RUN: %clang_cc1 -std=c99 -verify=expected -pedantic -Wno-c11-extensions %s
- RUN: %clang_cc1 -std=c11 -verify=expected -pedantic %s
- RUN: %clang_cc1 -std=c17 -verify=expected -pedantic %s
+/* RUN: %clang_cc1 -std=c89 -verify=expected,c89only,pre-c23 -pedantic -Wno-c11-extensions %s
+ RUN: %clang_cc1 -std=c99 -verify=expected,pre-c23 -pedantic -Wno-c11-extensions %s
+ RUN: %clang_cc1 -std=c11 -verify=expected,pre-c23 -pedantic %s
+ RUN: %clang_cc1 -std=c17 -verify=expected,pre-c23 -pedantic %s
RUN: %clang_cc1 -std=c2x -verify=expected -pedantic %s
*/
@@ -343,10 +343,13 @@ void dr496(void) {
*/
/* The DR asked a question about whether defining a new type within offsetof
- * is allowed. C2x N2350 made this explicitly undefined behavior, but GCC and
- * Clang both support it as an extension.
+ * is allowed. C23 N2350 had made this explicitly undefined behavior, but this
+ * was later overturned when C23 DE-137 was accepted, making it well-formed.
+ *
+ * Additionally, GCC and Clang both support it as an extension in pre-C23
+ * mode.
*/
- (void)__builtin_offsetof(struct S { int a; }, a); /* expected-warning{{defining a type within '__builtin_offsetof' is a Clang extension}} */
+ (void)__builtin_offsetof(struct S { int a; }, a); /* pre-c23-warning{{defining a type within '__builtin_offsetof' is a C23 extension}} */
}
/* WG14 DR499: yes
diff --git a/clang/test/C/drs/dr5xx.c b/clang/test/C/drs/dr5xx.c
index 68bcef78bacc..13464f78b6a6 100644
--- a/clang/test/C/drs/dr5xx.c
+++ b/clang/test/C/drs/dr5xx.c
@@ -29,7 +29,7 @@ void dr502(void) {
*/
struct t {
int i;
- struct { int a[]; }; /* expected-error {{flexible array member 'a' not allowed in otherwise empty struct}}
+ struct { int a[]; }; /* expected-warning {{flexible array member 'a' in otherwise empty struct is a GNU extension}}
c89only-warning {{flexible array members are a C99 feature}}
expected-warning {{'' may not be nested in a struct due to flexible array member}}
*/
diff --git a/clang/test/ClangScanDeps/modules-extension.c b/clang/test/ClangScanDeps/modules-extension.c
new file mode 100644
index 000000000000..0f27f608440f
--- /dev/null
+++ b/clang/test/ClangScanDeps/modules-extension.c
@@ -0,0 +1,33 @@
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+
+// This test checks that source files with uncommon extensions still undergo
+// dependency directives scan. If header.pch would not and b.h would, the scan
+// would fail when parsing `void function(B)` and not knowing the symbol B.
+
+//--- module.modulemap
+module __PCH { header "header.pch" }
+module B { header "b.h" }
+
+//--- header.pch
+#include "b.h"
+void function(B);
+
+//--- b.h
+typedef int B;
+
+//--- tu.c
+int main() {
+ function(0);
+ return 0;
+}
+
+//--- cdb.json.in
+[{
+ "directory": "DIR",
+ "file": "DIR/tu.c",
+ "command": "clang -c DIR/tu.c -fmodules -fmodules-cache-path=DIR/cache -fimplicit-module-maps -include DIR/header.pch"
+}]
+
+// RUN: sed -e "s|DIR|%/t|g" %t/cdb.json.in > %t/cdb.json
+// RUN: clang-scan-deps -compilation-database %t/cdb.json -format experimental-full > %t/deps.json
diff --git a/clang/test/ClangScanDeps/modules-extern-unrelated.m b/clang/test/ClangScanDeps/modules-extern-unrelated.m
index 442ee90aa183..76611c596d3e 100644
--- a/clang/test/ClangScanDeps/modules-extern-unrelated.m
+++ b/clang/test/ClangScanDeps/modules-extern-unrelated.m
@@ -71,6 +71,7 @@ module second { header "second.h" }
// CHECK-NEXT: "context-hash": "{{.*}}",
// CHECK-NEXT: "file-deps": [
// CHECK-NEXT: "[[PREFIX]]/first/module.modulemap",
+// CHECK-NEXT: "[[PREFIX]]/second/module.modulemap",
// CHECK-NEXT: "[[PREFIX]]/second/second.h",
// CHECK-NEXT: "[[PREFIX]]/second/second.modulemap"
// CHECK-NEXT: ],
diff --git a/clang/test/CodeCompletion/member-access.cpp b/clang/test/CodeCompletion/member-access.cpp
index 474b909ab115..9f8c21c0bca6 100644
--- a/clang/test/CodeCompletion/member-access.cpp
+++ b/clang/test/CodeCompletion/member-access.cpp
@@ -348,7 +348,23 @@ namespace function_can_be_call {
T foo(U, V);
};
- &S::f
- // RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:351:7 %s -o - | FileCheck -check-prefix=CHECK_FUNCTION_CAN_BE_CALL %s
+ void test() {
+ &S::f
+ }
+ // RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:352:9 %s -o - | FileCheck -check-prefix=CHECK_FUNCTION_CAN_BE_CALL %s
// CHECK_FUNCTION_CAN_BE_CALL: COMPLETION: foo : [#T#]foo<<#typename T#>, <#typename U#>>(<#U#>, <#V#>)
}
+
+namespace deref_dependent_this {
+template <typename T>
+class A {
+ int field;
+
+ void function() {
+ (*this).field;
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:364:13 %s -o - | FileCheck -check-prefix=CHECK-DEREF-THIS %s
+// CHECK-DEREF-THIS: field : [#int#]field
+// CHECK-DEREF-THIS: [#void#]function()
+ }
+};
+}
diff --git a/clang/test/CodeGen/CSKY/csky-abi.c b/clang/test/CodeGen/CSKY/csky-abi.c
index 2e549376ba93..29ed661aea75 100644
--- a/clang/test/CodeGen/CSKY/csky-abi.c
+++ b/clang/test/CodeGen/CSKY/csky-abi.c
@@ -185,13 +185,13 @@ void f_va_caller(void) {
// CHECK: [[VA:%.*]] = alloca ptr, align 4
// CHECK: [[V:%.*]] = alloca i32, align 4
// CHECK: store ptr %fmt, ptr [[FMT_ADDR]], align 4
-// CHECK: call void @llvm.va_start(ptr [[VA]])
+// CHECK: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK: [[TMP1:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
// CHECK: store i32 [[TMP1]], ptr [[V]], align 4
-// CHECK: call void @llvm.va_end(ptr [[VA]])
+// CHECK: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK: [[TMP2:%.*]] = load i32, ptr [[V]], align 4
// CHECK: ret i32 [[TMP2]]
// CHECK: }
@@ -210,13 +210,13 @@ int f_va_1(char *fmt, ...) {
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[V:%.*]] = alloca double, align 4
// CHECK-NEXT: store ptr [[FMT:%.*]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT: store double [[TMP4]], ptr [[V]], align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[V]], align 4
// CHECK-NEXT: ret double [[TMP5]]
double f_va_2(char *fmt, ...) {
@@ -236,7 +236,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[X:%.*]] = alloca double, align 4
// CHECK-NEXT: store ptr [[FMT:%.*]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -252,7 +252,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-NEXT: store ptr [[ARGP_NEXT5]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = load double, ptr [[ARGP_CUR4]], align 4
// CHECK-NEXT: store double [[TMP11]], ptr [[X]], align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP12:%.*]] = load double, ptr [[V]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load double, ptr [[X]], align 4
// CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP12]], [[TMP13]]
@@ -279,7 +279,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT:%.*]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -302,7 +302,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-NEXT: [[ARGP_NEXT9:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR8]], i32 16
// CHECK-NEXT: store ptr [[ARGP_NEXT9]], ptr [[VA]], align 4
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[LS]], ptr align 4 [[ARGP_CUR8]], i32 16, i1 false)
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
int f_va_4(char *fmt, ...) {
__builtin_va_list va;
diff --git a/clang/test/CodeGen/LoongArch/abi-lp64d.c b/clang/test/CodeGen/LoongArch/abi-lp64d.c
index 66b480a7f068..fc7f1eada586 100644
--- a/clang/test/CodeGen/LoongArch/abi-lp64d.c
+++ b/clang/test/CodeGen/LoongArch/abi-lp64d.c
@@ -449,13 +449,13 @@ void f_va_caller(void) {
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT:%.*]], ptr [[FMT_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
int f_va_int(char *fmt, ...) {
diff --git a/clang/test/CodeGen/PowerPC/aix-altivec-vaargs.c b/clang/test/CodeGen/PowerPC/aix-altivec-vaargs.c
index 03182423a422..b3f1e93b6394 100644
--- a/clang/test/CodeGen/PowerPC/aix-altivec-vaargs.c
+++ b/clang/test/CodeGen/PowerPC/aix-altivec-vaargs.c
@@ -17,7 +17,7 @@ vector double vector_varargs(int count, ...) {
}
// CHECK: %arg_list = alloca ptr
-// CHECK: call void @llvm.va_start(ptr %arg_list)
+// CHECK: call void @llvm.va_start.p0(ptr %arg_list)
// AIX32: for.body:
// AIX32-NEXT: %argp.cur = load ptr, ptr %arg_list, align 4
@@ -41,4 +41,4 @@ vector double vector_varargs(int count, ...) {
// CHECK: for.end:
-// CHECK: call void @llvm.va_end(ptr %arg_list)
+// CHECK: call void @llvm.va_end.p0(ptr %arg_list)
diff --git a/clang/test/CodeGen/PowerPC/aix-vaargs.c b/clang/test/CodeGen/PowerPC/aix-vaargs.c
index 8b8417d315a5..724ba6560cdb 100644
--- a/clang/test/CodeGen/PowerPC/aix-vaargs.c
+++ b/clang/test/CodeGen/PowerPC/aix-vaargs.c
@@ -35,7 +35,7 @@ void testva (int n, ...) {
// CHECK-NEXT: %v = alloca i32, align 4
// CHECK-NEXT: store i32 %n, ptr %n.addr, align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr %ap)
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr %ap)
// AIX32-NEXT: %argp.cur = load ptr, ptr %ap, align 4
// AIX32-NEXT: %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 16
@@ -48,7 +48,7 @@ void testva (int n, ...) {
// AIX32-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 8 %t, ptr align 4 %argp.cur, i32 16, i1 false)
// AIX64-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %t, ptr align 8 %argp.cur, i64 16, i1 false)
-// CHECK-NEXT: call void @llvm.va_copy(ptr %ap2, ptr %ap)
+// CHECK-NEXT: call void @llvm.va_copy.p0(ptr %ap2, ptr %ap)
// AIX32-NEXT: %argp.cur1 = load ptr, ptr %ap2, align 4
// AIX32-NEXT: %argp.next2 = getelementptr inbounds i8, ptr %argp.cur1, i32 4
@@ -62,14 +62,14 @@ void testva (int n, ...) {
// AIX64-NEXT: %1 = load i32, ptr %0, align 4
// AIX64-NEXT: store i32 %1, ptr %v, align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr %ap2)
-// CHECK-NEXT: call void @llvm.va_end(ptr %ap)
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr %ap2)
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr %ap)
// CHECK-NEXT: ret void
-// CHECK: declare void @llvm.va_start(ptr)
+// CHECK: declare void @llvm.va_start.p0(ptr)
// AIX32: declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
// AIX64: declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
-// CHECK: declare void @llvm.va_copy(ptr, ptr)
-// CHECK: declare void @llvm.va_end(ptr)
+// CHECK: declare void @llvm.va_copy.p0(ptr, ptr)
+// CHECK: declare void @llvm.va_end.p0(ptr)
diff --git a/clang/test/CodeGen/PowerPC/ppc64le-varargs-f128.c b/clang/test/CodeGen/PowerPC/ppc64le-varargs-f128.c
index 396614fe5bac..2f5459d1bb9c 100644
--- a/clang/test/CodeGen/PowerPC/ppc64le-varargs-f128.c
+++ b/clang/test/CodeGen/PowerPC/ppc64le-varargs-f128.c
@@ -31,7 +31,7 @@ void foo_ls(ldbl128_s);
// OMP-TARGET: call void @foo_ld(ppc_fp128 noundef %[[V3]])
// OMP-HOST-LABEL: define{{.*}} void @omp(
-// OMP-HOST: call void @llvm.va_start(ptr %[[AP:[0-9a-zA-Z_.]+]])
+// OMP-HOST: call void @llvm.va_start.p0(ptr %[[AP:[0-9a-zA-Z_.]+]])
// OMP-HOST: %[[CUR:[0-9a-zA-Z_.]+]] = load ptr, ptr %[[AP]], align 8
// OMP-HOST: %[[TMP0:[^ ]+]] = getelementptr inbounds i8, ptr %[[CUR]], i32 15
// OMP-HOST: %[[ALIGN:[^ ]+]] = call ptr @llvm.ptrmask.p0.i64(ptr %[[TMP0]], i64 -16)
@@ -49,13 +49,13 @@ void omp(int n, ...) {
}
// IEEE-LABEL: define{{.*}} void @f128
-// IEEE: call void @llvm.va_start(ptr %[[AP:[0-9a-zA-Z_.]+]])
+// IEEE: call void @llvm.va_start.p0(ptr %[[AP:[0-9a-zA-Z_.]+]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load ptr, ptr %[[AP]]
// IEEE: %[[TMP0:[^ ]+]] = getelementptr inbounds i8, ptr %[[CUR]], i32 15
// IEEE: %[[ALIGN:[^ ]+]] = call ptr @llvm.ptrmask.p0.i64(ptr %[[TMP0]], i64 -16)
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, ptr %[[ALIGN]], align 16
// IEEE: call void @foo_fq(fp128 noundef %[[V4]])
-// IEEE: call void @llvm.va_end(ptr %[[AP]])
+// IEEE: call void @llvm.va_end.p0(ptr %[[AP]])
void f128(int n, ...) {
va_list ap;
va_start(ap, n);
@@ -64,20 +64,20 @@ void f128(int n, ...) {
}
// IEEE-LABEL: define{{.*}} void @long_double
-// IEEE: call void @llvm.va_start(ptr %[[AP:[0-9a-zA-Z_.]+]])
+// IEEE: call void @llvm.va_start.p0(ptr %[[AP:[0-9a-zA-Z_.]+]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load ptr, ptr %[[AP]]
// IEEE: %[[TMP0:[^ ]+]] = getelementptr inbounds i8, ptr %[[CUR]], i32 15
// IEEE: %[[ALIGN:[^ ]+]] = call ptr @llvm.ptrmask.p0.i64(ptr %[[TMP0]], i64 -16)
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, ptr %[[ALIGN]], align 16
// IEEE: call void @foo_ld(fp128 noundef %[[V4]])
-// IEEE: call void @llvm.va_end(ptr %[[AP]])
+// IEEE: call void @llvm.va_end.p0(ptr %[[AP]])
// IBM-LABEL: define{{.*}} void @long_double
-// IBM: call void @llvm.va_start(ptr %[[AP:[0-9a-zA-Z_.]+]])
+// IBM: call void @llvm.va_start.p0(ptr %[[AP:[0-9a-zA-Z_.]+]])
// IBM: %[[CUR:[0-9a-zA-Z_.]+]] = load ptr, ptr %[[AP]]
// IBM: %[[V4:[0-9a-zA-Z_.]+]] = load ppc_fp128, ptr %[[CUR]], align 8
// IBM: call void @foo_ld(ppc_fp128 noundef %[[V4]])
-// IBM: call void @llvm.va_end(ptr %[[AP]])
+// IBM: call void @llvm.va_end.p0(ptr %[[AP]])
void long_double(int n, ...) {
va_list ap;
va_start(ap, n);
@@ -86,7 +86,7 @@ void long_double(int n, ...) {
}
// IEEE-LABEL: define{{.*}} void @long_double_struct
-// IEEE: call void @llvm.va_start(ptr %[[AP:[0-9a-zA-Z_.]+]])
+// IEEE: call void @llvm.va_start.p0(ptr %[[AP:[0-9a-zA-Z_.]+]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load ptr, ptr %[[AP]]
// IEEE: %[[TMP0:[^ ]+]] = getelementptr inbounds i8, ptr %[[CUR]], i32 15
// IEEE: %[[ALIGN:[^ ]+]] = call ptr @llvm.ptrmask.p0.i64(ptr %[[TMP0]], i64 -16)
@@ -96,7 +96,7 @@ void long_double(int n, ...) {
// IEEE: %[[COERCE:[0-9a-zA-Z_.]+]] = getelementptr inbounds %struct.ldbl128_s, ptr %[[TMP]], i32 0, i32 0
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, ptr %[[COERCE]], align 16
// IEEE: call void @foo_ls(fp128 inreg %[[V4]])
-// IEEE: call void @llvm.va_end(ptr %[[AP]])
+// IEEE: call void @llvm.va_end.p0(ptr %[[AP]])
void long_double_struct(int n, ...) {
va_list ap;
va_start(ap, n);
diff --git a/clang/test/CodeGen/RISCV/riscv-func-attr-target-err.c b/clang/test/CodeGen/RISCV/riscv-func-attr-target-err.c
index 35d6973818d0..b303d71304bf 100644
--- a/clang/test/CodeGen/RISCV/riscv-func-attr-target-err.c
+++ b/clang/test/CodeGen/RISCV/riscv-func-attr-target-err.c
@@ -2,6 +2,28 @@
// RUN: not %clang_cc1 -triple riscv64 -target-feature +zifencei -target-feature +m -target-feature +a \
// RUN: -emit-llvm %s 2>&1 | FileCheck %s
+#include <riscv_vector.h>
+
+void test_builtin() {
+// CHECK: error: '__builtin_rvv_vsetvli' needs target feature zve32x
+ __riscv_vsetvl_e8m8(1);
+}
+
+void test_rvv_i32_type() {
+// CHECK: error: RISC-V type 'vint32m1_t' (aka '__rvv_int32m1_t') requires the 'zve32x' extension
+ vint32m1_t v;
+}
+
+void test_rvv_f32_type() {
+// CHECK: error: RISC-V type 'vfloat32m1_t' (aka '__rvv_float32m1_t') requires the 'zve32f' extension
+ vfloat32m1_t v;
+}
+
+void test_rvv_f64_type() {
+// CHECK: error: RISC-V type 'vfloat64m1_t' (aka '__rvv_float64m1_t') requires the 'zve64d' extension
+ vfloat64m1_t v;
+}
+
// CHECK: error: duplicate 'arch=' in the 'target' attribute string;
__attribute__((target("arch=rv64gc;arch=rv64gc_zbb"))) void testMultiArchSelectLast() {}
// CHECK: error: duplicate 'cpu=' in the 'target' attribute string;
diff --git a/clang/test/CodeGen/RISCV/riscv-func-attr-target.c b/clang/test/CodeGen/RISCV/riscv-func-attr-target.c
index f216eaf735b4..1f8682179ea8 100644
--- a/clang/test/CodeGen/RISCV/riscv-func-attr-target.c
+++ b/clang/test/CodeGen/RISCV/riscv-func-attr-target.c
@@ -4,6 +4,8 @@
// RUN: -target-feature -relax -target-feature -zfa \
// RUN: -emit-llvm %s -o - | FileCheck %s
+#include <riscv_vector.h>
+
// CHECK-LABEL: define dso_local void @testDefault
// CHECK-SAME: () #0 {
void testDefault() {}
@@ -35,6 +37,34 @@ testAttrFullArchAndAttrCpu() {}
// CHECK-SAME: () #8 {
__attribute__((target("cpu=sifive-u54"))) void testAttrCpuOnly() {}
+__attribute__((target("arch=+zve32x")))
+void test_builtin_w_zve32x() {
+// CHECK-LABEL: test_builtin_w_zve32x
+// CHECK-SAME: #9
+ __riscv_vsetvl_e8m8(1);
+}
+
+__attribute__((target("arch=+zve32x")))
+void test_rvv_i32_type_w_zve32x() {
+// CHECK-LABEL: test_rvv_i32_type_w_zve32x
+// CHECK-SAME: #9
+ vint32m1_t v;
+}
+
+__attribute__((target("arch=+zve32f")))
+void test_rvv_f32_type_w_zve32f() {
+// CHECK-LABEL: test_rvv_f32_type_w_zve32f
+// CHECK-SAME: #11
+ vfloat32m1_t v;
+}
+
+__attribute__((target("arch=+zve64d")))
+void test_rvv_f64_type_w_zve64d() {
+// CHECK-LABEL: test_rvv_f64_type_w_zve64d
+// CHECK-SAME: #12
+ vfloat64m1_t v;
+}
+
//.
// CHECK: attributes #0 = { {{.*}}"target-features"="+64bit,+a,+m,+save-restore,+zifencei,-relax,-zbb,-zfa" }
// CHECK: attributes #1 = { {{.*}}"target-cpu"="rocket-rv64" "target-features"="+64bit,+a,+d,+f,+m,+save-restore,+v,+zicsr,+zifencei,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b,-relax,-zbb,-zfa" "tune-cpu"="generic-rv64" }
@@ -46,3 +76,6 @@ __attribute__((target("cpu=sifive-u54"))) void testAttrCpuOnly() {}
// CHECK: attributes #6 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+a,+m,+save-restore,+zbb,+zifencei,-relax,-zfa" }
// CHECK: attributes #7 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+m,+save-restore,{{(-[[:alnum:]-]+)(,-[[:alnum:]-]+)*}}" }
// CHECK: attributes #8 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+a,+c,+d,+f,+m,+save-restore,+zicsr,+zifencei,{{(-[[:alnum:]-]+)(,-[[:alnum:]-]+)*}}" }
+// CHECK: attributes #9 = { {{.*}}"target-features"="+64bit,+a,+m,+save-restore,+zicsr,+zifencei,+zve32x,+zvl32b,-relax,-zbb,-zfa" }
+// CHECK: attributes #11 = { {{.*}}"target-features"="+64bit,+a,+f,+m,+save-restore,+zicsr,+zifencei,+zve32f,+zve32x,+zvl32b,-relax,-zbb,-zfa" }
+// CHECK: attributes #12 = { {{.*}}"target-features"="+64bit,+a,+d,+f,+m,+save-restore,+zicsr,+zifencei,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl32b,+zvl64b,-relax,-zbb,-zfa" }
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
new file mode 100644
index 000000000000..072d8a863d45
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.c
@@ -0,0 +1,34 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+// RUN: %clang_cc1 -std=c23 -triple riscv64 -target-feature +v \
+// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+
+#include <riscv_vector.h>
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @bar
+vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @bar
+[[riscv::vector_cc]] vint32m1_t bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr2(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call <vscale x 2 x i32> @baz
+vint32m1_t baz(vint32m1_t input);
+vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = baz(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
new file mode 100644
index 000000000000..c01aeb21f675
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv-llvm-ir.cpp
@@ -0,0 +1,32 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -std=c++11 -triple riscv64 -target-feature +v \
+// RUN: -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-LLVM %s
+
+#include <riscv_vector.h>
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @_Z3baru15__rvv_int32m1_t
+vint32m1_t __attribute__((riscv_vector_cc)) bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call riscv_vector_cc <vscale x 2 x i32> @_Z3baru15__rvv_int32m1_t
+[[riscv::vector_cc]] vint32m1_t bar(vint32m1_t input);
+vint32m1_t test_vector_cc_attr2(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = bar(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
+
+// CHECK-LLVM: call <vscale x 2 x i32> @_Z3bazu15__rvv_int32m1_t
+vint32m1_t baz(vint32m1_t input);
+vint32m1_t test_no_vector_cc_attr(vint32m1_t input, int32_t *base, size_t vl) {
+ vint32m1_t val = __riscv_vle32_v_i32m1(base, vl);
+ vint32m1_t ret = baz(input);
+ __riscv_vse32_v_i32m1(base, val, vl);
+ return ret;
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.c b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.c
new file mode 100644
index 000000000000..5c35901799b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.c
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 %s -std=c23 -triple riscv64 -target-feature +v -verify
+
+__attribute__((riscv_vector_cc)) int var; // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'int'}}
+
+__attribute__((riscv_vector_cc)) void func();
+__attribute__((riscv_vector_cc(1))) void func_invalid(); // expected-error {{'riscv_vector_cc' attribute takes no arguments}}
+
+void test_no_attribute(int); // expected-note {{previous declaration is here}}
+void __attribute__((riscv_vector_cc)) test_no_attribute(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
+
+[[riscv::vector_cc]] int var2; // expected-warning {{'vector_cc' only applies to function types; type here is 'int'}}
+
+[[riscv::vector_cc]] void func2();
+[[riscv::vector_cc(1)]] void func_invalid2(); // expected-error {{'vector_cc' attribute takes no arguments}}
+
+void test_no_attribute2(int); // expected-note {{previous declaration is here}}
+[[riscv::vector_cc]] void test_no_attribute2(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
diff --git a/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
new file mode 100644
index 000000000000..264bb7d9ad7c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-vector-callingconv.cpp
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 %s -triple riscv64 -target-feature +v -verify
+
+__attribute__((riscv_vector_cc)) int var; // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'int'}}
+
+__attribute__((riscv_vector_cc)) void func();
+__attribute__((riscv_vector_cc(1))) void func_invalid(); // expected-error {{'riscv_vector_cc' attribute takes no arguments}}
+
+void test_no_attribute(int); // expected-note {{previous declaration is here}}
+void __attribute__((riscv_vector_cc)) test_no_attribute(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
+
+class test_cc {
+ __attribute__((riscv_vector_cc)) void member_func();
+};
+
+void test_lambda() {
+ __attribute__((riscv_vector_cc)) auto lambda = []() { // expected-warning {{'riscv_vector_cc' only applies to function types; type here is 'auto'}}
+ };
+}
+
+[[riscv::vector_cc]] int var2; // expected-warning {{'vector_cc' only applies to function types; type here is 'int'}}
+
+[[riscv::vector_cc]] void func2();
+[[riscv::vector_cc(1)]] void func_invalid2(); // expected-error {{'vector_cc' attribute takes no arguments}}
+
+void test_no_attribute2(int); // expected-note {{previous declaration is here}}
+[[riscv::vector_cc]] void test_no_attribute2(int x) { } // expected-error {{function declared 'riscv_vector_cc' here was previously declared without calling convention}}
+
+class test_cc2 {
+ [[riscv::vector_cc]] void member_func();
+};
+
+void test_lambda2() {
+ [[riscv::vector_cc]] auto lambda = []() { // expected-warning {{'vector_cc' only applies to function types; type here is 'auto'}}
+ };
+}
diff --git a/clang/test/CodeGen/RISCV/riscv32-vararg.c b/clang/test/CodeGen/RISCV/riscv32-vararg.c
index 1c4e41f2f54c..00e04eb89467 100644
--- a/clang/test/CodeGen/RISCV/riscv32-vararg.c
+++ b/clang/test/CodeGen/RISCV/riscv32-vararg.c
@@ -80,13 +80,13 @@ void f_va_caller(void) {
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -111,7 +111,7 @@ int f_va_1(char *fmt, ...) {
// CHECK-ILP32F-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-ILP32F-NEXT: [[V:%.*]] = alloca double, align 8
// CHECK-ILP32F-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32F-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32F-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32F-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32F-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-ILP32F-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
@@ -119,7 +119,7 @@ int f_va_1(char *fmt, ...) {
// CHECK-ILP32F-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-ILP32F-NEXT: [[TMP1:%.*]] = load double, ptr [[ARGP_CUR_ALIGNED]], align 8
// CHECK-ILP32F-NEXT: store double [[TMP1]], ptr [[V]], align 8
-// CHECK-ILP32F-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32F-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32F-NEXT: [[TMP2:%.*]] = load double, ptr [[V]], align 8
// CHECK-ILP32F-NEXT: ret double [[TMP2]]
//
@@ -130,7 +130,7 @@ int f_va_1(char *fmt, ...) {
// CHECK-ILP32D-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-ILP32D-NEXT: [[V:%.*]] = alloca double, align 8
// CHECK-ILP32D-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32D-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32D-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32D-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32D-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-ILP32D-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
@@ -138,7 +138,7 @@ int f_va_1(char *fmt, ...) {
// CHECK-ILP32D-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-ILP32D-NEXT: [[TMP1:%.*]] = load double, ptr [[ARGP_CUR_ALIGNED]], align 8
// CHECK-ILP32D-NEXT: store double [[TMP1]], ptr [[V]], align 8
-// CHECK-ILP32D-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32D-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32D-NEXT: [[TMP2:%.*]] = load double, ptr [[V]], align 8
// CHECK-ILP32D-NEXT: ret double [[TMP2]]
//
@@ -149,13 +149,13 @@ int f_va_1(char *fmt, ...) {
// CHECK-ILP32E-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-ILP32E-NEXT: [[V:%.*]] = alloca double, align 8
// CHECK-ILP32E-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32E-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32E-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32E-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32E-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 8
// CHECK-ILP32E-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-ILP32E-NEXT: [[TMP0:%.*]] = load double, ptr [[ARGP_CUR]], align 4
// CHECK-ILP32E-NEXT: store double [[TMP0]], ptr [[V]], align 8
-// CHECK-ILP32E-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32E-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32E-NEXT: [[TMP1:%.*]] = load double, ptr [[V]], align 8
// CHECK-ILP32E-NEXT: ret double [[TMP1]]
//
@@ -180,7 +180,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-ILP32F-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-ILP32F-NEXT: [[X:%.*]] = alloca double, align 8
// CHECK-ILP32F-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32F-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32F-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32F-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32F-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-ILP32F-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
@@ -200,7 +200,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-ILP32F-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 4
// CHECK-ILP32F-NEXT: [[TMP4:%.*]] = load double, ptr [[ARGP_CUR3_ALIGNED]], align 8
// CHECK-ILP32F-NEXT: store double [[TMP4]], ptr [[X]], align 8
-// CHECK-ILP32F-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32F-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32F-NEXT: [[TMP5:%.*]] = load double, ptr [[V]], align 8
// CHECK-ILP32F-NEXT: [[TMP6:%.*]] = load double, ptr [[X]], align 8
// CHECK-ILP32F-NEXT: [[ADD:%.*]] = fadd double [[TMP5]], [[TMP6]]
@@ -215,7 +215,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-ILP32D-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-ILP32D-NEXT: [[X:%.*]] = alloca double, align 8
// CHECK-ILP32D-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32D-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32D-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32D-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32D-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-ILP32D-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
@@ -235,7 +235,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-ILP32D-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 4
// CHECK-ILP32D-NEXT: [[TMP4:%.*]] = load double, ptr [[ARGP_CUR3_ALIGNED]], align 8
// CHECK-ILP32D-NEXT: store double [[TMP4]], ptr [[X]], align 8
-// CHECK-ILP32D-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32D-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32D-NEXT: [[TMP5:%.*]] = load double, ptr [[V]], align 8
// CHECK-ILP32D-NEXT: [[TMP6:%.*]] = load double, ptr [[X]], align 8
// CHECK-ILP32D-NEXT: [[ADD:%.*]] = fadd double [[TMP5]], [[TMP6]]
@@ -250,7 +250,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-ILP32E-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-ILP32E-NEXT: [[X:%.*]] = alloca double, align 8
// CHECK-ILP32E-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32E-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32E-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32E-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32E-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 8
// CHECK-ILP32E-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -266,7 +266,7 @@ double f_va_2(char *fmt, ...) {
// CHECK-ILP32E-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 4
// CHECK-ILP32E-NEXT: [[TMP2:%.*]] = load double, ptr [[ARGP_CUR3]], align 4
// CHECK-ILP32E-NEXT: store double [[TMP2]], ptr [[X]], align 8
-// CHECK-ILP32E-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32E-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32E-NEXT: [[TMP3:%.*]] = load double, ptr [[V]], align 8
// CHECK-ILP32E-NEXT: [[TMP4:%.*]] = load double, ptr [[X]], align 8
// CHECK-ILP32E-NEXT: [[ADD:%.*]] = fadd double [[TMP3]], [[TMP4]]
@@ -296,7 +296,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-ILP32F-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-ILP32F-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-ILP32F-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32F-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32F-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32F-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32F-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-ILP32F-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -321,7 +321,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-ILP32F-NEXT: store ptr [[ARGP_NEXT8]], ptr [[VA]], align 4
// CHECK-ILP32F-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGP_CUR7]], align 4
// CHECK-ILP32F-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[LS]], ptr align 4 [[TMP3]], i32 16, i1 false)
-// CHECK-ILP32F-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32F-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32F-NEXT: [[TMP4:%.*]] = load i32, ptr [[V]], align 4
// CHECK-ILP32F-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to fp128
// CHECK-ILP32F-NEXT: [[TMP5:%.*]] = load fp128, ptr [[LD]], align 16
@@ -384,7 +384,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-ILP32D-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-ILP32D-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-ILP32D-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32D-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32D-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32D-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32D-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-ILP32D-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -409,7 +409,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-ILP32D-NEXT: store ptr [[ARGP_NEXT8]], ptr [[VA]], align 4
// CHECK-ILP32D-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGP_CUR7]], align 4
// CHECK-ILP32D-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[LS]], ptr align 4 [[TMP3]], i32 16, i1 false)
-// CHECK-ILP32D-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32D-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32D-NEXT: [[TMP4:%.*]] = load i32, ptr [[V]], align 4
// CHECK-ILP32D-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to fp128
// CHECK-ILP32D-NEXT: [[TMP5:%.*]] = load fp128, ptr [[LD]], align 16
@@ -472,7 +472,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-ILP32E-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 4
// CHECK-ILP32E-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-ILP32E-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-ILP32E-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-ILP32E-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-ILP32E-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-ILP32E-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-ILP32E-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -497,7 +497,7 @@ double f_va_3(char *fmt, ...) {
// CHECK-ILP32E-NEXT: store ptr [[ARGP_NEXT8]], ptr [[VA]], align 4
// CHECK-ILP32E-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGP_CUR7]], align 4
// CHECK-ILP32E-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[LS]], ptr align 4 [[TMP3]], i32 16, i1 false)
-// CHECK-ILP32E-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-ILP32E-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-ILP32E-NEXT: [[TMP4:%.*]] = load i32, ptr [[V]], align 4
// CHECK-ILP32E-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to fp128
// CHECK-ILP32E-NEXT: [[TMP5:%.*]] = load fp128, ptr [[LD]], align 16
diff --git a/clang/test/CodeGen/RISCV/riscv64-vararg.c b/clang/test/CodeGen/RISCV/riscv64-vararg.c
index 634cde61320c..efdffa2687e6 100644
--- a/clang/test/CodeGen/RISCV/riscv64-vararg.c
+++ b/clang/test/CodeGen/RISCV/riscv64-vararg.c
@@ -135,13 +135,13 @@ void f_va_caller(void) {
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 8
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -166,7 +166,7 @@ int f_va_1(char *fmt, ...) {
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[V:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
@@ -174,7 +174,7 @@ int f_va_1(char *fmt, ...) {
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[ARGP_CUR_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP1]], ptr [[V]], align 16
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP2:%.*]] = load fp128, ptr [[V]], align 16
// CHECK-NEXT: ret fp128 [[TMP2]]
//
@@ -199,7 +199,7 @@ long double f_va_2(char *fmt, ...) {
// CHECK-NEXT: [[W:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[X:%.*]] = alloca fp128, align 16
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
@@ -219,7 +219,7 @@ long double f_va_2(char *fmt, ...) {
// CHECK-NEXT: store ptr [[ARGP_NEXT4]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load fp128, ptr [[ARGP_CUR3_ALIGNED]], align 16
// CHECK-NEXT: store fp128 [[TMP4]], ptr [[X]], align 16
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP5:%.*]] = load fp128, ptr [[V]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = load fp128, ptr [[X]], align 16
// CHECK-NEXT: [[ADD:%.*]] = fadd fp128 [[TMP5]], [[TMP6]]
@@ -248,7 +248,7 @@ long double f_va_3(char *fmt, ...) {
// CHECK-NEXT: [[LS:%.*]] = alloca [[STRUCT_LARGE:%.*]], align 8
// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 8
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 8
@@ -267,7 +267,7 @@ long double f_va_3(char *fmt, ...) {
// CHECK-NEXT: store ptr [[ARGP_NEXT6]], ptr [[VA]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGP_CUR5]], align 8
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[LS]], ptr align 8 [[TMP1]], i64 32, i1 false)
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_TINY]], ptr [[TS]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
// CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP2]] to i64
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c
index ecf090a128aa..bad68504fab0 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c
@@ -1,6 +1,6 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple riscv32 -target-feature +zbb -verify %s -o -
+// RUN: %clang_cc1 -triple riscv32 -target-feature +zbb -S -verify %s -o -
unsigned int orc_b_64(unsigned int a) {
- return __builtin_riscv_orc_b_64(a); // expected-error {{builtin requires: 'RV64'}}
+ return __builtin_riscv_orc_b_64(a); // expected-error {{'__builtin_riscv_orc_b_64' needs target feature zbb,64bit}}
}
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c
index d2e3e76043ae..a256bf75b031 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c
@@ -1,14 +1,10 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -verify %s -o -
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -S -verify %s -o -
#include <stdint.h>
-uint32_t zip(uint32_t rs1)
+uint32_t zip_unzip(uint32_t rs1)
{
- return __builtin_riscv_zip_32(rs1); // expected-error {{builtin requires: 'RV32'}}
-}
-
-uint32_t unzip(uint32_t rs1)
-{
- return __builtin_riscv_unzip_32(rs1); // expected-error {{builtin requires: 'RV32'}}
+ (void)__builtin_riscv_zip_32(rs1); // expected-error {{'__builtin_riscv_zip_32' needs target feature zbkb,32bit}}
+ return __builtin_riscv_unzip_32(rs1); // expected-error {{'__builtin_riscv_unzip_32' needs target feature zbkb,32bit}}
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/rvv-error.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/rvv-error.c
index 6ec9b0579976..ecb6c5f27025 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/rvv-error.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/rvv-error.c
@@ -11,7 +11,7 @@
// CHECK-RV64V-NEXT: ret i32 [[CONV]]
//
-// CHECK-RV64-ERR: error: builtin requires at least one of the following extensions: 'Zve32x'
+// CHECK-RV64-ERR: error: '__builtin_rvv_vsetvli' needs target feature zve32x
int test() {
return __builtin_rvv_vsetvli(1, 0, 0);
diff --git a/clang/test/CodeGen/WebAssembly/wasm-varargs.c b/clang/test/CodeGen/WebAssembly/wasm-varargs.c
index c475de19ae44..e794857304e1 100644
--- a/clang/test/CodeGen/WebAssembly/wasm-varargs.c
+++ b/clang/test/CodeGen/WebAssembly/wasm-varargs.c
@@ -10,13 +10,13 @@
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -38,7 +38,7 @@ int test_i32(char *fmt, ...) {
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i64, align 8
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
@@ -46,7 +46,7 @@ int test_i32(char *fmt, ...) {
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGP_CUR_ALIGNED]], align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr [[V]], align 8
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[V]], align 8
// CHECK-NEXT: ret i64 [[TMP2]]
//
@@ -73,13 +73,13 @@ struct S {
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_RESULT]], ptr align 4 [[TMP0]], i32 12, i1 false)
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: ret void
//
struct S test_struct(char *fmt, ...) {
@@ -102,7 +102,7 @@ struct Z {};
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[U:%.*]] = alloca [[STRUCT_Z:%.*]], align 1
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
-// CHECK-NEXT: call void @llvm.va_start(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 0
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
@@ -112,7 +112,7 @@ struct Z {};
// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR1]], align 4
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_RESULT]], ptr align 4 [[TMP0]], i32 12, i1 false)
-// CHECK-NEXT: call void @llvm.va_end(ptr [[VA]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: ret void
//
struct S test_empty_struct(char *fmt, ...) {
diff --git a/clang/test/CodeGen/X86/va-arg-sse.c b/clang/test/CodeGen/X86/va-arg-sse.c
index e040b0e5790b..b7d00dad1453 100644
--- a/clang/test/CodeGen/X86/va-arg-sse.c
+++ b/clang/test/CodeGen/X86/va-arg-sse.c
@@ -21,7 +21,7 @@ struct S a[5];
// CHECK-NEXT: store i32 0, ptr [[J]], align 4
// CHECK-NEXT: store i32 0, ptr [[K]], align 4
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[AP]], i64 0, i64 0
-// CHECK-NEXT: call void @llvm.va_start(ptr [[ARRAYDECAY]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[ARRAYDECAY]])
// CHECK-NEXT: store ptr getelementptr inbounds ([5 x %struct.S], ptr @a, i64 0, i64 2), ptr [[P]], align 8
// CHECK-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[AP]], i64 0, i64 0
// CHECK-NEXT: [[FP_OFFSET_P:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG:%.*]], ptr [[ARRAYDECAY2]], i32 0, i32 1
@@ -52,7 +52,7 @@ struct S a[5];
// CHECK-NEXT: [[VAARG_ADDR:%.*]] = phi ptr [ [[TMP]], [[VAARG_IN_REG]] ], [ [[OVERFLOW_ARG_AREA]], [[VAARG_IN_MEM]] ]
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARG]], ptr align 4 [[VAARG_ADDR]], i64 12, i1 false)
// CHECK-NEXT: [[ARRAYDECAY3:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[AP]], i64 0, i64 0
-// CHECK-NEXT: call void @llvm.va_end(ptr [[ARRAYDECAY3]])
+// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[ARRAYDECAY3]])
// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[P]], align 8
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne ptr [[TMP15]], null
// CHECK-NEXT: br i1 [[TOBOOL]], label [[LAND_LHS_TRUE:%.*]], label [[IF_END:%.*]]
diff --git a/clang/test/CodeGen/X86/x86_64-vaarg.c b/clang/test/CodeGen/X86/x86_64-vaarg.c
new file mode 100644
index 000000000000..07c6df14a0b8
--- /dev/null
+++ b/clang/test/CodeGen/X86/x86_64-vaarg.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+
+typedef struct { struct {} a; } empty;
+
+// CHECK-LABEL: define dso_local void @empty_record_test(
+// CHECK-SAME: i32 noundef [[Z:%.*]], ...) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
+// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[LIST:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_EMPTY]], align 1
+// CHECK-NEXT: store i32 [[Z]], ptr [[Z_ADDR]], align 4
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[ARRAYDECAY]])
+// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[RETVAL]], ptr align 1 [[TMP]], i64 0, i1 false)
+// CHECK-NEXT: ret void
+//
+empty empty_record_test(int z, ...) {
+ __builtin_va_list list;
+ __builtin_va_start(list, z);
+ return __builtin_va_arg(list, empty);
+}
+
+typedef struct {
+ struct{} a;
+ double b;
+} s1;
+
+// CHECK-LABEL: define dso_local double @f(
+// CHECK-SAME: i32 noundef [[Z:%.*]], ...) #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
+// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[LIST:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: store i32 [[Z]], ptr [[Z_ADDR]], align 4
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[ARRAYDECAY]])
+// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
+// CHECK-NEXT: [[FP_OFFSET_P:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG:%.*]], ptr [[ARRAYDECAY1]], i32 0, i32 1
+// CHECK-NEXT: [[FP_OFFSET:%.*]] = load i32, ptr [[FP_OFFSET_P]], align 4
+// CHECK-NEXT: [[FITS_IN_FP:%.*]] = icmp ule i32 [[FP_OFFSET]], 160
+// CHECK-NEXT: br i1 [[FITS_IN_FP]], label [[VAARG_IN_REG:%.*]], label [[VAARG_IN_MEM:%.*]]
+// CHECK: vaarg.in_reg:
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG]], ptr [[ARRAYDECAY1]], i32 0, i32 3
+// CHECK-NEXT: [[REG_SAVE_AREA:%.*]] = load ptr, ptr [[TMP0]], align 16
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[REG_SAVE_AREA]], i32 [[FP_OFFSET]]
+// CHECK-NEXT: [[TMP2:%.*]] = add i32 [[FP_OFFSET]], 16
+// CHECK-NEXT: store i32 [[TMP2]], ptr [[FP_OFFSET_P]], align 4
+// CHECK-NEXT: br label [[VAARG_END:%.*]]
+// CHECK: vaarg.in_mem:
+// CHECK-NEXT: [[OVERFLOW_ARG_AREA_P:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG]], ptr [[ARRAYDECAY1]], i32 0, i32 2
+// CHECK-NEXT: [[OVERFLOW_ARG_AREA:%.*]] = load ptr, ptr [[OVERFLOW_ARG_AREA_P]], align 8
+// CHECK-NEXT: [[OVERFLOW_ARG_AREA_NEXT:%.*]] = getelementptr i8, ptr [[OVERFLOW_ARG_AREA]], i32 8
+// CHECK-NEXT: store ptr [[OVERFLOW_ARG_AREA_NEXT]], ptr [[OVERFLOW_ARG_AREA_P]], align 8
+// CHECK-NEXT: br label [[VAARG_END]]
+// CHECK: vaarg.end:
+// CHECK-NEXT: [[VAARG_ADDR:%.*]] = phi ptr [ [[TMP1]], [[VAARG_IN_REG]] ], [ [[OVERFLOW_ARG_AREA]], [[VAARG_IN_MEM]] ]
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[VAARG_ADDR]], i64 8, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[RETVAL]], align 8
+// CHECK-NEXT: ret double [[TMP3]]
+//
+s1 f(int z, ...) {
+ __builtin_va_list list;
+ __builtin_va_start(list, z);
+ return __builtin_va_arg(list, s1);
+}
diff --git a/clang/test/CodeGen/aapcs-align.cpp b/clang/test/CodeGen/aapcs-align.cpp
index 2886a32974b0..4f393d9e6b7f 100644
--- a/clang/test/CodeGen/aapcs-align.cpp
+++ b/clang/test/CodeGen/aapcs-align.cpp
@@ -134,8 +134,8 @@ void g6() {
f6m(1, 2, 3, 4, 5, s);
}
// CHECK: define{{.*}} void @g6
-// CHECK: call void @f6(i32 noundef 1, [4 x i32] [i32 6, i32 7, i32 0, i32 0])
-// CHECK: call void @f6m(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, [4 x i32] [i32 6, i32 7, i32 0, i32 0])
+// CHECK: call void @f6(i32 noundef 1, [4 x i32] [i32 6, i32 7, i32 0, i32 undef])
+// CHECK: call void @f6m(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, [4 x i32] [i32 6, i32 7, i32 0, i32 undef])
// CHECK: declare void @f6(i32 noundef, [4 x i32])
// CHECK: declare void @f6m(i32 noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef, [4 x i32])
}
diff --git a/clang/test/CodeGen/aapcs-bitfield-access-unit.c b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
new file mode 100644
index 000000000000..e95dba1c5f50
--- /dev/null
+++ b/clang/test/CodeGen/aapcs-bitfield-access-unit.c
@@ -0,0 +1,231 @@
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi -fno-aapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT
+
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi -faapcs-bitfield-width -fdump-record-layouts-simple -emit-llvm -o /dev/null %s | FileCheck %s -check-prefixes=LAYOUT
+
+struct st0 {
+ short c : 7;
+} st0;
+// LAYOUT-LABEL: LLVMType:%struct.st0 =
+// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st1 {
+ int a : 10;
+ short c : 6;
+} st1;
+// LAYOUT-LABEL: LLVMType:%struct.st1 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:6 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st2 {
+ int a : 10;
+ short c : 7;
+} st2;
+// LAYOUT-LABEL: LLVMType:%struct.st2 =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st3 {
+ volatile short c : 7;
+} st3;
+// LAYOUT-LABEL: LLVMType:%struct.st3 =
+// LAYOUT-SAME: type { i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st4 {
+ int b : 9;
+ volatile char c : 5;
+} st4;
+// LAYOUT-LABEL: LLVMType:%struct.st4 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:9 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:5 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st5 {
+ int a : 12;
+ volatile char c : 5;
+} st5;
+// LAYOUT-LABEL: LLVMType:%struct.st5 =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:5 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st6 {
+ int a : 12;
+ char b;
+ int c : 5;
+} st6;
+// LAYOUT-LABEL: LLVMType:%struct.st6 =
+// LAYOUT-SAME: type { i16, i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:5 IsSigned:1 StorageSize:8 StorageOffset:3
+// LAYOUT-NEXT: ]>
+
+struct st7a {
+ char a;
+ int b : 5;
+} st7a;
+// LAYOUT-LABEL: LLVMType:%struct.st7a =
+// LAYOUT-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:5 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct st7b {
+ char x;
+ volatile struct st7a y;
+} st7b;
+// LAYOUT-LABEL: LLVMType:%struct.st7b =
+// LAYOUT-SAME: type { i8, [3 x i8], %struct.st7a }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: ]>
+
+struct st8 {
+ unsigned f : 16;
+} st8;
+// LAYOUT-LABEL: LLVMType:%struct.st8 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st9{
+ int f : 8;
+} st9;
+// LAYOUT-LABEL: LLVMType:%struct.st9 =
+// LAYOUT-SAME: type { i8, [3 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st10{
+ int e : 1;
+ int f : 8;
+} st10;
+// LAYOUT-LABEL: LLVMType:%struct.st10 =
+// LAYOUT-SAME: type { i16, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:1 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st11{
+ char e;
+ int f : 16;
+} st11;
+// LAYOUT-LABEL: LLVMType:%struct.st11 =
+// LAYOUT-SAME: type <{ i8, i16, i8 }>
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct st12{
+ int e : 8;
+ int f : 16;
+} st12;
+// LAYOUT-LABEL: LLVMType:%struct.st12 =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st13 {
+ char a : 8;
+ int b : 32;
+} __attribute__((packed)) st13;
+// LAYOUT-LABEL: LLVMType:%struct.st13 =
+// LAYOUT-SAME: type <{ i8, i32 }>
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct st14 {
+ char a : 8;
+} __attribute__((packed)) st14;
+// LAYOUT-LABEL: LLVMType:%struct.st14 =
+// LAYOUT-SAME: type { i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st15 {
+ short a : 8;
+} __attribute__((packed)) st15;
+// LAYOUT-LABEL: LLVMType:%struct.st15 =
+// LAYOUT-SAME: type { i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+struct st16 {
+ int a : 32;
+ int b : 16;
+ int c : 32;
+ int d : 16;
+} st16;
+// LAYOUT-LABEL: LLVMType:%struct.st16 =
+// LAYOUT-SAME: type { i32, i16, i32, i16 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:8
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:12
+// LAYOUT-NEXT: ]>
+
+struct st17 {
+int b : 32;
+char c : 8;
+} __attribute__((packed)) st17;
+// LAYOUT-LABEL: LLVMType:%struct.st17 =
+// LAYOUT-SAME: type <{ i32, i8 }>
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:32 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-NEXT: ]>
+
+struct zero_bitfield {
+ int a : 8;
+ char : 0;
+ int b : 8;
+} st18;
+// LAYOUT-LABEL: LLVMType:%struct.zero_bitfield =
+// LAYOUT-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
+struct zero_bitfield_ok {
+ short a : 8;
+ char a1 : 8;
+ long : 0;
+ int b : 24;
+} st19;
+// LAYOUT-LABEL: LLVMType:%struct.zero_bitfield_ok =
+// LAYOUT-SAME: type { i16, i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:1 StorageSize:32 StorageOffset:4
+// LAYOUT-NEXT: ]>
+
+
diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c
index 152ee26e7a3e..0df250d4ebc5 100644
--- a/clang/test/CodeGen/aapcs-bitfield.c
+++ b/clang/test/CodeGen/aapcs-bitfield.c
@@ -299,77 +299,73 @@ struct st2 {
// LE-LABEL: @st2_check_load(
// LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LE-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// LE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// LE-NEXT: ret i32 [[CONV]]
//
// BE-LABEL: @st2_check_load(
// BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BE-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// BE-NEXT: ret i32 [[CONV]]
//
// LENUMLOADS-LABEL: @st2_check_load(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// LENUMLOADS-NEXT: ret i32 [[CONV]]
//
// BENUMLOADS-LABEL: @st2_check_load(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// BENUMLOADS-NEXT: ret i32 [[CONV]]
//
// LEWIDTH-LABEL: @st2_check_load(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// LEWIDTH-NEXT: ret i32 [[CONV]]
//
// BEWIDTH-LABEL: @st2_check_load(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// BEWIDTH-NEXT: ret i32 [[CONV]]
//
// LEWIDTHNUM-LABEL: @st2_check_load(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 1
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 9
+// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// LEWIDTHNUM-NEXT: ret i32 [[CONV]]
//
// BEWIDTHNUM-LABEL: @st2_check_load(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i16
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 25
+// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i16
// BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_CAST]] to i32
// BEWIDTHNUM-NEXT: ret i32 [[CONV]]
//
@@ -379,74 +375,66 @@ int st2_check_load(struct st2 *m) {
// LE-LABEL: @st2_check_store(
// LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// LE-NEXT: ret void
//
// BE-LABEL: @st2_check_store(
// BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BE-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BE-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @st2_check_store(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @st2_check_store(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BENUMLOADS-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BENUMLOADS-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @st2_check_store(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LEWIDTH-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @st2_check_store(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BEWIDTH-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BEWIDTH-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @st2_check_store(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -8323073
+// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LEWIDTHNUM-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @st2_check_store(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[C]], align 2
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// BEWIDTHNUM-NEXT: store i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[M:%.*]], align 4
+// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65025
+// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 512
+// BEWIDTHNUM-NEXT: store i32 [[BF_SET]], ptr [[M]], align 4
// BEWIDTHNUM-NEXT: ret void
//
void st2_check_store(struct st2 *m) {
@@ -636,8 +624,8 @@ struct st4 {
//
// LEWIDTH-LABEL: @st4_check_load(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -645,8 +633,8 @@ struct st4 {
//
// BEWIDTH-LABEL: @st4_check_load(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -654,8 +642,8 @@ struct st4 {
//
// LEWIDTHNUM-LABEL: @st4_check_load(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
// LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -663,8 +651,8 @@ struct st4 {
//
// BEWIDTHNUM-LABEL: @st4_check_load(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
// BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -708,38 +696,38 @@ int st4_check_load(struct st4 *m) {
//
// LEWIDTH-LABEL: @st4_check_store(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @st4_check_store(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
-// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @st4_check_store(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
-// LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @st4_check_store(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
-// BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 1
// BEWIDTHNUM-NEXT: ret void
//
void st4_check_store(struct st4 *m) {
@@ -821,42 +809,44 @@ struct st5 {
// LE-LABEL: @st5_check_load(
// LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LE-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LE-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
// LE-NEXT: ret i32 [[CONV]]
//
// BE-LABEL: @st5_check_load(
// BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BE-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
// BE-NEXT: ret i32 [[CONV]]
//
// LENUMLOADS-LABEL: @st5_check_load(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
// LENUMLOADS-NEXT: ret i32 [[CONV]]
//
// BENUMLOADS-LABEL: @st5_check_load(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
// BENUMLOADS-NEXT: ret i32 [[CONV]]
//
// LEWIDTH-LABEL: @st5_check_load(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -864,16 +854,16 @@ struct st5 {
//
// BEWIDTH-LABEL: @st5_check_load(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
// BEWIDTH-NEXT: ret i32 [[CONV]]
//
// LEWIDTHNUM-LABEL: @st5_check_load(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
// LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
@@ -881,8 +871,8 @@ struct st5 {
//
// BEWIDTHNUM-LABEL: @st5_check_load(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
// BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
// BEWIDTHNUM-NEXT: ret i32 [[CONV]]
@@ -893,74 +883,70 @@ int st5_check_load(struct st5 *m) {
// LE-LABEL: @st5_check_store(
// LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
// LE-NEXT: ret void
//
// BE-LABEL: @st5_check_store(
// BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BE-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @st5_check_store(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @st5_check_store(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @st5_check_store(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @st5_check_store(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @st5_check_store(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @st5_check_store(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
// BEWIDTHNUM-NEXT: ret void
//
void st5_check_store(struct st5 *m) {
@@ -980,8 +966,8 @@ struct st6 {
// LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
// LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LE-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// LE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -997,8 +983,8 @@ struct st6 {
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BE-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// BE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1014,8 +1000,8 @@ struct st6 {
// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// LENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1031,8 +1017,8 @@ struct st6 {
// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// BENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1048,8 +1034,8 @@ struct st6 {
// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1065,8 +1051,8 @@ struct st6 {
// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1082,8 +1068,8 @@ struct st6 {
// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 4
// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// LEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1099,8 +1085,8 @@ struct st6 {
// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6:%.*]], ptr [[M]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = load volatile i8, ptr [[B]], align 2
-// BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = load volatile i8, ptr [[B]], align 2
+// BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
// BEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], ptr [[M]], i32 0, i32 2
// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
@@ -1704,9 +1690,9 @@ void store_st9(volatile struct st9 *m) {
// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
// LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LE-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LE-NEXT: store volatile i8 [[TMP0]], ptr [[M]], align 4
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// LE-NEXT: ret void
//
// BE-LABEL: @increment_st9(
@@ -1714,9 +1700,9 @@ void store_st9(volatile struct st9 *m) {
// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
// BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BE-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BE-NEXT: store volatile i8 [[TMP0]], ptr [[M]], align 4
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_st9(
@@ -1724,10 +1710,10 @@ void store_st9(volatile struct st9 *m) {
// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
-// LENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[M]], align 4
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_st9(
@@ -1735,10 +1721,10 @@ void store_st9(volatile struct st9 *m) {
// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[M:%.*]], align 4
// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[M]], align 4
-// BENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[M]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[M]], align 4
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_st9(
@@ -1949,9 +1935,9 @@ void store_st10(volatile struct st10 *m) {
// LE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// LE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// LE-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
// LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
// LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -1968,9 +1954,9 @@ void store_st10(volatile struct st10 *m) {
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// BE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// BE-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
// BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -1987,9 +1973,9 @@ void store_st10(volatile struct st10 *m) {
// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 1
// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -511
// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -2006,9 +1992,9 @@ void store_st10(volatile struct st10 *m) {
// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[M]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP1]], 255
+// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i16 [[BF_VALUE]], 7
// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD1]], -32641
// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
@@ -2767,146 +2753,70 @@ struct st13 {
// LE-LABEL: @increment_b_st13(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// LE-NEXT: ret void
//
// BE-LABEL: @increment_b_st13(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_b_st13(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// LENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_b_st13(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// BENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_b_st13(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_b_st13(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_b_st13(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// LEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_b_st13(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST13:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[B]], align 1
+// BEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[B]], align 1
// BEWIDTHNUM-NEXT: ret void
//
void increment_b_st13(volatile struct st13 *s) {
@@ -2990,9 +2900,9 @@ struct st15 {
// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// LE-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// LE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LE-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// LE-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// LE-NEXT: ret void
//
// BE-LABEL: @increment_a_st15(
@@ -3000,9 +2910,9 @@ struct st15 {
// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// BE-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// BE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BE-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// BE-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_a_st15(
@@ -3010,10 +2920,10 @@ struct st15 {
// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// LENUMLOADS-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// LENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_a_st15(
@@ -3021,10 +2931,10 @@ struct st15 {
// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// BENUMLOADS-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// BENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_a_st15(
@@ -3032,9 +2942,9 @@ struct st15 {
// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// LEWIDTH-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// LEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LEWIDTH-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// LEWIDTH-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_a_st15(
@@ -3042,9 +2952,9 @@ struct st15 {
// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// BEWIDTH-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
-// BEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BEWIDTH-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
+// BEWIDTH-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_a_st15(
@@ -3052,10 +2962,10 @@ struct st15 {
// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// LEWIDTHNUM-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_a_st15(
@@ -3063,10 +2973,10 @@ struct st15 {
// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 1
// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i16
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add i16 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i16 [[INC]] to i8
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i16 [[INC]] to i8
// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i16
+// BEWIDTHNUM-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 1
+// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i16
// BEWIDTHNUM-NEXT: ret void
//
void increment_a_st15(volatile struct st15 *s) {
@@ -3082,146 +2992,58 @@ struct st16 {
// LE-LABEL: @increment_a_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT: store i32 [[INC]], ptr [[S]], align 4
// LE-NEXT: ret void
//
// BE-LABEL: @increment_a_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT: store i32 [[INC]], ptr [[S]], align 4
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_a_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: store i32 [[INC]], ptr [[S]], align 4
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_a_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: store i32 [[INC]], ptr [[S]], align 4
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_a_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: store i32 [[INC]], ptr [[S]], align 4
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_a_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: store i32 [[INC]], ptr [[S]], align 4
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_a_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: store i32 [[INC]], ptr [[S]], align 4
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_a_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[S:%.*]], align 4
+// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: store i32 [[INC]], ptr [[S]], align 4
// BEWIDTHNUM-NEXT: ret void
//
void increment_a_st16(struct st16 *s) {
@@ -3230,154 +3052,90 @@ void increment_a_st16(struct st16 *s) {
// LE-LABEL: @increment_b_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LE-NEXT: ret void
//
// BE-LABEL: @increment_b_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_b_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_b_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_b_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTH-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_b_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTH-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_b_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTHNUM-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_b_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[S:%.*]], align 4
-// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[B]], align 4
+// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTHNUM-NEXT: store i16 [[TMP0]], ptr [[B]], align 4
+// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BEWIDTHNUM-NEXT: ret void
//
void increment_b_st16(struct st16 *s) {
@@ -3386,154 +3144,66 @@ void increment_b_st16(struct st16 *s) {
// LE-LABEL: @increment_c_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT: store i32 [[INC]], ptr [[C]], align 4
// LE-NEXT: ret void
//
// BE-LABEL: @increment_c_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BE-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT: store i32 [[INC]], ptr [[C]], align 4
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_c_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: store i32 [[INC]], ptr [[C]], align 4
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_c_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: store i32 [[INC]], ptr [[C]], align 4
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_c_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: store i32 [[INC]], ptr [[C]], align 4
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_c_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: store i32 [[INC]], ptr [[C]], align 4
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_c_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: store i32 [[INC]], ptr [[C]], align 4
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_c_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[C]], align 4
-// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[C]], align 4
-// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[C]], align 4
+// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: store i32 [[INC]], ptr [[C]], align 4
// BEWIDTHNUM-NEXT: ret void
//
void increment_c_st16(struct st16 *s) {
@@ -3542,162 +3212,90 @@ void increment_c_st16(struct st16 *s) {
// LE-LABEL: @increment_d_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LE-NEXT: ret void
//
// BE-LABEL: @increment_d_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BE-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_d_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_d_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_d_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTH-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_d_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTH-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTH-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_d_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LEWIDTHNUM-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_d_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[D]], align 4
+// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load i64, ptr [[D]], align 4
-// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BEWIDTHNUM-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BEWIDTHNUM-NEXT: store i64 [[BF_SET]], ptr [[D]], align 4
-// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BEWIDTHNUM-NEXT: store i16 [[TMP0]], ptr [[D]], align 4
+// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BEWIDTHNUM-NEXT: ret void
//
void increment_d_st16(struct st16 *s) {
@@ -3706,74 +3304,32 @@ void increment_d_st16(struct st16 *s) {
// LE-LABEL: @increment_v_a_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
// LE-NEXT: ret void
//
// BE-LABEL: @increment_v_a_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_v_a_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
+// LENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_v_a_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
+// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 4
+// BENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[S]], align 4
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_v_a_st16(
@@ -3812,140 +3368,110 @@ void increment_v_a_st16(volatile struct st16 *s) {
// LE-LABEL: @increment_v_b_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT: store volatile i16 [[TMP0]], ptr [[B]], align 4
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LE-NEXT: ret void
//
// BE-LABEL: @increment_v_b_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT: store volatile i16 [[TMP0]], ptr [[B]], align 4
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_v_b_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[B]], align 4
+// LENUMLOADS-NEXT: store volatile i16 [[TMP0]], ptr [[B]], align 4
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_v_b_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[S:%.*]], align 4
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[B]], align 4
+// BENUMLOADS-NEXT: store volatile i16 [[TMP0]], ptr [[B]], align 4
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_v_b_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_v_b_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_v_b_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_v_b_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// BEWIDTHNUM-NEXT: ret void
@@ -3956,112 +3482,70 @@ void increment_v_b_st16(volatile struct st16 *s) {
// LE-LABEL: @increment_v_c_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// LE-NEXT: ret void
//
// BE-LABEL: @increment_v_c_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_v_c_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// LENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_v_c_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[C]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[C]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// BENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_v_c_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// LEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_v_c_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// BEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_v_c_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// LEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// LEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_v_c_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 2
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[C]], align 4
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
-// BEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[C]], align 4
+// BEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[C]], align 4
// BEWIDTHNUM-NEXT: ret void
//
void increment_v_c_st16(volatile struct st16 *s) {
@@ -4070,144 +3554,110 @@ void increment_v_c_st16(volatile struct st16 *s) {
// LE-LABEL: @increment_v_d_st16(
// LE-NEXT: entry:
-// LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// LE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LE-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LE-NEXT: store volatile i16 [[TMP0]], ptr [[D]], align 4
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LE-NEXT: ret void
//
// BE-LABEL: @increment_v_d_st16(
// BE-NEXT: entry:
-// BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// BE-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BE-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BE-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BE-NEXT: store volatile i16 [[TMP0]], ptr [[D]], align 4
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_v_d_st16(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 16
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// LENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// LENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[D]], align 4
+// LENUMLOADS-NEXT: store volatile i16 [[TMP0]], ptr [[D]], align 4
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_v_d_st16(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i64 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i64 [[BF_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], ptr [[S:%.*]], i32 0, i32 3
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, ptr [[D]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i64 [[TMP1]], 65535
-// BENUMLOADS-NEXT: [[BF_SHL2:%.*]] = shl i64 [[BF_VALUE]], 16
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
-// BENUMLOADS-NEXT: store volatile i64 [[BF_SET]], ptr [[D]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i64 [[BF_VALUE]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i64 [[BF_RESULT_SHL]], 48
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i64 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i16
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, ptr [[D]], align 4
+// BENUMLOADS-NEXT: store volatile i16 [[TMP0]], ptr [[D]], align 4
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i16 [[TMP0]] to i32
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_v_d_st16(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_v_d_st16(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_v_d_st16(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// LEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_v_d_st16(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[S:%.*]], i32 3
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 16
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_ASHR]], 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[TMP0]], align 4
// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP1]], align 4
+// BEWIDTHNUM-NEXT: store volatile i32 [[BF_SET]], ptr [[TMP0]], align 4
// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i32 [[BF_VALUE]], 16
// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i32 [[BF_RESULT_SHL]], 16
// BEWIDTHNUM-NEXT: ret void
@@ -4224,146 +3674,62 @@ char c : 8;
// LE-LABEL: @increment_v_b_st17(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LE-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// LE-NEXT: ret void
//
// BE-LABEL: @increment_v_b_st17(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BE-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_v_b_st17(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// LENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_v_b_st17(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// BENUMLOADS-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_v_b_st17(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_v_b_st17(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTH-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTH-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTH-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_v_b_st17(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 8
-// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 8
-// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
-// LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// LEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// LEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// LEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_v_b_st17(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 8
-// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BEWIDTHNUM-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 4294967295
-// BEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
-// BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// BEWIDTHNUM-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BEWIDTHNUM-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 8
-// BEWIDTHNUM-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 8
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i32
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[S:%.*]], align 1
+// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, ptr [[S]], align 1
+// BEWIDTHNUM-NEXT: store volatile i32 [[INC]], ptr [[S]], align 1
// BEWIDTHNUM-NEXT: ret void
//
void increment_v_b_st17(volatile struct st17 *s) {
@@ -4372,108 +3738,70 @@ void increment_v_b_st17(volatile struct st17 *s) {
// LE-LABEL: @increment_v_c_st17(
// LE-NEXT: entry:
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
-// LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// LE-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// LE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
-// LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LE-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// LE-NEXT: ret void
//
// BE-LABEL: @increment_v_c_st17(
// BE-NEXT: entry:
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BE-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
-// BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// BE-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BE-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
-// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BE-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BE-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// BE-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BE-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_v_c_st17(
// LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_LOAD]], 32
-// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_SHL]]
-// LENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// LENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// LENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// LENUMLOADS-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_v_c_st17(
// BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i40, ptr [[S:%.*]], align 1
-// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i40 [[BF_LOAD]], 32
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i40 [[BF_SHL]], 32
-// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_ASHR]] to i8
-// BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40
-// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i40 [[TMP1]], 255
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[BF_VALUE]]
-// BENUMLOADS-NEXT: store volatile i40 [[BF_SET]], ptr [[S]], align 1
-// BENUMLOADS-NEXT: [[BF_RESULT_SHL:%.*]] = shl i40 [[BF_VALUE]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_ASHR:%.*]] = ashr i40 [[BF_RESULT_SHL]], 32
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = trunc i40 [[BF_RESULT_ASHR]] to i8
+// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
+// BENUMLOADS-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// BENUMLOADS-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_v_c_st17(
// LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
// LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// LEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// LEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_v_c_st17(
// BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
// BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// BEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// BEWIDTH-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_v_c_st17(
// LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
-// LEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// LEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_v_c_st17(
// BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i32 4
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], ptr [[S:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 1
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP1]], align 1
-// BEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[TMP1]], align 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[C]], align 1
+// BEWIDTHNUM-NEXT: store volatile i8 [[INC]], ptr [[C]], align 1
// BEWIDTHNUM-NEXT: ret void
//
void increment_v_c_st17(volatile struct st17 *s) {
@@ -4493,9 +3821,9 @@ struct zero_bitfield {
// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LE-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// LE-NEXT: ret void
//
// BE-LABEL: @increment_a_zero_bitfield(
@@ -4503,9 +3831,9 @@ struct zero_bitfield {
// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BE-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BE-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BE-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BE-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BE-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// BE-NEXT: ret void
//
// LENUMLOADS-LABEL: @increment_a_zero_bitfield(
@@ -4513,10 +3841,10 @@ struct zero_bitfield {
// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// LENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// LENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// LENUMLOADS-NEXT: ret void
//
// BENUMLOADS-LABEL: @increment_a_zero_bitfield(
@@ -4524,10 +3852,10 @@ struct zero_bitfield {
// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BENUMLOADS-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BENUMLOADS-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// BENUMLOADS-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BENUMLOADS-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BENUMLOADS-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// BENUMLOADS-NEXT: ret void
//
// LEWIDTH-LABEL: @increment_a_zero_bitfield(
@@ -4535,9 +3863,9 @@ struct zero_bitfield {
// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTH-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// LEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// LEWIDTH-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_a_zero_bitfield(
@@ -4545,9 +3873,9 @@ struct zero_bitfield {
// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTH-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
-// BEWIDTH-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
+// BEWIDTH-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BEWIDTH-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
@@ -4555,10 +3883,10 @@ struct zero_bitfield {
// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// LEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// LEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// LEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// LEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTHNUM-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// LEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_a_zero_bitfield(
@@ -4566,10 +3894,10 @@ struct zero_bitfield {
// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[S:%.*]], align 4
// BEWIDTHNUM-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BEWIDTHNUM-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
-// BEWIDTHNUM-NEXT: [[TMP1:%.*]] = trunc i32 [[INC]] to i8
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = trunc i32 [[INC]] to i8
// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[S]], align 4
-// BEWIDTHNUM-NEXT: store volatile i8 [[TMP1]], ptr [[S]], align 4
-// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTHNUM-NEXT: store volatile i8 [[TMP0]], ptr [[S]], align 4
+// BEWIDTHNUM-NEXT: [[BF_RESULT_CAST:%.*]] = sext i8 [[TMP0]] to i32
// BEWIDTHNUM-NEXT: ret void
//
void increment_a_zero_bitfield(volatile struct zero_bitfield *s) {
@@ -4692,9 +4020,9 @@ struct zero_bitfield_ok {
// LE-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
// LE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
// LE-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// LE-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// LE-NEXT: [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
// LE-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// LE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// LE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// LE-NEXT: [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
// LE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
// LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
@@ -4716,9 +4044,9 @@ struct zero_bitfield_ok {
// BE-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
// BE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
// BE-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// BE-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// BE-NEXT: [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
// BE-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// BE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// BE-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// BE-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
// BE-NEXT: store volatile i16 [[BF_SET]], ptr [[S]], align 4
@@ -4739,9 +4067,9 @@ struct zero_bitfield_ok {
// LENUMLOADS-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
// LENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
// LENUMLOADS-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// LENUMLOADS-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// LENUMLOADS-NEXT: [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
// LENUMLOADS-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// LENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// LENUMLOADS-NEXT: [[BF_SHL6:%.*]] = shl i16 [[BF_VALUE]], 8
// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], 255
// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL6]]
@@ -4763,9 +4091,9 @@ struct zero_bitfield_ok {
// BENUMLOADS-NEXT: [[CONV3:%.*]] = sext i8 [[BF_CAST]] to i32
// BENUMLOADS-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV3]], [[CONV]]
// BENUMLOADS-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD]] to i8
-// BENUMLOADS-NEXT: [[TMP2:%.*]] = zext i8 [[CONV4]] to i16
+// BENUMLOADS-NEXT: [[TMP0:%.*]] = zext i8 [[CONV4]] to i16
// BENUMLOADS-NEXT: [[BF_LOAD5:%.*]] = load volatile i16, ptr [[S]], align 4
-// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP2]], 255
+// BENUMLOADS-NEXT: [[BF_VALUE:%.*]] = and i16 [[TMP0]], 255
// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD5]], -256
// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_VALUE]]
// BENUMLOADS-NEXT: store volatile i16 [[BF_SET]], ptr [[S]], align 4
@@ -4780,12 +4108,12 @@ struct zero_bitfield_ok {
// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// LEWIDTH-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// LEWIDTH-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
// LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
// LEWIDTH-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// LEWIDTH-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// LEWIDTH-NEXT: store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
// LEWIDTH-NEXT: ret void
//
// BEWIDTH-LABEL: @increment_a_zero_bitfield_ok(
@@ -4793,12 +4121,12 @@ struct zero_bitfield_ok {
// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
// BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// BEWIDTH-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// BEWIDTH-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
// BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
// BEWIDTH-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// BEWIDTH-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// BEWIDTH-NEXT: store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
// BEWIDTH-NEXT: ret void
//
// LEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
@@ -4807,13 +4135,13 @@ struct zero_bitfield_ok {
// LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8
// LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// LEWIDTHNUM-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// LEWIDTHNUM-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
// LEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
// LEWIDTHNUM-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// LEWIDTHNUM-NEXT: [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
-// LEWIDTHNUM-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP0]], align 1
+// LEWIDTHNUM-NEXT: store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
// LEWIDTHNUM-NEXT: ret void
//
// BEWIDTHNUM-LABEL: @increment_a_zero_bitfield_ok(
@@ -4821,13 +4149,13 @@ struct zero_bitfield_ok {
// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, ptr [[S:%.*]], align 4
// BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 8
// BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
-// BEWIDTHNUM-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP2]], align 1
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[S]], i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, ptr [[TMP0]], align 1
// BEWIDTHNUM-NEXT: [[CONV2:%.*]] = sext i8 [[BF_LOAD1]] to i32
// BEWIDTHNUM-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV2]], [[CONV]]
// BEWIDTHNUM-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
-// BEWIDTHNUM-NEXT: [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP2]], align 1
-// BEWIDTHNUM-NEXT: store volatile i8 [[CONV3]], ptr [[TMP2]], align 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD4:%.*]] = load volatile i8, ptr [[TMP0]], align 1
+// BEWIDTHNUM-NEXT: store volatile i8 [[CONV3]], ptr [[TMP0]], align 1
// BEWIDTHNUM-NEXT: ret void
//
void increment_a_zero_bitfield_ok(volatile struct zero_bitfield_ok *s) {
diff --git a/clang/test/CodeGen/aapcs64-align.cpp b/clang/test/CodeGen/aapcs64-align.cpp
index 759413cbc4b5..de231f2123b9 100644
--- a/clang/test/CodeGen/aapcs64-align.cpp
+++ b/clang/test/CodeGen/aapcs64-align.cpp
@@ -75,8 +75,8 @@ void g4() {
f4m(1, 2, 3, 4, 5, s);
}
// CHECK: define{{.*}} void @g4()
-// CHECK: call void @f4(i32 noundef 1, [2 x i64] %{{.*}})
-// CHECK: void @f4m(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, [2 x i64] %{{.*}})
+// CHECK: call void @f4(i32 noundef 1, [2 x i64] [i64 30064771078, i64 0])
+// CHECK: void @f4m(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, [2 x i64] [i64 30064771078, i64 0])
// CHECK: declare void @f4(i32 noundef, [2 x i64])
// CHECK: declare void @f4m(i32 noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef, [2 x i64])
@@ -95,8 +95,8 @@ void f5m(int, int, int, int, int, P16);
f5m(1, 2, 3, 4, 5, s);
}
// CHECK: define{{.*}} void @g5()
-// CHECK: call void @f5(i32 noundef 1, [2 x i64] %{{.*}})
-// CHECK: void @f5m(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, [2 x i64] %{{.*}})
+// CHECK: call void @f5(i32 noundef 1, [2 x i64] [i64 30064771078, i64 0])
+// CHECK: void @f5m(i32 noundef 1, i32 noundef 2, i32 noundef 3, i32 noundef 4, i32 noundef 5, [2 x i64] [i64 30064771078, i64 0])
// CHECK: declare void @f5(i32 noundef, [2 x i64])
// CHECK: declare void @f5m(i32 noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef, [2 x i64])
diff --git a/clang/test/CodeGen/aarch64-ABI-align-packed.c b/clang/test/CodeGen/aarch64-ABI-align-packed.c
index 2b029f645895..13c68fe54b84 100644
--- a/clang/test/CodeGen/aarch64-ABI-align-packed.c
+++ b/clang/test/CodeGen/aarch64-ABI-align-packed.c
@@ -73,7 +73,7 @@ __attribute__((noinline)) void named_arg_non_packed_struct(double d0, double d1,
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6:[0-9]+]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_non_packed_struct(double d0, double d1, double d2, double d3,
@@ -128,7 +128,7 @@ __attribute__((noinline)) void named_arg_packed_struct(double d0, double d1, dou
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_packed_struct(double d0, double d1, double d2, double d3,
@@ -183,7 +183,7 @@ __attribute__((noinline)) void named_arg_packed_member(double d0, double d1, dou
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_packed_member(double d0, double d1, double d2, double d3,
@@ -238,7 +238,7 @@ __attribute__((noinline)) void named_arg_aligned_struct_8(double d0, double d1,
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_aligned_struct_8(double d0, double d1, double d2, double d3,
@@ -293,7 +293,7 @@ __attribute__((noinline)) void named_arg_aligned_member_8(double d0, double d1,
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_aligned_member_8(double d0, double d1, double d2, double d3,
@@ -348,7 +348,7 @@ __attribute__((noinline)) void named_arg_pragma_packed_struct_8(double d0, doubl
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_pragma_packed_struct_8(double d0, double d1, double d2, double d3,
@@ -403,7 +403,7 @@ __attribute__((noinline)) void named_arg_pragma_packed_struct_4(double d0, doubl
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VL:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
-// CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[VL]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VL]])
// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VL]]) #[[ATTR6]]
// CHECK-NEXT: ret void
void variadic_pragma_packed_struct_4(double d0, double d1, double d2, double d3,
diff --git a/clang/test/CodeGen/aarch64-mixed-target-attributes.c b/clang/test/CodeGen/aarch64-mixed-target-attributes.c
new file mode 100644
index 000000000000..aef6ce36ab1c
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-mixed-target-attributes.c
@@ -0,0 +1,278 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals --include-generated-funcs
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature -v9.5a -S -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature -fmv -S -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK-NOFMV
+
+// The following is guarded because in NOFMV we get an error for redefining the default.
+#ifdef __HAVE_FUNCTION_MULTI_VERSIONING
+int explicit_default(void) { return 0; }
+__attribute__((target_version("jscvt"))) int explicit_default(void) { return 1; }
+__attribute__((target_clones("dotprod", "lse"))) int explicit_default(void) { return 2; }
+__attribute__((target_version("rdma"))) int explicit_default(void) { return 3; }
+
+int foo(void) { return explicit_default(); }
+#endif
+
+__attribute__((target_version("jscvt"))) int implicit_default(void) { return 1; }
+__attribute__((target_clones("dotprod", "lse"))) int implicit_default(void) { return 2; }
+__attribute__((target_version("rdma"))) int implicit_default(void) { return 3; }
+
+int bar(void) { return implicit_default(); }
+
+// These shouldn't generate anything.
+int unused_version_declarations(void);
+__attribute__((target_clones("dotprod", "lse"))) int unused_version_declarations(void);
+__attribute__((target_version("jscvt"))) int unused_version_declarations(void);
+
+// These should generate the default (mangled) version and the resolver.
+int default_def_with_version_decls(void) { return 0; }
+__attribute__((target_clones("dotprod", "lse"))) int default_def_with_version_decls(void);
+__attribute__((target_version("jscvt"))) int default_def_with_version_decls(void);
+
+//.
+// CHECK: @__aarch64_cpu_features = external dso_local global { i64 }
+// CHECK: @explicit_default.ifunc = weak_odr alias i32 (), ptr @explicit_default
+// CHECK: @implicit_default.ifunc = weak_odr alias i32 (), ptr @implicit_default
+// CHECK: @default_def_with_version_decls.ifunc = weak_odr alias i32 (), ptr @default_def_with_version_decls
+// CHECK: @explicit_default = weak_odr ifunc i32 (), ptr @explicit_default.resolver
+// CHECK: @implicit_default = weak_odr ifunc i32 (), ptr @implicit_default.resolver
+// CHECK: @default_def_with_version_decls = weak_odr ifunc i32 (), ptr @default_def_with_version_decls.resolver
+//.
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@explicit_default.default
+// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 0
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@explicit_default._Mjscvt
+// CHECK-SAME: () #[[ATTR1:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@explicit_default._Mdotprod
+// CHECK-SAME: () #[[ATTR2:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@explicit_default._Mlse
+// CHECK-SAME: () #[[ATTR3:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@explicit_default.resolver() comdat {
+// CHECK-NEXT: resolver_entry:
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1048576
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1048576
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
+// CHECK: resolver_return:
+// CHECK-NEXT: ret ptr @explicit_default._Mjscvt
+// CHECK: resolver_else:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 64
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 64
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
+// CHECK: resolver_return1:
+// CHECK-NEXT: ret ptr @explicit_default._Mrdm
+// CHECK: resolver_else2:
+// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 16
+// CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 16
+// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
+// CHECK-NEXT: br i1 [[TMP11]], label [[RESOLVER_RETURN3:%.*]], label [[RESOLVER_ELSE4:%.*]]
+// CHECK: resolver_return3:
+// CHECK-NEXT: ret ptr @explicit_default._Mdotprod
+// CHECK: resolver_else4:
+// CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 128
+// CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[TMP13]], 128
+// CHECK-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
+// CHECK-NEXT: br i1 [[TMP15]], label [[RESOLVER_RETURN5:%.*]], label [[RESOLVER_ELSE6:%.*]]
+// CHECK: resolver_return5:
+// CHECK-NEXT: ret ptr @explicit_default._Mlse
+// CHECK: resolver_else6:
+// CHECK-NEXT: ret ptr @explicit_default.default
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@explicit_default._Mrdm
+// CHECK-SAME: () #[[ATTR4:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 3
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@foo
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call i32 @explicit_default()
+// CHECK-NEXT: ret i32 [[CALL]]
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@implicit_default._Mjscvt
+// CHECK-SAME: () #[[ATTR1]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@implicit_default._Mdotprod
+// CHECK-SAME: () #[[ATTR2]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@implicit_default._Mlse
+// CHECK-SAME: () #[[ATTR3]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@implicit_default.resolver() comdat {
+// CHECK-NEXT: resolver_entry:
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1048576
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1048576
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
+// CHECK: resolver_return:
+// CHECK-NEXT: ret ptr @implicit_default._Mjscvt
+// CHECK: resolver_else:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 64
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 64
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
+// CHECK: resolver_return1:
+// CHECK-NEXT: ret ptr @implicit_default._Mrdm
+// CHECK: resolver_else2:
+// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 16
+// CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 16
+// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
+// CHECK-NEXT: br i1 [[TMP11]], label [[RESOLVER_RETURN3:%.*]], label [[RESOLVER_ELSE4:%.*]]
+// CHECK: resolver_return3:
+// CHECK-NEXT: ret ptr @implicit_default._Mdotprod
+// CHECK: resolver_else4:
+// CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], 128
+// CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[TMP13]], 128
+// CHECK-NEXT: [[TMP15:%.*]] = and i1 true, [[TMP14]]
+// CHECK-NEXT: br i1 [[TMP15]], label [[RESOLVER_RETURN5:%.*]], label [[RESOLVER_ELSE6:%.*]]
+// CHECK: resolver_return5:
+// CHECK-NEXT: ret ptr @implicit_default._Mlse
+// CHECK: resolver_else6:
+// CHECK-NEXT: ret ptr @implicit_default.default
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@implicit_default._Mrdm
+// CHECK-SAME: () #[[ATTR4]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 3
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@bar
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call i32 @implicit_default()
+// CHECK-NEXT: ret i32 [[CALL]]
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@default_def_with_version_decls.default
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 0
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@implicit_default.default
+// CHECK-SAME: () #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@default_def_with_version_decls.resolver() comdat {
+// CHECK-NEXT: resolver_entry:
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1048576
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1048576
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
+// CHECK: resolver_return:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls._Mjscvt
+// CHECK: resolver_else:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 16
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 16
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
+// CHECK: resolver_return1:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls._Mdotprod
+// CHECK: resolver_else2:
+// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 128
+// CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 128
+// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
+// CHECK-NEXT: br i1 [[TMP11]], label [[RESOLVER_RETURN3:%.*]], label [[RESOLVER_ELSE4:%.*]]
+// CHECK: resolver_return3:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls._Mlse
+// CHECK: resolver_else4:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls.default
+//
+//
+// CHECK-NOFMV: Function Attrs: noinline nounwind optnone
+// CHECK-NOFMV-LABEL: define {{[^@]+}}@implicit_default
+// CHECK-NOFMV-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NOFMV-NEXT: entry:
+// CHECK-NOFMV-NEXT: ret i32 2
+//
+//
+// CHECK-NOFMV: Function Attrs: noinline nounwind optnone
+// CHECK-NOFMV-LABEL: define {{[^@]+}}@bar
+// CHECK-NOFMV-SAME: () #[[ATTR0]] {
+// CHECK-NOFMV-NEXT: entry:
+// CHECK-NOFMV-NEXT: [[CALL:%.*]] = call i32 @implicit_default()
+// CHECK-NOFMV-NEXT: ret i32 [[CALL]]
+//
+//
+// CHECK-NOFMV: Function Attrs: noinline nounwind optnone
+// CHECK-NOFMV-LABEL: define {{[^@]+}}@default_def_with_version_decls
+// CHECK-NOFMV-SAME: () #[[ATTR0]] {
+// CHECK-NOFMV-NEXT: entry:
+// CHECK-NOFMV-NEXT: ret i32 0
+//
+//.
+// CHECK: attributes #[[ATTR0]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-v9.5a" }
+// CHECK: attributes #[[ATTR1]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+jsconv,+neon,-v9.5a" }
+// CHECK: attributes #[[ATTR2]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+fp-armv8,+neon,-v9.5a" }
+// CHECK: attributes #[[ATTR3]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,-v9.5a" }
+// CHECK: attributes #[[ATTR4]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon,+rdm,-v9.5a" }
+// CHECK: attributes #[[ATTR5:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+fp-armv8,+neon,-v9.5a" }
+// CHECK: attributes #[[ATTR6:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+jsconv,+neon,-v9.5a" }
+// CHECK: attributes #[[ATTR7:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-v9.5a" }
+// CHECK: attributes #[[ATTR8:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,-v9.5a" }
+//.
+// CHECK-NOFMV: attributes #[[ATTR0]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fmv" }
+//.
+// CHECK: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK: [[META1:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
+// CHECK-NOFMV: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK-NOFMV: [[META1:![0-9]+]] = !{!"{{.*}}clang version {{.*}}"}
+//.
diff --git a/clang/test/CodeGen/aarch64-varargs.c b/clang/test/CodeGen/aarch64-varargs.c
index 44b87029e7b3..ee4e88eda4ef 100644
--- a/clang/test/CodeGen/aarch64-varargs.c
+++ b/clang/test/CodeGen/aarch64-varargs.c
@@ -837,7 +837,7 @@ void check_start(int n, ...) {
va_list the_list;
va_start(the_list, n);
// CHECK: [[THE_LIST:%[a-z_0-9]+]] = alloca %struct.__va_list
-// CHECK: call void @llvm.va_start(ptr [[THE_LIST]])
+// CHECK: call void @llvm.va_start.p0(ptr [[THE_LIST]])
}
typedef struct {} empty;
diff --git a/clang/test/CodeGen/arm-bitfield-alignment.c b/clang/test/CodeGen/arm-bitfield-alignment.c
index e34789face55..5d0967ec7034 100644
--- a/clang/test/CodeGen/arm-bitfield-alignment.c
+++ b/clang/test/CodeGen/arm-bitfield-alignment.c
@@ -1,5 +1,7 @@
-// RUN: %clang_cc1 -triple arm-none-eabi -ffreestanding -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64 -ffreestanding -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple arm-none-eabi -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT-32
+// RUN: FileCheck %s -check-prefixes=IR,IR-32 <%t
+// RUN: %clang_cc1 -triple aarch64 -fdump-record-layouts-simple -ffreestanding -emit-llvm -o %t %s | FileCheck %s -check-prefixes=LAYOUT,LAYOUT-64
+// RUN: FileCheck %s -check-prefixes=IR,IR-64 <%t
extern struct T {
int b0 : 8;
@@ -11,5 +13,18 @@ int func(void) {
return g.b1;
}
-// CHECK: @g = external global %struct.T, align 4
-// CHECK: %{{.*}} = load i64, ptr @g, align 4
+// IR: @g = external global %struct.T, align 4
+// IR-32: %{{.*}} = load i32, ptr @g, align 4
+// IR-64: %{{.*}} = load i64, ptr @g, align 4
+
+// LAYOUT-LABEL: LLVMType:%struct.T =
+// LAYOUT-32-SAME: type { i32, i8 }
+// LAYOUT-64-SAME: type { i64 }
+// LAYOUT: BitFields:[
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-32-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:32 Size:1 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-NEXT: ]>
diff --git a/clang/test/CodeGen/arm-varargs.c b/clang/test/CodeGen/arm-varargs.c
index f754c7f52e59..ab4ac46924e6 100644
--- a/clang/test/CodeGen/arm-varargs.c
+++ b/clang/test/CodeGen/arm-varargs.c
@@ -264,5 +264,5 @@ void check_start(int n, ...) {
va_list the_list;
va_start(the_list, n);
// CHECK: [[THE_LIST:%[a-z0-9._]+]] = alloca %struct.__va_list
-// CHECK: call void @llvm.va_start(ptr [[THE_LIST]])
+// CHECK: call void @llvm.va_start.p0(ptr [[THE_LIST]])
}
diff --git a/clang/test/CodeGen/arm64-be-bitfield.c b/clang/test/CodeGen/arm64-be-bitfield.c
index 58c318539298..57e20b5b62b9 100644
--- a/clang/test/CodeGen/arm64-be-bitfield.c
+++ b/clang/test/CodeGen/arm64-be-bitfield.c
@@ -1,11 +1,25 @@
-// RUN: %clang_cc1 -triple aarch64_be-linux-gnu -ffreestanding -emit-llvm -O0 -o - %s | FileCheck --check-prefix IR %s
+// RUN: %clang_cc1 -triple aarch64_be-linux-gnu -ffreestanding -emit-llvm -O0 -o %t -fdump-record-layouts-simple %s | FileCheck %s --check-prefix=LAYOUT
+// RUN: FileCheck %s --check-prefix=IR <%t
struct bt3 { signed b2:10; signed b3:10; } b16;
// Get the high 32-bits and then shift appropriately for big-endian.
signed callee_b0f(struct bt3 bp11) {
// IR: callee_b0f(i64 [[ARG:%.*]])
-// IR: store i64 [[ARG]], ptr [[PTR:%.*]], align 8
-// IR: call void @llvm.memcpy.p0.p0.i64(ptr {{.*}}, ptr align 8 [[PTR]], i64 4
+// IR: [[BP11:%.*]] = alloca %struct.bt3, align 4
+// IR: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.bt3, ptr [[BP11]], i32 0, i32 0
+// IR: [[COERCE_HIGHBITS:%.*]] = lshr i64 [[ARG]], 32
+// IR: [[COERCE_VAL_II:%.*]] = trunc i64 [[COERCE_HIGHBITS]] to i32
+// IR: store i32 [[COERCE_VAL_II]], ptr [[COERCE_DIVE]], align 4
+// IR: [[BF_LOAD:%.*]] = load i32, ptr [[BP11]], align 4
+// IR: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 22
+// IR: ret i32 [[BF_ASHR]]
return bp11.b2;
}
+
+// LAYOUT-LABEL: LLVMType:%struct.bt3 =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:22 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:12 Size:10 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
diff --git a/clang/test/CodeGen/attr-counted-by-debug-info.c b/clang/test/CodeGen/attr-counted-by-debug-info.c
new file mode 100644
index 000000000000..a6c2b1382b79
--- /dev/null
+++ b/clang/test/CodeGen/attr-counted-by-debug-info.c
@@ -0,0 +1,18 @@
+// RUN: %clang -emit-llvm -DCOUNTED_BY -S -g %s -o - | FileCheck %s
+// RUN: %clang -emit-llvm -S -g %s -o - | FileCheck %s
+
+#ifdef COUNTED_BY
+#define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+#define __counted_by(member)
+#endif
+
+struct {
+ int num_counters;
+ long value[] __counted_by(num_counters);
+} agent_send_response_port_num;
+
+// CHECK: !DICompositeType(tag: DW_TAG_array_type, baseType: ![[BT:.*]], elements: ![[ELEMENTS:.*]])
+// CHECK: ![[BT]] = !DIBasicType(name: "long", size: {{.*}}, encoding: DW_ATE_signed)
+// CHECK: ![[ELEMENTS]] = !{![[COUNT:.*]]}
+// CHECK: ![[COUNT]] = !DISubrange(count: -1) \ No newline at end of file
diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index e5685e39173b..1fb39f9a3466 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -1314,17 +1314,10 @@ int test14(int idx) {
// NO-SANITIZE-WITH-ATTR-LABEL: define dso_local i32 @test15(
// NO-SANITIZE-WITH-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR4]] {
// NO-SANITIZE-WITH-ATTR-NEXT: entry:
-// NO-SANITIZE-WITH-ATTR-NEXT: [[FOO:%.*]] = alloca [[STRUCT_ANON_8:%.*]], align 4
-// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[FOO]]) #[[ATTR12]]
-// NO-SANITIZE-WITH-ATTR-NEXT: store i32 1, ptr [[FOO]], align 4
-// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[FOO]], i64 4
-// NO-SANITIZE-WITH-ATTR-NEXT: store i32 2, ptr [[TMP0]], align 4
-// NO-SANITIZE-WITH-ATTR-NEXT: [[BLAH:%.*]] = getelementptr inbounds i8, ptr [[FOO]], i64 8
// NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[BLAH]], i64 0, i64 [[IDXPROM]]
-// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
-// NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[FOO]]) #[[ATTR12]]
-// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP1]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds ([[STRUCT_ANON_8:%.*]], ptr @__const.test15.foo, i64 1, i32 0), i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
+// NO-SANITIZE-WITH-ATTR-NEXT: ret i32 [[TMP0]]
//
// SANITIZE-WITHOUT-ATTR-LABEL: define dso_local i32 @test15(
// SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR0]] {
@@ -1342,17 +1335,10 @@ int test14(int idx) {
// NO-SANITIZE-WITHOUT-ATTR-LABEL: define dso_local i32 @test15(
// NO-SANITIZE-WITHOUT-ATTR-SAME: i32 noundef [[IDX:%.*]]) local_unnamed_addr #[[ATTR1]] {
// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[FOO:%.*]] = alloca [[STRUCT_ANON_8:%.*]], align 4
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[FOO]]) #[[ATTR9]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 1, ptr [[FOO]], align 4
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[FOO]], i64 4
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 2, ptr [[TMP0]], align 4
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[BLAH:%.*]] = getelementptr inbounds i8, ptr [[FOO]], i64 8
// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[IDX]] to i64
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr [[BLAH]], i64 0, i64 [[IDXPROM]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[FOO]]) #[[ATTR9]]
-// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP1]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i32], ptr getelementptr inbounds ([[STRUCT_ANON_8:%.*]], ptr @__const.test15.foo, i64 1, i32 0), i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i32 [[TMP0]]
//
int test15(int idx) {
struct {
diff --git a/clang/test/CodeGen/attr-target-clones-aarch64.c b/clang/test/CodeGen/attr-target-clones-aarch64.c
index 94095f9aa3e1..8c8b951e9118 100644
--- a/clang/test/CodeGen/attr-target-clones-aarch64.c
+++ b/clang/test/CodeGen/attr-target-clones-aarch64.c
@@ -29,8 +29,8 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK: @ftc_def.ifunc = weak_odr alias i32 (), ptr @ftc_def
// CHECK: @ftc_dup1.ifunc = weak_odr alias i32 (), ptr @ftc_dup1
// CHECK: @ftc_dup2.ifunc = weak_odr alias i32 (), ptr @ftc_dup2
-// CHECK: @ftc_inline1.ifunc = weak_odr alias i32 (), ptr @ftc_inline1
// CHECK: @ftc_inline2.ifunc = weak_odr alias i32 (), ptr @ftc_inline2
+// CHECK: @ftc_inline1.ifunc = weak_odr alias i32 (), ptr @ftc_inline1
// CHECK: @ftc_inline3.ifunc = weak_odr alias i32 (), ptr @ftc_inline3
// CHECK: @ftc = weak_odr ifunc i32 (), ptr @ftc.resolver
// CHECK: @ftc_def = weak_odr ifunc i32 (), ptr @ftc_def.resolver
@@ -52,12 +52,6 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: ret i32 0
//
//
-// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc.default(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 0
-//
-//
// CHECK-LABEL: @ftc.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -92,12 +86,6 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: ret i32 1
//
//
-// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_def.default(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
-//
-//
// CHECK-LABEL: @ftc_def.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -126,12 +114,6 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: ret i32 2
//
//
-// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_dup1.default(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 2
-//
-//
// CHECK-LABEL: @ftc_dup1.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -158,12 +140,6 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
// CHECK-NEXT: ret i32 3
//
//
-// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_dup2.default(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 3
-//
-//
// CHECK-LABEL: @ftc_dup2.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -192,6 +168,12 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//
//
// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: @ftc_inline2._Mfp16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: @ftc_direct(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 4
@@ -287,45 +269,63 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline1._MrngMsimd(
+// CHECK-LABEL: @ftc_inline2._MfcmaMsve2-bitperm(
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
+// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline1._MpredresMrcpc(
+// CHECK-LABEL: @ftc.default(
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
+// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline1._Msve2-aesMwfxt(
+// CHECK-LABEL: @ftc_def.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline1.default(
+// CHECK-LABEL: @ftc_dup1.default(
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
+// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline2._Mfp16(
+// CHECK-LABEL: @ftc_dup2.default(
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 2
+// CHECK-NEXT: ret i32 3
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline2._MfcmaMsve2-bitperm(
+// CHECK-LABEL: @ftc_inline2.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: @ftc_inline2.default(
+// CHECK-LABEL: @ftc_inline1._MrngMsimd(
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 2
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: @ftc_inline1._MpredresMrcpc(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: @ftc_inline1._Msve2-aesMwfxt(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: @ftc_inline1.default(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
@@ -406,16 +406,16 @@ inline int __attribute__((target_clones("fp16", "sve2-bitperm+fcma", "default"))
//.
// CHECK: attributes #[[ATTR0:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+lse,+neon" }
// CHECK: attributes #[[ATTR1:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve,+sve2" }
-// CHECK: attributes #[[ATTR2:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
-// CHECK: attributes #[[ATTR3:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon,+sha2" }
-// CHECK: attributes #[[ATTR4:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+mte,+neon,+sha2" }
-// CHECK: attributes #[[ATTR5:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon" }
-// CHECK: attributes #[[ATTR6:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+crc,+dotprod,+fp-armv8,+neon" }
-// CHECK: attributes #[[ATTR7:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon,+rand" }
-// CHECK: attributes #[[ATTR8:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+predres,+rcpc" }
-// CHECK: attributes #[[ATTR9:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve,+sve2,+sve2-aes,+wfxt" }
-// CHECK: attributes #[[ATTR10:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
-// CHECK: attributes #[[ATTR11:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+complxnum,+fp-armv8,+fullfp16,+neon,+sve,+sve2,+sve2-bitperm" }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon,+sha2" }
+// CHECK: attributes #[[ATTR3:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+mte,+neon,+sha2" }
+// CHECK: attributes #[[ATTR4:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon" }
+// CHECK: attributes #[[ATTR5:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+crc,+dotprod,+fp-armv8,+neon" }
+// CHECK: attributes #[[ATTR6:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #[[ATTR7:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
+// CHECK: attributes #[[ATTR8:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+complxnum,+fp-armv8,+fullfp16,+neon,+sve,+sve2,+sve2-bitperm" }
+// CHECK: attributes #[[ATTR9:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon,+rand" }
+// CHECK: attributes #[[ATTR10:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+predres,+rcpc" }
+// CHECK: attributes #[[ATTR11:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve,+sve2,+sve2-aes,+wfxt" }
// CHECK: attributes #[[ATTR12:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bti" }
// CHECK: attributes #[[ATTR13:[0-9]+]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sb,+sve" }
//.
diff --git a/clang/test/CodeGen/attr-target-version.c b/clang/test/CodeGen/attr-target-version.c
index 25129605e76c..dd4cbbf5a898 100644
--- a/clang/test/CodeGen/attr-target-version.c
+++ b/clang/test/CodeGen/attr-target-version.c
@@ -109,21 +109,47 @@ int unused_with_implicit_default_def(void) { return 1; }
int unused_with_implicit_forward_default_def(void) { return 0; }
__attribute__((target_version("lse"))) int unused_with_implicit_forward_default_def(void) { return 1; }
-// This should generate a normal function.
+// This should generate a target version despite the default not being declared.
__attribute__((target_version("rdm"))) int unused_without_default(void) { return 0; }
+// These shouldn't generate anything.
+int unused_version_declarations(void);
+__attribute__((target_version("jscvt"))) int unused_version_declarations(void);
+__attribute__((target_version("rdma"))) int unused_version_declarations(void);
+
+// These should generate the default (mangled) version and the resolver.
+int default_def_with_version_decls(void) { return 0; }
+__attribute__((target_version("jscvt"))) int default_def_with_version_decls(void);
+__attribute__((target_version("rdma"))) int default_def_with_version_decls(void);
+
+// The following is guarded because in NOFMV we get errors for calling undeclared functions.
+#ifdef __HAVE_FUNCTION_MULTI_VERSIONING
+// This should generate a default declaration, two target versions and the resolver.
+__attribute__((target_version("jscvt"))) int used_def_without_default_decl(void) { return 1; }
+__attribute__((target_version("rdma"))) int used_def_without_default_decl(void) { return 2; }
+
+// This should generate a default declaration and the resolver.
+__attribute__((target_version("jscvt"))) int used_decl_without_default_decl(void);
+__attribute__((target_version("rdma"))) int used_decl_without_default_decl(void);
+
+int caller(void) { return used_def_without_default_decl() + used_decl_without_default_decl(); }
+#endif
+
//.
// CHECK: @__aarch64_cpu_features = external dso_local global { i64 }
// CHECK: @fmv.ifunc = weak_odr alias i32 (), ptr @fmv
// CHECK: @fmv_one.ifunc = weak_odr alias i32 (), ptr @fmv_one
// CHECK: @fmv_two.ifunc = weak_odr alias i32 (), ptr @fmv_two
// CHECK: @fmv_e.ifunc = weak_odr alias i32 (), ptr @fmv_e
+// CHECK: @fmv_d.ifunc = internal alias i32 (), ptr @fmv_d
// CHECK: @fmv_c.ifunc = weak_odr alias void (), ptr @fmv_c
// CHECK: @fmv_inline.ifunc = weak_odr alias i32 (), ptr @fmv_inline
-// CHECK: @fmv_d.ifunc = internal alias i32 (), ptr @fmv_d
// CHECK: @unused_with_default_def.ifunc = weak_odr alias i32 (), ptr @unused_with_default_def
// CHECK: @unused_with_implicit_default_def.ifunc = weak_odr alias i32 (), ptr @unused_with_implicit_default_def
// CHECK: @unused_with_implicit_forward_default_def.ifunc = weak_odr alias i32 (), ptr @unused_with_implicit_forward_default_def
+// CHECK: @default_def_with_version_decls.ifunc = weak_odr alias i32 (), ptr @default_def_with_version_decls
+// CHECK: @used_def_without_default_decl.ifunc = weak_odr alias i32 (), ptr @used_def_without_default_decl
+// CHECK: @used_decl_without_default_decl.ifunc = weak_odr alias i32 (), ptr @used_decl_without_default_decl
// CHECK: @fmv = weak_odr ifunc i32 (), ptr @fmv.resolver
// CHECK: @fmv_one = weak_odr ifunc i32 (), ptr @fmv_one.resolver
// CHECK: @fmv_two = weak_odr ifunc i32 (), ptr @fmv_two.resolver
@@ -131,97 +157,121 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
// CHECK: @fmv_e = weak_odr ifunc i32 (), ptr @fmv_e.resolver
// CHECK: @fmv_d = internal ifunc i32 (), ptr @fmv_d.resolver
// CHECK: @fmv_c = weak_odr ifunc void (), ptr @fmv_c.resolver
+// CHECK: @used_def_without_default_decl = weak_odr ifunc i32 (), ptr @used_def_without_default_decl.resolver
+// CHECK: @used_decl_without_default_decl = weak_odr ifunc i32 (), ptr @used_decl_without_default_decl.resolver
// CHECK: @unused_with_default_def = weak_odr ifunc i32 (), ptr @unused_with_default_def.resolver
// CHECK: @unused_with_implicit_default_def = weak_odr ifunc i32 (), ptr @unused_with_implicit_default_def.resolver
// CHECK: @unused_with_implicit_forward_default_def = weak_odr ifunc i32 (), ptr @unused_with_implicit_forward_default_def.resolver
+// CHECK: @default_def_with_version_decls = weak_odr ifunc i32 (), ptr @default_def_with_version_decls.resolver
//.
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv._Mflagm2Msme-i16i64
+// CHECK-LABEL: define {{[^@]+}}@fmv._MflagmMfp16fmlMrng
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@fmv._Mflagm2Msme-i16i64
+// CHECK-SAME: () #[[ATTR1:[0-9]+]] {
+// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._MlseMsha2
-// CHECK-SAME: () #[[ATTR1:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._MdotprodMls64_accdata
-// CHECK-SAME: () #[[ATTR2:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 4
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._Mfp16fmlMmemtag
-// CHECK-SAME: () #[[ATTR3:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 5
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._MaesMfp
-// CHECK-SAME: () #[[ATTR4:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 6
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._McrcMls64_v
-// CHECK-SAME: () #[[ATTR5:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 7
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._Mbti
-// CHECK-SAME: () #[[ATTR6:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 8
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv._Msme2
-// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 9
//
//
// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@fmv_one._Mls64Msimd
+// CHECK-SAME: () #[[ATTR5]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_one._Mdpb
-// CHECK-SAME: () #[[ATTR8:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR10:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@fmv_two._Mfp
+// CHECK-SAME: () #[[ATTR5]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_two._Msimd
-// CHECK-SAME: () #[[ATTR4]] {
+// CHECK-SAME: () #[[ATTR5]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_two._Mdgh
-// CHECK-SAME: () #[[ATTR9:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR11:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_two._Mfp16Msimd
-// CHECK-SAME: () #[[ATTR10:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR12:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 4
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@foo
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call i32 @fmv()
// CHECK-NEXT: [[CALL1:%.*]] = call i32 @fmv_one()
@@ -371,35 +421,49 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_e.default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 20
//
//
// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@fmv_d._Msb
+// CHECK-SAME: () #[[ATTR13:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 0
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@fmv_d.default
+// CHECK-SAME: () #[[ATTR11]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 111
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_c._Mssbs
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret void
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_c.default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret void
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@goo
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call i32 @fmv_inline()
// CHECK-NEXT: [[CALL1:%.*]] = call i32 @fmv_e()
@@ -587,7 +651,7 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@recur
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @reca()
// CHECK-NEXT: ret void
@@ -595,7 +659,7 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@main
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4
@@ -606,7 +670,7 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@hoo
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FP1:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[FP2:%.*]] = alloca ptr, align 8
@@ -623,228 +687,268 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@unused_with_forward_default_decl._Mmops
-// CHECK-SAME: () #[[ATTR12:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR14:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_extern_forward_default_decl._Mdotprod
-// CHECK-SAME: () #[[ATTR13:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR15:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_default_def.default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_with_default_decl._Maes
+// CHECK-SAME: () #[[ATTR5]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
+// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_default_def.default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_with_default_def._Msve
+// CHECK-SAME: () #[[ATTR16:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 0
+//
+//
+// CHECK: Function Attrs: noinline nounwind optnone
+// CHECK-LABEL: define {{[^@]+}}@unused_with_default_def.default
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_forward_default_def.default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_default_def._Mfp16
+// CHECK-SAME: () #[[ATTR12]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_forward_default_def._Mlse
-// CHECK-SAME: () #[[ATTR14:[0-9]+]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_default_def.default
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv._MflagmMfp16fmlMrng
-// CHECK-SAME: () #[[ATTR15:[0-9]+]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_forward_default_def.default
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
+// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_one._Mls64Msimd
-// CHECK-SAME: () #[[ATTR4]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_forward_default_def._Mlse
+// CHECK-SAME: () #[[ATTR17:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_two._Mfp
-// CHECK-SAME: () #[[ATTR4]] {
+// CHECK-LABEL: define {{[^@]+}}@unused_without_default._Mrdm
+// CHECK-SAME: () #[[ATTR18:[0-9]+]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
+// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_default_decl._Maes
-// CHECK-SAME: () #[[ATTR4]] {
+// CHECK-LABEL: define {{[^@]+}}@default_def_with_version_decls.default
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 0
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_default_def._Msve
-// CHECK-SAME: () #[[ATTR16:[0-9]+]] {
+// CHECK-LABEL: define {{[^@]+}}@used_def_without_default_decl._Mjscvt
+// CHECK-SAME: () #[[ATTR21:[0-9]+]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 0
+// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_with_implicit_default_def._Mfp16
-// CHECK-SAME: () #[[ATTR10]] {
+// CHECK-LABEL: define {{[^@]+}}@used_def_without_default_decl._Mrdm
+// CHECK-SAME: () #[[ATTR18]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 0
+// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@unused_without_default
-// CHECK-SAME: () #[[ATTR17:[0-9]+]] {
+// CHECK-LABEL: define {{[^@]+}}@caller
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 0
+// CHECK-NEXT: [[CALL:%.*]] = call i32 @used_def_without_default_decl()
+// CHECK-NEXT: [[CALL1:%.*]] = call i32 @used_decl_without_default_decl()
+// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], [[CALL1]]
+// CHECK-NEXT: ret i32 [[ADD]]
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@used_def_without_default_decl.resolver() comdat {
+// CHECK-NEXT: resolver_entry:
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1048576
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1048576
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
+// CHECK: resolver_return:
+// CHECK-NEXT: ret ptr @used_def_without_default_decl._Mjscvt
+// CHECK: resolver_else:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 64
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 64
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
+// CHECK: resolver_return1:
+// CHECK-NEXT: ret ptr @used_def_without_default_decl._Mrdm
+// CHECK: resolver_else2:
+// CHECK-NEXT: ret ptr @used_def_without_default_decl.default
+//
+//
+// CHECK-LABEL: define {{[^@]+}}@used_decl_without_default_decl.resolver() comdat {
+// CHECK-NEXT: resolver_entry:
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1048576
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1048576
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
+// CHECK: resolver_return:
+// CHECK-NEXT: ret ptr @used_decl_without_default_decl._Mjscvt
+// CHECK: resolver_else:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 64
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 64
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
+// CHECK: resolver_return1:
+// CHECK-NEXT: ret ptr @used_decl_without_default_decl._Mrdm
+// CHECK: resolver_else2:
+// CHECK-NEXT: ret ptr @used_decl_without_default_decl.default
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mf64mmMpmullMsha1
-// CHECK-SAME: () #[[ATTR18:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR22:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MfcmaMfp16MrdmMsme
-// CHECK-SAME: () #[[ATTR19:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR23:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mf32mmMi8mmMsha3
-// CHECK-SAME: () #[[ATTR20:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR24:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 12
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MditMsve-ebf16
-// CHECK-SAME: () #[[ATTR21:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR25:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 8
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MdpbMrcpc2
-// CHECK-SAME: () #[[ATTR22:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR26:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 6
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mdpb2Mjscvt
-// CHECK-SAME: () #[[ATTR23:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR27:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 7
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MfrinttsMrcpc
-// CHECK-SAME: () #[[ATTR24:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR28:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MsveMsve-bf16
-// CHECK-SAME: () #[[ATTR25:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR29:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 4
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msve2-aesMsve2-sha3
-// CHECK-SAME: () #[[ATTR26:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR30:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 5
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Msve2Msve2-bitpermMsve2-pmull128
-// CHECK-SAME: () #[[ATTR27:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR31:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 9
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mmemtag2Msve2-sm4
-// CHECK-SAME: () #[[ATTR28:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR32:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 10
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mmemtag3MmopsMrcpc3
-// CHECK-SAME: () #[[ATTR29:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR33:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 11
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MaesMdotprod
-// CHECK-SAME: () #[[ATTR13]] {
+// CHECK-SAME: () #[[ATTR15]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 13
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._Mfp16fmlMsimd
-// CHECK-SAME: () #[[ATTR3]] {
+// CHECK-SAME: () #[[ATTR4]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 14
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MfpMsm4
-// CHECK-SAME: () #[[ATTR30:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR34:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 15
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline._MlseMrdm
-// CHECK-SAME: () #[[ATTR31:[0-9]+]] {
+// CHECK-SAME: () #[[ATTR35:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 16
//
//
// CHECK: Function Attrs: noinline nounwind optnone
// CHECK-LABEL: define {{[^@]+}}@fmv_inline.default
-// CHECK-SAME: () #[[ATTR9]] {
+// CHECK-SAME: () #[[ATTR11]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 3
//
//
-// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_d._Msb
-// CHECK-SAME: () #[[ATTR32:[0-9]+]] {
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 0
-//
-//
-// CHECK: Function Attrs: noinline nounwind optnone
-// CHECK-LABEL: define {{[^@]+}}@fmv_d.default
-// CHECK-SAME: () #[[ATTR9]] {
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 1
-//
-//
// CHECK-LABEL: define {{[^@]+}}@unused_with_default_def.resolver() comdat {
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -887,6 +991,28 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
// CHECK-NEXT: ret ptr @unused_with_implicit_forward_default_def.default
//
//
+// CHECK-LABEL: define {{[^@]+}}@default_def_with_version_decls.resolver() comdat {
+// CHECK-NEXT: resolver_entry:
+// CHECK-NEXT: call void @__init_cpu_features_resolver()
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1048576
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 1048576
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[RESOLVER_RETURN:%.*]], label [[RESOLVER_ELSE:%.*]]
+// CHECK: resolver_return:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls._Mjscvt
+// CHECK: resolver_else:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 64
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 64
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[RESOLVER_RETURN1:%.*]], label [[RESOLVER_ELSE2:%.*]]
+// CHECK: resolver_return1:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls._Mrdm
+// CHECK: resolver_else2:
+// CHECK-NEXT: ret ptr @default_def_with_version_decls.default
+//
+//
// CHECK-NOFMV: Function Attrs: noinline nounwind optnone
// CHECK-NOFMV-LABEL: define {{[^@]+}}@foo
// CHECK-NOFMV-SAME: () #[[ATTR0:[0-9]+]] {
@@ -995,40 +1121,50 @@ __attribute__((target_version("rdm"))) int unused_without_default(void) { return
// CHECK-NOFMV-NEXT: entry:
// CHECK-NOFMV-NEXT: ret i32 0
//
+//
+// CHECK-NOFMV: Function Attrs: noinline nounwind optnone
+// CHECK-NOFMV-LABEL: define {{[^@]+}}@default_def_with_version_decls
+// CHECK-NOFMV-SAME: () #[[ATTR0]] {
+// CHECK-NOFMV-NEXT: entry:
+// CHECK-NOFMV-NEXT: ret i32 0
+//
//.
-// CHECK: attributes #[[ATTR0]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+altnzcv,+bf16,+flagm,+sme,+sme-i16i64,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR1]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,+neon,+sha2,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR2]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+ls64,+neon,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR3]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp16fml,+fullfp16,+neon,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR4]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR5]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+crc,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR6]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bti,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR7]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme2,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR8]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccpp,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR9]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR10]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR11:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR12]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+mops,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR13]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+neon,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR14]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR15]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+flagm,+fp16fml,+fullfp16,+neon,+rand,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR0]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+flagm,+fp16fml,+fullfp16,+neon,+rand,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR1]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+altnzcv,+bf16,+flagm,+sme,+sme-i16i64,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR2]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,+neon,+sha2,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR3]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+ls64,+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR4]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp16fml,+fullfp16,+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR5]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR6]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+crc,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR7]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bti,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR8]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme2,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR9:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR10]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccpp,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR11]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR12]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR13]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+sb,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR14]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+mops,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR15]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+neon,-fp-armv8,-v9.5a" }
// CHECK: attributes #[[ATTR16]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR17]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,+rdm,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR18]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+aes,+f64mm,+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR19]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+complxnum,+fullfp16,+neon,+rdm,+sme,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR20]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+f32mm,+fullfp16,+i8mm,+neon,+sha2,+sha3,+sve,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR21]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+dit,+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR22]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccpp,+rcpc,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR23]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccdp,+ccpp,+jsconv,+neon,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR24]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fptoint,+rcpc,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR25]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR26]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,+sve,+sve2,+sve2-aes,+sve2-sha3,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR27]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,+sve,+sve2,+sve2-aes,+sve2-bitperm,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR28]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+mte,+neon,+sve,+sve2,+sve2-sm4,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR29]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+mops,+mte,+rcpc,+rcpc3,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR30]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,+sm4,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR31]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,+neon,+rdm,-fp-armv8,-v9.5a" }
-// CHECK: attributes #[[ATTR32]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+sb,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR17]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR18]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,+rdm,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR19:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+jsconv,+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR20:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,+rdm,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR21]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+jsconv,+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR22]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+aes,+f64mm,+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR23]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+complxnum,+fullfp16,+neon,+rdm,+sme,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR24]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+f32mm,+fullfp16,+i8mm,+neon,+sha2,+sha3,+sve,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR25]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+dit,+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR26]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccpp,+rcpc,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR27]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ccdp,+ccpp,+jsconv,+neon,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR28]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fptoint,+rcpc,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR29]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+fullfp16,+neon,+sve,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR30]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,+sve,+sve2,+sve2-aes,+sve2-sha3,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR31]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+neon,+sve,+sve2,+sve2-aes,+sve2-bitperm,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR32]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fullfp16,+mte,+neon,+sve,+sve2,+sve2-sm4,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR33]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+mops,+mte,+rcpc,+rcpc3,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR34]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon,+sm4,-fp-armv8,-v9.5a" }
+// CHECK: attributes #[[ATTR35]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse,+neon,+rdm,-fp-armv8,-v9.5a" }
//.
// CHECK-NOFMV: attributes #[[ATTR0]] = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fmv" }
// CHECK-NOFMV: attributes #[[ATTR1:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="-fmv" }
diff --git a/clang/test/CodeGen/bitfield-2.c b/clang/test/CodeGen/bitfield-2.c
index 3e0b30c7a17d..8688ba6390dd 100644
--- a/clang/test/CodeGen/bitfield-2.c
+++ b/clang/test/CodeGen/bitfield-2.c
@@ -271,11 +271,11 @@ _Bool test_6(void) {
// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s7
// CHECK-RECORD: Layout: <CGRecordLayout
-// CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
+// CHECK-RECORD: LLVMType:%struct.s7 = type <{ i32, i32, i32, i64, [12 x i8] }>
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:64 StorageOffset:12
+// CHECK-RECORD: <CGBitFieldInfo Offset:32 Size:29 IsSigned:1 StorageSize:64 StorageOffset:12
struct __attribute__((aligned(16))) s7 {
int a, b, c;
diff --git a/clang/test/CodeGen/bitfield-access-pad.c b/clang/test/CodeGen/bitfield-access-pad.c
new file mode 100644
index 000000000000..edda7b7798d0
--- /dev/null
+++ b/clang/test/CodeGen/bitfield-access-pad.c
@@ -0,0 +1,396 @@
+// Tests for bitfield access with zero-length bitfield padding
+
+// Configs that have cheap unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// Configs that have expensive unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// Big endian
+// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-T %s
+
+// And now a few with -fno-bitfield-type-align. Precisely how this behaves is
+// ABI-dependent.
+// Cheap unaligned
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-ARM64-T %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-NT %s
+
+// Expensive unaligned
+// RUN: %clang_cc1 -triple=hexagon-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
+// RUN: %clang_cc1 -triple=mips-elf -fno-bitfield-type-align %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-STRICT-NT %s
+
+
+struct P1 {
+ unsigned a :8;
+ char :0;
+ unsigned b :8;
+} p1;
+// CHECK-LABEL: LLVMType:%struct.P1 =
+// LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, i8 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+// This will often be align(1) with -fno-bitfield-type-align
+struct P2 {
+ unsigned a :8;
+ char :0;
+ short :0;
+ unsigned b :8;
+} p2;
+// CHECK-LABEL: LLVMType:%struct.P2 =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P3 {
+ unsigned a :8;
+ char :0;
+ short :0;
+ unsigned :0;
+ unsigned b :8;
+} p3;
+// CHECK-LABEL: LLVMType:%struct.P3 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P4 {
+ unsigned a :8;
+ short :0;
+ unsigned :0;
+ unsigned b :8;
+} p4;
+// CHECK-LABEL: LLVMType:%struct.P4 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P5 {
+ unsigned a :8;
+ unsigned :0;
+ unsigned b :8;
+} p5;
+// CHECK-LABEL: LLVMType:%struct.P5 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P6 {
+ unsigned a :8;
+ unsigned :0;
+ short :0;
+ char :0;
+ unsigned b :8;
+} p6;
+// CHECK-LABEL: LLVMType:%struct.P6 =
+// LAYOUT-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P7 {
+ unsigned a : 8;
+ short : 0;
+ unsigned char b : 8;
+} p7;
+// CHECK-LABEL: LLVMType:%struct.P7 =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+// And with forced alignment for !useZeroLengthBitfieldAlignment machines (eg
+// hexagon)
+struct __attribute__ ((aligned (2))) P7_align {
+ unsigned a : 8;
+ short : 0;
+ unsigned char b : 8;
+} p7_align;
+// CHECK-LABEL: LLVMType:%struct.P7_align =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i8, i8 }
+// LAYOUT-STRICT-NT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P8 {
+ unsigned a : 7;
+ short : 0;
+ unsigned char b : 7;
+} p8;
+// CHECK-LABEL: LLVMType:%struct.P8 =
+// LAYOUT-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-ARM64-T-SAME: type { i8, i8, i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct P9 {
+ unsigned a : 7;
+ char : 0;
+ unsigned short b : 7;
+} p9;
+// CHECK-LABEL: LLVMType:%struct.P9 =
+// LAYOUT-T-SAME: type { i8, i8, [2 x i8] }
+// LAYOUT-ARM64-T-SAME: type { i8, i8 }
+// LAYOUT-NT-SAME: type { i16 }
+// LAYOUT-STRICT-NT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i8, [3 x i8], i8, [3 x i8] }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:16 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:1
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct __attribute__((aligned(4))) P10 {
+ unsigned a : 7;
+ unsigned short b : 7;
+ unsigned c : 7;
+ char : 0;
+} p10;
+// CHECK-LABEL: LLVMType:%struct.P10 =
+// LAYOUT-T-SAME: type { i32 }
+// LAYOUT-ARM64-T-SAME: type { i32 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type { i32 }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct __attribute__((aligned(4))) P11 {
+ unsigned a : 7;
+ unsigned short b : 7;
+ unsigned c : 10;
+ char : 0; // at a char boundary
+} p11;
+// CHECK-LABEL: LLVMType:%struct.P11 =
+// LAYOUT-T-SAME: type { i32 }
+// LAYOUT-ARM64-T-SAME: type { i32 }
+// LAYOUT-NT-SAME: type { i32 }
+// LAYOUT-STRICT-NT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type { i32 }
+// CHECK: BitFields:[
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-ARM64-T-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:10 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGen/bitfield-access-unit.c b/clang/test/CodeGen/bitfield-access-unit.c
new file mode 100644
index 000000000000..1aed2e7202fc
--- /dev/null
+++ b/clang/test/CodeGen/bitfield-access-unit.c
@@ -0,0 +1,302 @@
+// Check arches with 32bit ints. (Not you, AVR & MSP430)
+
+// Configs that have cheap unaligned access
+
+// 64-bit Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64-DWN %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+// RUN: %clang_cc1 -triple=loongarch64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+// RUN: %clang_cc1 -triple=ve-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64 %s
+// RUN: %clang_cc1 -triple=wasm64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64 %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+
+// 64-bit Big Endian
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+// RUN: %clang_cc1 -triple=systemz %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX64,CHECK-64,LAYOUT-64,LAYOUT-64-FLEX %s
+
+// 32-bit Little Endian
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32,LAYOUT-DWN32-FLEX %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+// RUN: %clang_cc1 -triple=powerpcle-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+// RUN: %clang_cc1 -triple=wasm32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+
+// 32-bit Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-FLEX,LAYOUT-FLEX32 %s
+
+// Configs that have expensive unaligned access
+// 64-bit Little Endian
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -target-feature +strict-align -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+// RUN: %clang_cc1 -triple=amdgcn-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+// RUN: %clang_cc1 -triple=loongarch64-elf -target-feature -ual %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+// RUN: %clang_cc1 -triple=riscv64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+
+// 64-big Big endian
+// RUN: %clang_cc1 -triple=mips64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT,CHECK-64,LAYOUT-64,LAYOUT-64-STRICT %s
+
+// 32-bit Little Endian
+// RUN: %clang_cc1 -triple=arc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -target-feature +strict-align -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32,LAYOUT-DWN32-STRICT %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -target-feature +strict-align -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=spir-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=xcore-none-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+
+// 32-bit Big Endian
+// RUN: %clang_cc1 -triple=lanai-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT,LAYOUT-STRICT %s
+
+// Both le64-elf and m68-elf are strict alignment ISAs with 4-byte aligned
+// 64-bit or 2-byte aligned 32-bit integer types. This more compex to describe here.
+
+// If unaligned access is expensive don't stick these together.
+struct A {
+ char a : 7;
+ char b : 7;
+} a;
+// CHECK-LABEL: LLVMType:%struct.A =
+// LAYOUT-FLEX-SAME: type { i16 }
+// LAYOUT-STRICT-SAME: type { i8, i8 }
+// LAYOUT-DWN32-SAME: type { i16 }
+// CHECK: BitFields:[
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:1
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// But do here.
+struct __attribute__((aligned(2))) B {
+ char a : 7;
+ char b : 7;
+} b;
+// CHECK-LABEL: LLVMType:%struct.B =
+// LAYOUT-SAME: type { i16 }
+// LAYOUT-DWN32-SAME: type { i16 }
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:0
+// CHECK-NEXT: ]>
+
+// Not here -- poor alignment within struct
+struct C {
+ int f1;
+ char f2;
+ char a : 7;
+ char b : 7;
+} c;
+// CHECK-LABEL: LLVMType:%struct.C =
+// LAYOUT-FLEX-SAME: type <{ i32, i8, i16, i8 }>
+// LAYOUT-STRICT-SAME: type { i32, i8, i8, i8 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8, i16, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:5
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:16 StorageOffset:5
+// CHECK-NEXT: ]>
+
+// Not here, we're packed
+struct __attribute__((packed)) D {
+ int f1;
+ int a : 8;
+ int b : 8;
+ char _;
+} d;
+// CHECK-LABEL: LLVMType:%struct.D =
+// LAYOUT-FLEX-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-STRICT-SAME: type <{ i32, i8, i8, i8 }>
+// LAYOUT-DWN32-FLEX-SAME: type <{ i32, i16, i8 }>
+// LAYOUT-DWN32-STRICT-SAME: type <{ i32, i8, i8, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:5
+
+// LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:16 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:4
+// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:5
+// CHECK-NEXT: ]>
+
+struct E {
+ char a : 7;
+ short b : 13;
+ unsigned c : 12;
+} e;
+// CHECK-LABEL: LLVMType:%struct.E =
+// LAYOUT-FLEX64-SAME: type { i64 }
+// LAYOUT-FLEX32-SAME: type { i32, i16 }
+// LAYOUT-STRICT-SAME: type { i32, i16 }
+// LAYOUT-DWN32-SAME: type { i32 }
+// CHECK: BitFields:[
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:64 StorageOffset:0
+
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct F {
+ char a : 7;
+ short b : 13;
+ unsigned c : 12;
+ signed char d : 7;
+} f;
+// CHECK-LABEL: LLVMType:%struct.F =
+// LAYOUT-FLEX64-SAME: type { i64 }
+// LAYOUT-FLEX32-SAME: type { i32, i32 }
+// LAYOUT-STRICT-SAME: type { i32, i32 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-FLEX64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:4
+// LAYOUT-FLEX32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:4
+
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:4
+// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+struct G {
+ char a : 7;
+ short b : 13;
+ unsigned c : 12;
+ signed char d : 7;
+ signed char e;
+} g;
+// CHECK-LABEL: LLVMType:%struct.G =
+// LAYOUT-SAME: type { i32, i16, i8, i8 }
+// LAYOUT-DWN32-SAME: type <{ i32, i8, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:16 StorageOffset:4
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:6
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:13 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:12 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
+// CHECK-NEXT: ]>
+
+#if _LP64
+struct A64 {
+ int a : 16;
+ short b : 8;
+ long c : 16;
+ int d : 16;
+ signed char e : 8;
+} a64;
+// CHECK-64-LABEL: LLVMType:%struct.A64 =
+// LAYOUT-64-SAME: type { i64 }
+// LAYOUT-64-DWN-SAME: type { i64 }
+// CHECK-64: BitFields:[
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// CHECK-64-NEXT: ]>
+
+struct B64 {
+ int a : 16;
+ short b : 8;
+ long c : 16;
+ int d : 16;
+ signed char e; // not a bitfield
+} b64;
+// CHECK-64-LABEL: LLVMType:%struct.B64 =
+// LAYOUT-64-FLEX-SAME: type <{ i16, i8, i32, i8 }>
+// LAYOUT-64-STRICT-SAME: type <{ i16, i8, i16, i16, i8 }>
+// LAYOUT-64-DWN-SAME: type <{ i16, i8, i32, i8 }>
+// CHECK-64: BitFields:[
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// LAYOUT-64-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:3
+// LAYOUT-64-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:5
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:3
+// CHECK-64-NEXT: ]>
+
+struct C64 {
+ int a : 15;
+ short b : 8;
+ long c : 16;
+ int d : 15;
+ signed char e : 7;
+} c64;
+// CHECK-64-LABEL: LLVMType:%struct.C64 =
+// LAYOUT-64-SAME: type { i64 }
+// LAYOUT-64-DWN-SAME: type { i64 }
+// CHECK-64: BitFields:[
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:1 StorageSize:64 StorageOffset:0
+// LAYOUT-64-DWN-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:64 StorageOffset:0
+// CHECK-64-NEXT: ]>
+
+#endif
diff --git a/clang/test/CodeGen/builtins.c b/clang/test/CodeGen/builtins.c
index 4f9641d357b7..407e0857d223 100644
--- a/clang/test/CodeGen/builtins.c
+++ b/clang/test/CodeGen/builtins.c
@@ -983,4 +983,208 @@ void test_builtin_popcountg(unsigned char uc, unsigned short us,
// CHECK-NEXT: ret void
}
+// CHECK-LABEL: define{{.*}} void @test_builtin_clzg
+void test_builtin_clzg(unsigned char uc, unsigned short us, unsigned int ui,
+ unsigned long ul, unsigned long long ull,
+ unsigned __int128 ui128, unsigned _BitInt(128) ubi128,
+ signed char sc, short s, int i) {
+ volatile int lz;
+ lz = __builtin_clzg(uc);
+ // CHECK: %1 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %2 = call i8 @llvm.ctlz.i8(i8 %1, i1 true)
+ // CHECK-NEXT: %cast = sext i8 %2 to i32
+ // CHECK-NEXT: store volatile i32 %cast, ptr %lz, align 4
+ lz = __builtin_clzg(us);
+ // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %4 = call i16 @llvm.ctlz.i16(i16 %3, i1 true)
+ // CHECK-NEXT: %cast1 = sext i16 %4 to i32
+ // CHECK-NEXT: store volatile i32 %cast1, ptr %lz, align 4
+ lz = __builtin_clzg(ui);
+ // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %6 = call i32 @llvm.ctlz.i32(i32 %5, i1 true)
+ // CHECK-NEXT: store volatile i32 %6, ptr %lz, align 4
+ lz = __builtin_clzg(ul);
+ // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %8 = call i64 @llvm.ctlz.i64(i64 %7, i1 true)
+ // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %lz, align 4
+ lz = __builtin_clzg(ull);
+ // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %10 = call i64 @llvm.ctlz.i64(i64 %9, i1 true)
+ // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %lz, align 4
+ lz = __builtin_clzg(ui128);
+ // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %12 = call i128 @llvm.ctlz.i128(i128 %11, i1 true)
+ // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %lz, align 4
+ lz = __builtin_clzg(ubi128);
+ // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %14 = call i128 @llvm.ctlz.i128(i128 %13, i1 true)
+ // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %lz, align 4
+ lz = __builtin_clzg(uc, sc);
+ // CHECK-NEXT: %15 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %16 = call i8 @llvm.ctlz.i8(i8 %15, i1 true)
+ // CHECK-NEXT: %cast6 = sext i8 %16 to i32
+ // CHECK-NEXT: %iszero = icmp eq i8 %15, 0
+ // CHECK-NEXT: %17 = load i8, ptr %sc.addr, align 1
+ // CHECK-NEXT: %conv = sext i8 %17 to i32
+ // CHECK-NEXT: %clzg = select i1 %iszero, i32 %conv, i32 %cast6
+ // CHECK-NEXT: store volatile i32 %clzg, ptr %lz, align 4
+ lz = __builtin_clzg(us, uc);
+ // CHECK-NEXT: %18 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %19 = call i16 @llvm.ctlz.i16(i16 %18, i1 true)
+ // CHECK-NEXT: %cast7 = sext i16 %19 to i32
+ // CHECK-NEXT: %iszero8 = icmp eq i16 %18, 0
+ // CHECK-NEXT: %20 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %conv9 = zext i8 %20 to i32
+ // CHECK-NEXT: %clzg10 = select i1 %iszero8, i32 %conv9, i32 %cast7
+ // CHECK-NEXT: store volatile i32 %clzg10, ptr %lz, align 4
+ lz = __builtin_clzg(ui, s);
+ // CHECK-NEXT: %21 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %22 = call i32 @llvm.ctlz.i32(i32 %21, i1 true)
+ // CHECK-NEXT: %iszero11 = icmp eq i32 %21, 0
+ // CHECK-NEXT: %23 = load i16, ptr %s.addr, align 2
+ // CHECK-NEXT: %conv12 = sext i16 %23 to i32
+ // CHECK-NEXT: %clzg13 = select i1 %iszero11, i32 %conv12, i32 %22
+ // CHECK-NEXT: store volatile i32 %clzg13, ptr %lz, align 4
+ lz = __builtin_clzg(ul, us);
+ // CHECK-NEXT: %24 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %25 = call i64 @llvm.ctlz.i64(i64 %24, i1 true)
+ // CHECK-NEXT: %cast14 = trunc i64 %25 to i32
+ // CHECK-NEXT: %iszero15 = icmp eq i64 %24, 0
+ // CHECK-NEXT: %26 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %conv16 = zext i16 %26 to i32
+ // CHECK-NEXT: %clzg17 = select i1 %iszero15, i32 %conv16, i32 %cast14
+ // CHECK-NEXT: store volatile i32 %clzg17, ptr %lz, align 4
+ lz = __builtin_clzg(ull, i);
+ // CHECK-NEXT: %27 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %28 = call i64 @llvm.ctlz.i64(i64 %27, i1 true)
+ // CHECK-NEXT: %cast18 = trunc i64 %28 to i32
+ // CHECK-NEXT: %iszero19 = icmp eq i64 %27, 0
+ // CHECK-NEXT: %29 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg20 = select i1 %iszero19, i32 %29, i32 %cast18
+ // CHECK-NEXT: store volatile i32 %clzg20, ptr %lz, align 4
+ lz = __builtin_clzg(ui128, i);
+ // CHECK-NEXT: %30 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %31 = call i128 @llvm.ctlz.i128(i128 %30, i1 true)
+ // CHECK-NEXT: %cast21 = trunc i128 %31 to i32
+ // CHECK-NEXT: %iszero22 = icmp eq i128 %30, 0
+ // CHECK-NEXT: %32 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg23 = select i1 %iszero22, i32 %32, i32 %cast21
+ // CHECK-NEXT: store volatile i32 %clzg23, ptr %lz, align 4
+ lz = __builtin_clzg(ubi128, i);
+ // CHECK-NEXT: %33 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %34 = call i128 @llvm.ctlz.i128(i128 %33, i1 true)
+ // CHECK-NEXT: %cast24 = trunc i128 %34 to i32
+ // CHECK-NEXT: %iszero25 = icmp eq i128 %33, 0
+ // CHECK-NEXT: %35 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %clzg26 = select i1 %iszero25, i32 %35, i32 %cast24
+ // CHECK-NEXT: store volatile i32 %clzg26, ptr %lz, align 4
+ // CHECK-NEXT: ret void
+}
+
+// CHECK-LABEL: define{{.*}} void @test_builtin_ctzg
+void test_builtin_ctzg(unsigned char uc, unsigned short us, unsigned int ui,
+ unsigned long ul, unsigned long long ull,
+ unsigned __int128 ui128, unsigned _BitInt(128) ubi128,
+ signed char sc, short s, int i) {
+ volatile int tz;
+ tz = __builtin_ctzg(uc);
+ // CHECK: %1 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %2 = call i8 @llvm.cttz.i8(i8 %1, i1 true)
+ // CHECK-NEXT: %cast = sext i8 %2 to i32
+ // CHECK-NEXT: store volatile i32 %cast, ptr %tz, align 4
+ tz = __builtin_ctzg(us);
+ // CHECK-NEXT: %3 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %4 = call i16 @llvm.cttz.i16(i16 %3, i1 true)
+ // CHECK-NEXT: %cast1 = sext i16 %4 to i32
+ // CHECK-NEXT: store volatile i32 %cast1, ptr %tz, align 4
+ tz = __builtin_ctzg(ui);
+ // CHECK-NEXT: %5 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %6 = call i32 @llvm.cttz.i32(i32 %5, i1 true)
+ // CHECK-NEXT: store volatile i32 %6, ptr %tz, align 4
+ tz = __builtin_ctzg(ul);
+ // CHECK-NEXT: %7 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %8 = call i64 @llvm.cttz.i64(i64 %7, i1 true)
+ // CHECK-NEXT: %cast2 = trunc i64 %8 to i32
+ // CHECK-NEXT: store volatile i32 %cast2, ptr %tz, align 4
+ tz = __builtin_ctzg(ull);
+ // CHECK-NEXT: %9 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %10 = call i64 @llvm.cttz.i64(i64 %9, i1 true)
+ // CHECK-NEXT: %cast3 = trunc i64 %10 to i32
+ // CHECK-NEXT: store volatile i32 %cast3, ptr %tz, align 4
+ tz = __builtin_ctzg(ui128);
+ // CHECK-NEXT: %11 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %12 = call i128 @llvm.cttz.i128(i128 %11, i1 true)
+ // CHECK-NEXT: %cast4 = trunc i128 %12 to i32
+ // CHECK-NEXT: store volatile i32 %cast4, ptr %tz, align 4
+ tz = __builtin_ctzg(ubi128);
+ // CHECK-NEXT: %13 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %14 = call i128 @llvm.cttz.i128(i128 %13, i1 true)
+ // CHECK-NEXT: %cast5 = trunc i128 %14 to i32
+ // CHECK-NEXT: store volatile i32 %cast5, ptr %tz, align 4
+ tz = __builtin_ctzg(uc, sc);
+ // CHECK-NEXT: %15 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %16 = call i8 @llvm.cttz.i8(i8 %15, i1 true)
+ // CHECK-NEXT: %cast6 = sext i8 %16 to i32
+ // CHECK-NEXT: %iszero = icmp eq i8 %15, 0
+ // CHECK-NEXT: %17 = load i8, ptr %sc.addr, align 1
+ // CHECK-NEXT: %conv = sext i8 %17 to i32
+ // CHECK-NEXT: %ctzg = select i1 %iszero, i32 %conv, i32 %cast6
+ // CHECK-NEXT: store volatile i32 %ctzg, ptr %tz, align 4
+ tz = __builtin_ctzg(us, uc);
+ // CHECK-NEXT: %18 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %19 = call i16 @llvm.cttz.i16(i16 %18, i1 true)
+ // CHECK-NEXT: %cast7 = sext i16 %19 to i32
+ // CHECK-NEXT: %iszero8 = icmp eq i16 %18, 0
+ // CHECK-NEXT: %20 = load i8, ptr %uc.addr, align 1
+ // CHECK-NEXT: %conv9 = zext i8 %20 to i32
+ // CHECK-NEXT: %ctzg10 = select i1 %iszero8, i32 %conv9, i32 %cast7
+ // CHECK-NEXT: store volatile i32 %ctzg10, ptr %tz, align 4
+ tz = __builtin_ctzg(ui, s);
+ // CHECK-NEXT: %21 = load i32, ptr %ui.addr, align 4
+ // CHECK-NEXT: %22 = call i32 @llvm.cttz.i32(i32 %21, i1 true)
+ // CHECK-NEXT: %iszero11 = icmp eq i32 %21, 0
+ // CHECK-NEXT: %23 = load i16, ptr %s.addr, align 2
+ // CHECK-NEXT: %conv12 = sext i16 %23 to i32
+ // CHECK-NEXT: %ctzg13 = select i1 %iszero11, i32 %conv12, i32 %22
+ // CHECK-NEXT: store volatile i32 %ctzg13, ptr %tz, align 4
+ tz = __builtin_ctzg(ul, us);
+ // CHECK-NEXT: %24 = load i64, ptr %ul.addr, align 8
+ // CHECK-NEXT: %25 = call i64 @llvm.cttz.i64(i64 %24, i1 true)
+ // CHECK-NEXT: %cast14 = trunc i64 %25 to i32
+ // CHECK-NEXT: %iszero15 = icmp eq i64 %24, 0
+ // CHECK-NEXT: %26 = load i16, ptr %us.addr, align 2
+ // CHECK-NEXT: %conv16 = zext i16 %26 to i32
+ // CHECK-NEXT: %ctzg17 = select i1 %iszero15, i32 %conv16, i32 %cast14
+ // CHECK-NEXT: store volatile i32 %ctzg17, ptr %tz, align 4
+ tz = __builtin_ctzg(ull, i);
+ // CHECK-NEXT: %27 = load i64, ptr %ull.addr, align 8
+ // CHECK-NEXT: %28 = call i64 @llvm.cttz.i64(i64 %27, i1 true)
+ // CHECK-NEXT: %cast18 = trunc i64 %28 to i32
+ // CHECK-NEXT: %iszero19 = icmp eq i64 %27, 0
+ // CHECK-NEXT: %29 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg20 = select i1 %iszero19, i32 %29, i32 %cast18
+ // CHECK-NEXT: store volatile i32 %ctzg20, ptr %tz, align 4
+ tz = __builtin_ctzg(ui128, i);
+ // CHECK-NEXT: %30 = load i128, ptr %ui128.addr, align 16
+ // CHECK-NEXT: %31 = call i128 @llvm.cttz.i128(i128 %30, i1 true)
+ // CHECK-NEXT: %cast21 = trunc i128 %31 to i32
+ // CHECK-NEXT: %iszero22 = icmp eq i128 %30, 0
+ // CHECK-NEXT: %32 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg23 = select i1 %iszero22, i32 %32, i32 %cast21
+ // CHECK-NEXT: store volatile i32 %ctzg23, ptr %tz, align 4
+ tz = __builtin_ctzg(ubi128, i);
+ // CHECK-NEXT: %33 = load i128, ptr %ubi128.addr, align 8
+ // CHECK-NEXT: %34 = call i128 @llvm.cttz.i128(i128 %33, i1 true)
+ // CHECK-NEXT: %cast24 = trunc i128 %34 to i32
+ // CHECK-NEXT: %iszero25 = icmp eq i128 %33, 0
+ // CHECK-NEXT: %35 = load i32, ptr %i.addr, align 4
+ // CHECK-NEXT: %ctzg26 = select i1 %iszero25, i32 %35, i32 %cast24
+ // CHECK-NEXT: store volatile i32 %ctzg26, ptr %tz, align 4
+ // CHECK-NEXT: ret void
+}
+
#endif
diff --git a/clang/test/CodeGen/cfi-check-attrs.c b/clang/test/CodeGen/cfi-check-attrs.c
new file mode 100644
index 000000000000..375aa30074d8
--- /dev/null
+++ b/clang/test/CodeGen/cfi-check-attrs.c
@@ -0,0 +1,5 @@
+// RUN: %clang_cc1 -triple arm-unknown-linux -funwind-tables=1 -fsanitize-cfi-cross-dso -emit-llvm -o - %s | FileCheck %s
+
+// CHECK: define weak {{.*}}void @__cfi_check({{.*}} [[ATTR:#[0-9]*]]
+
+// CHECK: attributes [[ATTR]] = {{.*}} uwtable(sync)
diff --git a/clang/test/CodeGen/cfi-check-fail.c b/clang/test/CodeGen/cfi-check-fail.c
index 2f12cee9dec6..15f6c77abf2b 100644
--- a/clang/test/CodeGen/cfi-check-fail.c
+++ b/clang/test/CodeGen/cfi-check-fail.c
@@ -72,7 +72,7 @@ void caller(void (*f)(void)) {
// CHECK: [[CONT5]]:
// CHECK: ret void
-// CHECK: define weak void @__cfi_check(i64 %[[TYPE:.*]], ptr %[[ADDR:.*]], ptr %[[DATA:.*]]) align 4096
+// CHECK: define weak void @__cfi_check(i64 noundef %[[TYPE:.*]], ptr noundef %[[ADDR:.*]], ptr noundef %[[DATA:.*]]){{.*}} align 4096
// CHECK-NOT: }
// CHECK: call void @__cfi_check_fail(ptr %[[DATA]], ptr %[[ADDR]])
// CHECK-NEXT: ret void
diff --git a/clang/test/CodeGen/debug-info-bitfield-0-struct.c b/clang/test/CodeGen/debug-info-bitfield-0-struct.c
index 0535b6267714..9fadf898e346 100644
--- a/clang/test/CodeGen/debug-info-bitfield-0-struct.c
+++ b/clang/test/CodeGen/debug-info-bitfield-0-struct.c
@@ -101,8 +101,10 @@ struct None_B {
int y : 4;
};
-struct None_C {
- // BOTH-DAG: ![[NONE_C:[0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "None_C", file: !{{[0-9]+}}, line: {{[0-9]+}}, size: 32, elements: ![[NONE_C_ELEMENTS:[0-9]+]])
+// AMDGCN does not do unaligned access cheaply, so the bitfield access units
+// would remain single bytes, without the aligned attribure
+struct __attribute__((aligned(4))) None_C {
+ // BOTH-DAG: ![[NONE_C:[0-9]+]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "None_C", file: !{{[0-9]+}}, line: {{[0-9]+}}, size: 32, align: 32, elements: ![[NONE_C_ELEMENTS:[0-9]+]])
// BOTH-DAG: ![[NONE_C_ELEMENTS]] = !{![[NONE_C_X:[0-9]+]], ![[NONE_C_Y:[0-9]+]], ![[NONE_C_A:[0-9]+]], ![[NONE_C_B:[0-9]+]]}
// BOTH-DAG: ![[NONE_C_X]] = !DIDerivedType(tag: DW_TAG_member, name: "x", scope: ![[NONE_C]], file: !{{[0-9]+}}, line: {{[0-9]+}}, baseType: !{{[0-9]+}}, size: 8, flags: DIFlagBitField, extraData: i64 0)
// BOTH-DAG: ![[NONE_C_Y]] = !DIDerivedType(tag: DW_TAG_member, name: "y", scope: ![[NONE_C]], file: !{{[0-9]+}}, line: {{[0-9]+}}, baseType: !{{[0-9]+}}, size: 8, offset: 8, flags: DIFlagBitField, extraData: i64 0)
diff --git a/clang/test/CodeGen/debug-info-cc.c b/clang/test/CodeGen/debug-info-cc.c
index 2664bcd4cb6b..2bfb1c28e935 100644
--- a/clang/test/CodeGen/debug-info-cc.c
+++ b/clang/test/CodeGen/debug-info-cc.c
@@ -64,11 +64,10 @@ __attribute__((swiftcall)) int add_swiftcall(int a, int b) {
return a+b;
}
-// [FIXME: swiftasynccc] Update debuginfo tag to SwiftAsync once LLVM support lands.
// LINUX: !DISubprogram({{.*}}"add_swiftasynccall", {{.*}}type: ![[FTY:[0-9]+]]
-// LINUX: ![[FTY]] = !DISubroutineType({{.*}}cc: DW_CC_LLVM_Swift,
-__attribute__((swiftasynccall)) int add_swiftasynccall(int a, int b, int c) {
- return a+b+c;
+// LINUX: ![[FTY]] = !DISubroutineType({{.*}}cc: DW_CC_LLVM_SwiftTail,
+__attribute__((swiftasynccall)) int add_swiftasynccall(int a, int b) {
+ return a+b;
}
// LINUX: !DISubprogram({{.*}}"add_inteloclbicc", {{.*}}type: ![[FTY:[0-9]+]]
diff --git a/clang/test/CodeGen/flexible-array-init.c b/clang/test/CodeGen/flexible-array-init.c
index bae926da5feb..15a30c15ac96 100644
--- a/clang/test/CodeGen/flexible-array-init.c
+++ b/clang/test/CodeGen/flexible-array-init.c
@@ -3,9 +3,15 @@
struct { int x; int y[]; } a = { 1, 7, 11 };
// CHECK: @a ={{.*}} global { i32, [2 x i32] } { i32 1, [2 x i32] [i32 7, i32 11] }
+struct { int y[]; } a1 = { 8, 12 };
+// CHECK: @a1 ={{.*}} global { [2 x i32] } { [2 x i32] [i32 8, i32 12] }
+
struct { int x; int y[]; } b = { 1, { 13, 15 } };
// CHECK: @b ={{.*}} global { i32, [2 x i32] } { i32 1, [2 x i32] [i32 13, i32 15] }
+struct { int y[]; } b1 = { { 14, 16 } };
+// CHECK: @b1 ={{.*}} global { [2 x i32] } { [2 x i32] [i32 14, i32 16] }
+
// sizeof(c) == 8, so this global should be at least 8 bytes.
struct { int x; char c; char y[]; } c = { 1, 2, { 13, 15 } };
// CHECK: @c ={{.*}} global { i32, i8, [2 x i8] } { i32 1, i8 2, [2 x i8] c"\0D\0F" }
@@ -21,10 +27,79 @@ struct __attribute((packed, aligned(4))) { char a; int x; char z[]; } e = { 1, 2
struct { int x; char y[]; } f = { 1, { 13, 15 } };
// CHECK: @f ={{.*}} global <{ i32, [2 x i8] }> <{ i32 1, [2 x i8] c"\0D\0F" }>
-union {
- struct {
- int a;
- char b[];
- } x;
-} in_union = {};
-// CHECK: @in_union ={{.*}} global %union.anon zeroinitializer
+struct __attribute((packed)) { short a; char z[]; } g = { 2, { 11, 13, 15 } };
+// CHECK: @g ={{.*}} <{ i16, [3 x i8] }> <{ i16 2, [3 x i8] c"\0B\0D\0F" }>,
+
+// Last member is the potential flexible array, unnamed initializer skips it.
+struct { int a; union { int b; short x; }; int c; int d; } h = {1, 2, {}, 3};
+// CHECK: @h = global %struct.anon{{.*}} { i32 1, %union.anon{{.*}} { i32 2 }, i32 0, i32 3 }
+struct { int a; union { int b; short x[0]; }; int c; int d; } h0 = {1, 2, {}, 3};
+// CHECK: @h0 = global %struct.anon{{.*}} { i32 1, %union.anon{{.*}} { i32 2 }, i32 0, i32 3 }
+struct { int a; union { int b; short x[1]; }; int c; int d; } h1 = {1, 2, {}, 3};
+// CHECK: @h1 = global %struct.anon{{.*}} { i32 1, %union.anon{{.*}} { i32 2 }, i32 0, i32 3 }
+struct {
+ int a;
+ union {
+ int b;
+ struct {
+ struct { } __ununsed;
+ short x[];
+ };
+ };
+ int c;
+ int d;
+} hiding = {1, 2, {}, 3};
+// CHECK: @hiding = global %struct.anon{{.*}} { i32 1, %union.anon{{.*}} { i32 2 }, i32 0, i32 3 }
+struct { int a; union { int b; short x[]; }; int c; int d; } hf = {1, 2, {}, 3};
+// CHECK: @hf = global %struct.anon{{.*}} { i32 1, %union.anon{{.*}} { i32 2 }, i32 0, i32 3 }
+
+// First member is the potential flexible array, initialization requires braces.
+struct { int a; union { short x; int b; }; int c; int d; } i = {1, 2, {}, 3};
+// CHECK: @i = global { i32, { i16, [2 x i8] }, i32, i32 } { i32 1, { i16, [2 x i8] } { i16 2, [2 x i8] undef }, i32 0, i32 3 }
+struct { int a; union { short x[0]; int b; }; int c; int d; } i0 = {1, {}, 2, 3};
+// CHECK: @i0 = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 1, { [0 x i16], [4 x i8] } { [0 x i16] zeroinitializer, [4 x i8] undef }, i32 2, i32 3 }
+struct { int a; union { short x[1]; int b; }; int c; int d; } i1 = {1, {2}, {}, 3};
+// CHECK: @i1 = global { i32, { [1 x i16], [2 x i8] }, i32, i32 } { i32 1, { [1 x i16], [2 x i8] } { [1 x i16] [i16 2], [2 x i8] undef }, i32 0, i32 3 }
+struct { int a; union { short x[]; int b; }; int c; int d; } i_f = {4, {}, {}, 6};
+// CHECK: @i_f = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 4, { [0 x i16], [4 x i8] } { [0 x i16] zeroinitializer, [4 x i8] undef }, i32 0, i32 6 }
+
+// Named initializers; order doesn't matter.
+struct { int a; union { int b; short x; }; int c; int d; } hn = {.a = 1, .x = 2, .c = 3};
+// CHECK: @hn = global { i32, { i16, [2 x i8] }, i32, i32 } { i32 1, { i16, [2 x i8] } { i16 2, [2 x i8] undef }, i32 3, i32 0 }
+struct { int a; union { int b; short x[0]; }; int c; int d; } hn0 = {.a = 1, .x = {2}, .c = 3};
+// CHECK: @hn0 = global { i32, { [0 x i16], [4 x i8] }, i32, i32 } { i32 1, { [0 x i16], [4 x i8] } { [0 x i16] zeroinitializer, [4 x i8] undef }, i32 3, i32 0 }
+struct { int a; union { int b; short x[1]; }; int c; int d; } hn1 = {.a = 1, .x = {2}, .c = 3};
+// CHECK: @hn1 = global { i32, { [1 x i16], [2 x i8] }, i32, i32 } { i32 1, { [1 x i16], [2 x i8] } { [1 x i16] [i16 2], [2 x i8] undef }, i32 3, i32 0 }
+
+struct { char a[]; } empty_struct = {};
+// CHECK: @empty_struct ={{.*}} global %struct.anon{{.*}} zeroinitializer, align 1
+
+struct { char a[]; } empty_struct0 = {0};
+// CHECK: @empty_struct0 = global { [1 x i8] } zeroinitializer, align 1
+
+union { struct { int a; char b[]; }; } struct_in_union = {};
+// CHECK: @struct_in_union = global %union.anon{{.*}} zeroinitializer, align 4
+
+union { struct { int a; char b[]; }; } struct_in_union0 = {0};
+// CHECK: @struct_in_union0 = global %union.anon{{.*}} zeroinitializer, align 4
+
+union { int a; char b[]; } trailing_in_union = {};
+// CHECK: @trailing_in_union = global %union.anon{{.*}} zeroinitializer, align 4
+
+union { int a; char b[]; } trailing_in_union0 = {0};
+// CHECK: @trailing_in_union0 = global %union.anon{{.*}} zeroinitializer, align 4
+
+union { char a[]; } only_in_union = {};
+// CHECK: @only_in_union = global %union.anon{{.*}} zeroinitializer, align 1
+
+union { char a[]; } only_in_union0 = {0};
+// CHECK: @only_in_union0 = global { [1 x i8] } zeroinitializer, align 1
+
+union { char a[]; int b; } first_in_union = {};
+// CHECK: @first_in_union = global { [0 x i8], [4 x i8] } { [0 x i8] zeroinitializer, [4 x i8] undef }, align 4
+
+union { char a[]; int b; } first_in_union0 = {0};
+// CHECK: @first_in_union0 = global { [1 x i8], [3 x i8] } { [1 x i8] zeroinitializer, [3 x i8] undef }, align 4
+
+union { char a[]; int b; } first_in_union123 = { {1, 2, 3} };
+// CHECK: @first_in_union123 = global { [3 x i8], i8 } { [3 x i8] c"\01\02\03", i8 undef }, align 4
diff --git a/clang/test/CodeGen/flexible-array-init.cpp b/clang/test/CodeGen/flexible-array-init.cpp
new file mode 100644
index 000000000000..d067a614e1af
--- /dev/null
+++ b/clang/test/CodeGen/flexible-array-init.cpp
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -triple i386-unknown-unknown -x c++ -emit-llvm -o - %s | FileCheck %s
+
+union _u { char a[]; } u = {};
+union _u0 { char a[]; } u0 = {0};
+
+// CHECK: %union._u = type { [0 x i8] }
+
+// CHECK: @u = global %union._u zeroinitializer, align 1
+// CHECK: @u0 = global { [1 x i8] } zeroinitializer, align 1
+
+union { char a[]; } z = {};
+// CHECK: @z = internal global %union.{{.*}} zeroinitializer, align 1
+union { char a[]; } z0 = {0};
+// CHECK: @z0 = internal global { [1 x i8] } zeroinitializer, align 1
+
+/* C++ requires global anonymous unions have static storage, so we have to
+ reference them to keep them in the IR output. */
+char keep(int pick)
+{
+ if (pick)
+ return z.a[0];
+ else
+ return z0.a[0];
+}
diff --git a/clang/test/CodeGen/hexagon-linux-vararg.c b/clang/test/CodeGen/hexagon-linux-vararg.c
index 033e72ab449d..84945e872d28 100644
--- a/clang/test/CodeGen/hexagon-linux-vararg.c
+++ b/clang/test/CodeGen/hexagon-linux-vararg.c
@@ -9,7 +9,7 @@ struct AAA {
int d;
};
-// CHECK: call void @llvm.va_start(ptr %arraydecay)
+// CHECK: call void @llvm.va_start.p0(ptr %arraydecay)
// CHECK: %arraydecay1 = getelementptr inbounds [1 x %struct.__va_list_tag],
// ptr %ap, i32 0, i32 0
// CHECK: br label %vaarg.maybe_reg
diff --git a/clang/test/CodeGen/mips-varargs.c b/clang/test/CodeGen/mips-varargs.c
index 052aedd1cd1e..029f000c121a 100644
--- a/clang/test/CodeGen/mips-varargs.c
+++ b/clang/test/CodeGen/mips-varargs.c
@@ -29,7 +29,7 @@ int test_i32(char *fmt, ...) {
// ALL: [[V:%.*]] = alloca i32, align 4
// NEW: [[PROMOTION_TEMP:%.*]] = alloca i32, align 4
//
-// ALL: call void @llvm.va_start(ptr %va)
+// ALL: call void @llvm.va_start.p0(ptr %va)
// ALL: [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
// O32: [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T:i32]] [[$CHUNKSIZE:4]]
// NEW: [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T:i32|i64]] [[$CHUNKSIZE:8]]
@@ -45,7 +45,7 @@ int test_i32(char *fmt, ...) {
// NEW: [[ARG:%.+]] = load i32, ptr [[PROMOTION_TEMP]], align 4
// ALL: store i32 [[ARG]], ptr [[V]], align 4
//
-// ALL: call void @llvm.va_end(ptr %va)
+// ALL: call void @llvm.va_end.p0(ptr %va)
// ALL: }
long long test_i64(char *fmt, ...) {
@@ -61,7 +61,7 @@ long long test_i64(char *fmt, ...) {
// ALL-LABEL: define{{.*}} i64 @test_i64(ptr{{.*}} %fmt, ...)
//
// ALL: %va = alloca ptr, align [[$PTRALIGN]]
-// ALL: call void @llvm.va_start(ptr %va)
+// ALL: call void @llvm.va_start.p0(ptr %va)
// ALL: [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
//
// i64 is 8-byte aligned, while this is within O32's stack alignment there's no
@@ -74,7 +74,7 @@ long long test_i64(char *fmt, ...) {
//
// ALL: [[ARG:%.+]] = load i64, ptr [[AP_CUR]], align 8
//
-// ALL: call void @llvm.va_end(ptr %va)
+// ALL: call void @llvm.va_end.p0(ptr %va)
// ALL: }
char *test_ptr(char *fmt, ...) {
@@ -92,7 +92,7 @@ char *test_ptr(char *fmt, ...) {
// ALL: %va = alloca ptr, align [[$PTRALIGN]]
// ALL: [[V:%.*]] = alloca ptr, align [[$PTRALIGN]]
// N32: [[AP_CAST:%.+]] = alloca ptr, align 4
-// ALL: call void @llvm.va_start(ptr %va)
+// ALL: call void @llvm.va_start.p0(ptr %va)
// ALL: [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T]] [[$CHUNKSIZE]]
// ALL: store ptr [[AP_NEXT]], ptr %va, align [[$PTRALIGN]]
@@ -109,7 +109,7 @@ char *test_ptr(char *fmt, ...) {
// N64: [[ARG:%.+]] = load ptr, ptr [[AP_CUR]], align [[$PTRALIGN]]
// ALL: store ptr [[ARG]], ptr [[V]], align [[$PTRALIGN]]
//
-// ALL: call void @llvm.va_end(ptr %va)
+// ALL: call void @llvm.va_end.p0(ptr %va)
// ALL: }
int test_v4i32(char *fmt, ...) {
@@ -128,7 +128,7 @@ int test_v4i32(char *fmt, ...) {
//
// ALL: %va = alloca ptr, align [[$PTRALIGN]]
// ALL: [[V:%.+]] = alloca <4 x i32>, align 16
-// ALL: call void @llvm.va_start(ptr %va)
+// ALL: call void @llvm.va_start.p0(ptr %va)
// ALL: [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
//
// Vectors are 16-byte aligned, however the O32 ABI has a maximum alignment of
@@ -152,7 +152,7 @@ int test_v4i32(char *fmt, ...) {
// N32: [[ARG:%.+]] = load <4 x i32>, ptr [[AP_CUR]], align 16
// ALL: store <4 x i32> [[ARG]], ptr [[V]], align 16
//
-// ALL: call void @llvm.va_end(ptr %va)
+// ALL: call void @llvm.va_end.p0(ptr %va)
// ALL: [[VECEXT:%.+]] = extractelement <4 x i32> {{.*}}, i32 0
// ALL: ret i32 [[VECEXT]]
// ALL: }
diff --git a/clang/test/CodeGen/no-bitfield-type-align.c b/clang/test/CodeGen/no-bitfield-type-align.c
index 53ed5e9ad8f8..1861c6886a35 100644
--- a/clang/test/CodeGen/no-bitfield-type-align.c
+++ b/clang/test/CodeGen/no-bitfield-type-align.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align -fdump-record-layouts-simple -emit-llvm -o %t %s | FileCheck %s -check-prefix=LAYOUT
+// RUN: FileCheck %s <%t
struct S {
unsigned short: 0;
@@ -7,6 +8,13 @@ struct S {
unsigned short f2:15;
};
+// LAYOUT-LABEL: LLVMType:%struct.S =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:15 Size:15 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
// CHECK: define{{.*}} void @test_zero_width_bitfield(ptr noundef %[[A:.*]])
// CHECK: %[[BF_LOAD:.*]] = load i32, ptr %[[V1:.*]], align 1
// CHECK: %[[BF_CLEAR:.*]] = and i32 %[[BF_LOAD]], 32767
diff --git a/clang/test/CodeGen/pr53127.cpp b/clang/test/CodeGen/pr53127.cpp
index 97fe1291352d..5a52b4860eec 100644
--- a/clang/test/CodeGen/pr53127.cpp
+++ b/clang/test/CodeGen/pr53127.cpp
@@ -34,7 +34,7 @@ void operator delete(void*);
// CHECK-NEXT: br i1 [[CALL6]], label [[COND_TRUE7:%.*]], label [[COND_FALSE8:%.*]]
// CHECK: cond.true7:
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[L]], i64 0, i64 0
-// CHECK-NEXT: call void @llvm.va_start(ptr [[ARRAYDECAY]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[ARRAYDECAY]])
// CHECK-NEXT: br label [[COND_END9:%.*]]
// CHECK: cond.false8:
// CHECK-NEXT: br label [[COND_END9]]
@@ -44,7 +44,7 @@ void operator delete(void*);
// CHECK: cond.true11:
// CHECK-NEXT: [[ARRAYDECAY12:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[L]], i64 0, i64 0
// CHECK-NEXT: [[ARRAYDECAY13:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[L2]], i64 0, i64 0
-// CHECK-NEXT: call void @llvm.va_copy(ptr [[ARRAYDECAY12]], ptr [[ARRAYDECAY13]])
+// CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[ARRAYDECAY12]], ptr [[ARRAYDECAY13]])
// CHECK-NEXT: br label [[COND_END15:%.*]]
// CHECK: cond.false14:
// CHECK-NEXT: br label [[COND_END15]]
diff --git a/clang/test/CodeGen/struct-x86-darwin.c b/clang/test/CodeGen/struct-x86-darwin.c
index 5191441cabaf..e79ecefb880d 100644
--- a/clang/test/CodeGen/struct-x86-darwin.c
+++ b/clang/test/CodeGen/struct-x86-darwin.c
@@ -1,25 +1,70 @@
-// RUN: %clang_cc1 %s -emit-llvm -triple=i686-apple-darwin9 -o - | FileCheck %s
-// CHECK: STest1 = type { i32, [4 x i16], double }
-// CHECK: STest2 = type { i16, i16, i32, i32 }
-// CHECK: STest3 = type { i8, i16, i32 }
-// CHECK: STestB1 = type { i8, i8 }
-// CHECK: STestB2 = type { i8, i8, i8 }
-// CHECK: STestB3 = type { i8, i8 }
-// CHECK: STestB4 = type { i8, i8, i8, i8 }
-// CHECK: STestB5 = type { i8, i16, i8 }
-// CHECK: STestB6 = type { i8, i8, i16 }
+// RUN: %clang_cc1 %s -emit-llvm -o /dev/null -triple=i686-apple-darwin9 -fdump-record-layouts-simple | FileCheck %s
+
// Test struct layout for x86-darwin target
struct STest1 {int x; short y[4]; double z; } st1;
struct STest2 {short a,b; int c,d; } st2;
struct STest3 {char a; short b; int c; } st3;
-// Bitfields
+// Bitfields
struct STestB1 {char a; char b:2; } stb1;
struct STestB2 {char a; char b:5; char c:4; } stb2;
struct STestB3 {char a; char b:2; } stb3;
struct STestB4 {char a; short b:2; char c; } stb4;
struct STestB5 {char a; short b:10; char c; } stb5;
-struct STestB6 {int a:1; char b; int c:13 } stb6;
+struct STestB6 {int a:1; char b; int c:13; } stb6;
// Packed struct STestP1 {char a; short b; int c; } __attribute__((__packed__)) stp1;
+
+// CHECK-LABEL: LLVMType:%struct.STest1 =
+// CHECK-SAME: type { i32, [4 x i16], double }
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STest2 =
+// CHECK-SAME: type { i16, i16, i32, i32 }
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STest3 =
+// CHECK-SAME: type { i8, i16, i32 }
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB1 =
+// CHECK-SAME: type { i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB2 =
+// CHECK-SAME: type <{ i8, i16 }>
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:16 StorageOffset:1
+// CHECK-NEXT: <CGBitFieldInfo Offset:8 Size:4 IsSigned:1 StorageSize:16 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB3 =
+// CHECK-SAME: type { i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB4 =
+// CHECK-SAME: type { i8, i8, i8, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:1 StorageSize:8 StorageOffset:1
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB5 =
+// CHECK-SAME: type { i8, i16, i8 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
+
+// CHECK-LABEL: LLVMType:%struct.STestB6 =
+// CHECK-SAME: type { i8, i8, i16 }
+// CHECK: BitFields:[
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:1 StorageSize:8 StorageOffset:0
+// CHECK-NEXT: <CGBitFieldInfo Offset:0 Size:13 IsSigned:1 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGen/tbaa-struct.cpp b/clang/test/CodeGen/tbaa-struct.cpp
index 9b4b7415142d..ca076ce5aa27 100644
--- a/clang/test/CodeGen/tbaa-struct.cpp
+++ b/clang/test/CodeGen/tbaa-struct.cpp
@@ -197,7 +197,7 @@ void copy12(UnionMember2 *a1, UnionMember2 *a2) {
// CHECK-OLD: [[TS6]] = !{i64 0, i64 2, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE:!.+]]}
// CHECK-OLD: [[TAG_DOUBLE]] = !{[[DOUBLE:!.+]], [[DOUBLE]], i64 0}
// CHECK-OLD [[DOUBLE]] = !{!"double", [[CHAR]], i64 0}
-// CHECK-OLD: [[TS7]] = !{i64 0, i64 1, [[TAG_CHAR]], i64 1, i64 1, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 3, i64 1, [[TAG_CHAR]], i64 4, i64 1, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]], i64 16, i64 1, [[TAG_CHAR]]}
+// CHECK-OLD: [[TS7]] = !{i64 0, i64 1, [[TAG_CHAR]], i64 1, i64 1, [[TAG_CHAR]], i64 2, i64 1, [[TAG_CHAR]], i64 3, i64 2, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]], i64 16, i64 1, [[TAG_CHAR]]}
// CHECK-OLD: [[TS8]] = !{i64 0, i64 4, [[TAG_CHAR]], i64 8, i64 8, [[TAG_DOUBLE]]}
// CHECK-OLD: [[TS9]] = !{i64 0, i64 8, [[TAG_CHAR]], i64 8, i64 4, [[TAG_INT]]}
// CHECK-OLD: [[TS10]] = !{i64 0, i64 4, [[TAG_INT]], i64 8, i64 8, [[TAG_CHAR]]}
diff --git a/clang/test/CodeGen/ubsan-builtin-checks.c b/clang/test/CodeGen/ubsan-builtin-checks.c
index 2bc32d8df485..c7f6078f903b 100644
--- a/clang/test/CodeGen/ubsan-builtin-checks.c
+++ b/clang/test/CodeGen/ubsan-builtin-checks.c
@@ -23,6 +23,9 @@ void check_ctz(int n) {
// CHECK: call void @__ubsan_handle_invalid_builtin
__builtin_ctzll(n);
+
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ __builtin_ctzg((unsigned int)n);
}
// CHECK: define{{.*}} void @check_clz
@@ -44,4 +47,7 @@ void check_clz(int n) {
// CHECK: call void @__ubsan_handle_invalid_builtin
__builtin_clzll(n);
+
+ // CHECK: call void @__ubsan_handle_invalid_builtin
+ __builtin_clzg((unsigned int)n);
}
diff --git a/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c b/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
new file mode 100644
index 000000000000..b087da34c3df
--- /dev/null
+++ b/clang/test/CodeGen/varargs-with-nonzero-default-address-space.c
@@ -0,0 +1,46 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// RUN: %clang_cc1 -triple spirv64-unknown-unknown -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+
+struct x {
+ double b;
+ long a;
+};
+
+// CHECK-LABEL: define spir_func void @testva(
+// CHECK-SAME: i32 noundef [[N:%.*]], ...) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[AP:%.*]] = alloca ptr addrspace(4), align 8
+// CHECK-NEXT: [[T:%.*]] = alloca [[STRUCT_X:%.*]], align 8
+// CHECK-NEXT: [[AP2:%.*]] = alloca ptr addrspace(4), align 8
+// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[VARET:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr [[N_ADDR]] to ptr addrspace(4)
+// CHECK-NEXT: [[AP_ASCAST:%.*]] = addrspacecast ptr [[AP]] to ptr addrspace(4)
+// CHECK-NEXT: [[T_ASCAST:%.*]] = addrspacecast ptr [[T]] to ptr addrspace(4)
+// CHECK-NEXT: [[AP2_ASCAST:%.*]] = addrspacecast ptr [[AP2]] to ptr addrspace(4)
+// CHECK-NEXT: [[V_ASCAST:%.*]] = addrspacecast ptr [[V]] to ptr addrspace(4)
+// CHECK-NEXT: [[VARET_ASCAST:%.*]] = addrspacecast ptr [[VARET]] to ptr addrspace(4)
+// CHECK-NEXT: store i32 [[N]], ptr addrspace(4) [[N_ADDR_ASCAST]], align 4
+// CHECK-NEXT: call void @llvm.va_start.p4(ptr addrspace(4) [[AP_ASCAST]])
+// CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr addrspace(4) [[AP_ASCAST]], ptr
+// CHECK-NEXT: call void @llvm.memcpy.p4.p0.i64(ptr addrspace(4) align 8 [[T_ASCAST]], ptr align 8 [[TMP0]], i64 16, i1 false)
+// CHECK-NEXT: call void @llvm.va_copy.p4(ptr addrspace(4) [[AP2_ASCAST]], ptr addrspace(4) [[AP_ASCAST]])
+// CHECK-NEXT: [[TMP1:%.*]] = va_arg ptr addrspace(4) [[AP2_ASCAST]], i32
+// CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(4) [[VARET_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[VARET_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[TMP2]], ptr addrspace(4) [[V_ASCAST]], align 4
+// CHECK-NEXT: call void @llvm.va_end.p4(ptr addrspace(4) [[AP2_ASCAST]])
+// CHECK-NEXT: call void @llvm.va_end.p4(ptr addrspace(4) [[AP_ASCAST]])
+// CHECK-NEXT: ret void
+
+void testva(int n, ...) {
+ __builtin_va_list ap;
+ __builtin_va_start(ap, n);
+ struct x t = __builtin_va_arg(ap, struct x);
+ __builtin_va_list ap2;
+ __builtin_va_copy(ap2, ap);
+ int v = __builtin_va_arg(ap2, int);
+ __builtin_va_end(ap2);
+ __builtin_va_end(ap);
+}
diff --git a/clang/test/CodeGen/xcore-abi.c b/clang/test/CodeGen/xcore-abi.c
index 4dd0f221533b..bb8d2fec46bd 100644
--- a/clang/test/CodeGen/xcore-abi.c
+++ b/clang/test/CodeGen/xcore-abi.c
@@ -28,7 +28,7 @@ void testva (int n, ...) {
// CHECK: [[AP:%[a-z0-9]+]] = alloca ptr, align 4
// CHECK: [[V5:%[a-z0-9]+]] = alloca %struct.x, align 4
// CHECK: [[TMP:%[a-z0-9]+]] = alloca [4 x i32], align 4
- // CHECK: call void @llvm.va_start(ptr [[AP]])
+ // CHECK: call void @llvm.va_start.p0(ptr [[AP]])
char* v1 = va_arg (ap, char*);
f(v1);
diff --git a/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp b/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp
index 14963867798d..7953f902bf09 100644
--- a/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp
+++ b/clang/test/CodeGenCXX/attr-target-clones-aarch64.cpp
@@ -56,13 +56,6 @@ void run_foo_tml() {
// CHECK-NEXT: ret i32 1
//
//
-// CHECK-LABEL: @_Z7foo_ovli.default(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT: store i32 [[TMP0:%.*]], ptr [[DOTADDR]], align 4
-// CHECK-NEXT: ret i32 1
-//
-//
// CHECK-LABEL: @_Z7foo_ovli.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -82,11 +75,6 @@ void run_foo_tml() {
// CHECK-NEXT: ret i32 2
//
//
-// CHECK-LABEL: @_Z7foo_ovlv.default(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 2
-//
-//
// CHECK-LABEL: @_Z7foo_ovlv.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -182,6 +170,18 @@ void run_foo_tml() {
// CHECK-NEXT: ret i32 4
//
//
+// CHECK-LABEL: @_Z7foo_ovli.default(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 [[TMP0:%.*]], ptr [[DOTADDR]], align 4
+// CHECK-NEXT: ret i32 1
+//
+//
+// CHECK-LABEL: @_Z7foo_ovlv.default(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 2
+//
+//
// CHECK-LABEL: @_ZN7MyClassIssE7foo_tmlEv._Mfrintts(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
@@ -231,8 +231,8 @@ void run_foo_tml() {
//
//.
// CHECK: attributes #[[ATTR0:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
-// CHECK: attributes #[[ATTR1:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
-// CHECK: attributes #[[ATTR2:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ls64" }
+// CHECK: attributes #[[ATTR1:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ls64" }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
// CHECK: attributes #[[ATTR3:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fptoint" }
// CHECK: attributes #[[ATTR4:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme-f64f64" }
//.
diff --git a/clang/test/CodeGenCXX/attr-target-version.cpp b/clang/test/CodeGenCXX/attr-target-version.cpp
index e06121d1a719..8b7273fe3bb5 100644
--- a/clang/test/CodeGenCXX/attr-target-version.cpp
+++ b/clang/test/CodeGenCXX/attr-target-version.cpp
@@ -35,7 +35,7 @@ struct MyClass {
int unused_with_implicit_forward_default_def(void);
int __attribute__((target_version("lse"))) unused_with_implicit_forward_default_def(void);
- // This should generate a normal function.
+ // This should generate a target version despite the default not being declared.
int __attribute__((target_version("rdm"))) unused_without_default(void);
};
@@ -75,6 +75,13 @@ int bar() {
// CHECK: @_ZN7MyClass32unused_with_implicit_default_defEv = weak_odr ifunc i32 (ptr), ptr @_ZN7MyClass32unused_with_implicit_default_defEv.resolver
// CHECK: @_ZN7MyClass40unused_with_implicit_forward_default_defEv = weak_odr ifunc i32 (ptr), ptr @_ZN7MyClass40unused_with_implicit_forward_default_defEv.resolver
//.
+// CHECK-LABEL: @_Z3fooi._Mbf16Msme-f64f64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 [[TMP0:%.*]], ptr [[DOTADDR]], align 4
+// CHECK-NEXT: ret i32 1
+//
+//
// CHECK-LABEL: @_Z3fooi.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
@@ -82,6 +89,11 @@ int bar() {
// CHECK-NEXT: ret i32 2
//
//
+// CHECK-LABEL: @_Z3foov._Mebf16Msm4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret i32 3
+//
+//
// CHECK-LABEL: @_Z3foov.default(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 4
@@ -189,6 +201,14 @@ int bar() {
// CHECK-NEXT: ret i32 1
//
//
+// CHECK-LABEL: @_ZN7MyClass22unused_without_defaultEv._Mrdm(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
+// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
+// CHECK-NEXT: ret i32 0
+//
+//
// CHECK-LABEL: @_Z3barv(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[M:%.*]] = alloca [[STRUCT_MYCLASS:%.*]], align 1
@@ -250,26 +270,6 @@ int bar() {
// CHECK-NEXT: ret ptr @_Z3foov.default
//
//
-// CHECK-LABEL: @_Z3fooi._Mbf16Msme-f64f64(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT: store i32 [[TMP0:%.*]], ptr [[DOTADDR]], align 4
-// CHECK-NEXT: ret i32 1
-//
-//
-// CHECK-LABEL: @_Z3foov._Mebf16Msm4(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i32 3
-//
-//
-// CHECK-LABEL: @_ZN7MyClass22unused_without_defaultEv(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT: store ptr [[THIS:%.*]], ptr [[THIS_ADDR]], align 8
-// CHECK-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-NEXT: ret i32 0
-//
-//
// CHECK-LABEL: @_ZN7MyClass23unused_with_default_defEv.resolver(
// CHECK-NEXT: resolver_entry:
// CHECK-NEXT: call void @__init_cpu_features_resolver()
@@ -312,16 +312,16 @@ int bar() {
// CHECK-NEXT: ret ptr @_ZN7MyClass40unused_with_implicit_forward_default_defEv.default
//
//.
-// CHECK: attributes #[[ATTR0:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
-// CHECK: attributes #[[ATTR1:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+crc" }
-// CHECK: attributes #[[ATTR2:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+fp-armv8,+neon" }
-// CHECK: attributes #[[ATTR3:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+mops" }
-// CHECK: attributes #[[ATTR4:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon" }
-// CHECK: attributes #[[ATTR5:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve" }
-// CHECK: attributes #[[ATTR6:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
-// CHECK: attributes #[[ATTR7:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse" }
-// CHECK: attributes #[[ATTR8:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme-f64f64" }
-// CHECK: attributes #[[ATTR9:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+fp-armv8,+neon,+sm4" }
+// CHECK: attributes #[[ATTR0:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+sme,+sme-f64f64" }
+// CHECK: attributes #[[ATTR1:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #[[ATTR2:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+bf16,+fp-armv8,+neon,+sm4" }
+// CHECK: attributes #[[ATTR3:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+crc" }
+// CHECK: attributes #[[ATTR4:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+dotprod,+fp-armv8,+neon" }
+// CHECK: attributes #[[ATTR5:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+mops" }
+// CHECK: attributes #[[ATTR6:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon" }
+// CHECK: attributes #[[ATTR7:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve" }
+// CHECK: attributes #[[ATTR8:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon" }
+// CHECK: attributes #[[ATTR9:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+lse" }
// CHECK: attributes #[[ATTR10:[0-9]+]] = { mustprogress noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+neon,+rdm" }
// CHECK: attributes #[[ATTR11:[0-9]+]] = { "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
//.
diff --git a/clang/test/CodeGenCXX/auto-var-init.cpp b/clang/test/CodeGenCXX/auto-var-init.cpp
index 991eb73fe45c..7803ed5b633f 100644
--- a/clang/test/CodeGenCXX/auto-var-init.cpp
+++ b/clang/test/CodeGenCXX/auto-var-init.cpp
@@ -1,8 +1,8 @@
// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks %s -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK,CHECK-O0
// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=pattern %s -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK-O0,PATTERN,PATTERN-O0
-// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=pattern %s -O1 -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK-O1,PATTERN,PATTERN-O1
+// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=pattern %s -O1 -emit-llvm -o - | FileCheck %s -check-prefixes=PATTERN,PATTERN-O1
// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=zero %s -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK-O0,ZERO,ZERO-O0
-// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=zero %s -O1 -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK-O1,ZERO,ZERO-O1
+// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-unknown -fblocks -ftrivial-auto-var-init=zero %s -O1 -emit-llvm -o - | FileCheck %s -check-prefixes=ZERO,ZERO-O1
// RUN: %clang_cc1 -std=c++14 -triple i386-unknown-unknown -fblocks -ftrivial-auto-var-init=pattern %s -emit-llvm -o - | FileCheck %s -check-prefixes=CHECK-O0,PATTERN,PATTERN-O0
#pragma clang diagnostic ignored "-Winaccessible-base"
@@ -1303,9 +1303,10 @@ TEST_CUSTOM(semivolatile, semivolatile, { 0x44444444, 0x44444444 });
// CHECK-O0: call void @llvm.memcpy
// CHECK-NOT: !annotation
// CHECK-O0: call void @{{.*}}used{{.*}}%custom)
-// CHECK-O1: store i32 1145324612, ptr %custom, align 4
-// CHECK-O1-NEXT: %[[I:[^ ]*]] = getelementptr inbounds i8, ptr %custom, i64 4
-// CHECK-O1-NEXT: store i32 1145324612, ptr %[[I]], align 4
+// PATTERN-O1: store i32 1145324612, ptr %custom, align 4
+// PATTERN-O1-NEXT: %[[I:[^ ]*]] = getelementptr inbounds i8, ptr %custom, i64 4
+// PATTERN-O1-NEXT: store i32 1145324612, ptr %[[I]], align 4
+// ZERO-O1: store i64 4919131752989213764, ptr %custom, align 8
// CHECK-NOT: !annotation
TEST_UNINIT(semivolatileinit, semivolatileinit);
@@ -1418,7 +1419,8 @@ TEST_CUSTOM(matching, matching, { .f = 0xf00f });
// CHECK-O0: call void @llvm.memcpy
// CHECK-NOT: !annotation
// CHECK-O0: call void @{{.*}}used{{.*}}%custom)
-// CHECK-O1: store float 6.145500e+04, ptr {{.*}}, align 4
+// PATTERN-O1: store float 6.145500e+04, ptr {{.*}}, align 4
+// ZERO-O1: store i32 1198526208, ptr %custom, align 4
// CHECK-NOT: !annotation
TEST_UNINIT(matchingreverse, matchingreverse);
@@ -1445,7 +1447,8 @@ TEST_CUSTOM(matchingreverse, matchingreverse, { .i = 0xf00f });
// CHECK-O0: call void @llvm.memcpy
// CHECK-NOT: !annotation
// CHECK-O0: call void @{{.*}}used{{.*}}%custom)
-// CHECK-O1: store i32 61455, ptr %custom, align 4
+// PATTERN-O1: store i32 61455, ptr %custom, align 4
+// ZERO-O1: store i32 61455, ptr %custom, align 4
// CHECK-NOT: !annotation
TEST_UNINIT(unmatched, unmatched);
@@ -1471,7 +1474,8 @@ TEST_CUSTOM(unmatched, unmatched, { .i = 0x3badbeef });
// CHECK-O0: call void @llvm.memcpy
// CHECK-NOT: !annotation
// CHECK-O0: call void @{{.*}}used{{.*}}%custom)
-// CHECK-O1: store i32 1001242351, ptr {{.*}}, align 4
+// PATTERN-O1: store i32 1001242351, ptr {{.*}}, align 4
+// ZERO-O1: store i32 1001242351, ptr {{.*}}, align 4
// CHECK-NOT: !annotation
TEST_UNINIT(unmatchedreverse, unmatchedreverse);
@@ -1504,9 +1508,7 @@ TEST_CUSTOM(unmatchedreverse, unmatchedreverse, { .c = 42 });
// PATTERN-O1-NEXT: store i8 -86, ptr %[[I]], align {{.*}}
// PATTERN-O1-NEXT: %[[I:[^ ]*]] = getelementptr inbounds i8, ptr %custom, i64 3
// PATTERN-O1-NEXT: store i8 -86, ptr %[[I]], align {{.*}}
-// ZERO-O1: store i8 42, ptr {{.*}}, align 4
-// ZERO-O1-NEXT: %[[I:[^ ]*]] = getelementptr inbounds i8, ptr %custom, i64 1
-// ZERO-O1-NEXT: call void @llvm.memset.{{.*}}({{.*}}, i8 0, i64 3, {{.*}})
+// ZERO-O1: store i32 42, ptr {{.*}}, align 4
TEST_UNINIT(unmatchedfp, unmatchedfp);
// CHECK-LABEL: @test_unmatchedfp_uninit()
@@ -1531,7 +1533,8 @@ TEST_CUSTOM(unmatchedfp, unmatchedfp, { .d = 3.1415926535897932384626433 });
// CHECK-O0: call void @llvm.memcpy
// CHECK-NOT: !annotation
// CHECK-O0: call void @{{.*}}used{{.*}}%custom)
-// CHECK-O1: store double 0x400921FB54442D18, ptr %custom, align 8
+// PATTERN-O1: store double 0x400921FB54442D18, ptr %custom, align 8
+// ZERO-O1: store i64 4614256656552045848, ptr %custom, align 8
// CHECK-NOT: !annotation
TEST_UNINIT(emptyenum, emptyenum);
diff --git a/clang/test/CodeGenCXX/bitfield-access-empty.cpp b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
new file mode 100644
index 000000000000..c5e6f55ffa69
--- /dev/null
+++ b/clang/test/CodeGenCXX/bitfield-access-empty.cpp
@@ -0,0 +1,150 @@
+// Check if we can merge bitfields across empty members
+
+// Configs that have cheap unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpcle-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=ve-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=systemz %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Configs that have expensive unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=amdgcn-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=spir-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=xcore-none-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big endian
+// RUN: %clang_cc1 -triple=lanai-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+struct Empty {};
+
+struct P1 {
+ unsigned a : 16;
+ [[no_unique_address]] Empty e;
+ unsigned b : 16;
+} p1;
+// CHECK-LABEL: LLVMType:%struct.P1 =
+// LAYOUT-SAME: type { i16, i16 }
+// LAYOUT-DWN32-SAME: type { i16, i16 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P1 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
+
+struct P2 {
+ unsigned a : 15;
+ [[no_unique_address]] Empty e;
+ unsigned b : 15;
+} p2;
+// CHECK-LABEL: LLVMType:%struct.P2 =
+// LAYOUT-SAME: type { i16, i16 }
+// LAYOUT-DWN32-SAME: type { i16, i16 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P2 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:15 IsSigned:0 StorageSize:16 StorageOffset:2
+// CHECK-NEXT: ]>
+
+struct P3 {
+ unsigned a : 16;
+ Empty e;
+ unsigned b : 16;
+} p3;
+// CHECK-LABEL: LLVMType:%struct.P3 =
+// LAYOUT-SAME: type { i16, %struct.Empty, i16, [2 x i8] }
+// LAYOUT-DWN32-SAME: type <{ i16, %struct.Empty, i16 }>
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P3 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:4
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:3
+// CHECK-NEXT: ]>
+
+struct P4 {
+ unsigned : 0;
+} p4;
+// CHECK-LABEL: LLVMType:%struct.P4 =
+// LAYOUT-SAME: type { {{.+}} }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P4 =
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+struct P5 {
+ ~P5();
+ unsigned : 0;
+} p5;
+// CHECK-LABEL: LLVMType:%struct.P5 =
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P5.base = type {}
+// CHECK: BitFields:[
+// CHECK-NEXT: ]>
+
+struct P6 {
+ unsigned a : 16;
+ unsigned b : 8;
+ [[no_unique_address]] Empty e;
+ unsigned c;
+} p6;
+// CHECK-LABEL: LLVMType:%struct.P6 =
+// LAYOUT-SAME: type { i32, i32 }
+// LAYOUT-DWN32-SAME: type { i32, i32 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P6 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:32 StorageOffset:0
+// CHECK-NEXT: ]>
+
+struct P7 {
+ unsigned a : 16;
+ unsigned b : 8;
+ Empty e;
+ unsigned c;
+} p7;
+// CHECK-LABEL: LLVMType:%struct.P7 =
+// LAYOUT-SAME: type { i16, i8, %struct.Empty, i32 }
+// LAYOUT-DWN32-SAME: type { i16, i8, %struct.Empty, i32 }
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.P7 =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:0 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:0 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-access-tail.cpp b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
new file mode 100644
index 000000000000..68716fdf3b1d
--- /dev/null
+++ b/clang/test/CodeGenCXX/bitfield-access-tail.cpp
@@ -0,0 +1,115 @@
+// Check we use tail padding if it is known to be safe
+
+// Configs that have cheap unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=aarch64-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=aarch64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arm-apple-darwin %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT-DWN32 %s
+// RUN: %clang_cc1 -triple=arm-none-eabi %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=i686-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpcle-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=ve-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=wasm64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=x86_64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big Endian
+// RUN: %clang_cc1 -triple=powerpc-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=powerpc64-linux-gnu %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=systemz %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Configs that have expensive unaligned access
+// Little Endian
+// RUN: %clang_cc1 -triple=amdgcn-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=arc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=bpf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=csky %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=hexagon-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=le64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=loongarch32-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=nvptx-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv32 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=riscv64 %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=spir-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=xcore-none-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Big endian
+// RUN: %clang_cc1 -triple=lanai-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=m68k-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=mips64-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=sparc-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+// RUN: %clang_cc1 -triple=tce-elf %s -emit-llvm -o /dev/null -fdump-record-layouts-simple | FileCheck --check-prefixes CHECK,LAYOUT %s
+
+// Can use tail padding
+struct Pod {
+ int a : 16;
+ int b : 8;
+} P;
+// CHECK-LABEL: LLVMType:%struct.Pod =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.Pod =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:32 StorageOffset:0
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: ]>
+
+// No tail padding
+struct __attribute__((packed)) PPod {
+ int a : 16;
+ int b : 8;
+} PP;
+// CHECK-LABEL: LLVMType:%struct.PPod =
+// LAYOUT-SAME: type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PPod =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: ]>
+
+// Cannot use tail padding
+struct NonPod {
+ ~NonPod();
+ int a : 16;
+ int b : 8;
+} NP;
+// CHECK-LABEL: LLVMType:%struct.NonPod =
+// LAYOUT-SAME: type <{ i16, i8, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.
+// LAYOUT-SAME: NonPod.base = type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: NonPod = type <{ i16, i8 }>
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: ]>
+
+// No tail padding
+struct __attribute__((packed)) PNonPod {
+ ~PNonPod();
+ int a : 16;
+ int b : 8;
+} PNP;
+// CHECK-LABEL: LLVMType:%struct.PNonPod =
+// LAYOUT-SAME: type <{ i16, i8 }>
+// LAYOUT-DWN32-SAME: type <{ i16, i8 }>
+// CHECK-NEXT: NonVirtualBaseLLVMType:%struct.PNonPod =
+// CHECK: BitFields:[
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:16 IsSigned:1 StorageSize:16 StorageOffset:0
+// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:8 IsSigned:1 StorageSize:8 StorageOffset:2
+// CHECK-NEXT: ]>
diff --git a/clang/test/CodeGenCXX/bitfield-ir.cpp b/clang/test/CodeGenCXX/bitfield-ir.cpp
new file mode 100644
index 000000000000..76c144072da6
--- /dev/null
+++ b/clang/test/CodeGenCXX/bitfield-ir.cpp
@@ -0,0 +1,101 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -O2 -emit-llvm -o - %s | FileCheck %s
+
+struct Tail {
+ ~Tail();
+ int a : 16;
+ int b : 8;
+};
+
+struct Char {
+ int a : 16;
+ int b : 8;
+ char c;
+};
+
+struct Int {
+ int a : 16;
+ int b : 8;
+ int c;
+};
+
+
+// CHECK-LABEL: define dso_local void @_Z1AP4Tail
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[P]], align 4
+// CHECK-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// CHECK-NEXT: store i16 [[INC]], ptr [[P]], align 4
+// CHECK-NEXT: ret void
+//
+void A (Tail *p) {
+ p->a++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1BP4Tail
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[B:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 2
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[B]], align 2
+// CHECK-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// CHECK-NEXT: store i8 [[INC]], ptr [[B]], align 2
+// CHECK-NEXT: ret void
+//
+void B (Tail *p) {
+ p->b++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1AP4Char
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[P]], align 4
+// CHECK-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// CHECK-NEXT: store i16 [[INC]], ptr [[P]], align 4
+// CHECK-NEXT: ret void
+//
+void A (Char *p) {
+ p->a++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1BP4Char
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[B:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 2
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i8, ptr [[B]], align 2
+// CHECK-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// CHECK-NEXT: store i8 [[INC]], ptr [[B]], align 2
+// CHECK-NEXT: ret void
+//
+void B (Char *p) {
+ p->b++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1AP3Int
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[P]], align 4
+// CHECK-NEXT: [[INC:%.*]] = add i32 [[BF_LOAD]], 1
+// CHECK-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
+// CHECK-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -65536
+// CHECK-NEXT: [[BF_SET:%.*]] = or disjoint i32 [[BF_VALUE]], [[BF_CLEAR]]
+// CHECK-NEXT: store i32 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT: ret void
+//
+void A (Int *p) {
+ p->a++;
+}
+
+// CHECK-LABEL: define dso_local void @_Z1BP3Int
+// CHECK-SAME: (ptr nocapture noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[BF_LOAD:%.*]] = load i32, ptr [[P]], align 4
+// CHECK-NEXT: [[BF_VALUE:%.*]] = add i32 [[BF_LOAD]], 65536
+// CHECK-NEXT: [[BF_SHL2:%.*]] = and i32 [[BF_VALUE]], 16711680
+// CHECK-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16711681
+// CHECK-NEXT: [[BF_SET:%.*]] = or disjoint i32 [[BF_SHL2]], [[BF_CLEAR]]
+// CHECK-NEXT: store i32 [[BF_SET]], ptr [[P]], align 4
+// CHECK-NEXT: ret void
+//
+void B (Int *p) {
+ p->b++;
+}
diff --git a/clang/test/CodeGenCXX/bitfield.cpp b/clang/test/CodeGenCXX/bitfield.cpp
index a478eb44915e..7545e02840e6 100644
--- a/clang/test/CodeGenCXX/bitfield.cpp
+++ b/clang/test/CodeGenCXX/bitfield.cpp
@@ -1,7 +1,9 @@
-// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s \
-// RUN: | FileCheck -check-prefix=CHECK-X86-64 %s
-// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -emit-llvm -o - %s \
-// RUN: | FileCheck -check-prefix=CHECK-PPC64 %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fdump-record-layouts-simple \
+// RUN: -emit-llvm -o %t %s | FileCheck -check-prefixes=LAYOUT,LAYOUT-X86-64 %s
+// RUN: FileCheck -check-prefix=CHECK-X86-64 %s <%t
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -fdump-record-layouts-simple\
+// RUN: -emit-llvm -o %t %s | FileCheck -check-prefixes=LAYOUT,LAYOUT-PPC64 %s
+// RUN: FileCheck -check-prefix=CHECK-PPC64 %s <%t
//
// Tests for bitfield access patterns in C++ with special attention to
// conformance to C++11 memory model requirements.
@@ -19,6 +21,27 @@ namespace N0 {
unsigned b70 : 6;
unsigned b71 : 2;
};
+// LAYOUT-LABEL: LLVMType:%"struct.N0::S" =
+// LAYOUT-SAME: type { i64 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:14 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:14 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:16 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:22 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:24 Size:30 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:54 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:56 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:62 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:50 Size:14 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:48 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:42 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:40 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:10 Size:30 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:2 Size:6 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:2 IsSigned:0 StorageSize:64 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
unsigned read00(S* s) {
// CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N06read00
// CHECK-X86-64: %[[val:.*]] = load i64, ptr %{{.*}}
@@ -149,6 +172,13 @@ namespace N1 {
unsigned b : 1;
char c;
};
+// LAYOUT-LABEL: LLVMType:%"struct.N1::S" =
+// LAYOUT-SAME: type { i8, i8, i8, i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:1 IsSigned:0 StorageSize:8 StorageOffset:1
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:7 Size:1 IsSigned:0 StorageSize:8 StorageOffset:1
+// LAYOUT-NEXT: ]>
+
unsigned read(S* s) {
// CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N14read
// CHECK-X86-64: %[[ptr:.*]] = getelementptr inbounds %{{.*}}, ptr %{{.*}}, i32 0, i32 1
@@ -193,6 +223,13 @@ namespace N2 {
unsigned b : 24;
void *p;
};
+// LAYOUT-LABEL: LLVMType:%"struct.N2::S" =
+// LAYOUT-SAME: type { i32, ptr }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
unsigned read(S* s) {
// CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N24read
// CHECK-X86-64: %[[val:.*]] = load i32, ptr %{{.*}}
@@ -230,6 +267,13 @@ namespace N3 {
struct S {
unsigned b : 24;
};
+// LAYOUT-LABEL: LLVMType:%"struct.N3::S" =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
unsigned read(S* s) {
// CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N34read
// CHECK-X86-64: %[[val:.*]] = load i32, ptr %{{.*}}
@@ -276,6 +320,14 @@ namespace N4 {
char c;
};
#endif
+// LAYOUT-LABEL: LLVMType:%"struct.N4::Base" =
+// LAYOUT-SAME: type <{ ptr, [3 x i8], [5 x i8] }>
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N4::Base.base" = type <{ ptr, [3 x i8] }>
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
unsigned read(Base* s) {
// FIXME: We should widen this load as long as the function isn't being
// instrumented by ThreadSanitizer.
@@ -317,6 +369,22 @@ namespace N5 {
struct X { unsigned b : 24; char c; } x;
struct Y { unsigned b : 24; } y;
};
+// LAYOUT-LABEL: LLVMType:%"struct.N5::U::X" =
+// LAYOUT-SAME: type { [3 x i8], i8 }
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N5::U::X" =
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
+// LAYOUT-LABEL: LLVMType:%"struct.N5::U::Y" =
+// LAYOUT-SAME: type { i32 }
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N5::U::Y" =
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:8 Size:24 IsSigned:0 StorageSize:32 StorageOffset:0
+// LAYOUT-NEXT: ]>
+
unsigned read(U* u) {
// CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N54read
// CHECK-X86-64: %[[val:.*]] = load i32, ptr %{{.*}}
@@ -360,6 +428,15 @@ namespace N6 {
unsigned char : 0;
unsigned char b2 : 8;
};
+// LAYOUT-LABEL: LLVMType:%"struct.N6::S" =
+// LAYOUT-SAME: type { [3 x i8], i8 }
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:0 StorageSize:8 StorageOffset:3
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:0
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:8 IsSigned:0 StorageSize:8 StorageOffset:3
+// LAYOUT-NEXT: ]>
+
unsigned read(S* s) {
// CHECK-X86-64-LABEL: define{{.*}} i32 @_ZN2N64read
// CHECK-X86-64: %[[val1:.*]] = load i24, ptr %{{.*}}
@@ -416,6 +493,22 @@ namespace N7 {
char c;
};
#endif
+// LAYOUT-LABEL: LLVMType:%"struct.N7::B1" =
+// LAYOUT-SAME: type <{ ptr, [3 x i8], [5 x i8] }>
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N7::B1.base" = type <{ ptr, [3 x i8] }>
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
+// LAYOUT-LABEL: LLVMType:%"struct.N7::B2" =
+// LAYOUT-SAME: type <{ ptr, [3 x i8], [5 x i8], %"struct.N7::B1.base", [5 x i8] }>
+// LAYOUT-NEXT: NonVirtualBaseLLVMType:%"struct.N7::B2.base" = type <{ ptr, [3 x i8] }>
+// LAYOUT: BitFields:[
+// LAYOUT-X86-64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-PPC64-NEXT: <CGBitFieldInfo Offset:0 Size:24 IsSigned:0 StorageSize:24 StorageOffset:8
+// LAYOUT-NEXT: ]>
+
unsigned read(B2* s) {
// FIXME: We should widen this load as long as the function isn't being
// instrumented by ThreadSanitizer.
diff --git a/clang/test/CodeGenCXX/ext-int.cpp b/clang/test/CodeGenCXX/ext-int.cpp
index 5a4270aef285..a1d17c840ee4 100644
--- a/clang/test/CodeGenCXX/ext-int.cpp
+++ b/clang/test/CodeGenCXX/ext-int.cpp
@@ -159,9 +159,9 @@ void TakesVarargs(int i, ...) {
// WIN: %[[ARGS:.+]] = alloca ptr
__builtin_va_start(args, i);
// LIN64: %[[STARTAD:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[ARGS]]
- // LIN64: call void @llvm.va_start(ptr %[[STARTAD]])
- // LIN32: call void @llvm.va_start(ptr %[[ARGS]])
- // WIN: call void @llvm.va_start(ptr %[[ARGS]])
+ // LIN64: call void @llvm.va_start.p0(ptr %[[STARTAD]])
+ // LIN32: call void @llvm.va_start.p0(ptr %[[ARGS]])
+ // WIN: call void @llvm.va_start.p0(ptr %[[ARGS]])
_BitInt(92) A = __builtin_va_arg(args, _BitInt(92));
// LIN64: %[[AD1:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[ARGS]]
@@ -302,9 +302,9 @@ void TakesVarargs(int i, ...) {
__builtin_va_end(args);
// LIN64: %[[ENDAD:.+]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %[[ARGS]]
- // LIN64: call void @llvm.va_end(ptr %[[ENDAD]])
- // LIN32: call void @llvm.va_end(ptr %[[ARGS]])
- // WIN: call void @llvm.va_end(ptr %[[ARGS]])
+ // LIN64: call void @llvm.va_end.p0(ptr %[[ENDAD]])
+ // LIN32: call void @llvm.va_end.p0(ptr %[[ARGS]])
+ // WIN: call void @llvm.va_end.p0(ptr %[[ARGS]])
}
void typeid_tests() {
// LIN: define{{.*}} void @_Z12typeid_testsv()
diff --git a/clang/test/CodeGenCXX/ibm128-declarations.cpp b/clang/test/CodeGenCXX/ibm128-declarations.cpp
index 5ee4f354d379..e0187e20cde4 100644
--- a/clang/test/CodeGenCXX/ibm128-declarations.cpp
+++ b/clang/test/CodeGenCXX/ibm128-declarations.cpp
@@ -107,13 +107,13 @@ int main(void) {
// CHECK: define dso_local noundef ppc_fp128 @_Z10func_vaargiz(i32 noundef signext %n, ...)
// CHECK: entry:
// CHECK: store i32 %n, ptr %n.addr, align 4
-// CHECK: call void @llvm.va_start(ptr %ap)
+// CHECK: call void @llvm.va_start.p0(ptr %ap)
// CHECK: %argp.cur = load ptr, ptr %ap, align 8
// CHECK: %argp.next = getelementptr inbounds i8, ptr %argp.cur, i64 16
// CHECK: store ptr %argp.next, ptr %ap, align 8
// CHECK: %0 = load ppc_fp128, ptr %argp.cur, align 8
// CHECK: store ppc_fp128 %0, ptr %r, align 16
-// CHECK: call void @llvm.va_end(ptr %ap)
+// CHECK: call void @llvm.va_end.p0(ptr %ap)
// CHECK: %1 = load ppc_fp128, ptr %r, align 16
// CHECK: ret ppc_fp128 %1
// CHECK: }
diff --git a/clang/test/CodeGenCXX/mangle-ms-back-references.cpp b/clang/test/CodeGenCXX/mangle-ms-back-references.cpp
index b27a9c5acacb..8707bff95340 100644
--- a/clang/test/CodeGenCXX/mangle-ms-back-references.cpp
+++ b/clang/test/CodeGenCXX/mangle-ms-back-references.cpp
@@ -1,5 +1,18 @@
// RUN: %clang_cc1 -fms-extensions -fblocks -emit-llvm %s -o - -triple=i386-pc-win32 | FileCheck %s
+namespace NS {
+// The name "RT1" for the name of the class below has been specifically
+// chosen to ensure that back reference lookup does not match against the
+// implicitly generated "$RT1" name of the reference temporary symbol.
+struct RT1 {
+ static const RT1& singleton;
+ int i;
+};
+const RT1& RT1::singleton = RT1{1};
+}
+// CHECK: "?$RT1@singleton@RT1@NS@@2ABU23@B"
+// CHECK: "?singleton@RT1@NS@@2ABU12@B"
+
void f1(const char* a, const char* b) {}
// CHECK: "?f1@@YAXPBD0@Z"
diff --git a/clang/test/CodeGenCXX/x86_64-vaarg.cpp b/clang/test/CodeGenCXX/x86_64-vaarg.cpp
index f0177906a09a..985a0cc41a14 100644
--- a/clang/test/CodeGenCXX/x86_64-vaarg.cpp
+++ b/clang/test/CodeGenCXX/x86_64-vaarg.cpp
@@ -1,10 +1,9 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -x c -o - %s | FileCheck %s
typedef struct { struct {} a; } empty;
-// CHECK-LABEL: @{{.*}}empty_record_test
+// CHECK-LABEL: @_Z17empty_record_testiz(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32, align 4
@@ -12,12 +11,57 @@ typedef struct { struct {} a; } empty;
// CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_EMPTY]], align 1
// CHECK-NEXT: store i32 [[Z:%.*]], ptr [[Z_ADDR]], align 4
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
-// CHECK-NEXT: call void @llvm.va_start(ptr [[ARRAYDECAY]])
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[ARRAYDECAY]])
// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
-// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[RETVAL]], ptr align 1 [[TMP]], i64 {{.*}}, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[RETVAL]], ptr align 1 [[TMP]], i64 1, i1 false)
// CHECK-NEXT: ret void
+//
empty empty_record_test(int z, ...) {
__builtin_va_list list;
__builtin_va_start(list, z);
return __builtin_va_arg(list, empty);
}
+
+typedef struct {
+ struct{} a;
+ double b;
+} s1;
+
+// CHECK-LABEL: @_Z1fiz(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
+// CHECK-NEXT: [[Z_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[LIST:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+// CHECK-NEXT: store i32 [[Z:%.*]], ptr [[Z_ADDR]], align 4
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
+// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[ARRAYDECAY]])
+// CHECK-NEXT: [[ARRAYDECAY1:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], ptr [[LIST]], i64 0, i64 0
+// CHECK-NEXT: [[FP_OFFSET_P:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG:%.*]], ptr [[ARRAYDECAY1]], i32 0, i32 1
+// CHECK-NEXT: [[FP_OFFSET:%.*]] = load i32, ptr [[FP_OFFSET_P]], align 4
+// CHECK-NEXT: [[FITS_IN_FP:%.*]] = icmp ule i32 [[FP_OFFSET]], 160
+// CHECK-NEXT: br i1 [[FITS_IN_FP]], label [[VAARG_IN_REG:%.*]], label [[VAARG_IN_MEM:%.*]]
+// CHECK: vaarg.in_reg:
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG]], ptr [[ARRAYDECAY1]], i32 0, i32 3
+// CHECK-NEXT: [[REG_SAVE_AREA:%.*]] = load ptr, ptr [[TMP0]], align 16
+// CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[REG_SAVE_AREA]], i32 [[FP_OFFSET]]
+// CHECK-NEXT: [[TMP2:%.*]] = add i32 [[FP_OFFSET]], 16
+// CHECK-NEXT: store i32 [[TMP2]], ptr [[FP_OFFSET_P]], align 4
+// CHECK-NEXT: br label [[VAARG_END:%.*]]
+// CHECK: vaarg.in_mem:
+// CHECK-NEXT: [[OVERFLOW_ARG_AREA_P:%.*]] = getelementptr inbounds [[STRUCT___VA_LIST_TAG]], ptr [[ARRAYDECAY1]], i32 0, i32 2
+// CHECK-NEXT: [[OVERFLOW_ARG_AREA:%.*]] = load ptr, ptr [[OVERFLOW_ARG_AREA_P]], align 8
+// CHECK-NEXT: [[OVERFLOW_ARG_AREA_NEXT:%.*]] = getelementptr i8, ptr [[OVERFLOW_ARG_AREA]], i32 16
+// CHECK-NEXT: store ptr [[OVERFLOW_ARG_AREA_NEXT]], ptr [[OVERFLOW_ARG_AREA_P]], align 8
+// CHECK-NEXT: br label [[VAARG_END]]
+// CHECK: vaarg.end:
+// CHECK-NEXT: [[VAARG_ADDR:%.*]] = phi ptr [ [[TMP1]], [[VAARG_IN_REG]] ], [ [[OVERFLOW_ARG_AREA]], [[VAARG_IN_MEM]] ]
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RETVAL]], ptr align 8 [[VAARG_ADDR]], i64 16, i1 false)
+// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[RETVAL]], i64 8
+// CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[TMP3]], align 8
+// CHECK-NEXT: ret double [[TMP4]]
+//
+s1 f(int z, ...) {
+ __builtin_va_list list;
+ __builtin_va_start(list, z);
+ return __builtin_va_arg(list, s1);
+}
diff --git a/clang/test/CodeGenHLSL/builtins/RWBuffer-elementtype.hlsl b/clang/test/CodeGenHLSL/builtins/RWBuffer-elementtype.hlsl
index 87002ccd462d..036c9c28ef27 100644
--- a/clang/test/CodeGenHLSL/builtins/RWBuffer-elementtype.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/RWBuffer-elementtype.hlsl
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.2-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s
RWBuffer<int16_t> BufI16;
RWBuffer<uint16_t> BufU16;
diff --git a/clang/test/CodeGenHLSL/builtins/bitreverse.hlsl b/clang/test/CodeGenHLSL/builtins/bitreverse.hlsl
deleted file mode 100644
index e7609a2b61e2..000000000000
--- a/clang/test/CodeGenHLSL/builtins/bitreverse.hlsl
+++ /dev/null
@@ -1,155 +0,0 @@
-// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
-// RUN: dxil-pc-shadermodel6.3-library %s -D__HLSL_ENABLE_16_BIT \
-// RUN: -emit-llvm -disable-llvm-passes -O3 -o - | FileCheck %s
-
-#ifdef __HLSL_ENABLE_16_BIT
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.bitreverse.i16(
-int16_t test_bitreverse_short(int16_t p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.bitreverse.v2i16(
-int16_t2 test_bitreverse_short2(int16_t2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.bitreverse.v3i16
-int16_t3 test_bitreverse_short3(int16_t3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.bitreverse.v4i16
-int16_t4 test_bitreverse_short4(int16_t4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.bitreverse.i16(
-uint16_t test_bitreverse_ushort(uint16_t p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.bitreverse.v2i16
-uint16_t2 test_bitreverse_ushort2(uint16_t2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.bitreverse.v3i16
-uint16_t3 test_bitreverse_ushort3(uint16_t3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.bitreverse.v4i16
-uint16_t4 test_bitreverse_ushort4(uint16_t4 p0)
-{
- return reversebits(p0);
-}
-#endif
-
-// CHECK: define noundef i32 @
-// CHECK: call i32 @llvm.bitreverse.i32(
-int test_bitreverse_int(int p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i32> @
-// CHECK: call <2 x i32> @llvm.bitreverse.v2i32
-int2 test_bitreverse_int2(int2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i32> @
-// CHECK: call <3 x i32> @llvm.bitreverse.v3i32
-int3 test_bitreverse_int3(int3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i32> @
-// CHECK: call <4 x i32> @llvm.bitreverse.v4i32
-int4 test_bitreverse_int4(int4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i32 @
-// CHECK: call i32 @llvm.bitreverse.i32(
-int test_bitreverse_uint(uint p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i32> @
-// CHECK: call <2 x i32> @llvm.bitreverse.v2i32
-uint2 test_bitreverse_uint2(uint2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i32> @
-// CHECK: call <3 x i32> @llvm.bitreverse.v3i32
-uint3 test_bitreverse_uint3(uint3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i32> @
-// CHECK: call <4 x i32> @llvm.bitreverse.v4i32
-uint4 test_bitreverse_uint4(uint4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i64 @
-// CHECK: call i64 @llvm.bitreverse.i64(
-int64_t test_bitreverse_long(int64_t p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i64> @
-// CHECK: call <2 x i64> @llvm.bitreverse.v2i64
-int64_t2 test_bitreverse_long2(int64_t2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i64> @
-// CHECK: call <3 x i64> @llvm.bitreverse.v3i64
-int64_t3 test_bitreverse_long3(int64_t3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i64> @
-// CHECK: call <4 x i64> @llvm.bitreverse.v4i64
-int64_t4 test_bitreverse_long4(int64_t4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i64 @
-// CHECK: call i64 @llvm.bitreverse.i64(
-uint64_t test_bitreverse_long(uint64_t p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i64> @
-// CHECK: call <2 x i64> @llvm.bitreverse.v2i64
-uint64_t2 test_bitreverse_long2(uint64_t2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i64> @
-// CHECK: call <3 x i64> @llvm.bitreverse.v3i64
-uint64_t3 test_bitreverse_long3(uint64_t3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i64> @
-// CHECK: call <4 x i64> @llvm.bitreverse.v4i64
-uint64_t4 test_bitreverse_long4(uint64_t4 p0)
-{
- return reversebits(p0);
-}
diff --git a/clang/test/CodeGenHLSL/builtins/ceil.hlsl b/clang/test/CodeGenHLSL/builtins/ceil.hlsl
index 06d0d4c2cf54..be7725cd4d66 100644
--- a/clang/test/CodeGenHLSL/builtins/ceil.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/ceil.hlsl
@@ -41,16 +41,3 @@ float3 test_ceil_float3(float3 p0) { return ceil(p0); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.ceil.v4f32(
float4 test_ceil_float4(float4 p0) { return ceil(p0); }
-
-// CHECK: define noundef double @
-// CHECK: call double @llvm.ceil.f64(
-double test_ceil_double(double p0) { return ceil(p0); }
-// CHECK: define noundef <2 x double> @
-// CHECK: call <2 x double> @llvm.ceil.v2f64(
-double2 test_ceil_double2(double2 p0) { return ceil(p0); }
-// CHECK: define noundef <3 x double> @
-// CHECK: call <3 x double> @llvm.ceil.v3f64(
-double3 test_ceil_double3(double3 p0) { return ceil(p0); }
-// CHECK: define noundef <4 x double> @
-// CHECK: call <4 x double> @llvm.ceil.v4f64(
-double4 test_ceil_double4(double4 p0) { return ceil(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/dot.hlsl b/clang/test/CodeGenHLSL/builtins/dot.hlsl
index 0f993193c00c..307d71cce3cb 100644
--- a/clang/test/CodeGenHLSL/builtins/dot.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/dot.hlsl
@@ -110,21 +110,21 @@ uint64_t test_dot_ulong4(uint64_t4 p0, uint64_t4 p1) { return dot(p0, p1); }
// NO_HALF: ret float %dx.dot
half test_dot_half(half p0, half p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot.v2f16(<2 x half> %0, <2 x half> %1)
+// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot2.v2f16(<2 x half> %0, <2 x half> %1)
// NATIVE_HALF: ret half %dx.dot
-// NO_HALF: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %0, <2 x float> %1)
+// NO_HALF: %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %0, <2 x float> %1)
// NO_HALF: ret float %dx.dot
half test_dot_half2(half2 p0, half2 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot.v3f16(<3 x half> %0, <3 x half> %1)
+// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot3.v3f16(<3 x half> %0, <3 x half> %1)
// NATIVE_HALF: ret half %dx.dot
-// NO_HALF: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %0, <3 x float> %1)
+// NO_HALF: %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %0, <3 x float> %1)
// NO_HALF: ret float %dx.dot
half test_dot_half3(half3 p0, half3 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot.v4f16(<4 x half> %0, <4 x half> %1)
+// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot4.v4f16(<4 x half> %0, <4 x half> %1)
// NATIVE_HALF: ret half %dx.dot
-// NO_HALF: %dx.dot = call float @llvm.dx.dot.v4f32(<4 x float> %0, <4 x float> %1)
+// NO_HALF: %dx.dot = call float @llvm.dx.dot4.v4f32(<4 x float> %0, <4 x float> %1)
// NO_HALF: ret float %dx.dot
half test_dot_half4(half4 p0, half4 p1) { return dot(p0, p1); }
@@ -132,34 +132,34 @@ half test_dot_half4(half4 p0, half4 p1) { return dot(p0, p1); }
// CHECK: ret float %dx.dot
float test_dot_float(float p0, float p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %0, <2 x float> %1)
+// CHECK: %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %0, <2 x float> %1)
// CHECK: ret float %dx.dot
float test_dot_float2(float2 p0, float2 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %0, <3 x float> %1)
+// CHECK: %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %0, <3 x float> %1)
// CHECK: ret float %dx.dot
float test_dot_float3(float3 p0, float3 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call float @llvm.dx.dot.v4f32(<4 x float> %0, <4 x float> %1)
+// CHECK: %dx.dot = call float @llvm.dx.dot4.v4f32(<4 x float> %0, <4 x float> %1)
// CHECK: ret float %dx.dot
float test_dot_float4(float4 p0, float4 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %splat.splat, <2 x float> %1)
+// CHECK: %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %splat.splat, <2 x float> %1)
// CHECK: ret float %dx.dot
float test_dot_float2_splat(float p0, float2 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %splat.splat, <3 x float> %1)
+// CHECK: %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %splat.splat, <3 x float> %1)
// CHECK: ret float %dx.dot
float test_dot_float3_splat(float p0, float3 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call float @llvm.dx.dot.v4f32(<4 x float> %splat.splat, <4 x float> %1)
+// CHECK: %dx.dot = call float @llvm.dx.dot4.v4f32(<4 x float> %splat.splat, <4 x float> %1)
// CHECK: ret float %dx.dot
float test_dot_float4_splat(float p0, float4 p1) { return dot(p0, p1); }
// CHECK: %conv = sitofp i32 %1 to float
// CHECK: %splat.splatinsert = insertelement <2 x float> poison, float %conv, i64 0
// CHECK: %splat.splat = shufflevector <2 x float> %splat.splatinsert, <2 x float> poison, <2 x i32> zeroinitializer
-// CHECK: %dx.dot = call float @llvm.dx.dot.v2f32(<2 x float> %0, <2 x float> %splat.splat)
+// CHECK: %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %0, <2 x float> %splat.splat)
// CHECK: ret float %dx.dot
float test_builtin_dot_float2_int_splat(float2 p0, int p1) {
return dot(p0, p1);
@@ -168,7 +168,7 @@ float test_builtin_dot_float2_int_splat(float2 p0, int p1) {
// CHECK: %conv = sitofp i32 %1 to float
// CHECK: %splat.splatinsert = insertelement <3 x float> poison, float %conv, i64 0
// CHECK: %splat.splat = shufflevector <3 x float> %splat.splatinsert, <3 x float> poison, <3 x i32> zeroinitializer
-// CHECK: %dx.dot = call float @llvm.dx.dot.v3f32(<3 x float> %0, <3 x float> %splat.splat)
+// CHECK: %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %0, <3 x float> %splat.splat)
// CHECK: ret float %dx.dot
float test_builtin_dot_float3_int_splat(float3 p0, int p1) {
return dot(p0, p1);
diff --git a/clang/test/CodeGenHLSL/builtins/floor.hlsl b/clang/test/CodeGenHLSL/builtins/floor.hlsl
index d2a2f6e52f1e..07803bfae3be 100644
--- a/clang/test/CodeGenHLSL/builtins/floor.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/floor.hlsl
@@ -41,16 +41,3 @@ float3 test_floor_float3(float3 p0) { return floor(p0); }
// CHECK: define noundef <4 x float> @
// CHECK: call <4 x float> @llvm.floor.v4f32(
float4 test_floor_float4(float4 p0) { return floor(p0); }
-
-// CHECK: define noundef double @
-// CHECK: call double @llvm.floor.f64(
-double test_floor_double(double p0) { return floor(p0); }
-// CHECK: define noundef <2 x double> @
-// CHECK: call <2 x double> @llvm.floor.v2f64(
-double2 test_floor_double2(double2 p0) { return floor(p0); }
-// CHECK: define noundef <3 x double> @
-// CHECK: call <3 x double> @llvm.floor.v3f64(
-double3 test_floor_double3(double3 p0) { return floor(p0); }
-// CHECK: define noundef <4 x double> @
-// CHECK: call <4 x double> @llvm.floor.v4f64(
-double4 test_floor_double4(double4 p0) { return floor(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/pow.hlsl b/clang/test/CodeGenHLSL/builtins/pow.hlsl
index e996ca2f3364..057cd7215aa5 100644
--- a/clang/test/CodeGenHLSL/builtins/pow.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/pow.hlsl
@@ -39,16 +39,3 @@ float3 test_pow_float3(float3 p0, float3 p1) { return pow(p0, p1); }
// CHECK: define noundef <4 x float> @"?test_pow_float4
// CHECK: call <4 x float> @llvm.pow.v4f32
float4 test_pow_float4(float4 p0, float4 p1) { return pow(p0, p1); }
-
-// CHECK: define noundef double @"?test_pow_double@@YANNN@Z"(
-// CHECK: call double @llvm.pow.f64(
-double test_pow_double(double p0, double p1) { return pow(p0, p1); }
-// CHECK: define noundef <2 x double> @"?test_pow_double2@@YAT?$__vector@N$01@__clang@@T12@0@Z"(
-// CHECK: call <2 x double> @llvm.pow.v2f64
-double2 test_pow_double2(double2 p0, double2 p1) { return pow(p0, p1); }
-// CHECK: define noundef <3 x double> @"?test_pow_double3@@YAT?$__vector@N$02@__clang@@T12@0@Z"(
-// CHECK: call <3 x double> @llvm.pow.v3f64
-double3 test_pow_double3(double3 p0, double3 p1) { return pow(p0, p1); }
-// CHECK: define noundef <4 x double> @"?test_pow_double4@@YAT?$__vector@N$03@__clang@@T12@0@Z"(
-// CHECK: call <4 x double> @llvm.pow.v4f64
-double4 test_pow_double4(double4 p0, double4 p1) { return pow(p0, p1); }
diff --git a/clang/test/CodeGenHLSL/builtins/reversebits.hlsl b/clang/test/CodeGenHLSL/builtins/reversebits.hlsl
index 6da7d289f82e..a319417e97a4 100644
--- a/clang/test/CodeGenHLSL/builtins/reversebits.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/reversebits.hlsl
@@ -5,31 +5,6 @@
#ifdef __HLSL_ENABLE_16_BIT
// CHECK: define noundef i16 @
// CHECK: call i16 @llvm.bitreverse.i16(
-int16_t test_bitreverse_short(int16_t p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i16> @
-// CHECK: call <2 x i16> @llvm.bitreverse.v2i16(
-int16_t2 test_bitreverse_short2(int16_t2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i16> @
-// CHECK: call <3 x i16> @llvm.bitreverse.v3i16
-int16_t3 test_bitreverse_short3(int16_t3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i16> @
-// CHECK: call <4 x i16> @llvm.bitreverse.v4i16
-int16_t4 test_bitreverse_short4(int16_t4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i16 @
-// CHECK: call i16 @llvm.bitreverse.i16(
uint16_t test_bitreverse_ushort(uint16_t p0)
{
return reversebits(p0);
@@ -56,31 +31,6 @@ uint16_t4 test_bitreverse_ushort4(uint16_t4 p0)
// CHECK: define noundef i32 @
// CHECK: call i32 @llvm.bitreverse.i32(
-int test_bitreverse_int(int p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i32> @
-// CHECK: call <2 x i32> @llvm.bitreverse.v2i32
-int2 test_bitreverse_int2(int2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i32> @
-// CHECK: call <3 x i32> @llvm.bitreverse.v3i32
-int3 test_bitreverse_int3(int3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i32> @
-// CHECK: call <4 x i32> @llvm.bitreverse.v4i32
-int4 test_bitreverse_int4(int4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i32 @
-// CHECK: call i32 @llvm.bitreverse.i32(
int test_bitreverse_uint(uint p0)
{
return reversebits(p0);
@@ -106,31 +56,6 @@ uint4 test_bitreverse_uint4(uint4 p0)
// CHECK: define noundef i64 @
// CHECK: call i64 @llvm.bitreverse.i64(
-int64_t test_bitreverse_long(int64_t p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <2 x i64> @
-// CHECK: call <2 x i64> @llvm.bitreverse.v2i64
-int64_t2 test_bitreverse_long2(int64_t2 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <3 x i64> @
-// CHECK: call <3 x i64> @llvm.bitreverse.v3i64
-int64_t3 test_bitreverse_long3(int64_t3 p0)
-{
- return reversebits(p0);
-}
-// CHECK: define noundef <4 x i64> @
-// CHECK: call <4 x i64> @llvm.bitreverse.v4i64
-int64_t4 test_bitreverse_long4(int64_t4 p0)
-{
- return reversebits(p0);
-}
-
-// CHECK: define noundef i64 @
-// CHECK: call i64 @llvm.bitreverse.i64(
uint64_t test_bitreverse_long(uint64_t p0)
{
return reversebits(p0);
diff --git a/clang/test/CodeGenHLSL/builtins/round.hlsl b/clang/test/CodeGenHLSL/builtins/round.hlsl
index b9f35bd3712d..33d761dbdfbe 100644
--- a/clang/test/CodeGenHLSL/builtins/round.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/round.hlsl
@@ -7,47 +7,47 @@
// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
// NATIVE_HALF: define noundef half @
-// NATIVE_HALF: %elt.round = call half @llvm.round.f16(
-// NATIVE_HALF: ret half %elt.round
+// NATIVE_HALF: %elt.roundeven = call half @llvm.roundeven.f16(
+// NATIVE_HALF: ret half %elt.roundeven
// NO_HALF: define noundef float @"?test_round_half@@YA$halff@$halff@@Z"(
-// NO_HALF: %elt.round = call float @llvm.round.f32(
-// NO_HALF: ret float %elt.round
+// NO_HALF: %elt.roundeven = call float @llvm.roundeven.f32(
+// NO_HALF: ret float %elt.roundeven
half test_round_half(half p0) { return round(p0); }
// NATIVE_HALF: define noundef <2 x half> @
-// NATIVE_HALF: %elt.round = call <2 x half> @llvm.round.v2f16
-// NATIVE_HALF: ret <2 x half> %elt.round
+// NATIVE_HALF: %elt.roundeven = call <2 x half> @llvm.roundeven.v2f16
+// NATIVE_HALF: ret <2 x half> %elt.roundeven
// NO_HALF: define noundef <2 x float> @
-// NO_HALF: %elt.round = call <2 x float> @llvm.round.v2f32(
-// NO_HALF: ret <2 x float> %elt.round
+// NO_HALF: %elt.roundeven = call <2 x float> @llvm.roundeven.v2f32(
+// NO_HALF: ret <2 x float> %elt.roundeven
half2 test_round_half2(half2 p0) { return round(p0); }
// NATIVE_HALF: define noundef <3 x half> @
-// NATIVE_HALF: %elt.round = call <3 x half> @llvm.round.v3f16
-// NATIVE_HALF: ret <3 x half> %elt.round
+// NATIVE_HALF: %elt.roundeven = call <3 x half> @llvm.roundeven.v3f16
+// NATIVE_HALF: ret <3 x half> %elt.roundeven
// NO_HALF: define noundef <3 x float> @
-// NO_HALF: %elt.round = call <3 x float> @llvm.round.v3f32(
-// NO_HALF: ret <3 x float> %elt.round
+// NO_HALF: %elt.roundeven = call <3 x float> @llvm.roundeven.v3f32(
+// NO_HALF: ret <3 x float> %elt.roundeven
half3 test_round_half3(half3 p0) { return round(p0); }
// NATIVE_HALF: define noundef <4 x half> @
-// NATIVE_HALF: %elt.round = call <4 x half> @llvm.round.v4f16
-// NATIVE_HALF: ret <4 x half> %elt.round
+// NATIVE_HALF: %elt.roundeven = call <4 x half> @llvm.roundeven.v4f16
+// NATIVE_HALF: ret <4 x half> %elt.roundeven
// NO_HALF: define noundef <4 x float> @
-// NO_HALF: %elt.round = call <4 x float> @llvm.round.v4f32(
-// NO_HALF: ret <4 x float> %elt.round
+// NO_HALF: %elt.roundeven = call <4 x float> @llvm.roundeven.v4f32(
+// NO_HALF: ret <4 x float> %elt.roundeven
half4 test_round_half4(half4 p0) { return round(p0); }
// CHECK: define noundef float @
-// CHECK: %elt.round = call float @llvm.round.f32(
-// CHECK: ret float %elt.round
+// CHECK: %elt.roundeven = call float @llvm.roundeven.f32(
+// CHECK: ret float %elt.roundeven
float test_round_float(float p0) { return round(p0); }
// CHECK: define noundef <2 x float> @
-// CHECK: %elt.round = call <2 x float> @llvm.round.v2f32
-// CHECK: ret <2 x float> %elt.round
+// CHECK: %elt.roundeven = call <2 x float> @llvm.roundeven.v2f32
+// CHECK: ret <2 x float> %elt.roundeven
float2 test_round_float2(float2 p0) { return round(p0); }
// CHECK: define noundef <3 x float> @
-// CHECK: %elt.round = call <3 x float> @llvm.round.v3f32
-// CHECK: ret <3 x float> %elt.round
+// CHECK: %elt.roundeven = call <3 x float> @llvm.roundeven.v3f32
+// CHECK: ret <3 x float> %elt.roundeven
float3 test_round_float3(float3 p0) { return round(p0); }
// CHECK: define noundef <4 x float> @
-// CHECK: %elt.round = call <4 x float> @llvm.round.v4f32
-// CHECK: ret <4 x float> %elt.round
+// CHECK: %elt.roundeven = call <4 x float> @llvm.roundeven.v4f32
+// CHECK: ret <4 x float> %elt.roundeven
float4 test_round_float4(float4 p0) { return round(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/sqrt.hlsl b/clang/test/CodeGenHLSL/builtins/sqrt.hlsl
index 2c2a09617cf8..adbbf69a8e06 100644
--- a/clang/test/CodeGenHLSL/builtins/sqrt.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/sqrt.hlsl
@@ -1,29 +1,53 @@
-// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
-// RUN: dxil-pc-shadermodel6.2-library %s -fnative-half-type \
-// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
-using hlsl::sqrt;
+// NATIVE_HALF: define noundef half @
+// NATIVE_HALF: %{{.*}} = call half @llvm.sqrt.f16(
+// NATIVE_HALF: ret half %{{.*}}
+// NO_HALF: define noundef float @"?test_sqrt_half@@YA$halff@$halff@@Z"(
+// NO_HALF: %{{.*}} = call float @llvm.sqrt.f32(
+// NO_HALF: ret float %{{.*}}
+half test_sqrt_half(half p0) { return sqrt(p0); }
+// NATIVE_HALF: define noundef <2 x half> @
+// NATIVE_HALF: %{{.*}} = call <2 x half> @llvm.sqrt.v2f16
+// NATIVE_HALF: ret <2 x half> %{{.*}}
+// NO_HALF: define noundef <2 x float> @
+// NO_HALF: %{{.*}} = call <2 x float> @llvm.sqrt.v2f32(
+// NO_HALF: ret <2 x float> %{{.*}}
+half2 test_sqrt_half2(half2 p0) { return sqrt(p0); }
+// NATIVE_HALF: define noundef <3 x half> @
+// NATIVE_HALF: %{{.*}} = call <3 x half> @llvm.sqrt.v3f16
+// NATIVE_HALF: ret <3 x half> %{{.*}}
+// NO_HALF: define noundef <3 x float> @
+// NO_HALF: %{{.*}} = call <3 x float> @llvm.sqrt.v3f32(
+// NO_HALF: ret <3 x float> %{{.*}}
+half3 test_sqrt_half3(half3 p0) { return sqrt(p0); }
+// NATIVE_HALF: define noundef <4 x half> @
+// NATIVE_HALF: %{{.*}} = call <4 x half> @llvm.sqrt.v4f16
+// NATIVE_HALF: ret <4 x half> %{{.*}}
+// NO_HALF: define noundef <4 x float> @
+// NO_HALF: %{{.*}} = call <4 x float> @llvm.sqrt.v4f32(
+// NO_HALF: ret <4 x float> %{{.*}}
+half4 test_sqrt_half4(half4 p0) { return sqrt(p0); }
-double sqrt_d(double x)
-{
- return sqrt(x);
-}
-
-// CHECK: define noundef double @"?sqrt_d@@YANN@Z"(
-// CHECK: call double @llvm.sqrt.f64(double %0)
-
-float sqrt_f(float x)
-{
- return sqrt(x);
-}
-
-// CHECK: define noundef float @"?sqrt_f@@YAMM@Z"(
-// CHECK: call float @llvm.sqrt.f32(float %0)
-
-half sqrt_h(half x)
-{
- return sqrt(x);
-}
-
-// CHECK: define noundef half @"?sqrt_h@@YA$f16@$f16@@Z"(
-// CHECK: call half @llvm.sqrt.f16(half %0)
+// CHECK: define noundef float @
+// CHECK: %{{.*}} = call float @llvm.sqrt.f32(
+// CHECK: ret float %{{.*}}
+float test_sqrt_float(float p0) { return sqrt(p0); }
+// CHECK: define noundef <2 x float> @
+// CHECK: %{{.*}} = call <2 x float> @llvm.sqrt.v2f32
+// CHECK: ret <2 x float> %{{.*}}
+float2 test_sqrt_float2(float2 p0) { return sqrt(p0); }
+// CHECK: define noundef <3 x float> @
+// CHECK: %{{.*}} = call <3 x float> @llvm.sqrt.v3f32
+// CHECK: ret <3 x float> %{{.*}}
+float3 test_sqrt_float3(float3 p0) { return sqrt(p0); }
+// CHECK: define noundef <4 x float> @
+// CHECK: %{{.*}} = call <4 x float> @llvm.sqrt.v4f32
+// CHECK: ret <4 x float> %{{.*}}
+float4 test_sqrt_float4(float4 p0) { return sqrt(p0); }
diff --git a/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_do_while.hlsl b/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_do_while.hlsl
new file mode 100644
index 000000000000..9481b0d60a27
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_do_while.hlsl
@@ -0,0 +1,40 @@
+// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-pc-vulkan-library %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+
+// CHECK: define spir_func void @main() [[A0:#[0-9]+]] {
+void main() {
+// CHECK: entry:
+// CHECK: %[[CT_ENTRY:[0-9]+]] = call token @llvm.experimental.convergence.entry()
+// CHECK: br label %[[LABEL_WHILE_COND:.+]]
+ int cond = 0;
+
+// CHECK: [[LABEL_WHILE_COND]]:
+// CHECK: %[[CT_LOOP:[0-9]+]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %[[CT_ENTRY]]) ]
+// CHECK: br label %[[LABEL_WHILE_BODY:.+]]
+ while (true) {
+
+// CHECK: [[LABEL_WHILE_BODY]]:
+// CHECK: br i1 {{%.+}}, label %[[LABEL_IF_THEN:.+]], label %[[LABEL_IF_END:.+]]
+
+// CHECK: [[LABEL_IF_THEN]]:
+// CHECK: call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %[[CT_LOOP]]) ]
+// CHECK: br label %[[LABEL_WHILE_END:.+]]
+ if (cond == 2) {
+ uint index = WaveGetLaneIndex();
+ break;
+ }
+
+// CHECK: [[LABEL_IF_END]]:
+// CHECK: br label %[[LABEL_WHILE_COND]]
+ cond++;
+ }
+
+// CHECK: [[LABEL_WHILE_END]]:
+// CHECK: ret void
+}
+
+// CHECK-DAG: declare i32 @__hlsl_wave_get_lane_index() [[A1:#[0-9]+]]
+
+// CHECK-DAG: attributes [[A0]] = {{{.*}}convergent{{.*}}}
+// CHECK-DAG: attributes [[A1]] = {{{.*}}convergent{{.*}}}
+
diff --git a/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_simple.hlsl b/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_simple.hlsl
new file mode 100644
index 000000000000..8f52d81091c1
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_simple.hlsl
@@ -0,0 +1,14 @@
+// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-pc-vulkan-library %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+
+// CHECK: define spir_func noundef i32 @_Z6test_1v() [[A0:#[0-9]+]] {
+// CHECK: %[[CI:[0-9]+]] = call token @llvm.experimental.convergence.entry()
+// CHECK: call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %[[CI]]) ]
+uint test_1() {
+ return WaveGetLaneIndex();
+}
+
+// CHECK: declare i32 @__hlsl_wave_get_lane_index() [[A1:#[0-9]+]]
+
+// CHECK-DAG: attributes [[A0]] = { {{.*}}convergent{{.*}} }
+// CHECK-DAG: attributes [[A1]] = { {{.*}}convergent{{.*}} }
diff --git a/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_subcall.hlsl b/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_subcall.hlsl
new file mode 100644
index 000000000000..379c8f118f52
--- /dev/null
+++ b/clang/test/CodeGenHLSL/builtins/wave_get_lane_index_subcall.hlsl
@@ -0,0 +1,21 @@
+// RUN: %clang_cc1 -std=hlsl2021 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-pc-vulkan-library %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+
+// CHECK: define spir_func noundef i32 @_Z6test_1v() [[A0:#[0-9]+]] {
+// CHECK: %[[C1:[0-9]+]] = call token @llvm.experimental.convergence.entry()
+// CHECK: call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %[[C1]]) ]
+uint test_1() {
+ return WaveGetLaneIndex();
+}
+
+// CHECK-DAG: declare i32 @__hlsl_wave_get_lane_index() [[A1:#[0-9]+]]
+
+// CHECK: define spir_func noundef i32 @_Z6test_2v() [[A0]] {
+// CHECK: %[[C2:[0-9]+]] = call token @llvm.experimental.convergence.entry()
+// CHECK: call spir_func noundef i32 @_Z6test_1v() [ "convergencectrl"(token %[[C2]]) ]
+uint test_2() {
+ return test_1();
+}
+
+// CHECK-DAG: attributes [[A0]] = {{{.*}}convergent{{.*}}}
+// CHECK-DAG: attributes [[A1]] = {{{.*}}convergent{{.*}}}
diff --git a/clang/test/CodeGenOpenCL/amdgpu-printf.cl b/clang/test/CodeGenOpenCL/amdgpu-printf.cl
index 6c84485b66b4..edf6dbf8657c 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-printf.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-printf.cl
@@ -30,14 +30,7 @@ __kernel void test_printf_int(int i) {
// CHECK-NEXT: [[S:%.*]] = alloca [4 x i8], align 1, addrspace(5)
// CHECK-NEXT: store i32 [[I:%.*]], ptr addrspace(5) [[I_ADDR]], align 4, !tbaa [[TBAA8]]
// CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[S]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT: [[LOC0:%.*]] = getelementptr i8, ptr addrspace(5) [[S]], i64 0
-// CHECK-NEXT: store i8 102, ptr addrspace(5) [[LOC0]], align 1
-// CHECK-NEXT: [[LOC1:%.*]] = getelementptr i8, ptr addrspace(5) [[S]], i64 1
-// CHECK-NEXT: store i8 111, ptr addrspace(5) [[LOC1]], align 1
-// CHECK-NEXT: [[LOC2:%.*]] = getelementptr i8, ptr addrspace(5) [[S]], i64 2
-// CHECK-NEXT: store i8 111, ptr addrspace(5) [[LOC2]], align 1
-// CHECK-NEXT: [[LOC3:%.*]] = getelementptr i8, ptr addrspace(5) [[S]], i64 3
-// CHECK-NEXT: store i8 0, ptr addrspace(5) [[LOC3]], align 1
+// CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) align 1 [[S]], ptr addrspace(4) align 1 @__const.test_printf_str_int.s, i64 4, i1 false)
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr addrspace(5) [[S]], i64 0, i64 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(5) [[I_ADDR]], align 4, !tbaa [[TBAA8]]
// CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr addrspace(4), ...) @printf(ptr addrspace(4) noundef @.str.2, ptr addrspace(5) noundef [[ARRAYDECAY]], i32 noundef [[TMP2]]) #[[ATTR4]]
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx11-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx11-err.cl
index f7afb7cb97ed..1e78ab283486 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx11-err.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx11-err.cl
@@ -4,23 +4,14 @@
// REQUIRES: amdgpu-registered-target
typedef int v2i __attribute__((ext_vector_type(2)));
-typedef half v8h __attribute__((ext_vector_type(8)));
typedef short v8s __attribute__((ext_vector_type(8)));
-
-typedef half v4h __attribute__((ext_vector_type(4)));
typedef short v4s __attribute__((ext_vector_type(4)));
-
-
-void amdgcn_global_load_tr(global v2i* v2i_inptr, global v8s* v8s_inptr, global v8h* v8h_inptr,
- global int* int_inptr, global v4s* v4s_inptr, global v4h* v4h_inptr)
+void amdgcn_global_load_tr(global v2i* v2i_inptr, global v8s* v8s_inptr, global int* int_inptr, global v4s* v4s_inptr)
{
- v2i out_1 = __builtin_amdgcn_global_load_tr_v2i32(v2i_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v2i32' needs target feature gfx12-insts,wavefrontsize32}}
- v8s out_2 = __builtin_amdgcn_global_load_tr_v8i16(v8s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v8i16' needs target feature gfx12-insts,wavefrontsize32}}
- v8h out_3 = __builtin_amdgcn_global_load_tr_v8f16(v8h_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v8f16' needs target feature gfx12-insts,wavefrontsize32}}
+ v2i out_1 = __builtin_amdgcn_global_load_tr_b64_v2i32(v2i_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b64_v2i32' needs target feature gfx12-insts,wavefrontsize32}}
+ v8s out_2 = __builtin_amdgcn_global_load_tr_b128_v8i16(v8s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b128_v8i16' needs target feature gfx12-insts,wavefrontsize32}}
- int out_4 = __builtin_amdgcn_global_load_tr_i32(int_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_i32' needs target feature gfx12-insts,wavefrontsize64}}
- v4s out_5 = __builtin_amdgcn_global_load_tr_v4i16(v4s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v4i16' needs target feature gfx12-insts,wavefrontsize64}}
- v4h out_6 = __builtin_amdgcn_global_load_tr_v4f16(v4h_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v4f16' needs target feature gfx12-insts,wavefrontsize64}}
+ int out_3 = __builtin_amdgcn_global_load_tr_b64_i32(int_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b64_i32' needs target feature gfx12-insts,wavefrontsize64}}
+ v4s out_4 = __builtin_amdgcn_global_load_tr_b128_v4i16(v4s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b128_v4i16' needs target feature gfx12-insts,wavefrontsize64}}
}
-
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w32-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w32-err.cl
index 04ac0a66db7c..1acc4cd7adc9 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w32-err.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w32-err.cl
@@ -3,13 +3,10 @@
// REQUIRES: amdgpu-registered-target
-typedef half v4h __attribute__((ext_vector_type(4)));
typedef short v4s __attribute__((ext_vector_type(4)));
-void amdgcn_global_load_tr(global int* int_inptr, global v4s* v4s_inptr, global v4h* v4h_inptr)
+void amdgcn_global_load_tr(global int* int_inptr, global v4s* v4s_inptr)
{
- int out_4 = __builtin_amdgcn_global_load_tr_i32(int_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_i32' needs target feature gfx12-insts,wavefrontsize64}}
- v4s out_5 = __builtin_amdgcn_global_load_tr_v4i16(v4s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v4i16' needs target feature gfx12-insts,wavefrontsize64}}
- v4h out_6 = __builtin_amdgcn_global_load_tr_v4f16(v4h_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v4f16' needs target feature gfx12-insts,wavefrontsize64}}
+ int out_1 = __builtin_amdgcn_global_load_tr_b64_i32(int_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b64_i32' needs target feature gfx12-insts,wavefrontsize64}}
+ v4s out_2 = __builtin_amdgcn_global_load_tr_b128_v4i16(v4s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b128_v4i16' needs target feature gfx12-insts,wavefrontsize64}}
}
-
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w64-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w64-err.cl
index 113b54b853a9..96b0e4c3993a 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w64-err.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-gfx12-w64-err.cl
@@ -4,13 +4,10 @@
// REQUIRES: amdgpu-registered-target
typedef int v2i __attribute__((ext_vector_type(2)));
-typedef half v8h __attribute__((ext_vector_type(8)));
typedef short v8s __attribute__((ext_vector_type(8)));
-void amdgcn_global_load_tr(global v2i* v2i_inptr, global v8s* v8s_inptr, global v8h* v8h_inptr)
+void amdgcn_global_load_tr(global v2i* v2i_inptr, global v8s* v8s_inptr)
{
- v2i out_1 = __builtin_amdgcn_global_load_tr_v2i32(v2i_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v2i32' needs target feature gfx12-insts,wavefrontsize32}}
- v8s out_2 = __builtin_amdgcn_global_load_tr_v8i16(v8s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v8i16' needs target feature gfx12-insts,wavefrontsize32}}
- v8h out_3 = __builtin_amdgcn_global_load_tr_v8f16(v8h_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_v8f16' needs target feature gfx12-insts,wavefrontsize32}}
+ v2i out_1 = __builtin_amdgcn_global_load_tr_b64_v2i32(v2i_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b64_v2i32' needs target feature gfx12-insts,wavefrontsize32}}
+ v8s out_2 = __builtin_amdgcn_global_load_tr_b128_v8i16(v8s_inptr); // expected-error{{'__builtin_amdgcn_global_load_tr_b128_v8i16' needs target feature gfx12-insts,wavefrontsize32}}
}
-
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w32.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w32.cl
index b5fcad68a470..126d7d6fb7b0 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w32.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w32.cl
@@ -3,46 +3,24 @@
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1200 -target-feature +wavefrontsize32 -S -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-GFX1200
typedef int v2i __attribute__((ext_vector_type(2)));
-typedef half v8h __attribute__((ext_vector_type(8)));
typedef short v8s __attribute__((ext_vector_type(8)));
-// Wave32
-
-//
-// amdgcn_global_load_tr
-//
-
-// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_v2i32(
+// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_b64_v2i32(
// CHECK-GFX1200-NEXT: entry:
-// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1) [[INPTR:%.*]])
+// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]])
// CHECK-GFX1200-NEXT: ret <2 x i32> [[TMP0]]
//
-v2i test_amdgcn_global_load_tr_v2i32(global v2i* inptr)
+v2i test_amdgcn_global_load_tr_b64_v2i32(global v2i* inptr)
{
- return __builtin_amdgcn_global_load_tr_v2i32(inptr);
+ return __builtin_amdgcn_global_load_tr_b64_v2i32(inptr);
}
-//
-// amdgcn_global_load_tr
-//
-
-// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_v8i16(
+// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_b128_v8i16(
// CHECK-GFX1200-NEXT: entry:
-// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1) [[INPTR:%.*]])
+// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1) [[INPTR:%.*]])
// CHECK-GFX1200-NEXT: ret <8 x i16> [[TMP0]]
//
-v8s test_amdgcn_global_load_tr_v8i16(global v8s* inptr)
+v8s test_amdgcn_global_load_tr_b128_v8i16(global v8s* inptr)
{
- return __builtin_amdgcn_global_load_tr_v8i16(inptr);
+ return __builtin_amdgcn_global_load_tr_b128_v8i16(inptr);
}
-
-// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_v8f16(
-// CHECK-GFX1200-NEXT: entry:
-// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1) [[INPTR:%.*]])
-// CHECK-GFX1200-NEXT: ret <8 x half> [[TMP0]]
-//
-v8h test_amdgcn_global_load_tr_v8f16(global v8h* inptr)
-{
- return __builtin_amdgcn_global_load_tr_v8f16(inptr);
-}
-
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w64.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w64.cl
index 9c48ac071b4d..7c70ccf73ad3 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w64.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-tr-w64.cl
@@ -2,46 +2,24 @@
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1200 -target-feature +wavefrontsize64 -S -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-GFX1200
-typedef half v4h __attribute__((ext_vector_type(4)));
typedef short v4s __attribute__((ext_vector_type(4)));
-// Wave64
-
-//
-// amdgcn_global_load_tr
-//
-
-// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_i32(
+// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_b64_i32(
// CHECK-GFX1200-NEXT: entry:
-// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1) [[INPTR:%.*]])
+// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1) [[INPTR:%.*]])
// CHECK-GFX1200-NEXT: ret i32 [[TMP0]]
//
-int test_amdgcn_global_load_tr_i32(global int* inptr)
+int test_amdgcn_global_load_tr_b64_i32(global int* inptr)
{
- return __builtin_amdgcn_global_load_tr_i32(inptr);
+ return __builtin_amdgcn_global_load_tr_b64_i32(inptr);
}
-//
-// amdgcn_global_load_tr
-//
-
-// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_v4i16(
+// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_b128_v4i16(
// CHECK-GFX1200-NEXT: entry:
-// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1) [[INPTR:%.*]])
+// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1) [[INPTR:%.*]])
// CHECK-GFX1200-NEXT: ret <4 x i16> [[TMP0]]
//
-v4s test_amdgcn_global_load_tr_v4i16(global v4s* inptr)
+v4s test_amdgcn_global_load_tr_b128_v4i16(global v4s* inptr)
{
- return __builtin_amdgcn_global_load_tr_v4i16(inptr);
+ return __builtin_amdgcn_global_load_tr_b128_v4i16(inptr);
}
-
-// CHECK-GFX1200-LABEL: @test_amdgcn_global_load_tr_v4f16(
-// CHECK-GFX1200-NEXT: entry:
-// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1) [[INPTR:%.*]])
-// CHECK-GFX1200-NEXT: ret <4 x half> [[TMP0]]
-//
-v4h test_amdgcn_global_load_tr_v4f16(global v4h* inptr)
-{
- return __builtin_amdgcn_global_load_tr_v4f16(inptr);
-}
-
diff --git a/clang/test/Driver/aarch64-ptrauth.c b/clang/test/Driver/aarch64-ptrauth.c
new file mode 100644
index 000000000000..1a69b2c6edfb
--- /dev/null
+++ b/clang/test/Driver/aarch64-ptrauth.c
@@ -0,0 +1,5 @@
+// RUN: %clang -### -c --target=aarch64 -fno-ptrauth-intrinsics -fptrauth-intrinsics %s 2>&1 | FileCheck %s --check-prefix=INTRIN
+// INTRIN: "-cc1"{{.*}} "-fptrauth-intrinsics"
+
+// RUN: not %clang -### -c --target=x86_64 -fptrauth-intrinsics %s 2>&1 | FileCheck %s --check-prefix=ERR
+// ERR: error: unsupported option '-fptrauth-intrinsics' for target '{{.*}}'
diff --git a/clang/test/Driver/aarch64-sve.c b/clang/test/Driver/aarch64-sve.c
index f34b2700deb9..4a33c2e3c8d3 100644
--- a/clang/test/Driver/aarch64-sve.c
+++ b/clang/test/Driver/aarch64-sve.c
@@ -6,12 +6,11 @@
// RUN: %clang --target=aarch64 -march=armv8.6a -### -c %s 2>&1 | FileCheck -check-prefix=GENERICV8A-NOSVE %s
// GENERICV8A-NOSVE-NOT: "-target-feature" "+sve"
-// The 32-bit floating point matrix multiply extension is enabled by default
-// for armv8.6-a targets (or later) with SVE, and can optionally be enabled for
-// any target from armv8.2a onwards (we don't enforce not using it with earlier
-// targets).
+// The 32-bit floating point matrix multiply extension is an optional feature
+// that can be used for any target from armv8.2a and onwards. This can be
+// enabled using the `+f32mm` option.`.
// RUN: %clang --target=aarch64 -march=armv8.6a -### -c %s 2>&1 | FileCheck -check-prefix=NO-F32MM %s
-// RUN: %clang --target=aarch64 -march=armv8.6a+sve -### -c %s 2>&1 | FileCheck -check-prefix=F32MM %s
+// RUN: %clang --target=aarch64 -march=armv8.6a+sve+f32mm -### -c %s 2>&1 | FileCheck -check-prefix=F32MM %s
// RUN: %clang --target=aarch64 -march=armv8.5a+f32mm -### -c %s 2>&1 | FileCheck -check-prefix=F32MM %s
// NO-F32MM-NOT: "-target-feature" "+f32mm"
// F32MM: "-target-feature" "+f32mm"
diff --git a/clang/test/Driver/clang-offload-bundler-asserts-on.c b/clang/test/Driver/clang-offload-bundler-asserts-on.c
index 521c8641ff54..eb11d5fbbee4 100644
--- a/clang/test/Driver/clang-offload-bundler-asserts-on.c
+++ b/clang/test/Driver/clang-offload-bundler-asserts-on.c
@@ -1,6 +1,6 @@
// REQUIRES: x86-registered-target
// REQUIRES: asserts
-// UNSUPPORTED: target={{.*}}-darwin{{.*}}, target={{.*}}-aix{{.*}}
+// UNSUPPORTED: target={{.*}}-macosx{{.*}}, target={{.*}}-darwin{{.*}}, target={{.*}}-aix{{.*}}
// Generate the file we can bundle.
// RUN: %clang -O0 -target %itanium_abi_triple %s -c -o %t.o
diff --git a/clang/test/Driver/clang-offload-bundler-standardize.c b/clang/test/Driver/clang-offload-bundler-standardize.c
index 6a24968c30ef..91dc8947aabb 100644
--- a/clang/test/Driver/clang-offload-bundler-standardize.c
+++ b/clang/test/Driver/clang-offload-bundler-standardize.c
@@ -1,6 +1,6 @@
// REQUIRES: x86-registered-target
// REQUIRES: asserts
-// UNSUPPORTED: target={{.*}}-darwin{{.*}}, target={{.*}}-aix{{.*}}
+// UNSUPPORTED: target={{.*}}-macosx{{.*}}, target={{.*}}-darwin{{.*}}, target={{.*}}-aix{{.*}}
// REQUIRES: asserts
// Generate the file we can bundle.
diff --git a/clang/test/Driver/clang-offload-bundler.c b/clang/test/Driver/clang-offload-bundler.c
index f3cd2493e052..a56a5424abf8 100644
--- a/clang/test/Driver/clang-offload-bundler.c
+++ b/clang/test/Driver/clang-offload-bundler.c
@@ -1,5 +1,5 @@
// REQUIRES: x86-registered-target
-// UNSUPPORTED: target={{.*}}-darwin{{.*}}, target={{.*}}-aix{{.*}}
+// UNSUPPORTED: target={{.*}}-macosx{{.*}}, target={{.*}}-darwin{{.*}}, target={{.*}}-aix{{.*}}
//
// Generate all the types of files we can bundle.
diff --git a/clang/test/Driver/darwin-ld-reexports.c b/clang/test/Driver/darwin-ld-reexports.c
new file mode 100644
index 000000000000..2e96db49a8a3
--- /dev/null
+++ b/clang/test/Driver/darwin-ld-reexports.c
@@ -0,0 +1,21 @@
+// RUN: touch %t.o
+// RUN: %clang -target arm64-apple-darwin13 -### \
+// RUN: -reexport_framework Foo -reexport-lBar -reexport_library Baz %t.o 2> %t.log
+
+// Check older spellings also work.
+// RUN: %clang -target arm64-apple-darwin13 -### \
+// RUN: -Xlinker -reexport_framework -Xlinker Forest \
+// RUN: -Xlinker -reexport-lBranch \
+// RUN: -Xlinker -reexport_library -Xlinker Flower %t.o 2>> %t.log
+// RUN: FileCheck -check-prefix=LINK_REEXPORT %s < %t.log
+
+// LINK_REEXPORT: {{ld(.exe)?"}}
+// LINK_REEXPORT: "-reexport_framework" "Foo"
+// LINK_REEXPORT: "-reexport-lBar"
+// LINK_REEXPORT: "-reexport_library" "Baz"
+// LINK_REEXPORT: "-reexport_framework" "Forest"
+// LINK_REEXPORT: "-reexport-lBranch"
+// LINK_REEXPORT: "-reexport_library" "Flower"
+
+// Make sure arguments are not repeated.
+// LINK_REEXPORT-NOT: "-reexport
diff --git a/clang/test/Driver/fat-archive-unbundle-ext.c b/clang/test/Driver/fat-archive-unbundle-ext.c
index b409aa6313b1..e98b872f0c0c 100644
--- a/clang/test/Driver/fat-archive-unbundle-ext.c
+++ b/clang/test/Driver/fat-archive-unbundle-ext.c
@@ -1,5 +1,5 @@
// REQUIRES: x86-registered-target
-// UNSUPPORTED: target={{.*-windows.*}}, target={{.*-darwin.*}}, target={{.*}}-aix{{.*}}
+// UNSUPPORTED: target={{.*-windows.*}}, target={{.*}}-macosx{{.*}}, target={{.*-darwin.*}}, target={{.*}}-aix{{.*}}
// Generate dummy fat object
// RUN: %clang -O0 -target %itanium_abi_triple %s -c -o %t.host.o
diff --git a/clang/test/Driver/linker-wrapper-image.c b/clang/test/Driver/linker-wrapper-image.c
index 754752641352..d01445e3aed0 100644
--- a/clang/test/Driver/linker-wrapper-image.c
+++ b/clang/test/Driver/linker-wrapper-image.c
@@ -26,11 +26,11 @@
// OPENMP: @.omp_offloading.device_image = internal unnamed_addr constant [[[SIZE:[0-9]+]] x i8] c"\10\FF\10\AD{{.*}}", section ".llvm.offloading", align 8
// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { ptr getelementptr inbounds ([[[BEGIN:[0-9]+]] x i8], ptr @.omp_offloading.device_image, i64 1, i64 0), ptr getelementptr inbounds ([[[END:[0-9]+]] x i8], ptr @.omp_offloading.device_image, i64 1, i64 0), ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }]
// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }
-// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_reg, ptr null }]
-// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_unreg, ptr null }]
+// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 101, ptr @.omp_offloading.descriptor_reg, ptr null }]
// OPENMP: define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
// OPENMP-NEXT: entry:
+// OPENMP-NEXT: %0 = call i32 @atexit(ptr @.omp_offloading.descriptor_unreg)
// OPENMP-NEXT: call void @__tgt_register_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }
@@ -62,7 +62,7 @@
// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, ptr @.fatbin_image, ptr null }, section ".nvFatBinSegment", align 8
// CUDA-NEXT: @.cuda.binary_handle = internal global ptr null
-// CUDA: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.cuda.fatbin_reg, ptr null }]
+// CUDA: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 101, ptr @.cuda.fatbin_reg, ptr null }]
// CUDA: define internal void @.cuda.fatbin_reg() section ".text.startup" {
// CUDA-NEXT: entry:
@@ -162,7 +162,7 @@
// HIP-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1212764230, i32 1, ptr @.fatbin_image, ptr null }, section ".hipFatBinSegment", align 8
// HIP-NEXT: @.hip.binary_handle = internal global ptr null
-// HIP: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.hip.fatbin_reg, ptr null }]
+// HIP: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 101, ptr @.hip.fatbin_reg, ptr null }]
// HIP: define internal void @.hip.fatbin_reg() section ".text.startup" {
// HIP-NEXT: entry:
diff --git a/clang/test/Driver/modules-print-library-module-manifest-path.cpp b/clang/test/Driver/modules-print-library-module-manifest-path.cpp
index 24797002b80f..3ba2709ad95c 100644
--- a/clang/test/Driver/modules-print-library-module-manifest-path.cpp
+++ b/clang/test/Driver/modules-print-library-module-manifest-path.cpp
@@ -3,6 +3,7 @@
// RUN: rm -rf %t && split-file %s %t && cd %t
// RUN: mkdir -p %t/Inputs/usr/lib/x86_64-linux-gnu
// RUN: touch %t/Inputs/usr/lib/x86_64-linux-gnu/libc++.so
+// RUN: touch %t/Inputs/usr/lib/x86_64-linux-gnu/libc++.a
// RUN: %clang -print-library-module-manifest-path \
// RUN: -stdlib=libc++ \
@@ -10,13 +11,21 @@
// RUN: --target=x86_64-linux-gnu 2>&1 \
// RUN: | FileCheck libcxx-no-module-json.cpp
-// RUN: touch %t/Inputs/usr/lib/x86_64-linux-gnu/modules.json
+// RUN: touch %t/Inputs/usr/lib/x86_64-linux-gnu/libc++.modules.json
// RUN: %clang -print-library-module-manifest-path \
// RUN: -stdlib=libc++ \
// RUN: -resource-dir=%t/Inputs/usr/lib/x86_64-linux-gnu \
// RUN: --target=x86_64-linux-gnu 2>&1 \
// RUN: | FileCheck libcxx.cpp
+// RUN: rm %t/Inputs/usr/lib/x86_64-linux-gnu/libc++.so
+// RUN: touch %t/Inputs/usr/lib/x86_64-linux-gnu/libc++.a
+// RUN: %clang -print-library-module-manifest-path \
+// RUN: -stdlib=libc++ \
+// RUN: -resource-dir=%t/Inputs/usr/lib/x86_64-linux-gnu \
+// RUN: --target=x86_64-linux-gnu 2>&1 \
+// RUN: | FileCheck libcxx-no-shared-lib.cpp
+
// RUN: %clang -print-library-module-manifest-path \
// RUN: -stdlib=libstdc++ \
// RUN: -resource-dir=%t/Inputs/usr/lib/x86_64-linux-gnu \
@@ -29,7 +38,13 @@
//--- libcxx.cpp
-// CHECK: {{.*}}/Inputs/usr/lib/x86_64-linux-gnu{{/|\\}}modules.json
+// CHECK: {{.*}}/Inputs/usr/lib/x86_64-linux-gnu{{/|\\}}libc++.modules.json
+
+//--- libcxx-no-shared-lib.cpp
+
+// Note this might find a different path depending whether search path
+// contains a different libc++.so.
+// CHECK: {{.*}}libc++.modules.json
//--- libstdcxx.cpp
diff --git a/clang/test/Driver/msvc-link.c b/clang/test/Driver/msvc-link.c
index 64e099ea6304..b5c32b173785 100644
--- a/clang/test/Driver/msvc-link.c
+++ b/clang/test/Driver/msvc-link.c
@@ -36,3 +36,23 @@
// VFSOVERLAY: "--vfsoverlay"
// VFSOVERLAY: lld-link
// VFSOVERLAY: "/vfsoverlay:{{.*}}" "{{.*}}.obj"
+
+// RUN: %clang -target arm64ec-pc-windows-msvc -fuse-ld=link -### %s 2>&1 | FileCheck --check-prefix=ARM64EC %s
+// RUN: %clang_cl -target arm64ec-pc-windows-msvc -fuse-ld=link -### -- %s 2>&1 | FileCheck --check-prefix=ARM64EC %s
+// RUN: %clang_cl -arm64EC -fuse-ld=link -### -- %s 2>&1 | FileCheck --check-prefix=ARM64EC %s
+// ARM64EC: "-machine:arm64ec"
+
+// RUN: %clang -target arm64ec-pc-windows-msvc -fuse-ld=link -marm64x -### %s 2>&1 | \
+// RUN: FileCheck --check-prefix=ARM64X %s
+// RUN: %clang -target aarch64-pc-windows-msvc -fuse-ld=link -marm64x -### %s 2>&1 | \
+// RUN: FileCheck --check-prefix=ARM64X %s
+// RUN: %clang_cl -marm64x -fuse-ld=link -### -- %s 2>&1 | FileCheck --check-prefix=ARM64X %s
+// RUN: %clang_cl -arm64EC -marm64x -fuse-ld=link -### -- %s 2>&1 | FileCheck --check-prefix=ARM64X %s
+// ARM64X: "-machine:arm64x"
+
+// RUN: not %clang -target x86_64-linux-gnu -marm64x -### %s 2>&1 | FileCheck --check-prefix=HYBRID-ERR %s
+// HYBRID-ERR: error: unsupported option '-marm64x' for target 'x86_64-linux-gnu'
+
+// RUN: %clang -c -marm64x -target arm64ec-pc-windows-msvc -fuse-ld=link -### %s 2>&1 | \
+// RUN: FileCheck --check-prefix=HYBRID-WARN %s
+// HYBRID-WARN: warning: argument unused during compilation: '-marm64x' [-Wunused-command-line-argument]
diff --git a/clang/test/Driver/riscv-profiles.c b/clang/test/Driver/riscv-profiles.c
new file mode 100644
index 000000000000..0227487015ba
--- /dev/null
+++ b/clang/test/Driver/riscv-profiles.c
@@ -0,0 +1,324 @@
+// RUN: %clang --target=riscv32 -### -c %s 2>&1 -march=rvi20u32 \
+// RUN: | FileCheck -check-prefix=RVI20U32 %s
+// RVI20U32: "-target-feature" "-a"
+// RVI20U32: "-target-feature" "-c"
+// RVI20U32: "-target-feature" "-d"
+// RVI20U32: "-target-feature" "-f"
+// RVI20U32: "-target-feature" "-m"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rvi20u64 \
+// RUN: | FileCheck -check-prefix=RVI20U64 %s
+// RVI20U64: "-target-feature" "-a"
+// RVI20U64: "-target-feature" "-c"
+// RVI20U64: "-target-feature" "-d"
+// RVI20U64: "-target-feature" "-f"
+// RVI20U64: "-target-feature" "-m"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rva20u64 \
+// RUN: | FileCheck -check-prefix=RVA20U64 %s
+// RVA20U64: "-target-feature" "+m"
+// RVA20U64: "-target-feature" "+a"
+// RVA20U64: "-target-feature" "+f"
+// RVA20U64: "-target-feature" "+d"
+// RVA20U64: "-target-feature" "+c"
+// RVA20U64: "-target-feature" "+ziccamoa"
+// RVA20U64: "-target-feature" "+ziccif"
+// RVA20U64: "-target-feature" "+zicclsm"
+// RVA20U64: "-target-feature" "+ziccrse"
+// RVA20U64: "-target-feature" "+zicntr"
+// RVA20U64: "-target-feature" "+zicsr"
+// RVA20U64: "-target-feature" "+za128rs"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rva20s64 \
+// RUN: | FileCheck -check-prefix=RVA20S64 %s
+// RVA20S64: "-target-feature" "+m"
+// RVA20S64: "-target-feature" "+a"
+// RVA20S64: "-target-feature" "+f"
+// RVA20S64: "-target-feature" "+d"
+// RVA20S64: "-target-feature" "+c"
+// RVA20S64: "-target-feature" "+ziccamoa"
+// RVA20S64: "-target-feature" "+ziccif"
+// RVA20S64: "-target-feature" "+zicclsm"
+// RVA20S64: "-target-feature" "+ziccrse"
+// RVA20S64: "-target-feature" "+zicntr"
+// RVA20S64: "-target-feature" "+zicsr"
+// RVA20S64: "-target-feature" "+zifencei"
+// RVA20S64: "-target-feature" "+za128rs"
+// RVA20S64: "-target-feature" "+ssccptr"
+// RVA20S64: "-target-feature" "+sstvala"
+// RVA20S64: "-target-feature" "+sstvecd"
+// RVA20S64: "-target-feature" "+svade"
+// RVA20S64: "-target-feature" "+svbare"
+
+// RUN: %clang --target=riscv64 --target=riscv64 -### -c %s 2>&1 -march=rva22u64 \
+// RUN: | FileCheck -check-prefix=RVA22U64 %s
+// RVA22U64: "-target-feature" "+m"
+// RVA22U64: "-target-feature" "+a"
+// RVA22U64: "-target-feature" "+f"
+// RVA22U64: "-target-feature" "+d"
+// RVA22U64: "-target-feature" "+c"
+// RVA22U64: "-target-feature" "+zic64b"
+// RVA22U64: "-target-feature" "+zicbom"
+// RVA22U64: "-target-feature" "+zicbop"
+// RVA22U64: "-target-feature" "+zicboz"
+// RVA22U64: "-target-feature" "+ziccamoa"
+// RVA22U64: "-target-feature" "+ziccif"
+// RVA22U64: "-target-feature" "+zicclsm"
+// RVA22U64: "-target-feature" "+ziccrse"
+// RVA22U64: "-target-feature" "+zicntr"
+// RVA22U64: "-target-feature" "+zicsr"
+// RVA22U64: "-target-feature" "+zihintpause"
+// RVA22U64: "-target-feature" "+zihpm"
+// RVA22U64: "-target-feature" "+za64rs"
+// RVA22U64: "-target-feature" "+zfhmin"
+// RVA22U64: "-target-feature" "+zba"
+// RVA22U64: "-target-feature" "+zbb"
+// RVA22U64: "-target-feature" "+zbs"
+// RVA22U64: "-target-feature" "+zkt"
+
+// RUN: %clang --target=riscv64 --target=riscv64 -### -c %s 2>&1 -march=rva22s64 \
+// RUN: | FileCheck -check-prefix=RVA22S64 %s
+// RVA22S64: "-target-feature" "+m"
+// RVA22S64: "-target-feature" "+a"
+// RVA22S64: "-target-feature" "+f"
+// RVA22S64: "-target-feature" "+d"
+// RVA22S64: "-target-feature" "+c"
+// RVA22S64: "-target-feature" "+zic64b"
+// RVA22S64: "-target-feature" "+zicbom"
+// RVA22S64: "-target-feature" "+zicbop"
+// RVA22S64: "-target-feature" "+zicboz"
+// RVA22S64: "-target-feature" "+ziccamoa"
+// RVA22S64: "-target-feature" "+ziccif"
+// RVA22S64: "-target-feature" "+zicclsm"
+// RVA22S64: "-target-feature" "+ziccrse"
+// RVA22S64: "-target-feature" "+zicntr"
+// RVA22S64: "-target-feature" "+zicsr"
+// RVA22S64: "-target-feature" "+zifencei"
+// RVA22S64: "-target-feature" "+zihintpause"
+// RVA22S64: "-target-feature" "+zihpm"
+// RVA22S64: "-target-feature" "+za64rs"
+// RVA22S64: "-target-feature" "+zfhmin"
+// RVA22S64: "-target-feature" "+zba"
+// RVA22S64: "-target-feature" "+zbb"
+// RVA22S64: "-target-feature" "+zbs"
+// RVA22S64: "-target-feature" "+zkt"
+// RVA22S64: "-target-feature" "+ssccptr"
+// RVA22S64: "-target-feature" "+sscounterenw"
+// RVA22S64: "-target-feature" "+sstvala"
+// RVA22S64: "-target-feature" "+sstvecd"
+// RVA22S64: "-target-feature" "+svade"
+// RVA22S64: "-target-feature" "+svbare"
+// RVA22S64: "-target-feature" "+svinval"
+// RVA22S64: "-target-feature" "+svpbmt"
+
+// RUN: %clang --target=riscv64 --target=riscv64 -### -c %s 2>&1 -march=rva23u64 -menable-experimental-extensions \
+// RUN: | FileCheck -check-prefix=RVA23U64 %s
+// RVA23U64: "-target-feature" "+m"
+// RVA23U64: "-target-feature" "+a"
+// RVA23U64: "-target-feature" "+f"
+// RVA23U64: "-target-feature" "+d"
+// RVA23U64: "-target-feature" "+c"
+// RVA23U64: "-target-feature" "+v"
+// RVA23U64: "-target-feature" "+zic64b"
+// RVA23U64: "-target-feature" "+zicbom"
+// RVA23U64: "-target-feature" "+zicbop"
+// RVA23U64: "-target-feature" "+zicboz"
+// RVA23U64: "-target-feature" "+ziccamoa"
+// RVA23U64: "-target-feature" "+ziccif"
+// RVA23U64: "-target-feature" "+zicclsm"
+// RVA23U64: "-target-feature" "+ziccrse"
+// RVA23U64: "-target-feature" "+zicntr"
+// RVA23U64: "-target-feature" "+zicond"
+// RVA23U64: "-target-feature" "+zicsr"
+// RVA23U64: "-target-feature" "+zihintntl"
+// RVA23U64: "-target-feature" "+zihintpause"
+// RVA23U64: "-target-feature" "+zihpm"
+// RVA23U64: "-target-feature" "+experimental-zimop"
+// RVA23U64: "-target-feature" "+za64rs"
+// RVA23U64: "-target-feature" "+zawrs"
+// RVA23U64: "-target-feature" "+zfa"
+// RVA23U64: "-target-feature" "+zfhmin"
+// RVA23U64: "-target-feature" "+zcb"
+// RVA23U64: "-target-feature" "+experimental-zcmop"
+// RVA23U64: "-target-feature" "+zba"
+// RVA23U64: "-target-feature" "+zbb"
+// RVA23U64: "-target-feature" "+zbs"
+// RVA23U64: "-target-feature" "+zkt"
+// RVA23U64: "-target-feature" "+zvbb"
+// RVA23U64: "-target-feature" "+zvfhmin"
+// RVA23U64: "-target-feature" "+zvkt"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rva23s64 -menable-experimental-extensions \
+// RUN: | FileCheck -check-prefix=RVA23S64 %s
+// RVA23S64: "-target-feature" "+m"
+// RVA23S64: "-target-feature" "+a"
+// RVA23S64: "-target-feature" "+f"
+// RVA23S64: "-target-feature" "+d"
+// RVA23S64: "-target-feature" "+c"
+// RVA23S64: "-target-feature" "+v"
+// RVA23S64: "-target-feature" "+h"
+// RVA23S64: "-target-feature" "+zic64b"
+// RVA23S64: "-target-feature" "+zicbom"
+// RVA23S64: "-target-feature" "+zicbop"
+// RVA23S64: "-target-feature" "+zicboz"
+// RVA23S64: "-target-feature" "+ziccamoa"
+// RVA23S64: "-target-feature" "+ziccif"
+// RVA23S64: "-target-feature" "+zicclsm"
+// RVA23S64: "-target-feature" "+ziccrse"
+// RVA23S64: "-target-feature" "+zicntr"
+// RVA23S64: "-target-feature" "+zicond"
+// RVA23S64: "-target-feature" "+zicsr"
+// RVA23S64: "-target-feature" "+zifencei"
+// RVA23S64: "-target-feature" "+zihintntl"
+// RVA23S64: "-target-feature" "+zihintpause"
+// RVA23S64: "-target-feature" "+zihpm"
+// RVA23S64: "-target-feature" "+experimental-zimop"
+// RVA23S64: "-target-feature" "+za64rs"
+// RVA23S64: "-target-feature" "+zawrs"
+// RVA23S64: "-target-feature" "+zfa"
+// RVA23S64: "-target-feature" "+zfhmin"
+// RVA23S64: "-target-feature" "+zcb"
+// RVA23S64: "-target-feature" "+experimental-zcmop"
+// RVA23S64: "-target-feature" "+zba"
+// RVA23S64: "-target-feature" "+zbb"
+// RVA23S64: "-target-feature" "+zbs"
+// RVA23S64: "-target-feature" "+zkt"
+// RVA23S64: "-target-feature" "+zvbb"
+// RVA23S64: "-target-feature" "+zvfhmin"
+// RVA23S64: "-target-feature" "+zvkt"
+// RVA23S64: "-target-feature" "+shcounterenw"
+// RVA23S64: "-target-feature" "+shgatpa"
+// RVA23S64: "-target-feature" "+shtvala"
+// RVA23S64: "-target-feature" "+shvsatpa"
+// RVA23S64: "-target-feature" "+shvstvala"
+// RVA23S64: "-target-feature" "+shvstvecd"
+// RVA23S64: "-target-feature" "+ssccptr"
+// RVA23S64: "-target-feature" "+sscofpmf"
+// RVA23S64: "-target-feature" "+sscounterenw"
+// RVA23S64: "-target-feature" "+experimental-ssnpm"
+// RVA23S64: "-target-feature" "+ssstateen"
+// RVA23S64: "-target-feature" "+sstc"
+// RVA23S64: "-target-feature" "+sstvala"
+// RVA23S64: "-target-feature" "+sstvecd"
+// RVA23S64: "-target-feature" "+ssu64xl"
+// RVA23S64: "-target-feature" "+svade"
+// RVA23S64: "-target-feature" "+svbare"
+// RVA23S64: "-target-feature" "+svinval"
+// RVA23S64: "-target-feature" "+svnapot"
+// RVA23S64: "-target-feature" "+svpbmt"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rvb23u64 -menable-experimental-extensions \
+// RUN: | FileCheck -check-prefix=RVB23U64 %s
+// RVB23U64: "-target-feature" "+m"
+// RVB23U64: "-target-feature" "+a"
+// RVB23U64: "-target-feature" "+f"
+// RVB23U64: "-target-feature" "+d"
+// RVB23U64: "-target-feature" "+c"
+// RVB23U64: "-target-feature" "+zic64b"
+// RVB23U64: "-target-feature" "+zicbom"
+// RVB23U64: "-target-feature" "+zicbop"
+// RVB23U64: "-target-feature" "+zicboz"
+// RVB23U64: "-target-feature" "+ziccamoa"
+// RVB23U64: "-target-feature" "+ziccif"
+// RVB23U64: "-target-feature" "+zicclsm"
+// RVB23U64: "-target-feature" "+ziccrse"
+// RVB23U64: "-target-feature" "+zicntr"
+// RVB23U64: "-target-feature" "+zicond"
+// RVB23U64: "-target-feature" "+zicsr"
+// RVB23U64: "-target-feature" "+zihintntl"
+// RVB23U64: "-target-feature" "+zihintpause"
+// RVB23U64: "-target-feature" "+zihpm"
+// RVB23U64: "-target-feature" "+experimental-zimop"
+// RVB23U64: "-target-feature" "+za64rs"
+// RVB23U64: "-target-feature" "+zawrs"
+// RVB23U64: "-target-feature" "+zfa"
+// RVB23U64: "-target-feature" "+zcb"
+// RVB23U64: "-target-feature" "+experimental-zcmop"
+// RVB23U64: "-target-feature" "+zba"
+// RVB23U64: "-target-feature" "+zbb"
+// RVB23U64: "-target-feature" "+zbs"
+// RVB23U64: "-target-feature" "+zkt"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rvb23s64 -menable-experimental-extensions \
+// RUN: | FileCheck -check-prefix=RVB23S64 %s
+// RVB23S64: "-target-feature" "+m"
+// RVB23S64: "-target-feature" "+a"
+// RVB23S64: "-target-feature" "+f"
+// RVB23S64: "-target-feature" "+d"
+// RVB23S64: "-target-feature" "+c"
+// RVB23S64: "-target-feature" "+zic64b"
+// RVB23S64: "-target-feature" "+zicbom"
+// RVB23S64: "-target-feature" "+zicbop"
+// RVB23S64: "-target-feature" "+zicboz"
+// RVB23S64: "-target-feature" "+ziccamoa"
+// RVB23S64: "-target-feature" "+ziccif"
+// RVB23S64: "-target-feature" "+zicclsm"
+// RVB23S64: "-target-feature" "+ziccrse"
+// RVB23S64: "-target-feature" "+zicntr"
+// RVB23S64: "-target-feature" "+zicond"
+// RVB23S64: "-target-feature" "+zicsr"
+// RVB23S64: "-target-feature" "+zifencei"
+// RVB23S64: "-target-feature" "+zihintntl"
+// RVB23S64: "-target-feature" "+zihintpause"
+// RVB23S64: "-target-feature" "+zihpm"
+// RVB23S64: "-target-feature" "+experimental-zimop"
+// RVB23S64: "-target-feature" "+za64rs"
+// RVB23S64: "-target-feature" "+zawrs"
+// RVB23S64: "-target-feature" "+zfa"
+// RVB23S64: "-target-feature" "+zcb"
+// RVB23S64: "-target-feature" "+experimental-zcmop"
+// RVB23S64: "-target-feature" "+zba"
+// RVB23S64: "-target-feature" "+zbb"
+// RVB23S64: "-target-feature" "+zbs"
+// RVB23S64: "-target-feature" "+zkt"
+// RVB23S64: "-target-feature" "+ssccptr"
+// RVB23S64: "-target-feature" "+sscofpmf"
+// RVB23S64: "-target-feature" "+sscounterenw"
+// RVB23S64: "-target-feature" "+sstc"
+// RVB23S64: "-target-feature" "+sstvala"
+// RVB23S64: "-target-feature" "+sstvecd"
+// RVB23S64: "-target-feature" "+ssu64xl"
+// RVB23S64: "-target-feature" "+svade"
+// RVB23S64: "-target-feature" "+svbare"
+// RVB23S64: "-target-feature" "+svinval"
+// RVB23S64: "-target-feature" "+svnapot"
+// RVB23S64: "-target-feature" "+svpbmt"
+
+// RUN: %clang --target=riscv32 -### -c %s 2>&1 -march=rvm23u32 -menable-experimental-extensions \
+// RUN: | FileCheck -check-prefix=RVM23U32 %s
+// RVM23U32: "-target-feature" "+m"
+// RVM23U32: "-target-feature" "+zicbop"
+// RVM23U32: "-target-feature" "+zicond"
+// RVM23U32: "-target-feature" "+zicsr"
+// RVM23U32: "-target-feature" "+zihintntl"
+// RVM23U32: "-target-feature" "+zihintpause"
+// RVM23U32: "-target-feature" "+experimental-zimop"
+// RVM23U32: "-target-feature" "+zce"
+// RVM23U32: "-target-feature" "+experimental-zcmop"
+// RVM23U32: "-target-feature" "+zba"
+// RVM23U32: "-target-feature" "+zbb"
+// RVM23U32: "-target-feature" "+zbs"
+
+// RUN: %clang --target=riscv64 -### -c %s 2>&1 -march=rva22u64_zfa \
+// RUN: | FileCheck -check-prefix=PROFILE-WITH-ADDITIONAL %s
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+m"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+a"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+f"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+d"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+c"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zicbom"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zicbop"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zicboz"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zihintpause"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zfa"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zfhmin"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zba"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zbb"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zbs"
+// PROFILE-WITH-ADDITIONAL: "-target-feature" "+zkt"
+
+// RUN: not %clang --target=riscv64 -### -c %s 2>&1 -march=rva19u64_zfa | FileCheck -check-prefix=INVALID-PROFILE %s
+// INVALID-PROFILE: error: invalid arch name 'rva19u64_zfa', unsupported profile
+
+// RUN: not %clang --target=riscv64 -### -c %s 2>&1 -march=rva22u64zfa | FileCheck -check-prefix=INVALID-ADDITIONAL %s
+// INVALID-ADDITIONAL: error: invalid arch name 'rva22u64zfa', additional extensions must be after separator '_'
diff --git a/clang/test/Driver/toc-conf.c b/clang/test/Driver/toc-conf.c
index 80d92ee1a90b..7b2d5122ebc6 100644
--- a/clang/test/Driver/toc-conf.c
+++ b/clang/test/Driver/toc-conf.c
@@ -23,7 +23,7 @@ void func() {
// CHECK-CONF1-NOT: warning:
// CHECK-CONF1: "-cc1"{{.*}}" "-mno-tocdata"
-// CHECK-CONF1: "-mtocdata=g2,g1"
+// CHECK-CONF1: "-mtocdata=g1,g2"
// CHECK-CONF2-NOT: warning:
// CHECK-CONF2: "-cc1"{{.*}}" "-mtocdata"
diff --git a/clang/test/Driver/unsupported-option-gpu.c b/clang/test/Driver/unsupported-option-gpu.c
index f23cb71ebfb0..5618b2cba72e 100644
--- a/clang/test/Driver/unsupported-option-gpu.c
+++ b/clang/test/Driver/unsupported-option-gpu.c
@@ -2,4 +2,5 @@
// DEFINE: %{check} = %clang -### --target=x86_64-linux-gnu -c -mcmodel=medium
// RUN: %{check} -x cuda %s --cuda-path=%S/Inputs/CUDA/usr/local/cuda --offload-arch=sm_60 --no-cuda-version-check -fbasic-block-sections=all
+// RUN: %{check} -x hip %s --offload=spirv64 -nogpulib -nogpuinc
// RUN: %{check} -x hip %s --rocm-path=%S/Inputs/rocm -nogpulib -nogpuinc
diff --git a/clang/test/Format/fail-on-incomplete.cpp b/clang/test/Format/fail-on-incomplete.cpp
new file mode 100644
index 000000000000..ccd77af4d599
--- /dev/null
+++ b/clang/test/Format/fail-on-incomplete.cpp
@@ -0,0 +1,4 @@
+// RUN: not clang-format -style=LLVM -fail-on-incomplete-format %s
+// RUN: clang-format -style=LLVM %s
+
+int a(
diff --git a/clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI1.h b/clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI1.h
new file mode 100644
index 000000000000..83a5b9507de3
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI1.h
@@ -0,0 +1 @@
+extern int extraGlobalAPI1;
diff --git a/clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI2.h b/clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI2.h
new file mode 100644
index 000000000000..34fe3364bba8
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Extra/SimpleExtraAPI2.h
@@ -0,0 +1 @@
+extern int extraGlobalAPI2;
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Basic.h b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Basic.h
new file mode 100644
index 000000000000..08412bb2de28
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Basic.h
@@ -0,0 +1,103 @@
+#import <Foundation/Foundation.h>
+
+// Basic class with no super class
+@interface Basic1
+@end
+
+@interface Basic2 : NSObject
+@end
+
+@interface Basic3 : NSObject
+@property BOOL property1;
+@property(readonly) BOOL property2;
+@property(getter=isProperty3) BOOL property3;
+@property BOOL dynamicProp;
+@end
+
+@interface Basic4 : NSObject {
+@public
+ BOOL ivar1;
+@protected
+ BOOL ivar2;
+@package
+ BOOL ivar3;
+@private
+ BOOL ivar4;
+}
+@end
+
+__attribute__((visibility("hidden"))) @interface Basic4_1 : NSObject {
+@public
+ BOOL ivar1;
+@protected
+ BOOL ivar2;
+@package
+ BOOL ivar3;
+@private
+ BOOL ivar4;
+}
+@end
+
+@interface Basic4_2 : NSObject {
+@private
+ BOOL ivar4;
+@package
+ BOOL ivar3;
+@protected
+ BOOL ivar2;
+@public
+ BOOL ivar1;
+}
+@end
+
+@interface Basic5 : NSObject
++ (void)aClassMethod;
+- (void)anInstanceMethod;
+@end
+
+@interface Basic6 : NSObject
+@end
+
+@interface Basic6 () {
+@public
+ BOOL ivar1;
+}
+@property BOOL property1;
+- (void)anInstanceMethodFromAnExtension;
+@end
+
+@interface Basic6 (Foo)
+@property BOOL property2;
+- (void)anInstanceMethodFromACategory;
+@end
+
+__attribute__((visibility("hidden")))
+@interface Basic7 : NSObject
+@end
+
+@interface Basic7 ()
+- (void) anInstanceMethodFromAnHiddenExtension;
+@end
+
+@interface Basic8 : NSObject
++ (void)useSameName;
+@end
+
+// Classes and protocols can have the same name. For now they would only clash
+// in the selector map if the protocl starts with '_'.
+@protocol _A
+- (void)aMethod;
+@end
+
+@interface A : NSObject
+- (void)aMethod NS_AVAILABLE(10_11, 9_0);
+- (void)bMethod NS_UNAVAILABLE;
+@end
+
+@interface Basic9 : NSObject
+@property(readonly) BOOL aProperty NS_AVAILABLE(10_10, 8_0);
+@end
+
+@interface Basic9 (deprecated)
+@property(readwrite) BOOL aProperty NS_DEPRECATED_MAC(10_8, 10_10);
+@end
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/External.h b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/External.h
new file mode 100644
index 000000000000..5dc3c92f34c2
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/External.h
@@ -0,0 +1,19 @@
+#import <Foundation/Foundation.h>
+
+// Sub-class an external defined ObjC Class.
+@interface ExternalManagedObject : NSManagedObject
+- (void)foo;
+@end
+
+// Add category to external defined ObjC Class.
+@interface NSManagedObject (Simple)
+- (int)supportsSimple;
+@end
+
+// CoreData Accessors are dynamically generated and have no implementation.
+@interface ExternalManagedObject (CoreDataGeneratedAccessors)
+- (void)addChildObject:(ExternalManagedObject *)value;
+- (void)removeChildObject:(ExternalManagedObject *)value;
+- (void)addChild:(NSSet *)values;
+- (void)removeChild:(NSSet *)values;
+@end
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Simple.h b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Simple.h
new file mode 100644
index 000000000000..12c77098a8d9
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/Simple.h
@@ -0,0 +1,45 @@
+#import <Foundation/Foundation.h>
+
+// Useless forward declaration. This is used for testing.
+@class FooBar;
+@protocol FooProtocol;
+
+@protocol ForwardProcotol;
+
+// Test public global.
+extern int publicGlobalVariable;
+
+// Test weak public global.
+extern int weakPublicGlobalVariable __attribute__((weak));
+
+// Test public ObjC class
+@interface Simple : NSObject
+@end
+
+__attribute__((objc_exception))
+@interface Base : NSObject
+@end
+
+@interface SubClass : Base
+@end
+
+@protocol BaseProtocol
+- (void) baseMethod;
+@end
+
+NS_AVAILABLE(10_11, 9_0)
+@protocol FooProtocol <BaseProtocol>
+- (void) protocolMethod;
+@end
+
+@protocol BarProtocol
+- (void) barMethod;
+@end
+
+@interface FooClass <FooProtocol, BarProtocol>
+@end
+
+// Create an empty category conforms to a forward declared protocol.
+// <rdar://problem/35605892>
+@interface FooClass (Test) <ForwardProcotol>
+@end
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/SimpleAPI.h b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/SimpleAPI.h
new file mode 100644
index 000000000000..d953fac966da
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/Headers/SimpleAPI.h
@@ -0,0 +1 @@
+extern int otherFrameworkAPI;
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivate.h b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivate.h
new file mode 100644
index 000000000000..5a28cda3928e
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivate.h
@@ -0,0 +1,5 @@
+// Test private global variable.
+extern int privateGlobalVariable;
+
+// Test weak private global.
+extern int weakPrivateGlobalVariable __attribute__((weak));
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivateSPI.h b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivateSPI.h
new file mode 100644
index 000000000000..c9aca30fa82f
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.framework/PrivateHeaders/SimplePrivateSPI.h
@@ -0,0 +1,2 @@
+// Test private global variable.
+extern int otherFrameworkSPI;
diff --git a/clang/test/InstallAPI/Inputs/Simple/Simple.yaml b/clang/test/InstallAPI/Inputs/Simple/Simple.yaml
new file mode 100644
index 000000000000..998e51f1a67d
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/Simple.yaml
@@ -0,0 +1,3196 @@
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x1000007
+ cpusubtype: 0x3
+ filetype: 0x6
+ ncmds: 15
+ sizeofcmds: 1952
+ flags: 0x118085
+ reserved: 0x0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 472
+ segname: __TEXT
+ vmaddr: 0
+ vmsize: 12288
+ fileoff: 0
+ filesize: 12288
+ maxprot: 5
+ initprot: 5
+ nsects: 5
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x1BC0
+ size: 180
+ offset: 0x1BC0
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 554889E50FBE47085DC3554889E58857085DC3554889E50FBE47095DC3554889E50FBE470A5DC3554889E588570A5DC3554889E55DC3554889E55DC3554889E55DC3554889E50FBE47095DC3554889E58857095DC3554889E5B8010000005DC3554889E55DC3554889E55DC3554889E55DC3554889E55DC3554889E5B0015DC3554889E55DC3554889E55DC3554889E55DC3554889E50FBE47085DC3554889E55DC3554889E55DC3554889E55DC3554889E55DC3
+ - sectname: __cstring
+ segname: __TEXT
+ addr: 0x1C74
+ size: 296
+ offset: 0x1C74
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x2
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 53696D706C65004261736500537562436C6173730053696D706C65496E7465726E616C4150490053696D706C65496E7465726E616C53504900426173696331004261736963320042617369633300426173696334004261736963345F31004261736963345F32004261736963350042617369633600466F6F004261736963370045787465726E616C4D616E616765644F626A6563740048696464656E436C61737300426173696338004100426173696339006465707265636174656400466F6F436C61737300466F6F50726F746F636F6C004261736550726F746F636F6C0042617250726F746F636F6C0050726976617465005072697661746550726F746F636F6C0063313640303A380076323040303A3863313600630076313640303A380042313640303A3800
+ - sectname: __objc_methname
+ segname: __TEXT
+ addr: 0x1D9C
+ size: 450
+ offset: 0x1D9C
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x2
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 70726F7065727479310073657450726F7065727479313A0070726F70657274793200697350726F7065727479330073657450726F7065727479333A0070726F7065727479330054632C5670726F7065727479310054632C522C5670726F7065727479320054632C47697350726F7065727479332C5670726F7065727479330064796E616D696350726F700054632C440069766172310069766172320069766172330069766172340061436C6173734D6574686F6400616E496E7374616E63654D6574686F6400616E496E7374616E63654D6574686F6446726F6D416E457874656E73696F6E0073657450726F7065727479323A00616E496E7374616E63654D6574686F6446726F6D4143617465676F727900546300616E496E7374616E63654D6574686F6446726F6D416E48696464656E457874656E73696F6E00666F6F00737570706F72747353696D706C650075736553616D654E616D6500614D6574686F64006150726F7065727479005F6150726F70657274790054632C522C565F6150726F706572747900626173654D6574686F640070726F746F636F6C4D6574686F64006261724D6574686F64007072697661746550726F636F746F6C4D6574686F6400
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0x1F60
+ size: 4152
+ offset: 0x1F60
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 010000001C000000010000002000000000000000200000000200000000000001C01B00003800000038000000741C00000000000038000000030000000C0001001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+ - sectname: __eh_frame
+ segname: __TEXT
+ addr: 0x2F98
+ size: 24
+ offset: 0x2F98
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x6000000B
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 1400000000000000017A520001781001100C070890010000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 792
+ segname: __DATA
+ vmaddr: 12288
+ vmsize: 8192
+ fileoff: 12288
+ filesize: 8192
+ maxprot: 3
+ initprot: 3
+ nsects: 9
+ flags: 0
+ Sections:
+ - sectname: __objc_const
+ segname: __DATA
+ addr: 0x3000
+ size: 4952
+ offset: 0x3000
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 010000002800000028000000000000000000000000000000741C00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000008000000000000000000000000000000741C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000028000000280000000000000000000000000000007B1C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000008000000080000000000000000000000000000007B1C0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000007B1C000000000000D043000000000000010000002800000028000000000000000000000000000000801C00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000800000008000000000000000000000000000000801C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000801C0000000000002044000000000000010000002800000028000000000000000000000000000000891C00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000800000008000000000000000000000000000000891C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000891C00000000000070440000000000000100000028000000280000000000000000000000000000009B1C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000008000000080000000000000000000000000000009B1C0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000009B1C000000000000C044000000000000030000002800000028000000000000000000000000000000AD1C00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000AD1C00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000002800000028000000000000000000000000000000B41C00000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000008000000000000000000000000000000B41C00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000002800000028000000000000000000000000000000BB1C0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018000000050000009C1D000000000000771D000000000000C01B000000000000A61D0000000000007F1D000000000000CA1B000000000000B41D000000000000771D000000000000D31B000000000000BE1D000000000000771D000000000000DD1B000000000000CA1D0000000000007F1D000000000000E71B000000000000200000000300000098490000000000009C1D0000000000008A1D0000000000000000000001000000A049000000000000B41D0000000000008A1D0000000000000000000001000000A849000000000000D81D0000000000008A1D000000000000000000000100000010000000040000009C1D000000000000E21D000000000000B41D000000000000F01D000000000000D81D000000000000001E0000000000001B1E000000000000271E00000000000000000000080000000B000000000000000000000000000000BB1C00000000000098340000000000000000000000000000183500000000000000000000000000008035000000000000010000002800000028000000000000000000000000000000C21C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000004000000B0490000000000002C1E0000000000008A1D0000000000000000000001000000B849000000000000321E0000000000008A1D0000000000000000000001000000C049000000000000381E0000000000008A1D0000000000000000000001000000C8490000000000003E1E0000000000008A1D000000000000000000000100000000000000080000000C000000000000000000000000000000C21C00000000000000000000000000000000000000000000583600000000000000000000000000000000000000000000110000002800000028000000000000000000000000000000C91C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000004000000D0490000000000002C1E0000000000008A1D0000000000000000000001000000D849000000000000321E0000000000008A1D0000000000000000000001000000E049000000000000381E0000000000008A1D0
000000000000000000001000000E8490000000000003E1E0000000000008A1D000000000000000000000100000010000000080000000C000000000000000000000000000000C91C00000000000000000000000000000000000000000000703700000000000000000000000000000000000000000000010000002800000028000000000000000000000000000000D21C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000004000000F0490000000000003E1E0000000000008A1D0000000000000000000001000000F849000000000000381E0000000000008A1D0000000000000000000001000000004A000000000000321E0000000000008A1D0000000000000000000001000000084A0000000000002C1E0000000000008A1D000000000000000000000100000000000000080000000C000000000000000000000000000000D21C000000000000000000000000000000000000000000008838000000000000000000000000000000000000000000001800000001000000441E0000000000008C1D000000000000F01B000000000000010000002800000028000000000000000000000000000000DB1C000000000000583900000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000511E0000000000008C1D000000000000F61B000000000000000000000800000008000000000000000000000000000000DB1C000000000000C0390000000000000000000000000000000000000000000000000000000000000000000000000000010000002800000028000000000000000000000000000000E21C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001800000003000000621E0000000000008C1D000000000000FC1B0000000000009C1D000000000000771D000000000000021C000000000000A61D0000000000007F1D0000000000000C1C0000000000002000000002000000104A0000000000002C1E0000000000008A1D0000000000000000000001000000184A0000000000009C1D0000000000008A1D000000000000000000000100000010000000010000009C1D000000000000E21D00000000000000000000080000000A000000000000000000000000000000E21C000000000000703A0000000000000000000000000000C03A0000000000000000000000000000083B0000000000001800000003000000B41D000000000000771D000000000000151C000000000000821E0000000000007F1D000000000000201C000000000000901E0000000000008C1D000000000000261C0
000000000001000000001000000B41D000000000000AE1E000000000000E91C0000000000004047000000000000683B00000000000000000000000000000000000000000000B83B00000000000000000000000000004000000000000000110000002800000028000000000000000000000000000000ED1C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000B11E0000000000008C1D0000000000002C1C000000000000100000000800000008000000000000000000000000000000ED1C000000000000583C0000000000000000000000000000000000000000000000000000000000000000000000000000010000002800000028000000000000000000000000000000F41C000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000D71E0000000000008C1D000000000000321C000000000000000000000800000008000000000000000000000000000000F41C000000000000083D00000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000DB1E000000000000941D000000000000381C000000000000741C0000000000000000000000000000703D000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000001100000028000000280000000000000000000000000000000A1D000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000008000000080000000000000000000000000000000A1D000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000EA1E0000000000008C1D000000000000401C000000000000010000002800000028000000000000000000000000000000161D000000000000603E00000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000EA1E0000000000008C1D000000000000461C000000000000000000000800000008000000000000000000000000000000161D000000000000C83E00000000000000000000000000000000000000000000000000000000000000000000000000000100000028000000280000000000000000000000000000001D1D000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000F61E0000000000008C1D0000000000004C1C00000000000000000
00008000000080000000000000000000000000000001D1D000000000000783F00000000000000000000000000000000000000000000000000000000000000000000000000000100000028000000280000000000000000000000000000001F1D000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001800000001000000FE1E000000000000771D000000000000521C0000000000002000000001000000204A000000000000081F0000000000008A1D00000000000000000000010000001000000001000000FE1E000000000000131F0000000000000000000008000000090000000000000000000000000000001F1D000000000000284000000000000000000000000000004840000000000000000000000000000070400000000000001000000001000000FE1E000000000000AE1E000000000000261D0000000000002049000000000000000000000000000000000000000000000000000000000000D040000000000000000000000000000040000000000000001800000001000000241F0000000000008C1D00000000000000000000000000008C1D0000000000000100000000000000284A000000000000000000000000000018000000010000002F1F0000000000008C1D00000000000000000000000000008C1D00000000000018000000010000003E1F0000000000008C1D00000000000000000000000000008C1D0000000000000200000000000000884A000000000000E84A0000000000000000000000000000030000002800000028000000000000000000000000000000311D0000000000000000000000000000B8410000000000000000000000000000000000000000000000000000000000001800000003000000241F0000000000008C1D0000000000005C1C0000000000002F1F0000000000008C1D000000000000621C0000000000003E1F0000000000008C1D000000000000681C000000000000020000000000000000000000000000000000000000000000311D0000000000002042000000000000B8410000000000000000000000000000000000000000000000000000000000001800000001000000481F0000000000008C1D0000000000006E1C0000000000001800000001000000481F0000000000008C1D00000000000000000000000000008C1D0000000000000100000000000000484B00000000000000000000000000005F1D0000000000004849000000000000B84200000000000000000000000000000043000000000000000000000000000000000000000000004000000000000000
+ - sectname: __objc_data
+ segname: __DATA
+ addr: 0x4358
+ size: 1600
+ offset: 0x4358
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 000000000000000000000000000000000000000000000000000000000000000000300000000000005843000000000000000000000000000000000000000000000000000000000000483000000000000000000000000000000000000000000000000000000000000000000000000000009030000000000000A843000000000000000000000000000000000000000000000000000000000000D8300000000000000000000000000000A843000000000000000000000000000000000000000000003831000000000000F843000000000000D0430000000000000000000000000000000000000000000080310000000000000000000000000000000000000000000000000000000000000000000000000000E03100000000000048440000000000000000000000000000000000000000000000000000000000002832000000000000000000000000000000000000000000000000000000000000000000000000000088320000000000009844000000000000000000000000000000000000000000000000000000000000D032000000000000104500000000000000000000000000000000000000000000000000000000000078330000000000001045000000000000E8440000000000000000000000000000000000000000000030330000000000000000000000000000000000000000000000000000000000000000000000000000C03300000000000038450000000000000000000000000000000000000000000000000000000000000834000000000000000000000000000000000000000000000000000000000000000000000000000050340000000000008845000000000000000000000000000000000000000000000000000000000000C83500000000000000000000000000000000000000000000000000000000000000000000000000001036000000000000D845000000000000000000000000000000000000000000000000000000000000E036000000000000000000000000000000000000000000000000000000000000000000000000000028370000000000002846000000000000000000000000000000000000000000000000000000000000F837000000000000000000000000000000000000000000000000000000000000000000000000000040380000000000007846000000000000000000000000000000000000000000000000000000000000103900000000000000000000000000000000000000000000000000000000000000000000000000007839000000000000C846000000000000000000000000000000000000000000000000000000000000E0390000000000000000000000000000000000000000000000000000000000000000000000000000283A0
000000000001847000000000000000000000000000000000000000000000000000000000000203B0000000000000000000000000000000000000000000000000000000000000000000000000000103C0000000000006847000000000000000000000000000000000000000000000000000000000000783C0000000000000000000000000000000000000000000000000000000000000000000000000000C03C000000000000B847000000000000000000000000000000000000000000000000000000000000283D0000000000000000000000000000000000000000000000000000000000000000000000000000D03D0000000000000848000000000000000000000000000000000000000000000000000000000000183E0000000000000000000000000000000000000000000000000000000000000000000000000000803E0000000000005848000000000000000000000000000000000000000000000000000000000000E83E0000000000000000000000000000000000000000000000000000000000000000000000000000303F000000000000A848000000000000000000000000000000000000000000000000000000000000983F0000000000000000000000000000000000000000000000000000000000000000000000000000E03F000000000000F8480000000000000000000000000000000000000000000000000000000000008840000000000000704900000000000000000000000000000000000000000000000000000000000070420000000000007049000000000000484900000000000000000000000000000000000000000000D841000000000000
+ - sectname: __objc_ivar
+ segname: __DATA
+ addr: 0x4998
+ size: 144
+ offset: 0x4998
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 080000000000000009000000000000000A00000000000000080000000000000009000000000000000A000000000000000B00000000000000080000000000000009000000000000000A000000000000000B00000000000000080000000000000009000000000000000A000000000000000B00000000000000080000000000000009000000000000000800000000000000
+ - sectname: __data
+ segname: __DATA
+ addr: 0x4A28
+ size: 392
+ offset: 0x4A28
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 0000000000000000461D000000000000000000000000000028410000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000048410000000000000000000000000000000000000000000000000000000000003A1D00000000000050410000000000006841000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000008841000000000000000000000000000000000000000000000000000000000000531D0000000000000000000000000000904100000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000B041000000000000000000000000000000000000000000000000000000000000671D0000000000000000000000000000D84200000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000F842000000000000000000000000000000000000000000000000000000000000
+ - sectname: __objc_protolist
+ segname: __DATA
+ addr: 0x4BB0
+ size: 32
+ offset: 0x4BB0
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x1000000B
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 284A000000000000884A000000000000E84A000000000000484B000000000000
+ - sectname: __objc_classlist
+ segname: __DATA
+ addr: 0x4BD0
+ size: 160
+ offset: 0x4BD0
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x10000000
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 8043000000000000D04300000000000020440000000000007044000000000000C044000000000000E8440000000000006045000000000000B04500000000000000460000000000005046000000000000A046000000000000F04600000000000040470000000000009047000000000000E04700000000000030480000000000008048000000000000D04800000000000020490000000000004849000000000000
+ - sectname: __objc_catlist
+ segname: __DATA
+ addr: 0x4C70
+ size: 32
+ offset: 0x4C70
+ align: 3
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x10000000
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: D03B000000000000903D000000000000E8400000000000001843000000000000
+ - sectname: __objc_imageinfo
+ segname: __DATA
+ addr: 0x4C90
+ size: 8
+ offset: 0x4C90
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: '0000000040000000'
+ - sectname: __common
+ segname: __DATA
+ addr: 0x4C98
+ size: 16
+ offset: 0x0
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x1
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 20480
+ vmsize: 10272
+ fileoff: 20480
+ filesize: 10272
+ maxprot: 1
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_DYLD_INFO_ONLY
+ cmdsize: 48
+ rebase_off: 20480
+ rebase_size: 320
+ bind_off: 20800
+ bind_size: 480
+ weak_bind_off: 0
+ weak_bind_size: 0
+ lazy_bind_off: 0
+ lazy_bind_size: 0
+ export_off: 21280
+ export_size: 896
+ - cmd: LC_SYMTAB
+ cmdsize: 24
+ symoff: 22208
+ nsyms: 187
+ stroff: 25200
+ strsize: 5552
+ - cmd: LC_DYSYMTAB
+ cmdsize: 80
+ ilocalsym: 0
+ nlocalsym: 131
+ iextdefsym: 131
+ nextdefsym: 49
+ iundefsym: 180
+ nundefsym: 7
+ tocoff: 0
+ ntoc: 0
+ modtaboff: 0
+ nmodtab: 0
+ extrefsymoff: 0
+ nextrefsyms: 0
+ indirectsymoff: 0
+ nindirectsyms: 0
+ extreloff: 0
+ nextrel: 0
+ locreloff: 0
+ nlocrel: 0
+ - cmd: LC_ID_DYLIB
+ cmdsize: 88
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 66051
+ compatibility_version: 65536
+ Content: '/System/Library/Frameworks/Simple.framework/Versions/A/Simple'
+ ZeroPadBytes: 3
+ - cmd: LC_UUID
+ cmdsize: 24
+ uuid: 4C4C441D-5555-3144-A104-DD1AF4EF8FE7
+ - cmd: LC_VERSION_MIN_MACOSX
+ cmdsize: 16
+ version: 658432
+ sdk: 983040
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 96
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 197722368
+ compatibility_version: 19660800
+ Content: '/System/Library/Frameworks/Foundation.framework/Versions/C/Foundation'
+ ZeroPadBytes: 3
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 56
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 14942208
+ compatibility_version: 65536
+ Content: '/usr/lib/libobjc.A.dylib'
+ ZeroPadBytes: 8
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 96
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 91750400
+ compatibility_version: 65536
+ Content: '/System/Library/Frameworks/CoreData.framework/Versions/A/CoreData'
+ ZeroPadBytes: 7
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 56
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 88539136
+ compatibility_version: 65536
+ Content: '/usr/lib/libSystem.B.dylib'
+ ZeroPadBytes: 6
+ - cmd: LC_FUNCTION_STARTS
+ cmdsize: 16
+ dataoff: 22176
+ datasize: 32
+ - cmd: LC_DATA_IN_CODE
+ cmdsize: 16
+ dataoff: 22208
+ datasize: 0
+LinkEditData:
+ RebaseOpcodes:
+ - Opcode: REBASE_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ - Opcode: REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
+ Imm: 1
+ ExtraData: [ 0x18 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x3, 0x40 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x40 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x40 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x40 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x4, 0x40 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 15
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 8
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x10 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x10 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x10 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 5
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 7
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 9
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 9
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 5
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x28 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 7
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 5
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 4
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x40 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 5
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 7
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 7
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x30 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 4
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x18 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x8 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x8 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 9
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x38 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 1
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x20 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 2
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 3
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 2
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x98 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x18 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ Imm: 3
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 5
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x18 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x18 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+ Imm: 0
+ ExtraData: [ 0x2, 0x8 ]
+ - Opcode: REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ Imm: 4
+ - Opcode: REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ Imm: 0
+ ExtraData: [ 0x18 ]
+ - Opcode: REBASE_OPCODE_DO_REBASE_ULEB_TIMES
+ Imm: 0
+ ExtraData: [ 0x1C ]
+ - Opcode: REBASE_OPCODE_DONE
+ Imm: 0
+ BindOpcodes:
+ - Opcode: BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ Imm: 0
+ Symbol: _objc_ehtype_vtable
+ - Opcode: BIND_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_DYLIB_ORDINAL_IMM
+ Imm: 2
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
+ Imm: 1
+ ULEBExtraData: [ 0x120 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_ADDEND_SLEB
+ Imm: 0
+ SLEBExtraData: [ 16 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xA0 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xA0 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xA0 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ Imm: 0
+ Symbol: '_OBJC_CLASS_$_NSManagedObject'
+ - Opcode: BIND_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_DYLIB_ORDINAL_IMM
+ Imm: 3
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xA78 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_ADDEND_SLEB
+ Imm: 0
+ SLEBExtraData: [ 0 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xA48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ Imm: 0
+ Symbol: '_OBJC_METACLASS_$_NSObject'
+ - Opcode: BIND_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_DYLIB_ORDINAL_IMM
+ Imm: 2
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xFFFFFFFFFFFFFB68 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x90 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x40 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ Imm: 0
+ Symbol: __objc_empty_cache
+ - Opcode: BIND_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xFFFFFFFFFFFFFA60 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x20 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ Imm: 0
+ Symbol: '_OBJC_CLASS_$_NSObject'
+ - Opcode: BIND_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xFFFFFFFFFFFFFA00 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x98 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x98 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x98 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0x48 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ Imm: 0
+ Symbol: '_OBJC_METACLASS_$_NSManagedObject'
+ - Opcode: BIND_OPCODE_SET_TYPE_IMM
+ Imm: 1
+ Symbol: ''
+ - Opcode: BIND_OPCODE_SET_DYLIB_ORDINAL_IMM
+ Imm: 3
+ Symbol: ''
+ - Opcode: BIND_OPCODE_ADD_ADDR_ULEB
+ Imm: 0
+ ULEBExtraData: [ 0xFFFFFFFFFFFFFE90 ]
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DO_BIND
+ Imm: 0
+ Symbol: ''
+ - Opcode: BIND_OPCODE_DONE
+ Imm: 0
+ Symbol: ''
+ ExportTrie:
+ TerminalSize: 0
+ NodeOffset: 0
+ Name: ''
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 5
+ Name: _
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 41
+ Name: p
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 86
+ Name: rivateGlobalVariable
+ Flags: 0x0
+ Address: 0x4CA0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 92
+ Name: ublicGlobalVariable
+ Flags: 0x0
+ Address: 0x4CA4
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 98
+ Name: extraGlobalAPI
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 106
+ Name: '1'
+ Flags: 0x0
+ Address: 0x4C98
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 112
+ Name: '2'
+ Flags: 0x0
+ Address: 0x4C9C
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 118
+ Name: weakP
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 165
+ Name: rivateGlobalVariable
+ Flags: 0x4
+ Address: 0x4BAC
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 171
+ Name: ublicGlobalVariable
+ Flags: 0x4
+ Address: 0x4BA8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 177
+ Name: OBJC_
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 232
+ Name: 'IVAR_$_Basic'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 248
+ Name: 6.ivar1
+ Flags: 0x0
+ Address: 0x4A10
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 254
+ Name: '4'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 274
+ Name: .ivar
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 284
+ Name: '2'
+ Flags: 0x0
+ Address: 0x49B8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 290
+ Name: '1'
+ Flags: 0x0
+ Address: 0x49B0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 296
+ Name: _2.ivar
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 306
+ Name: '1'
+ Flags: 0x0
+ Address: 0x4A08
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 312
+ Name: '2'
+ Flags: 0x0
+ Address: 0x4A00
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 318
+ Name: 'METACLASS_$_'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 369
+ Name: FooClass
+ Flags: 0x0
+ Address: 0x4970
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 375
+ Name: ExternalManagedObject
+ Flags: 0x0
+ Address: 0x47B8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 381
+ Name: S
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 401
+ Name: imple
+ Flags: 0x0
+ Address: 0x4358
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 418
+ Name: Internal
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 432
+ Name: SPI
+ Flags: 0x0
+ Address: 0x4498
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 438
+ Name: API
+ Flags: 0x0
+ Address: 0x4448
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 444
+ Name: ubClass
+ Flags: 0x0
+ Address: 0x43F8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 450
+ Name: Bas
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 461
+ Name: e
+ Flags: 0x0
+ Address: 0x43A8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 467
+ Name: ic
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 501
+ Name: '2'
+ Flags: 0x0
+ Address: 0x4538
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 507
+ Name: '3'
+ Flags: 0x0
+ Address: 0x4588
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 513
+ Name: '5'
+ Flags: 0x0
+ Address: 0x46C8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 519
+ Name: '4'
+ Flags: 0x0
+ Address: 0x45D8
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 530
+ Name: _2
+ Flags: 0x0
+ Address: 0x4678
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 536
+ Name: '9'
+ Flags: 0x0
+ Address: 0x48F8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 542
+ Name: '8'
+ Flags: 0x0
+ Address: 0x4858
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 548
+ Name: '6'
+ Flags: 0x0
+ Address: 0x4718
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 554
+ Name: '1'
+ Flags: 0x0
+ Address: 0x4510
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 560
+ Name: A
+ Flags: 0x0
+ Address: 0x48A8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 566
+ Name: 'EHTYPE_$_'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 579
+ Name: Base
+ Flags: 0x0
+ Address: 0x3120
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 584
+ Name: S
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 612
+ Name: ubClass
+ Flags: 0x0
+ Address: 0x31C8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 617
+ Name: impleInternal
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 631
+ Name: SPI
+ Flags: 0x0
+ Address: 0x3318
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 636
+ Name: API
+ Flags: 0x0
+ Address: 0x3270
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 641
+ Name: 'CLASS_$_'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 692
+ Name: A
+ Flags: 0x0
+ Address: 0x48D0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 698
+ Name: ExternalManagedObject
+ Flags: 0x0
+ Address: 0x47E0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 704
+ Name: FooClass
+ Flags: 0x0
+ Address: 0x4948
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 710
+ Name: S
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 730
+ Name: ubClass
+ Flags: 0x0
+ Address: 0x4420
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 736
+ Name: imple
+ Flags: 0x0
+ Address: 0x4380
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 753
+ Name: Internal
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 767
+ Name: API
+ Flags: 0x0
+ Address: 0x4470
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 773
+ Name: SPI
+ Flags: 0x0
+ Address: 0x44C0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 779
+ Name: Bas
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 790
+ Name: e
+ Flags: 0x0
+ Address: 0x43D0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 796
+ Name: ic
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 830
+ Name: '1'
+ Flags: 0x0
+ Address: 0x44E8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 836
+ Name: '3'
+ Flags: 0x0
+ Address: 0x45B0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 842
+ Name: '4'
+ Flags: 0x0
+ Address: 0x4600
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 853
+ Name: _2
+ Flags: 0x0
+ Address: 0x46A0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 859
+ Name: '2'
+ Flags: 0x0
+ Address: 0x4560
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 865
+ Name: '8'
+ Flags: 0x0
+ Address: 0x4880
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 871
+ Name: '9'
+ Flags: 0x0
+ Address: 0x4920
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 877
+ Name: '6'
+ Flags: 0x0
+ Address: 0x4740
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 4
+ NodeOffset: 883
+ Name: '5'
+ Flags: 0x0
+ Address: 0x46F0
+ Other: 0x0
+ ImportName: ''
+ NameList:
+ - n_strx: 2
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7104
+ - n_strx: 22
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7114
+ - n_strx: 46
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7123
+ - n_strx: 66
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7133
+ - n_strx: 88
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7143
+ - n_strx: 112
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7152
+ - n_strx: 135
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7158
+ - n_strx: 162
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7164
+ - n_strx: 204
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7170
+ - n_strx: 224
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7180
+ - n_strx: 248
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7189
+ - n_strx: 273
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7200
+ - n_strx: 302
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7206
+ - n_strx: 347
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7212
+ - n_strx: 395
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7218
+ - n_strx: 424
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7224
+ - n_strx: 466
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7232
+ - n_strx: 488
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7238
+ - n_strx: 510
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7244
+ - n_strx: 523
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7250
+ - n_strx: 543
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7260
+ - n_strx: 566
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7266
+ - n_strx: 593
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7272
+ - n_strx: 615
+ n_type: 0xE
+ n_sect: 1
+ n_desc: 0
+ n_value: 7278
+ - n_strx: 658
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12288
+ - n_strx: 687
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12360
+ - n_strx: 712
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12432
+ - n_strx: 739
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12504
+ - n_strx: 762
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12600
+ - n_strx: 793
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12672
+ - n_strx: 820
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12768
+ - n_strx: 860
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12840
+ - n_strx: 896
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 12936
+ - n_strx: 936
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13008
+ - n_strx: 972
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13176
+ - n_strx: 997
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13104
+ - n_strx: 1026
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13248
+ - n_strx: 1055
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13320
+ - n_strx: 1080
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13392
+ - n_strx: 1109
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13464
+ - n_strx: 1142
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13592
+ - n_strx: 1177
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13696
+ - n_strx: 1203
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13768
+ - n_strx: 1228
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13840
+ - n_strx: 1257
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 13912
+ - n_strx: 1292
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14048
+ - n_strx: 1317
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14120
+ - n_strx: 1348
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14192
+ - n_strx: 1385
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14328
+ - n_strx: 1412
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14400
+ - n_strx: 1443
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14472
+ - n_strx: 1480
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14608
+ - n_strx: 1507
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14680
+ - n_strx: 1537
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14712
+ - n_strx: 1566
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14784
+ - n_strx: 1599
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14816
+ - n_strx: 1624
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14888
+ - n_strx: 1653
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 14960
+ - n_strx: 1686
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15040
+ - n_strx: 1721
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15112
+ - n_strx: 1747
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15136
+ - n_strx: 1772
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15208
+ - n_strx: 1820
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15288
+ - n_strx: 1852
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15312
+ - n_strx: 1883
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15376
+ - n_strx: 1912
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15448
+ - n_strx: 1945
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15480
+ - n_strx: 1970
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15552
+ - n_strx: 2014
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15624
+ - n_strx: 2062
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15656
+ - n_strx: 2102
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15728
+ - n_strx: 2162
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15760
+ - n_strx: 2205
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15824
+ - n_strx: 2239
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15896
+ - n_strx: 2269
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 15968
+ - n_strx: 2299
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16000
+ - n_strx: 2328
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16072
+ - n_strx: 2361
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16104
+ - n_strx: 2386
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16176
+ - n_strx: 2410
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16248
+ - n_strx: 2438
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16280
+ - n_strx: 2458
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16352
+ - n_strx: 2487
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16424
+ - n_strx: 2520
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16456
+ - n_strx: 2555
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16496
+ - n_strx: 2581
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16520
+ - n_strx: 2606
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16592
+ - n_strx: 2645
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16616
+ - n_strx: 2683
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 17008
+ - n_strx: 2710
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16856
+ - n_strx: 2741
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16680
+ - n_strx: 2789
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16712
+ - n_strx: 2833
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16720
+ - n_strx: 2868
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16744
+ - n_strx: 2915
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16776
+ - n_strx: 2958
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16784
+ - n_strx: 3005
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16816
+ - n_strx: 3048
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16824
+ - n_strx: 3082
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 16928
+ - n_strx: 3117
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 17080
+ - n_strx: 3171
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 17112
+ - n_strx: 3222
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 17144
+ - n_strx: 3269
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 17152
+ - n_strx: 3316
+ n_type: 0xE
+ n_sect: 6
+ n_desc: 0
+ n_value: 17176
+ - n_strx: 4000
+ n_type: 0x1E
+ n_sect: 7
+ n_desc: 0
+ n_value: 17960
+ - n_strx: 4027
+ n_type: 0x1E
+ n_sect: 7
+ n_desc: 0
+ n_value: 18000
+ - n_strx: 4192
+ n_type: 0x1E
+ n_sect: 7
+ n_desc: 0
+ n_value: 18280
+ - n_strx: 4217
+ n_type: 0x1E
+ n_sect: 7
+ n_desc: 0
+ n_value: 18320
+ - n_strx: 4314
+ n_type: 0x1E
+ n_sect: 7
+ n_desc: 0
+ n_value: 18440
+ - n_strx: 4344
+ n_type: 0x1E
+ n_sect: 7
+ n_desc: 0
+ n_value: 18480
+ - n_strx: 4548
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18840
+ - n_strx: 4578
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18848
+ - n_strx: 4608
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18856
+ - n_strx: 4690
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18880
+ - n_strx: 4716
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18888
+ - n_strx: 4742
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18896
+ - n_strx: 4770
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18904
+ - n_strx: 4798
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18912
+ - n_strx: 4826
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18920
+ - n_strx: 4854
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18928
+ - n_strx: 4882
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18936
+ - n_strx: 4992
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18968
+ - n_strx: 5022
+ n_type: 0x1E
+ n_sect: 8
+ n_desc: 0
+ n_value: 18976
+ - n_strx: 5053
+ n_type: 0x1E
+ n_sect: 9
+ n_desc: 0
+ n_value: 18984
+ - n_strx: 5084
+ n_type: 0x1E
+ n_sect: 9
+ n_desc: 0
+ n_value: 19080
+ - n_strx: 5114
+ n_type: 0x1E
+ n_sect: 9
+ n_desc: 0
+ n_value: 19176
+ - n_strx: 5144
+ n_type: 0x1E
+ n_sect: 9
+ n_desc: 0
+ n_value: 19272
+ - n_strx: 5231
+ n_type: 0x1E
+ n_sect: 10
+ n_desc: 0
+ n_value: 19376
+ - n_strx: 5268
+ n_type: 0x1E
+ n_sect: 10
+ n_desc: 0
+ n_value: 19384
+ - n_strx: 5304
+ n_type: 0x1E
+ n_sect: 10
+ n_desc: 0
+ n_value: 19392
+ - n_strx: 5340
+ n_type: 0x1E
+ n_sect: 10
+ n_desc: 0
+ n_value: 19400
+ - n_strx: 3353
+ n_type: 0xF
+ n_sect: 14
+ n_desc: 0
+ n_value: 19608
+ - n_strx: 3370
+ n_type: 0xF
+ n_sect: 14
+ n_desc: 0
+ n_value: 19612
+ - n_strx: 3387
+ n_type: 0xF
+ n_sect: 14
+ n_desc: 0
+ n_value: 19616
+ - n_strx: 3410
+ n_type: 0xF
+ n_sect: 14
+ n_desc: 0
+ n_value: 19620
+ - n_strx: 3432
+ n_type: 0xF
+ n_sect: 6
+ n_desc: 0
+ n_value: 12576
+ - n_strx: 3452
+ n_type: 0xF
+ n_sect: 6
+ n_desc: 0
+ n_value: 12744
+ - n_strx: 3476
+ n_type: 0xF
+ n_sect: 6
+ n_desc: 0
+ n_value: 12912
+ - n_strx: 3509
+ n_type: 0xF
+ n_sect: 6
+ n_desc: 0
+ n_value: 13080
+ - n_strx: 3542
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17240
+ - n_strx: 3567
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17280
+ - n_strx: 3588
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17320
+ - n_strx: 3611
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17360
+ - n_strx: 3630
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17400
+ - n_strx: 3657
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17440
+ - n_strx: 3680
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17480
+ - n_strx: 3716
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17520
+ - n_strx: 3748
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17560
+ - n_strx: 3784
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17600
+ - n_strx: 3816
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17640
+ - n_strx: 3837
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17680
+ - n_strx: 3862
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17720
+ - n_strx: 3887
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17760
+ - n_strx: 3908
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17800
+ - n_strx: 3933
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17840
+ - n_strx: 3954
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17880
+ - n_strx: 3979
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 17920
+ - n_strx: 4050
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18040
+ - n_strx: 4077
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18080
+ - n_strx: 4100
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18120
+ - n_strx: 4125
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18160
+ - n_strx: 4146
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18200
+ - n_strx: 4171
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18240
+ - n_strx: 4238
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18360
+ - n_strx: 4278
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18400
+ - n_strx: 4370
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18520
+ - n_strx: 4395
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18560
+ - n_strx: 4416
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18600
+ - n_strx: 4436
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18640
+ - n_strx: 4452
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18680
+ - n_strx: 4477
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18720
+ - n_strx: 4498
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18760
+ - n_strx: 4521
+ n_type: 0xF
+ n_sect: 7
+ n_desc: 0
+ n_value: 18800
+ - n_strx: 4638
+ n_type: 0xF
+ n_sect: 8
+ n_desc: 0
+ n_value: 18864
+ - n_strx: 4664
+ n_type: 0xF
+ n_sect: 8
+ n_desc: 0
+ n_value: 18872
+ - n_strx: 4910
+ n_type: 0xF
+ n_sect: 8
+ n_desc: 0
+ n_value: 18944
+ - n_strx: 4938
+ n_type: 0xF
+ n_sect: 8
+ n_desc: 0
+ n_value: 18952
+ - n_strx: 4966
+ n_type: 0xF
+ n_sect: 8
+ n_desc: 0
+ n_value: 18960
+ - n_strx: 5178
+ n_type: 0xF
+ n_sect: 9
+ n_desc: 128
+ n_value: 19368
+ - n_strx: 5204
+ n_type: 0xF
+ n_sect: 9
+ n_desc: 128
+ n_value: 19372
+ - n_strx: 5380
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 768
+ n_value: 0
+ - n_strx: 5410
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 512
+ n_value: 0
+ - n_strx: 5433
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 768
+ n_value: 0
+ - n_strx: 5467
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 512
+ n_value: 0
+ - n_strx: 5494
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 512
+ n_value: 0
+ - n_strx: 5513
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 512
+ n_value: 0
+ - n_strx: 5533
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 1024
+ n_value: 0
+ StringTable:
+ - ' '
+ - '-[Basic3 property1]'
+ - '-[Basic3 setProperty1:]'
+ - '-[Basic3 property2]'
+ - '-[Basic3 isProperty3]'
+ - '-[Basic3 setProperty3:]'
+ - '+[Basic5 aClassMethod]'
+ - '-[Basic5 anInstanceMethod]'
+ - '-[Basic6 anInstanceMethodFromAnExtension]'
+ - '-[Basic6 property1]'
+ - '-[Basic6 setProperty1:]'
+ - '-[Basic6(Foo) property2]'
+ - '-[Basic6(Foo) setProperty2:]'
+ - '-[Basic6(Foo) anInstanceMethodFromACategory]'
+ - '-[Basic7 anInstanceMethodFromAnHiddenExtension]'
+ - '-[ExternalManagedObject foo]'
+ - '-[NSManagedObject(Simple) supportsSimple]'
+ - '+[Basic8 useSameName]'
+ - '-[Basic8 useSameName]'
+ - '-[A aMethod]'
+ - '-[Basic9 aProperty]'
+ - '-[FooClass baseMethod]'
+ - '-[FooClass protocolMethod]'
+ - '-[FooClass barMethod]'
+ - '-[FooClass(Private) privateProcotolMethod]'
+ - '__OBJC_METACLASS_RO_$_Simple'
+ - '__OBJC_CLASS_RO_$_Simple'
+ - '__OBJC_METACLASS_RO_$_Base'
+ - '__OBJC_CLASS_RO_$_Base'
+ - '__OBJC_METACLASS_RO_$_SubClass'
+ - '__OBJC_CLASS_RO_$_SubClass'
+ - '__OBJC_METACLASS_RO_$_SimpleInternalAPI'
+ - '__OBJC_CLASS_RO_$_SimpleInternalAPI'
+ - '__OBJC_METACLASS_RO_$_SimpleInternalSPI'
+ - '__OBJC_CLASS_RO_$_SimpleInternalSPI'
+ - '__OBJC_CLASS_RO_$_Basic1'
+ - '__OBJC_METACLASS_RO_$_Basic1'
+ - '__OBJC_METACLASS_RO_$_Basic2'
+ - '__OBJC_CLASS_RO_$_Basic2'
+ - '__OBJC_METACLASS_RO_$_Basic3'
+ - '__OBJC_$_INSTANCE_METHODS_Basic3'
+ - '__OBJC_$_INSTANCE_VARIABLES_Basic3'
+ - '__OBJC_$_PROP_LIST_Basic3'
+ - '__OBJC_CLASS_RO_$_Basic3'
+ - '__OBJC_METACLASS_RO_$_Basic4'
+ - '__OBJC_$_INSTANCE_VARIABLES_Basic4'
+ - '__OBJC_CLASS_RO_$_Basic4'
+ - '__OBJC_METACLASS_RO_$_Basic4_1'
+ - '__OBJC_$_INSTANCE_VARIABLES_Basic4_1'
+ - '__OBJC_CLASS_RO_$_Basic4_1'
+ - '__OBJC_METACLASS_RO_$_Basic4_2'
+ - '__OBJC_$_INSTANCE_VARIABLES_Basic4_2'
+ - '__OBJC_CLASS_RO_$_Basic4_2'
+ - '__OBJC_$_CLASS_METHODS_Basic5'
+ - '__OBJC_METACLASS_RO_$_Basic5'
+ - '__OBJC_$_INSTANCE_METHODS_Basic5'
+ - '__OBJC_CLASS_RO_$_Basic5'
+ - '__OBJC_METACLASS_RO_$_Basic6'
+ - '__OBJC_$_INSTANCE_METHODS_Basic6'
+ - '__OBJC_$_INSTANCE_VARIABLES_Basic6'
+ - '__OBJC_$_PROP_LIST_Basic6'
+ - '__OBJC_CLASS_RO_$_Basic6'
+ - '__OBJC_$_CATEGORY_INSTANCE_METHODS_Basic6_$_Foo'
+ - '__OBJC_$_PROP_LIST_Basic6_$_Foo'
+ - '__OBJC_$_CATEGORY_Basic6_$_Foo'
+ - '__OBJC_METACLASS_RO_$_Basic7'
+ - '__OBJC_$_INSTANCE_METHODS_Basic7'
+ - '__OBJC_CLASS_RO_$_Basic7'
+ - '__OBJC_METACLASS_RO_$_ExternalManagedObject'
+ - '__OBJC_$_INSTANCE_METHODS_ExternalManagedObject'
+ - '__OBJC_CLASS_RO_$_ExternalManagedObject'
+ - '__OBJC_$_CATEGORY_INSTANCE_METHODS_NSManagedObject_$_Simple'
+ - '__OBJC_$_CATEGORY_NSManagedObject_$_Simple'
+ - '__OBJC_METACLASS_RO_$_HiddenClass'
+ - '__OBJC_CLASS_RO_$_HiddenClass'
+ - '__OBJC_$_CLASS_METHODS_Basic8'
+ - '__OBJC_METACLASS_RO_$_Basic8'
+ - '__OBJC_$_INSTANCE_METHODS_Basic8'
+ - '__OBJC_CLASS_RO_$_Basic8'
+ - '__OBJC_METACLASS_RO_$_A'
+ - '__OBJC_$_INSTANCE_METHODS_A'
+ - '__OBJC_CLASS_RO_$_A'
+ - '__OBJC_METACLASS_RO_$_Basic9'
+ - '__OBJC_$_INSTANCE_METHODS_Basic9'
+ - '__OBJC_$_INSTANCE_VARIABLES_Basic9'
+ - '__OBJC_$_PROP_LIST_Basic9'
+ - '__OBJC_CLASS_RO_$_Basic9'
+ - '__OBJC_$_PROP_LIST_Basic9_$_deprecated'
+ - '__OBJC_$_CATEGORY_Basic9_$_deprecated'
+ - '__OBJC_CLASS_RO_$_FooClass'
+ - '__OBJC_METACLASS_RO_$_FooClass'
+ - '__OBJC_$_PROTOCOL_INSTANCE_METHODS_BaseProtocol'
+ - '__OBJC_$_PROTOCOL_METHOD_TYPES_BaseProtocol'
+ - '__OBJC_$_PROTOCOL_REFS_FooProtocol'
+ - '__OBJC_$_PROTOCOL_INSTANCE_METHODS_FooProtocol'
+ - '__OBJC_$_PROTOCOL_METHOD_TYPES_FooProtocol'
+ - '__OBJC_$_PROTOCOL_INSTANCE_METHODS_BarProtocol'
+ - '__OBJC_$_PROTOCOL_METHOD_TYPES_BarProtocol'
+ - '__OBJC_CLASS_PROTOCOLS_$_FooClass'
+ - '__OBJC_$_INSTANCE_METHODS_FooClass'
+ - '__OBJC_$_CATEGORY_INSTANCE_METHODS_FooClass_$_Private'
+ - '__OBJC_$_PROTOCOL_INSTANCE_METHODS_PrivateProtocol'
+ - '__OBJC_$_PROTOCOL_METHOD_TYPES_PrivateProtocol'
+ - '__OBJC_CATEGORY_PROTOCOLS_$_FooClass_$_Private'
+ - '__OBJC_$_CATEGORY_FooClass_$_Private'
+ - _extraGlobalAPI1
+ - _extraGlobalAPI2
+ - _privateGlobalVariable
+ - _publicGlobalVariable
+ - '_OBJC_EHTYPE_$_Base'
+ - '_OBJC_EHTYPE_$_SubClass'
+ - '_OBJC_EHTYPE_$_SimpleInternalAPI'
+ - '_OBJC_EHTYPE_$_SimpleInternalSPI'
+ - '_OBJC_METACLASS_$_Simple'
+ - '_OBJC_CLASS_$_Simple'
+ - '_OBJC_METACLASS_$_Base'
+ - '_OBJC_CLASS_$_Base'
+ - '_OBJC_METACLASS_$_SubClass'
+ - '_OBJC_CLASS_$_SubClass'
+ - '_OBJC_METACLASS_$_SimpleInternalAPI'
+ - '_OBJC_CLASS_$_SimpleInternalAPI'
+ - '_OBJC_METACLASS_$_SimpleInternalSPI'
+ - '_OBJC_CLASS_$_SimpleInternalSPI'
+ - '_OBJC_CLASS_$_Basic1'
+ - '_OBJC_METACLASS_$_Basic1'
+ - '_OBJC_METACLASS_$_Basic2'
+ - '_OBJC_CLASS_$_Basic2'
+ - '_OBJC_METACLASS_$_Basic3'
+ - '_OBJC_CLASS_$_Basic3'
+ - '_OBJC_METACLASS_$_Basic4'
+ - '_OBJC_CLASS_$_Basic4'
+ - '_OBJC_METACLASS_$_Basic4_1'
+ - '_OBJC_CLASS_$_Basic4_1'
+ - '_OBJC_METACLASS_$_Basic4_2'
+ - '_OBJC_CLASS_$_Basic4_2'
+ - '_OBJC_METACLASS_$_Basic5'
+ - '_OBJC_CLASS_$_Basic5'
+ - '_OBJC_METACLASS_$_Basic6'
+ - '_OBJC_CLASS_$_Basic6'
+ - '_OBJC_METACLASS_$_Basic7'
+ - '_OBJC_CLASS_$_Basic7'
+ - '_OBJC_METACLASS_$_ExternalManagedObject'
+ - '_OBJC_CLASS_$_ExternalManagedObject'
+ - '_OBJC_METACLASS_$_HiddenClass'
+ - '_OBJC_CLASS_$_HiddenClass'
+ - '_OBJC_METACLASS_$_Basic8'
+ - '_OBJC_CLASS_$_Basic8'
+ - '_OBJC_METACLASS_$_A'
+ - '_OBJC_CLASS_$_A'
+ - '_OBJC_METACLASS_$_Basic9'
+ - '_OBJC_CLASS_$_Basic9'
+ - '_OBJC_CLASS_$_FooClass'
+ - '_OBJC_METACLASS_$_FooClass'
+ - '_OBJC_IVAR_$_Basic3.property1'
+ - '_OBJC_IVAR_$_Basic3.property2'
+ - '_OBJC_IVAR_$_Basic3.property3'
+ - '_OBJC_IVAR_$_Basic4.ivar1'
+ - '_OBJC_IVAR_$_Basic4.ivar2'
+ - '_OBJC_IVAR_$_Basic4.ivar3'
+ - '_OBJC_IVAR_$_Basic4.ivar4'
+ - '_OBJC_IVAR_$_Basic4_1.ivar1'
+ - '_OBJC_IVAR_$_Basic4_1.ivar2'
+ - '_OBJC_IVAR_$_Basic4_1.ivar3'
+ - '_OBJC_IVAR_$_Basic4_1.ivar4'
+ - '_OBJC_IVAR_$_Basic4_2.ivar4'
+ - '_OBJC_IVAR_$_Basic4_2.ivar3'
+ - '_OBJC_IVAR_$_Basic4_2.ivar2'
+ - '_OBJC_IVAR_$_Basic4_2.ivar1'
+ - '_OBJC_IVAR_$_Basic6.ivar1'
+ - '_OBJC_IVAR_$_Basic6.property1'
+ - '_OBJC_IVAR_$_Basic9._aProperty'
+ - '__OBJC_PROTOCOL_$_BaseProtocol'
+ - '__OBJC_PROTOCOL_$_FooProtocol'
+ - '__OBJC_PROTOCOL_$_BarProtocol'
+ - '__OBJC_PROTOCOL_$_PrivateProtocol'
+ - _weakPublicGlobalVariable
+ - _weakPrivateGlobalVariable
+ - '__OBJC_LABEL_PROTOCOL_$_BaseProtocol'
+ - '__OBJC_LABEL_PROTOCOL_$_FooProtocol'
+ - '__OBJC_LABEL_PROTOCOL_$_BarProtocol'
+ - '__OBJC_LABEL_PROTOCOL_$_PrivateProtocol'
+ - '_OBJC_CLASS_$_NSManagedObject'
+ - '_OBJC_CLASS_$_NSObject'
+ - '_OBJC_METACLASS_$_NSManagedObject'
+ - '_OBJC_METACLASS_$_NSObject'
+ - __objc_empty_cache
+ - _objc_ehtype_vtable
+ - dyld_stub_binder
+ - ''
+ - ''
+ FunctionStarts: [ 0x1BC0, 0x1BCA, 0x1BD3, 0x1BDD, 0x1BE7, 0x1BF0, 0x1BF6,
+ 0x1BFC, 0x1C02, 0x1C0C, 0x1C15, 0x1C20, 0x1C26, 0x1C2C,
+ 0x1C32, 0x1C38, 0x1C40, 0x1C46, 0x1C4C, 0x1C52, 0x1C5C,
+ 0x1C62, 0x1C68, 0x1C6E ]
+...
diff --git a/clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI.h b/clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI.h
new file mode 100644
index 000000000000..5dd416a0619c
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI.h
@@ -0,0 +1,3 @@
+#ifndef HAVE_SEEN_PROJECT_HEADER_FIRST
+#error "Project header was not included in the correct order!"
+#endif
diff --git a/clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI2.h b/clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI2.h
new file mode 100644
index 000000000000..9bbae52d7215
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/SimpleInternalAPI2.h
@@ -0,0 +1,7 @@
+#import <Foundation/Foundation.h>
+
+__attribute__((objc_exception))
+@interface SimpleInternalAPI : NSObject
+@end
+
+#define HAVE_SEEN_PROJECT_HEADER_FIRST 1
diff --git a/clang/test/InstallAPI/Inputs/Simple/SimpleInternalSPI.h b/clang/test/InstallAPI/Inputs/Simple/SimpleInternalSPI.h
new file mode 100644
index 000000000000..a816c01abeb0
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Simple/SimpleInternalSPI.h
@@ -0,0 +1,5 @@
+#import <Foundation/Foundation.h>
+
+__attribute__((objc_exception))
+@interface SimpleInternalSPI : NSObject
+@end
diff --git a/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/AAA.h b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/AAA.h
new file mode 100644
index 000000000000..993d5d4abadb
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/AAA.h
@@ -0,0 +1,3 @@
+#ifndef PUBLIC_UMBRELLA_HEADER_FIRST
+#error "Public umbrella header was not included first!"
+#endif
diff --git a/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/SpecialUmbrella.h b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/SpecialUmbrella.h
new file mode 100644
index 000000000000..2599ff14ae17
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/Headers/SpecialUmbrella.h
@@ -0,0 +1 @@
+#define PUBLIC_UMBRELLA_HEADER_FIRST
diff --git a/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/AAA_Private.h b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/AAA_Private.h
new file mode 100644
index 000000000000..557209bfeb86
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/AAA_Private.h
@@ -0,0 +1,3 @@
+#ifndef PRIVATE_UMBRELLA_HEADER_FIRST
+#error "Private umbrella header was not included first!"
+#endif
diff --git a/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h
new file mode 100644
index 000000000000..fd5b49b94316
--- /dev/null
+++ b/clang/test/InstallAPI/Inputs/Umbrella/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h
@@ -0,0 +1 @@
+#define PRIVATE_UMBRELLA_HEADER_FIRST
diff --git a/clang/test/InstallAPI/diagnostics-cpp.test b/clang/test/InstallAPI/diagnostics-cpp.test
index 658886537507..51cca129ea0a 100644
--- a/clang/test/InstallAPI/diagnostics-cpp.test
+++ b/clang/test/InstallAPI/diagnostics-cpp.test
@@ -21,6 +21,8 @@ CHECK-NEXT: CPP.h:5:7: error: declaration has external linkage, but symbol has i
CHECK-NEXT: CPP.h:6:7: error: dynamic library symbol '(weak-def) Bar::init()' is weak defined, but its declaration is not
CHECK-NEXT: int init();
CHECK-NEXT: ^
+CHECK-NEXT: warning: violations found for arm64
+CHECK-NEXT: error: no declaration found for exported symbol 'int foo<unsigned int>(unsigned int)' in dynamic library
//--- inputs.json.in
{
diff --git a/clang/test/InstallAPI/diagnostics-dsym.test b/clang/test/InstallAPI/diagnostics-dsym.test
new file mode 100644
index 000000000000..8a1b394f2f86
--- /dev/null
+++ b/clang/test/InstallAPI/diagnostics-dsym.test
@@ -0,0 +1,39 @@
+; REQUIRES: x86_64-darwin
+
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+
+// Build a simple dylib with debug info.
+; RUN: %clang --target=x86_64-apple-macos10.15 -g -dynamiclib %t/foo.c \
+; RUN: -current_version 1 -compatibility_version 1 -L%t/usr/lib \
+; RUN: -save-temps \
+; RUN: -o %t/foo.dylib -install_name %t/foo.dylib
+; RUN: dsymutil %t/foo.dylib -o %t/foo.dSYM
+
+; RUN: not clang-installapi -x c++ --target=x86_64-apple-macos10.15 \
+; RUN: -install_name %t/foo.dylib \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: -o %t/output.tbd \
+; RUN: --verify-against=%t/foo.dylib --dsym=%t/foo.dSYM \
+; RUN: --verify-mode=Pedantic 2>&1 | FileCheck %s
+
+; CHECK: violations found for x86_64
+; CHECK: foo.c:5:0: error: no declaration found for exported symbol 'bar' in dynamic library
+; CHECK: foo.c:1:0: error: no declaration found for exported symbol 'foo' in dynamic library
+
+;--- foo.c
+int foo(void) {
+ return 1;
+}
+extern char bar;
+char bar = 'a';
+
+;--- usr/lib/libSystem.tbd
+--- !tapi-tbd
+tbd-version: 4
+targets: [ x86_64-macos ]
+install-name: '/usr/lib/libSystem.B.dylib'
+exports:
+ - targets: [ x86_64-macos ]
+ symbols: [ dyld_stub_binder ]
+...
diff --git a/clang/test/InstallAPI/extra-exclude-headers.test b/clang/test/InstallAPI/extra-exclude-headers.test
new file mode 100644
index 000000000000..663ca1a5d500
--- /dev/null
+++ b/clang/test/InstallAPI/extra-exclude-headers.test
@@ -0,0 +1,207 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+; RUN: mkdir -p %t/System/Library/Frameworks
+; RUN: cp -r %S/Inputs/Simple/Simple.framework %t/System/Library/Frameworks/
+; RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+; RUN: yaml2obj %S/Inputs/Simple/Simple.yaml -o %t/Simple
+
+// Add exclude options.
+; RUN: clang-installapi -target x86_64-apple-macosx10.12 \
+; RUN: -install_name /System/Library/Frameworks/Simple.framework/Versions/A/Simple \
+; RUN: -current_version 1.2.3 -compatibility_version 1 \
+; RUN: -F%t/System/Library/Frameworks \
+; RUN: %t/inputs.json -o %t/Simple.tbd \
+; RUN: --verify-against=%t/Simple --verify-mode=ErrorsAndWarnings \
+; RUN: --exclude-public-header=**/SimpleAPI.h \
+; RUN: --exclude-private-header=**/SimplePrivateSPI.h 2>&1 | FileCheck -check-prefix=WARNINGS %s
+; RUN: llvm-readtapi -compare %t/Simple.tbd %t/expected-excluded.tbd
+
+// Add extra options.
+; RUN: clang-installapi -target x86_64-apple-macosx10.12 \
+; RUN: -install_name /System/Library/Frameworks/Simple.framework/Versions/A/Simple \
+; RUN: -current_version 1.2.3 -compatibility_version 1 \
+; RUN: -F%t/System/Library/Frameworks \
+; RUN: %t/inputs.json -o %t/Simple.tbd \
+; RUN: --verify-against=%t/Simple --verify-mode=Pedantic \
+; RUN: --extra-project-header=%S/Inputs/Simple/SimpleInternalAPI2.h \
+; RUN: --extra-project-header=%S/Inputs/Simple/SimpleInternalAPI.h \
+; RUN: --extra-public-header=%S/Inputs/Simple/Extra \
+; RUN: --extra-private-header=%S/Inputs/Simple/SimpleInternalSPI.h \
+; RUN: --exclude-public-header=**/SimpleAPI.h \
+; RUN: --exclude-private-header=**/SimplePrivateSPI.h 2>&1 | FileCheck -check-prefix=PEDANTIC -allow-empty %s
+; RUN: llvm-readtapi -compare %t/Simple.tbd %t/expected-extra.tbd
+
+// Check fatal missing file input.
+; RUN: not clang-installapi -target x86_64-apple-macosx10.12 \
+; RUN: -install_name /System/Library/Frameworks/Simple.framework/Versions/A/Simple \
+; RUN: -current_version 1.2.3 -compatibility_version 1 \
+; RUN: -F%t/System/Library/Frameworks \
+; RUN: %t/inputs.json -o %t/Simple.tbd \
+; RUN: --extra-public-header=%S/Inputs/Simple/NoSuchFile.h 2>&1 | FileCheck -allow-empty -check-prefix=NOPUBLIC %s
+
+; WARNINGS: warning: no declaration was found for exported symbol '_extraGlobalAPI1' in dynamic library
+; WARNINGS: warning: no declaration was found for exported symbol '_extraGlobalAPI2' in dynamic library
+; WARNINGS: warning: no declaration was found for exported symbol '(ObjC Class) SimpleInternalSPI' in dynamic library
+; WARNINGS: warning: no declaration was found for exported symbol '(ObjC Class) SimpleInternalAPI' in dynamic library
+
+; PEDANTIC-NOT: error
+; PEDANTIC: warning: cannot find protocol definition for 'ForwardProcotol'
+
+; NOPUBLIC: error: no such public header file:
+
+;--- expected-excluded.tbd
+{
+ "main_library": {
+ "current_versions": [
+ {
+ "version": "1.2.3"
+ }
+ ],
+ "exported_symbols": [
+ {
+ "data": {
+ "global": [
+ "_publicGlobalVariable",
+ "_privateGlobalVariable"
+ ],
+ "objc_class": [
+ "ExternalManagedObject", "Basic6",
+ "Basic1", "Base", "Basic3",
+ "FooClass", "Simple",
+ "Basic4_2", "Basic5",
+ "Basic9","Basic8",
+ "Basic2", "Basic4", "A", "SubClass"
+ ],
+ "objc_eh_type": [
+ "SubClass", "Base"
+ ],
+ "objc_ivar": [
+ "Basic4.ivar2", "Basic4_2.ivar1", "Basic6.ivar1",
+ "Basic4.ivar1", "Basic4_2.ivar2"
+ ],
+ "weak": [
+ "_weakPrivateGlobalVariable", "_weakPublicGlobalVariable"
+ ]
+ }
+ }
+ ],
+ "flags": [
+ {
+ "attributes": ["not_app_extension_safe"]
+ }
+ ],
+ "install_names": [
+ {
+ "name": "/System/Library/Frameworks/Simple.framework/Versions/A/Simple"
+ }
+ ],
+ "target_info": [
+ {"min_deployment": "10.12", "target": "x86_64-macos"}
+ ]
+ },
+ "tapi_tbd_version": 5
+}
+
+;--- expected-extra.tbd
+{
+ "main_library": {
+ "current_versions": [
+ { "version": "1.2.3" }
+ ],
+ "exported_symbols": [
+ {
+ "data": {
+ "global": [
+ "_publicGlobalVariable", "_extraGlobalAPI2",
+ "_extraGlobalAPI1", "_privateGlobalVariable"
+ ],
+ "objc_class": [
+ "SubClass", "SimpleInternalSPI",
+ "Basic6", "Basic1", "Base",
+ "Basic3", "Simple", "Basic4_2",
+ "Basic5", "FooClass", "Basic9",
+ "Basic8", "Basic2", "Basic4",
+ "A", "SimpleInternalAPI",
+ "ExternalManagedObject"
+ ],
+ "objc_eh_type": [
+ "SubClass", "SimpleInternalAPI",
+ "Base", "SimpleInternalSPI"
+ ],
+ "objc_ivar": [
+ "Basic4.ivar2", "Basic4_2.ivar1",
+ "Basic6.ivar1", "Basic4.ivar1",
+ "Basic4_2.ivar2"
+ ],
+ "weak": [
+ "_weakPrivateGlobalVariable", "_weakPublicGlobalVariable"
+ ]
+ }
+ }
+ ],
+ "flags": [
+ {
+ "attributes": [ "not_app_extension_safe"]
+ }
+ ],
+ "install_names": [
+ { "name": "/System/Library/Frameworks/Simple.framework/Versions/A/Simple" }
+ ],
+ "target_info": [
+ { "min_deployment": "10.12", "target": "x86_64-macos" }
+ ]
+ },
+ "tapi_tbd_version": 5
+}
+
+;--- inputs.json.in
+{
+ "headers": [
+ {
+ "path" : "DSTROOT/System/Library/Frameworks/Simple.framework/Headers/Basic.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/System/Library/Frameworks/Simple.framework/Headers/External.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/System/Library/Frameworks/Simple.framework/Headers/Simple.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/System/Library/Frameworks/Simple.framework/Headers/SimpleAPI.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/System/Library/Frameworks/Simple.framework/PrivateHeaders/SimplePrivate.h",
+ "type" : "private"
+ },
+ {
+ "path" : "DSTROOT/System/Library/Frameworks/Simple.framework/PrivateHeaders/SimplePrivateSPI.h",
+ "type" : "private"
+ }
+ ],
+ "version": "3"
+}
+
+;--- System/Library/Frameworks/Foundation.framework/Headers/Foundation.h
+@interface NSObject
+@end
+
+typedef unsigned char BOOL;
+#ifndef NS_AVAILABLE
+#define NS_AVAILABLE(x,y) __attribute__((availability(macosx,introduced=x)))
+#endif
+#ifndef NS_UNAVAILABLE
+#define NS_UNAVAILABLE __attribute__((unavailable))
+#endif
+#ifndef NS_DEPRECATED_MAC
+#define NS_DEPRECATED_MAC(x,y) __attribute__((availability(macosx,introduced=x,deprecated=y,message="" )));
+#endif
+
+@interface NSManagedObject
+@end
+
+@interface NSSet
+@end
diff --git a/clang/test/InstallAPI/linker-symbols.test b/clang/test/InstallAPI/linker-symbols.test
new file mode 100644
index 000000000000..1e4ddf9c45d5
--- /dev/null
+++ b/clang/test/InstallAPI/linker-symbols.test
@@ -0,0 +1,440 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+; RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+
+; RUN: yaml2obj %t/MagicSymbols.yaml -o %t/MagicSymbols
+
+; RUN: not clang-installapi -target x86_64-apple-macosx13 \
+; RUN: -install_name \
+; RUN: /System/Library/Frameworks/SpecialLinkerSymbols.framework/Versions/A/SpecialLinkerSymbols \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: %t/inputs.json -o %t/output.tbd \
+; RUN: --verify-mode=ErrorsOnly \
+; RUN: --verify-against=%t/MagicSymbols 2>&1 | FileCheck %s
+
+CHECK: warning: violations found for x86_64
+CHECK: error: no declaration found for exported symbol '$ld$add$os10.4$_symbol2' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$add$os10.5$_symbol2' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$hide$os10.6$_symbol1' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$hide$os10.7$_symbol1' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$weak$os10.5$_symbol3' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$weak$os10.4$_symbol3' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$install_name$os10.4$/System/Library/Frameworks/A.framework/Versions/A/A' in dynamic library
+CHECK: error: no declaration found for exported symbol '$ld$install_name$os10.5$/System/Library/Frameworks/B.framework/Versions/A/B' in dynamic library
+
+;--- MagicSymbols.h
+#ifndef SPECIAL_LINKER_SYMBOLS_H
+#define SPECIAL_LINKER_SYMBOLS_H
+
+extern const int SpecialLinkerSymbolsVersion;
+
+extern int symbol1;
+extern int symbol3;
+
+#endif // SPECIAL_LINKER_SYMBOLS_H
+
+;--- inputs.json.in
+{
+ "headers": [ {
+ "path" : "DSTROOT/MagicSymbols.h",
+ "type" : "project"
+ }
+ ],
+ "version": "3"
+}
+
+;--- MagicSymbols.yaml
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x1000007
+ cpusubtype: 0x3
+ filetype: 0x6
+ ncmds: 12
+ sizeofcmds: 952
+ flags: 0x100085
+ reserved: 0x0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __TEXT
+ vmaddr: 0
+ vmsize: 4096
+ fileoff: 0
+ filesize: 4096
+ maxprot: 5
+ initprot: 5
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0xBD8
+ size: 0
+ offset: 0xBD8
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x80000000
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: ''
+ - sectname: __const
+ segname: __TEXT
+ addr: 0xBD8
+ size: 4
+ offset: 0xBD8
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: '07000000'
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __DATA
+ vmaddr: 4096
+ vmsize: 4096
+ fileoff: 4096
+ filesize: 4096
+ maxprot: 3
+ initprot: 3
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __data
+ segname: __DATA
+ addr: 0x1000
+ size: 8
+ offset: 0x1000
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 4D00000009030000
+ - sectname: __common
+ segname: __DATA
+ addr: 0x1008
+ size: 8
+ offset: 0x0
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x1
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 8192
+ vmsize: 944
+ fileoff: 8192
+ filesize: 944
+ maxprot: 1
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_DYLD_INFO_ONLY
+ cmdsize: 48
+ rebase_off: 0
+ rebase_size: 0
+ bind_off: 0
+ bind_size: 0
+ weak_bind_off: 0
+ weak_bind_size: 0
+ lazy_bind_off: 0
+ lazy_bind_size: 0
+ export_off: 8192
+ export_size: 376
+ - cmd: LC_SYMTAB
+ cmdsize: 24
+ symoff: 8576
+ nsyms: 12
+ stroff: 8768
+ strsize: 368
+ - cmd: LC_DYSYMTAB
+ cmdsize: 80
+ ilocalsym: 0
+ nlocalsym: 0
+ iextdefsym: 0
+ nextdefsym: 11
+ iundefsym: 11
+ nundefsym: 1
+ tocoff: 0
+ ntoc: 0
+ modtaboff: 0
+ nmodtab: 0
+ extrefsymoff: 0
+ nextrefsyms: 0
+ indirectsymoff: 0
+ nindirectsyms: 0
+ extreloff: 0
+ nextrel: 0
+ locreloff: 0
+ nlocrel: 0
+ - cmd: LC_ID_DYLIB
+ cmdsize: 120
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 65536
+ compatibility_version: 65536
+ Content: '/System/Library/Frameworks/SpecialLinkerSymbols.framework/Versions/A/SpecialLinkerSymbols'
+ ZeroPadBytes: 7
+ - cmd: LC_UUID
+ cmdsize: 24
+ uuid: 4C4C4478-5555-3144-A106-356C3C9DACA3
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 1
+ minos: 851968
+ sdk: 983040
+ ntools: 1
+ Tools:
+ - tool: 4
+ version: 1245184
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 56
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 88539136
+ compatibility_version: 65536
+ Content: '/usr/lib/libSystem.B.dylib'
+ ZeroPadBytes: 6
+ - cmd: LC_FUNCTION_STARTS
+ cmdsize: 16
+ dataoff: 8568
+ datasize: 8
+ - cmd: LC_DATA_IN_CODE
+ cmdsize: 16
+ dataoff: 8576
+ datasize: 0
+LinkEditData:
+ ExportTrie:
+ TerminalSize: 0
+ NodeOffset: 0
+ Name: ''
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 11
+ Name: _
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 50
+ Name: SpecialLinkerSymbolsVersion
+ Flags: 0x0
+ Address: 0xBD8
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 55
+ Name: symbol
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 63
+ Name: '3'
+ Flags: 0x0
+ Address: 0x1004
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 68
+ Name: '1'
+ Flags: 0x0
+ Address: 0x1000
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 73
+ Name: '$ld$'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 134
+ Name: 'add$os10.'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 162
+ Name: '4$_symbol2'
+ Flags: 0x0
+ Address: 0x1008
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 167
+ Name: '5$_symbol2'
+ Flags: 0x0
+ Address: 0x1009
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 172
+ Name: 'hide$os10.'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 200
+ Name: '6$_symbol1'
+ Flags: 0x0
+ Address: 0x100A
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 205
+ Name: '7$_symbol1'
+ Flags: 0x0
+ Address: 0x100B
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 210
+ Name: 'weak$os10.'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 238
+ Name: '5$_symbol3'
+ Flags: 0x0
+ Address: 0x100F
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 243
+ Name: '4$_symbol3'
+ Flags: 0x0
+ Address: 0x100E
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 248
+ Name: 'install_name$os10.'
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 362
+ Name: '4$/System/Library/Frameworks/A.framework/Versions/A/A'
+ Flags: 0x0
+ Address: 0x100C
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 367
+ Name: '5$/System/Library/Frameworks/B.framework/Versions/A/B'
+ Flags: 0x0
+ Address: 0x100D
+ Other: 0x0
+ ImportName: ''
+ NameList:
+ - n_strx: 2
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4104
+ - n_strx: 26
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4105
+ - n_strx: 50
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4106
+ - n_strx: 75
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4107
+ - n_strx: 100
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4108
+ - n_strx: 176
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4109
+ - n_strx: 252
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4110
+ - n_strx: 277
+ n_type: 0xF
+ n_sect: 4
+ n_desc: 0
+ n_value: 4111
+ - n_strx: 302
+ n_type: 0xF
+ n_sect: 2
+ n_desc: 0
+ n_value: 3032
+ - n_strx: 331
+ n_type: 0xF
+ n_sect: 3
+ n_desc: 0
+ n_value: 4096
+ - n_strx: 340
+ n_type: 0xF
+ n_sect: 3
+ n_desc: 0
+ n_value: 4100
+ - n_strx: 349
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 256
+ n_value: 0
+ StringTable:
+ - ' '
+ - '$ld$add$os10.4$_symbol2'
+ - '$ld$add$os10.5$_symbol2'
+ - '$ld$hide$os10.6$_symbol1'
+ - '$ld$hide$os10.7$_symbol1'
+ - '$ld$install_name$os10.4$/System/Library/Frameworks/A.framework/Versions/A/A'
+ - '$ld$install_name$os10.5$/System/Library/Frameworks/B.framework/Versions/A/B'
+ - '$ld$weak$os10.4$_symbol3'
+ - '$ld$weak$os10.5$_symbol3'
+ - _SpecialLinkerSymbolsVersion
+ - _symbol1
+ - _symbol3
+ - dyld_stub_binder
+ - ''
+ - ''
+...
diff --git a/clang/test/InstallAPI/mismatching-objc-class-symbols.test b/clang/test/InstallAPI/mismatching-objc-class-symbols.test
new file mode 100644
index 000000000000..3b4acf1035ac
--- /dev/null
+++ b/clang/test/InstallAPI/mismatching-objc-class-symbols.test
@@ -0,0 +1,269 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+; RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+; RUN: yaml2obj %t/swift-objc-class.yaml -o %t/libswift-objc.dylib
+
+// Try out a dylib that only has 1 symbol for an ObjCClass, with no declarations in the header.
+; RUN: clang-installapi -target arm64-apple-macos14 -dynamiclib \
+; RUN: -install_name tmp.dylib --verify-against=%t/libswift-objc.dylib \
+; RUN: -I%t/usr/include %t/inputs.json -o %t/missing.tbd \
+; RUN: --verify-mode=ErrorsAndWarnings 2>&1 | FileCheck --check-prefix MISSING_DECL %s
+; RUN: llvm-readtapi --compare %t/missing.tbd %t/missing-expected.tbd
+
+// Try out a dylib that only has 1 symbol for an ObjCClass,
+// but a complete ObjCClass decl in the header.
+; RUN: clang-installapi -target arm64-apple-macos14 -dynamiclib \
+; RUN: -install_name tmp.dylib --verify-against=%t/libswift-objc.dylib \
+; RUN: -I%t/usr/include %t/inputs.json -o %t/mismatching.tbd \
+; RUN: --verify-mode=Pedantic -DFULL_DECL 2>&1 | FileCheck --check-prefix MISMATCH_DECL %s
+; RUN: llvm-readtapi -compare %t/mismatching.tbd %t/mismatching-expected.tbd
+
+// Try out a dylib that only has 1 symbol for an ObjCClass, but it is represented in the header.
+; RUN: clang-installapi -target arm64-apple-macos14 \
+; RUN: -install_name tmp.dylib --verify-against=%t/libswift-objc.dylib \
+; RUN: -I%t/usr/include %t/inputs.json -o %t/matching.tbd \
+; RUN: --verify-mode=Pedantic \
+; RUN: -DHAS_META_DECL 2>&1 | FileCheck --allow-empty %s
+
+; MISSING_DECL: violations found for arm64
+; MISSING_DECL-NEXT: warning: no declaration was found for exported symbol 'Metaclass of Suggestion' in dynamic library
+
+; MISMATCH_DECL: violations found for arm64-apple-macos14
+; MISMATCH_DECL: warning: declaration has external linkage, but dynamic library doesn't have symbol 'Class of Suggestion'
+
+; CHECK-NOT: error
+; CHECK-NOT: warning
+
+
+;--- usr/include/mismatch.h
+#if HAS_META_DECL
+int metaclass __asm("_OBJC_METACLASS_$_Suggestion");
+#endif
+
+#if FULL_DECL
+@interface Suggestion
+@end
+#endif
+
+;--- inputs.json.in
+{
+ "headers": [ {
+ "path" : "DSTROOT/usr/include/mismatch.h",
+ "type" : "public"
+ }
+ ],
+ "version": "3"
+}
+
+;--- missing-expected.tbd
+--- !tapi-tbd
+tbd-version: 4
+targets: [ arm64-macos ]
+flags: [ not_app_extension_safe ]
+install-name: tmp.dylib
+current-version: 0
+compatibility-version: 0
+...
+
+;--- mismatching-expected.tbd
+--- !tapi-tbd
+tbd-version: 4
+targets: [ arm64-macos ]
+flags: [ not_app_extension_safe ]
+install-name: tmp.dylib
+current-version: 0
+compatibility-version: 0
+exports:
+ - targets: [ arm64-macos ]
+ objc-classes: [ Suggestion ]
+...
+
+;--- swift-objc-class.yaml
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x100000C
+ cpusubtype: 0x0
+ filetype: 0x6
+ ncmds: 13
+ sizeofcmds: 752
+ flags: 0x100085
+ reserved: 0x0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __TEXT
+ vmaddr: 0
+ vmsize: 16384
+ fileoff: 0
+ filesize: 16384
+ maxprot: 5
+ initprot: 5
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x330
+ size: 0
+ offset: 0x330
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x80000000
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: ''
+ - sectname: __const
+ segname: __TEXT
+ addr: 0x330
+ size: 1
+ offset: 0x330
+ align: 0
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: '61'
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 16384
+ vmsize: 416
+ fileoff: 16384
+ filesize: 416
+ maxprot: 1
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_DYLD_INFO_ONLY
+ cmdsize: 48
+ rebase_off: 0
+ rebase_size: 0
+ bind_off: 0
+ bind_size: 0
+ weak_bind_off: 0
+ weak_bind_size: 0
+ lazy_bind_off: 0
+ lazy_bind_size: 0
+ export_off: 16384
+ export_size: 40
+ - cmd: LC_SYMTAB
+ cmdsize: 24
+ symoff: 16432
+ nsyms: 2
+ stroff: 16464
+ strsize: 48
+ - cmd: LC_DYSYMTAB
+ cmdsize: 80
+ ilocalsym: 0
+ nlocalsym: 0
+ iextdefsym: 0
+ nextdefsym: 1
+ iundefsym: 1
+ nundefsym: 1
+ tocoff: 0
+ ntoc: 0
+ modtaboff: 0
+ nmodtab: 0
+ extrefsymoff: 0
+ nextrefsyms: 0
+ indirectsymoff: 0
+ nindirectsyms: 0
+ extreloff: 0
+ nextrel: 0
+ locreloff: 0
+ nlocrel: 0
+ - cmd: LC_ID_DYLIB
+ cmdsize: 40
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 0
+ compatibility_version: 0
+ Content: tmp.dylib
+ ZeroPadBytes: 7
+ - cmd: LC_UUID
+ cmdsize: 24
+ uuid: 4C4C4443-5555-3144-A142-97179769CBE0
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 1
+ minos: 917504
+ sdk: 983040
+ ntools: 1
+ Tools:
+ - tool: 4
+ version: 1245184
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 96
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 197656576
+ compatibility_version: 19660800
+ Content: '/System/Library/Frameworks/Foundation.framework/Versions/C/Foundation'
+ ZeroPadBytes: 3
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 56
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 88473600
+ compatibility_version: 65536
+ Content: '/usr/lib/libSystem.B.dylib'
+ ZeroPadBytes: 6
+ - cmd: LC_FUNCTION_STARTS
+ cmdsize: 16
+ dataoff: 16424
+ datasize: 8
+ - cmd: LC_DATA_IN_CODE
+ cmdsize: 16
+ dataoff: 16432
+ datasize: 0
+ - cmd: LC_CODE_SIGNATURE
+ cmdsize: 16
+ dataoff: 16512
+ datasize: 288
+LinkEditData:
+ ExportTrie:
+ TerminalSize: 0
+ NodeOffset: 0
+ Name: ''
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 32
+ Name: '_OBJC_METACLASS_$_Suggestion'
+ Flags: 0x0
+ Address: 0x330
+ Other: 0x0
+ ImportName: ''
+ NameList:
+ - n_strx: 2
+ n_type: 0xF
+ n_sect: 2
+ n_desc: 0
+ n_value: 816
+ - n_strx: 31
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 512
+ n_value: 0
+ StringTable:
+ - ' '
+ - '_OBJC_METACLASS_$_Suggestion'
+ - dyld_stub_binder
+ FunctionStarts: [ 0x330 ]
+...
+// Generated from:
+// xcrun -sdk macosx clang tmp.c -dynamiclib -install_name tmp.dylib
+// tmp.c:
+// __attribute__((visibility("default")))
+// const char Meta __asm("_OBJC_METACLASS_$_Suggestion") = 'a';
diff --git a/clang/test/InstallAPI/symbol-flags.test b/clang/test/InstallAPI/symbol-flags.test
new file mode 100644
index 000000000000..3f68afd17e3b
--- /dev/null
+++ b/clang/test/InstallAPI/symbol-flags.test
@@ -0,0 +1,290 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+; RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+
+; RUN: yaml2obj %t/flags.yaml -o %t/SymbolFlags
+
+; RUN: not clang-installapi -x c++ --target=arm64-apple-macos13 \
+; RUN: -install_name /System/Library/Frameworks/SymbolFlags.framework/Versions/A/SymbolFlags \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: %t/inputs.json -o output.tbd \
+; RUN: --verify-against=%t/SymbolFlags \
+; RUN: --verify-mode=ErrorsOnly 2>&1 | FileCheck %s
+
+; CHECK: project.h:2:21: error: declaration '(tlv) val' is thread local, but symbol is not in dynamic library
+; CHECK-NEXT: extern __thread int val;
+; CHECK: project.h:3:13: error: dynamic library symbol '(weak-def) __Z12my_weak_funcv' is weak defined, but its declaration is not
+; CHECK-NEXT: extern void my_weak_func();
+
+;--- project.h
+extern void my_func();
+extern __thread int val;
+extern void my_weak_func();
+
+;--- inputs.json.in
+{
+ "headers": [ {
+ "path" : "DSTROOT/project.h",
+ "type" : "project"
+ }
+ ],
+ "version": "3"
+}
+
+;--- flags.yaml
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x100000C
+ cpusubtype: 0x0
+ filetype: 0x6
+ ncmds: 14
+ sizeofcmds: 912
+ flags: 0x118085
+ reserved: 0x0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __TEXT
+ vmaddr: 0
+ vmsize: 16384
+ fileoff: 0
+ filesize: 16384
+ maxprot: 5
+ initprot: 5
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0xFB0
+ size: 8
+ offset: 0xFB0
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: C0035FD6C0035FD6
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0xFB8
+ size: 4152
+ offset: 0xFB8
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ content: 010000001C000000010000002000000000000000200000000200000000000002B00F00003800000038000000B80F00000000000038000000030000000C0001001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+ - cmd: LC_SEGMENT_64
+ cmdsize: 152
+ segname: __DATA
+ vmaddr: 16384
+ vmsize: 16384
+ fileoff: 16384
+ filesize: 0
+ maxprot: 3
+ initprot: 3
+ nsects: 1
+ flags: 0
+ Sections:
+ - sectname: __common
+ segname: __DATA
+ addr: 0x4000
+ size: 4
+ offset: 0x0
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x1
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 32768
+ vmsize: 480
+ fileoff: 16384
+ filesize: 480
+ maxprot: 1
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_DYLD_INFO_ONLY
+ cmdsize: 48
+ rebase_off: 0
+ rebase_size: 0
+ bind_off: 0
+ bind_size: 0
+ weak_bind_off: 0
+ weak_bind_size: 0
+ lazy_bind_off: 0
+ lazy_bind_size: 0
+ export_off: 16384
+ export_size: 64
+ - cmd: LC_SYMTAB
+ cmdsize: 24
+ symoff: 16456
+ nsyms: 4
+ stroff: 16520
+ strsize: 56
+ - cmd: LC_DYSYMTAB
+ cmdsize: 80
+ ilocalsym: 0
+ nlocalsym: 0
+ iextdefsym: 0
+ nextdefsym: 3
+ iundefsym: 3
+ nundefsym: 1
+ tocoff: 0
+ ntoc: 0
+ modtaboff: 0
+ nmodtab: 0
+ extrefsymoff: 0
+ nextrefsyms: 0
+ indirectsymoff: 0
+ nindirectsyms: 0
+ extreloff: 0
+ nextrel: 0
+ locreloff: 0
+ nlocrel: 0
+ - cmd: LC_ID_DYLIB
+ cmdsize: 96
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 65536
+ compatibility_version: 65536
+ Content: '/System/Library/Frameworks/SymbolFlags.framework/Versions/A/SymbolFlags'
+ ZeroPadBytes: 1
+ - cmd: LC_UUID
+ cmdsize: 24
+ uuid: 4C4C4436-5555-3144-A1AF-5D3063ACFC99
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 1
+ minos: 851968
+ sdk: 983040
+ ntools: 1
+ Tools:
+ - tool: 4
+ version: 1245184
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 48
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 117985024
+ compatibility_version: 65536
+ Content: '/usr/lib/libc++.1.dylib'
+ ZeroPadBytes: 1
+ - cmd: LC_LOAD_DYLIB
+ cmdsize: 56
+ dylib:
+ name: 24
+ timestamp: 0
+ current_version: 88473600
+ compatibility_version: 65536
+ Content: '/usr/lib/libSystem.B.dylib'
+ ZeroPadBytes: 6
+ - cmd: LC_FUNCTION_STARTS
+ cmdsize: 16
+ dataoff: 16448
+ datasize: 8
+ - cmd: LC_DATA_IN_CODE
+ cmdsize: 16
+ dataoff: 16456
+ datasize: 0
+ - cmd: LC_CODE_SIGNATURE
+ cmdsize: 16
+ dataoff: 16576
+ datasize: 288
+LinkEditData:
+ ExportTrie:
+ TerminalSize: 0
+ NodeOffset: 0
+ Name: ''
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 5
+ Name: _
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 16
+ Name: val
+ Flags: 0x0
+ Address: 0x4000
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 0
+ NodeOffset: 22
+ Name: _Z
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 3
+ NodeOffset: 52
+ Name: 7my_funcv
+ Flags: 0x0
+ Address: 0xFB0
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 3
+ NodeOffset: 57
+ Name: 12my_weak_funcv
+ Flags: 0x4
+ Address: 0xFB4
+ Other: 0x0
+ ImportName: ''
+ NameList:
+ - n_strx: 2
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 4016
+ - n_strx: 15
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 128
+ n_value: 4020
+ - n_strx: 34
+ n_type: 0xF
+ n_sect: 3
+ n_desc: 0
+ n_value: 16384
+ - n_strx: 39
+ n_type: 0x1
+ n_sect: 0
+ n_desc: 512
+ n_value: 0
+ StringTable:
+ - ' '
+ - __Z7my_funcv
+ - __Z12my_weak_funcv
+ - _val
+ - dyld_stub_binder
+ FunctionStarts: [ 0xFB0, 0xFB4 ]
+...
+
+// Generated from:
+// clang++ -mtargetos=macosx13 -arch arm64 flags.cpp
+// flags.cpp:
+// __attribute__((visibility("default"))) void my_func() {}
+// __attribute__((weak)) void my_weak_func() {}
+// int val = 0;
diff --git a/clang/test/InstallAPI/umbrella-headers-unix.test b/clang/test/InstallAPI/umbrella-headers-unix.test
new file mode 100644
index 000000000000..46118779896c
--- /dev/null
+++ b/clang/test/InstallAPI/umbrella-headers-unix.test
@@ -0,0 +1,40 @@
+// UNSUPPORTED: system-windows
+
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+; RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+; RUN: mkdir %t/Frameworks/
+; RUN: cp -r %S/Inputs/Umbrella/Umbrella.framework %t/Frameworks/
+
+// Only validate path-based inputs that rely on regex matching on Unix-based file systems.
+; RUN: clang-installapi --target=arm64-apple-macosx13 \
+; RUN: -install_name /System/Library/Frameworks/Umbrella2.framework/Versions/A/Umbrella \
+; RUN: -ObjC -F%t/Frameworks/ %t/inputs.json \
+; RUN: --public-umbrella-header=%t/Frameworks/Umbrella.framework/Headers/SpecialUmbrella.h \
+; RUN: -private-umbrella-header \
+; RUN: %t/Frameworks/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h \
+; RUN: -o %t/output.tbd 2>&1 | FileCheck -allow-empty %s
+
+; CHECK-NOT: error
+; CHECK-NOT: warning
+
+;--- inputs.json.in
+{
+ "headers": [ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/Headers/AAA.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/Headers/SpecialUmbrella.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/PrivateHeaders/AAA_Private.h",
+ "type" : "private"
+ },
+ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h",
+ "type" : "private"
+ }],
+ "version": "3"
+}
diff --git a/clang/test/InstallAPI/umbrella-headers.test b/clang/test/InstallAPI/umbrella-headers.test
new file mode 100644
index 000000000000..ce9c50608c41
--- /dev/null
+++ b/clang/test/InstallAPI/umbrella-headers.test
@@ -0,0 +1,48 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+; RUN: sed -e "s|DSTROOT|%/t|g" %t/inputs.json.in > %t/inputs.json
+; RUN: cp -r %S/Inputs/Umbrella/Umbrella.framework %t/Frameworks/
+
+// Check base filename matches.
+; RUN: clang-installapi --target=arm64-apple-macosx13 \
+; RUN: -install_name /System/Library/Frameworks/Umbrella.framework/Versions/A/Umbrella \
+; RUN: -ObjC -F%t/Frameworks/ %t/inputs.json \
+; RUN: --public-umbrella-header=SpecialUmbrella.h \
+; RUN: --private-umbrella-header=SpecialPrivateUmbrella.h \
+; RUN: -o %t/output.tbd 2>&1 | FileCheck -allow-empty %s
+
+// Try missing umbrella header argument.
+; RUN: not clang-installapi --target=arm64-apple-macosx13 \
+; RUN: -install_name /System/Library/Frameworks/Umbrella.framework/Versions/A/Umbrella \
+; RUN: -ObjC -F%t/Frameworks/ %t/inputs.json \
+; RUN: --public-umbrella-header=Ignore.h \
+; RUN: -o %t/output.tbd 2>&1 | FileCheck %s -check-prefix=ERR
+
+; ERR: error: public umbrella header file not found in input: 'Ignore.h'
+
+; CHECK-NOT: error
+; CHECK-NOT: warning
+
+;--- Frameworks/Umbrella.framework/Headers/Ignore.h
+#error "This header should be ignored"
+
+;--- inputs.json.in
+{
+ "headers": [ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/Headers/AAA.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/Headers/SpecialUmbrella.h",
+ "type" : "public"
+ },
+ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/PrivateHeaders/AAA_Private.h",
+ "type" : "private"
+ },
+ {
+ "path" : "DSTROOT/Frameworks/Umbrella.framework/PrivateHeaders/SpecialPrivateUmbrella.h",
+ "type" : "private"
+ }],
+ "version": "3"
+}
diff --git a/clang/test/Lexer/has_extension_cxx.cpp b/clang/test/Lexer/has_extension_cxx.cpp
index 7941997428ac..7366029d3727 100644
--- a/clang/test/Lexer/has_extension_cxx.cpp
+++ b/clang/test/Lexer/has_extension_cxx.cpp
@@ -33,6 +33,11 @@ int has_deleted_functions();
int has_inline_namespaces();
#endif
+// CHECK: has_lambdas
+#if __has_extension(cxx_lambdas)
+int has_lambdas();
+#endif
+
// CHECK: has_override_control
#if __has_extension(cxx_override_control)
int has_override_control();
diff --git a/clang/test/Modules/codegen.test b/clang/test/Modules/codegen.test
index 77602056defd..0af630a75480 100644
--- a/clang/test/Modules/codegen.test
+++ b/clang/test/Modules/codegen.test
@@ -26,7 +26,7 @@ USE: $_Z4instIiEvv = comdat any
USE: $_Z10always_inlv = comdat any
FOO: $_ZN13implicit_dtorD2Ev = comdat any
FOO: define weak_odr void @_Z2f1PKcz(ptr noundef %fmt, ...) #{{[0-9]+}} comdat
-FOO: call void @llvm.va_start(ptr %{{[a-zA-Z0-9]*}})
+FOO: call void @llvm.va_start.p0(ptr %{{[a-zA-Z0-9]*}})
Test that implicit special members are emitted into the FOO module if they're
ODR used there, otherwise emit them linkonce_odr as usual in the use.
diff --git a/clang/test/Modules/no-local-decl-in-reduced-bmi.cppm b/clang/test/Modules/no-local-decl-in-reduced-bmi.cppm
new file mode 100644
index 000000000000..41ae2bf0dec8
--- /dev/null
+++ b/clang/test/Modules/no-local-decl-in-reduced-bmi.cppm
@@ -0,0 +1,33 @@
+// Test that we won't record local declarations by default in reduced BMI.
+
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+// RUN: cd %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-reduced-module-interface -o %t/a.pcm
+// RUN: llvm-bcanalyzer --dump --disable-histogram --show-binary-blobs %t/a.pcm > %t/a.dump
+// RUN: cat %t/a.dump | FileCheck %t/a.cppm
+//
+// RUN: %clang_cc1 -std=c++20 %t/b.cppm -emit-reduced-module-interface -o %t/b.pcm
+// RUN: llvm-bcanalyzer --dump --disable-histogram --show-binary-blobs %t/b.pcm > %t/b.dump
+// RUN: cat %t/b.dump | FileCheck %t/b.cppm
+
+//--- a.cppm
+export module a;
+export int func() {
+ int v = 43;
+ return 43;
+}
+
+// Test that the variable declaration is not recorded completely.
+// CHECK-NOT: <DECL_VAR
+
+//--- b.cppm
+export module b;
+export inline int func() {
+ int v = 43;
+ return v;
+}
+
+// Check that we still record the declaration from inline functions.
+// CHECK: <DECL_VAR
diff --git a/clang/test/OpenMP/atomic_capture_codegen.cpp b/clang/test/OpenMP/atomic_capture_codegen.cpp
index 08d1f21f8e0b..eba7906d8eb8 100644
--- a/clang/test/OpenMP/atomic_capture_codegen.cpp
+++ b/clang/test/OpenMP/atomic_capture_codegen.cpp
@@ -811,7 +811,7 @@ int main(void) {
#pragma omp atomic relaxed capture
iv = bfx4.a = bfx4.a * ldv;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -831,7 +831,7 @@ int main(void) {
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -870,7 +870,7 @@ int main(void) {
#pragma omp atomic capture release
{bfx4.b /= ldv; iv = bfx4.b;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) acquire, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) acquire, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -890,7 +890,7 @@ int main(void) {
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
diff --git a/clang/test/OpenMP/atomic_read_codegen.c b/clang/test/OpenMP/atomic_read_codegen.c
index 0a68c8e2c35a..4294c145fa1c 100644
--- a/clang/test/OpenMP/atomic_read_codegen.c
+++ b/clang/test/OpenMP/atomic_read_codegen.c
@@ -292,7 +292,7 @@ int main(void) {
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4.a;
-// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) monotonic, align 1
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @bfx4_packed, i32 0, i32 1) monotonic, align 1
// CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
@@ -309,7 +309,7 @@ int main(void) {
// CHECK: store x86_fp80
#pragma omp atomic read relaxed
ldv = bfx4.b;
-// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @bfx4_packed, i64 2) acquire, align 1
+// CHECK: [[LD:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @bfx4_packed, i32 0, i32 1) acquire, align 1
// CHECK: store i8 [[LD]], ptr [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
diff --git a/clang/test/OpenMP/atomic_update_codegen.cpp b/clang/test/OpenMP/atomic_update_codegen.cpp
index 31160b417646..ce0765118922 100644
--- a/clang/test/OpenMP/atomic_update_codegen.cpp
+++ b/clang/test/OpenMP/atomic_update_codegen.cpp
@@ -737,7 +737,7 @@ int main(void) {
#pragma omp atomic
bfx4.a = bfx4.a * ldv;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -757,7 +757,7 @@ int main(void) {
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -792,7 +792,7 @@ int main(void) {
#pragma omp atomic
bfx4.b /= ldv;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -812,7 +812,7 @@ int main(void) {
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
diff --git a/clang/test/OpenMP/atomic_write_codegen.c b/clang/test/OpenMP/atomic_write_codegen.c
index afe8737d30b0..493b01cbac76 100644
--- a/clang/test/OpenMP/atomic_write_codegen.c
+++ b/clang/test/OpenMP/atomic_write_codegen.c
@@ -413,7 +413,7 @@ int main(void) {
bfx4.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -423,7 +423,7 @@ int main(void) {
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
@@ -451,7 +451,7 @@ int main(void) {
bfx4.b = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
-// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 2) monotonic, align 1
+// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
@@ -462,7 +462,7 @@ int main(void) {
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
-// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
+// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
diff --git a/clang/test/OpenMP/bug54082.c b/clang/test/OpenMP/bug54082.c
index b88b68fd4301..337c120983e0 100644
--- a/clang/test/OpenMP/bug54082.c
+++ b/clang/test/OpenMP/bug54082.c
@@ -69,9 +69,7 @@ void foo() {
// CHECK-NEXT: [[X_TRAITS:%.*]] = alloca [1 x %struct.omp_alloctrait_t], align 16
// CHECK-NEXT: [[X_ALLOC:%.*]] = alloca i64, align 8
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X_TRAITS]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT: store i32 2, ptr [[X_TRAITS]], align 16
-// CHECK-NEXT: [[LOC0:%.*]] = getelementptr inbounds i8, ptr [[X_TRAITS]], i64 8
-// CHECK-NEXT: store i64 64, ptr [[LOC0]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(16) [[X_TRAITS]], ptr noundef nonnull align 16 dereferenceable(16) @__const.foo.x_traits, i64 16, i1 false)
// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[X_ALLOC]]) #[[ATTR5]]
// CHECK-NEXT: [[CALL:%.*]] = call i64 @omp_init_allocator(i64 noundef 0, i32 noundef 1, ptr noundef nonnull [[X_TRAITS]]) #[[ATTR5]]
// CHECK-NEXT: store i64 [[CALL]], ptr [[X_ALLOC]], align 8, !tbaa [[TBAA3:![0-9]+]]
diff --git a/clang/test/OpenMP/declare_reduction_messages.cpp b/clang/test/OpenMP/declare_reduction_messages.cpp
index 38a5d766eead..752cc4fb05a1 100644
--- a/clang/test/OpenMP/declare_reduction_messages.cpp
+++ b/clang/test/OpenMP/declare_reduction_messages.cpp
@@ -58,16 +58,10 @@ class Class2 : public Class1<T> {
#pragma omp declare reduction(fun1 : long : omp_out += omp_in) initializer // expected-error {{expected '(' after 'initializer'}}
#pragma omp declare reduction(fun2 : long : omp_out += omp_in) initializer { // expected-error {{expected '(' after 'initializer'}} expected-error {{expected expression}} expected-warning {{extra tokens at the end of '#pragma omp declare reduction' are ignored}}
#pragma omp declare reduction(fun3 : long : omp_out += omp_in) initializer[
-#if __cplusplus <= 199711L
-// expected-error@-2 {{expected '(' after 'initializer'}}
-// expected-error@-3 {{expected expression}}
-// expected-warning@-4 {{extra tokens at the end of '#pragma omp declare reduction' are ignored}}
-#else
-// expected-error@-6 {{expected '(' after 'initializer'}}
-// expected-error@-7 {{expected variable name or 'this' in lambda capture list}}
-// expected-error@-8 {{expected ')'}}
-// expected-note@-9 {{to match this '('}}
-#endif
+// expected-error@-1 {{expected '(' after 'initializer'}}
+// expected-error@-2 {{expected variable name or 'this' in lambda capture list}}
+// expected-error@-3 {{expected ')'}}
+// expected-note@-4 {{to match this '('}}
#pragma omp declare reduction(fun4 : long : omp_out += omp_in) initializer() // expected-error {{expected expression}}
#pragma omp declare reduction(fun5 : long : omp_out += omp_in) initializer(temp) // expected-error {{only 'omp_priv' or 'omp_orig' variables are allowed in initializer expression}}
#pragma omp declare reduction(fun6 : long : omp_out += omp_in) initializer(omp_orig // expected-error {{expected ')'}} expected-note {{to match this '('}}
diff --git a/clang/test/OpenMP/openmp_check.cpp b/clang/test/OpenMP/openmp_check.cpp
index 6a8dd17fc836..b52ce0c06692 100644
--- a/clang/test/OpenMP/openmp_check.cpp
+++ b/clang/test/OpenMP/openmp_check.cpp
@@ -18,7 +18,7 @@ int nested(int a) {
auto F = [&]() {
#if __cplusplus <= 199711L
// expected-warning@-2 {{'auto' type specifier is a C++11 extension}}
- // expected-error@-3 {{expected expression}}
+ // expected-warning@-3 {{lambdas are a C++11 extension}}
#endif
#pragma omp parallel
diff --git a/clang/test/Options/enable_16bit_types_validation.hlsl b/clang/test/Options/enable_16bit_types_validation.hlsl
new file mode 100644
index 000000000000..71d336f6f503
--- /dev/null
+++ b/clang/test/Options/enable_16bit_types_validation.hlsl
@@ -0,0 +1,25 @@
+// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 2016 %s 2>&1 | FileCheck -check-prefix=both_invalid %s
+// RUN: not %clang_dxc -enable-16bit-types -T lib_6_4 -HV 2017 %s 2>&1 | FileCheck -check-prefix=HV_invalid_2017 %s
+// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 /HV 2021 %s 2>&1 | FileCheck -check-prefix=TP_invalid %s
+// RUN: %clang_dxc -enable-16bit-types -T lib_6_4 /HV 2018 %s 2>&1 -### | FileCheck -check-prefix=valid_2018 %s
+// RUN: %clang_dxc -enable-16bit-types -T lib_6_4 /HV 2021 %s 2>&1 -### | FileCheck -check-prefix=valid_2021 %s
+
+
+// both_invalid: error: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl2016' and shader model is '6.0'
+// HV_invalid_2017: error: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl2017' and shader model is '6.4'
+// TP_invalid: error: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl2021' and shader model is '6.0'
+
+// valid_2021: "dxil-unknown-shadermodel6.4-library"
+// valid_2021-SAME: "-std=hlsl2021"
+// valid_2021-SAME: "-fnative-half-type"
+
+// valid_2018: "dxil-unknown-shadermodel6.4-library"
+// valid_2018-SAME: "-std=hlsl2018"
+// valid_2018-SAME: "-fnative-half-type"
+
+[numthreads(1,1,1)]
+void main()
+{
+ return;
+}
+
diff --git a/clang/test/Options/enable_16bit_types_validation_spirv.hlsl b/clang/test/Options/enable_16bit_types_validation_spirv.hlsl
new file mode 100644
index 000000000000..a9700ef87a27
--- /dev/null
+++ b/clang/test/Options/enable_16bit_types_validation_spirv.hlsl
@@ -0,0 +1,14 @@
+// RUN: not %clang_cc1 -internal-isystem D:\llvm-project\build\x64-Release\lib\clang\19\include -nostdsysteminc -triple spirv-vulkan-library -x hlsl -std=hlsl2016 -fnative-half-type -emit-llvm -disable-llvm-passes -o - %s 2>&1 | FileCheck %s --check-prefix=SPIRV
+// RUN: %clang_cc1 -internal-isystem D:\llvm-project\build\x64-Release\lib\clang\19\include -nostdsysteminc -triple spirv-vulkan-library -x hlsl -std=hlsl2021 -fnative-half-type -emit-llvm -disable-llvm-passes -o - %s 2>&1 | FileCheck %s --check-prefix=valid
+
+// SPIRV: error: '-fnative-half-type' option requires target HLSL Version >= 2018, but HLSL Version is 'hlsl2016'
+
+// valid: "spirv-unknown-vulkan-library"
+// valid: define spir_func void @main() #0 {
+
+[numthreads(1,1,1)]
+void main()
+{
+ return;
+}
+
diff --git a/clang/test/Parser/cxx03-lambda-extension.cpp b/clang/test/Parser/cxx03-lambda-extension.cpp
new file mode 100644
index 000000000000..82ae7da30530
--- /dev/null
+++ b/clang/test/Parser/cxx03-lambda-extension.cpp
@@ -0,0 +1,5 @@
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++03 %s
+
+void func() {
+ []() {}; // expected-warning {{lambdas are a C++11 extension}}
+}
diff --git a/clang/test/Parser/cxx0x-lambda-expressions.cpp b/clang/test/Parser/cxx0x-lambda-expressions.cpp
index 72b315a497c0..a786a964163e 100644
--- a/clang/test/Parser/cxx0x-lambda-expressions.cpp
+++ b/clang/test/Parser/cxx0x-lambda-expressions.cpp
@@ -1,10 +1,15 @@
-// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++11 -Wno-c99-designator %s
-// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++20 -Wno-c99-designator %s
-// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++23 -Wno-c99-designator %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=expected,cxx14ext,cxx17ext,cxx20ext,cxx23ext -std=c++03 -Wno-c99-designator %s -Wno-c++11-extensions
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=expected,cxx14ext,cxx17ext,cxx20ext,cxx23ext -std=c++11 -Wno-c99-designator %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=expected,cxx17ext,cxx20ext,cxx23ext -std=c++14 -Wno-c99-designator %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=expected,cxx20ext,cxx23ext -std=c++17 -Wno-c99-designator %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=expected,cxx23ext -std=c++20 -Wno-c99-designator %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=expected -std=c++23 -Wno-c99-designator %s
enum E { e };
+#if __cplusplus >= 201103L
constexpr int id(int n) { return n; }
+#endif
class C {
@@ -19,28 +24,25 @@ class C {
[&,] {}; // expected-error {{expected variable name or 'this' in lambda capture list}}
[=,] {}; // expected-error {{expected variable name or 'this' in lambda capture list}}
[] {};
- [=] (int i) {};
- [&] (int) mutable -> void {};
- [foo,bar] () { return 3; };
- [=,&foo] () {};
- [&,foo] () {};
- [this] () {};
+ [=] (int i) {};
+ [&] (int) mutable -> void {};
+ [foo,bar] () { return 3; };
+ [=,&foo] () {};
+ [&,foo] () {};
+ [this] () {};
[] () -> class C { return C(); };
[] () -> enum E { return e; };
- [] -> int { return 0; };
- [] mutable -> int { return 0; };
-#if __cplusplus <= 202002L
- // expected-warning@-3 {{lambda without a parameter clause is a C++23 extension}}
- // expected-warning@-3 {{is a C++23 extension}}
-#endif
+ [] -> int { return 0; }; // cxx23ext-warning {{lambda without a parameter clause is a C++23 extension}}
+ [] mutable -> int { return 0; }; // cxx23ext-warning {{is a C++23 extension}}
+
[](int) -> {}; // PR13652 expected-error {{expected a type}}
return 1;
}
void designator_or_lambda() {
- typedef int T;
- const int b = 0;
+ typedef int T;
+ const int b = 0;
const int c = 1;
int d;
int a1[1] = {[b] (T()) {}}; // expected-error{{no viable conversion from '(lambda}}
@@ -49,19 +51,18 @@ class C {
int a4[1] = {[&b] = 1 }; // expected-error{{integral constant expression must have integral or unscoped enumeration type, not 'const int *'}}
int a5[3] = { []{return 0;}() };
int a6[1] = {[this] = 1 }; // expected-error{{integral constant expression must have integral or unscoped enumeration type, not 'C *'}}
- int a7[1] = {[d(0)] { return d; } ()};
- int a8[1] = {[d = 0] { return d; } ()};
- int a10[1] = {[id(0)] { return id; } ()};
-#if __cplusplus <= 201103L
- // expected-warning@-4{{extension}}
- // expected-warning@-4{{extension}}
- // expected-warning@-4{{extension}}
+ int a7[1] = {[d(0)] { return d; } ()}; // cxx14ext-warning {{initialized lambda captures are a C++14 extension}}
+ int a8[1] = {[d = 0] { return d; } ()}; // cxx14ext-warning {{initialized lambda captures are a C++14 extension}}
+#if __cplusplus >= 201103L
+ int a10[1] = {[id(0)] { return id; } ()}; // cxx14ext-warning {{initialized lambda captures are a C++14 extension}}
#endif
int a9[1] = {[d = 0] = 1}; // expected-error{{is not an integral constant expression}}
#if __cplusplus >= 201402L
// expected-note@-2{{constant expression cannot modify an object that is visible outside that expression}}
#endif
+#if __cplusplus >= 201103L
int a11[1] = {[id(0)] = 1};
+#endif
}
void delete_lambda(int *p) {
@@ -80,43 +81,33 @@ class C {
// We support init-captures in C++11 as an extension.
int z;
void init_capture() {
- [n(0)] () mutable -> int { return ++n; };
- [n{0}] { return; };
- [a([&b = z]{})](){};
- [n = 0] { return ++n; }; // expected-error {{captured by copy in a non-mutable}}
- [n = {0}] { return; }; // expected-error {{<initializer_list>}}
-#if __cplusplus <= 201103L
- // expected-warning@-6{{extension}}
- // expected-warning@-6{{extension}}
- // expected-warning@-6{{extension}}
- // expected-warning@-7{{extension}}
- // expected-warning@-7{{extension}}
- // expected-warning@-7{{extension}}
-#endif
+ [n(0)] () mutable -> int { return ++n; }; // cxx14ext-warning {{initialized lambda captures are a C++14 extension}}
+ [n{0}] { return; }; // cxx14ext-warning {{initialized lambda captures are a C++14 extension}}
+ [a([&b = z]{})](){}; // cxx14ext-warning 2 {{initialized lambda captures are a C++14 extension}}
+ [n = 0] { return ++n; }; // expected-error {{captured by copy in a non-mutable}}
+ // cxx14ext-warning@-1 {{initialized lambda captures are a C++14 extension}}
+ [n = {0}] { return; }; // expected-error {{<initializer_list>}}
+ // cxx14ext-warning@-1 {{initialized lambda captures are a C++14 extension}}
int x = 4;
- auto y = [&r = x, x = x + 1]() -> int {
-#if __cplusplus <= 201103L
- // expected-warning@-2{{extension}}
- // expected-warning@-3{{extension}}
-#endif
+ auto y = [&r = x, x = x + 1]() -> int { // cxx14ext-warning 2 {{initialized lambda captures are a C++14 extension}}
r += 2;
return x + 2;
} ();
}
void attributes() {
- [] __attribute__((noreturn)){};
-#if __cplusplus <= 202002L
- // expected-warning@-2 {{is a C++23 extension}}
-#endif
+ [] __attribute__((noreturn)){}; // cxx23ext-warning {{lambda without a parameter clause is a C++23 extension}}
+
[]() [[]]
mutable {}; // expected-error {{expected body of lambda expression}}
[]() [[]] {};
[]() [[]] -> void {};
[]() mutable [[]] -> void {};
+#if __cplusplus >= 201103L
[]() mutable noexcept [[]] -> void {};
+#endif
// Testing GNU-style attributes on lambdas -- the attribute is specified
// before the mutable specifier instead of after (unlike C++11).
@@ -126,28 +117,18 @@ class C {
// Testing support for P2173 on adding attributes to the declaration
// rather than the type.
- [][[]](){};
-#if __cplusplus <= 202002L
- // expected-warning@-2 {{an attribute specifier sequence in this position is a C++23 extension}}
-#endif
-#if __cplusplus > 201703L
- []<typename>[[]](){};
-#if __cplusplus <= 202002L
- // expected-warning@-2 {{an attribute specifier sequence in this position is a C++23 extension}}
-#endif
-#endif
- [][[]]{};
-#if __cplusplus <= 202002L
- // expected-warning@-2 {{an attribute specifier sequence in this position is a C++23 extension}}
-#endif
+ [][[]](){}; // cxx23ext-warning {{an attribute specifier sequence in this position is a C++23 extension}}
+
+ []<typename>[[]](){}; // cxx20ext-warning {{explicit template parameter list for lambdas is a C++20 extension}}
+ // cxx23ext-warning@-1 {{an attribute specifier sequence in this position is a C++23 extension}}
+
+ [][[]]{}; // cxx23ext-warning {{an attribute specifier sequence in this position is a C++23 extension}}
}
void missing_parens() {
- [] mutable {};
- [] noexcept {};
-#if __cplusplus <= 202002L
- // expected-warning@-3 {{is a C++23 extension}}
- // expected-warning@-3 {{is a C++23 extension}}
+ [] mutable {}; // cxx23ext-warning {{is a C++23 extension}}
+#if __cplusplus >= 201103L
+ [] noexcept {}; // cxx23ext-warning {{is a C++23 extension}}
#endif
}
};
@@ -165,10 +146,7 @@ struct A {
};
struct S {
- void mf() { A{[*this]{}}; }
-#if __cplusplus < 201703L
- // expected-warning@-2 {{C++17 extension}}
-#endif
+ void mf() { A(([*this]{})); } // cxx17ext-warning {{'*this' by copy is a C++17 extension}}
};
}
diff --git a/clang/test/Parser/cxx2b-lambdas.cpp b/clang/test/Parser/cxx2b-lambdas.cpp
index ad975a17b6e4..758ec9a42f56 100644
--- a/clang/test/Parser/cxx2b-lambdas.cpp
+++ b/clang/test/Parser/cxx2b-lambdas.cpp
@@ -1,30 +1,48 @@
+// RUN: %clang_cc1 -std=c++03 %s -verify -Wno-c++23-extensions -Wno-c++20-extensions -Wno-c++17-extensions -Wno-c++14-extensions -Wno-c++11-extensions
+// RUN: %clang_cc1 -std=c++11 %s -verify=expected,cxx11 -Wno-c++23-extensions -Wno-c++20-extensions -Wno-c++17-extensions -Wno-c++14-extensions
+// RUN: %clang_cc1 -std=c++14 %s -verify -Wno-c++23-extensions -Wno-c++20-extensions -Wno-c++17-extensions
+// RUN: %clang_cc1 -std=c++17 %s -verify -Wno-c++23-extensions -Wno-c++20-extensions
+// RUN: %clang_cc1 -std=c++20 %s -verify -Wno-c++23-extensions
// RUN: %clang_cc1 -std=c++23 %s -verify
auto LL0 = [] {};
auto LL1 = []() {};
auto LL2 = []() mutable {};
-auto LL3 = []() constexpr {};
+#if __cplusplus >= 201103L
+auto LL3 = []() constexpr {}; // cxx11-error {{return type 'void' is not a literal type}}
+#endif
-auto L0 = [] constexpr {};
+#if __cplusplus >= 201103L
+auto L0 = [] constexpr {}; // cxx11-error {{return type 'void' is not a literal type}}
+#endif
auto L1 = [] mutable {};
+#if __cplusplus >= 201103L
auto L2 = [] noexcept {};
-auto L3 = [] constexpr mutable {};
-auto L4 = [] mutable constexpr {};
-auto L5 = [] constexpr mutable noexcept {};
+auto L3 = [] constexpr mutable {}; // cxx11-error {{return type 'void' is not a literal type}}
+auto L4 = [] mutable constexpr {}; // cxx11-error {{return type 'void' is not a literal type}}
+auto L5 = [] constexpr mutable noexcept {}; // cxx11-error {{return type 'void' is not a literal type}}
+#endif
auto L6 = [s = 1] mutable {};
-auto L7 = [s = 1] constexpr mutable noexcept {};
+#if __cplusplus >= 201103L
+auto L7 = [s = 1] constexpr mutable noexcept {}; // cxx11-error {{return type 'void' is not a literal type}}
+#endif
auto L8 = [] -> bool { return true; };
auto L9 = []<typename T> { return true; };
+#if __cplusplus >= 201103L
auto L10 = []<typename T> noexcept { return true; };
+#endif
auto L11 = []<typename T> -> bool { return true; };
+#if __cplusplus >= 202002L
auto L12 = [] consteval {};
auto L13 = []() requires true {}; // expected-error{{non-templated function cannot have a requires clause}}
auto L14 = []<auto> requires true() requires true {};
auto L15 = []<auto> requires true noexcept {};
+#endif
auto L16 = [] [[maybe_unused]]{};
-auto XL0 = [] mutable constexpr mutable {}; // expected-error{{cannot appear multiple times}}
-auto XL1 = [] constexpr mutable constexpr {}; // expected-error{{cannot appear multiple times}}
+#if __cplusplus >= 201103L
+auto XL0 = [] mutable constexpr mutable {}; // expected-error{{cannot appear multiple times}} cxx11-error {{return type 'void' is not a literal type}}
+auto XL1 = [] constexpr mutable constexpr {}; // expected-error{{cannot appear multiple times}} cxx11-error {{return type 'void' is not a literal type}}
auto XL2 = []) constexpr mutable constexpr {}; // expected-error{{expected body of lambda expression}}
auto XL3 = []( constexpr mutable constexpr {}; // expected-error{{invalid storage class specifier}} \
// expected-error{{function parameter cannot be constexpr}} \
@@ -33,16 +51,23 @@ auto XL3 = []( constexpr mutable constexpr {}; // expected-error{{invalid storag
// expected-note{{to match this '('}} \
// expected-error{{expected body}} \
// expected-warning{{duplicate 'constexpr'}}
+#endif
// http://llvm.org/PR49736
auto XL4 = [] requires true {}; // expected-error{{expected body}}
+#if __cplusplus >= 201703L
auto XL5 = []<auto> requires true requires true {}; // expected-error{{expected body}}
auto XL6 = []<auto> requires true noexcept requires true {}; // expected-error{{expected body}}
+#endif
auto XL7 = []() static static {}; // expected-error {{cannot appear multiple times}}
auto XL8 = []() static mutable {}; // expected-error {{cannot be both mutable and static}}
+#if __cplusplus >= 202002L
auto XL9 = []() static consteval {};
-auto XL10 = []() static constexpr {};
+#endif
+#if __cplusplus >= 201103L
+auto XL10 = []() static constexpr {}; // cxx11-error {{return type 'void' is not a literal type}}
+#endif
auto XL11 = [] static {};
auto XL12 = []() static {};
@@ -67,6 +92,7 @@ void static_captures() {
};
}
+#if __cplusplus >= 201703L
constexpr auto static_capture_constexpr() {
char n = 'n';
return [n] static { return n; }(); // expected-error {{a static lambda cannot have any captures}}
@@ -78,3 +104,4 @@ constexpr auto capture_constexpr() {
return [n] { return n; }();
}
static_assert(capture_constexpr());
+#endif
diff --git a/clang/test/Parser/objcxx-lambda-expressions-neg.mm b/clang/test/Parser/objcxx-lambda-expressions-neg.mm
index b2fe39dfbf70..795157816dcf 100644
--- a/clang/test/Parser/objcxx-lambda-expressions-neg.mm
+++ b/clang/test/Parser/objcxx-lambda-expressions-neg.mm
@@ -1,13 +1,8 @@
// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify %s
-// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++98 %s
+// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify=cxx03 -std=c++98 %s
// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value -verify -std=c++11 %s
int main() {
- []{};
-#if __cplusplus <= 199711L
- // expected-error@-2 {{expected expression}}
-#else
+ []{}; // cxx03-warning {{lambdas are a C++11 extension}}
// expected-no-diagnostics
-#endif
-
}
diff --git a/clang/test/ParserHLSL/group_shared.hlsl b/clang/test/ParserHLSL/group_shared.hlsl
index 0b9f28395ee4..44f3a2e5b450 100644
--- a/clang/test/ParserHLSL/group_shared.hlsl
+++ b/clang/test/ParserHLSL/group_shared.hlsl
@@ -3,8 +3,8 @@ extern groupshared float f;
extern float groupshared f; // Ok, redeclaration?
-// NOTE:lambda is not enabled except for hlsl202x.
-// expected-error@+2 {{expected expression}}
+// expected-warning@+3 {{lambdas are a C++11 extension}}
+// expected-error@+2 {{expected body of lambda expression}}
// expected-warning@+1 {{'auto' type specifier is a C++11 extension}}
auto l = []() groupshared {};
diff --git a/clang/test/Preprocessor/aarch64-target-features.c b/clang/test/Preprocessor/aarch64-target-features.c
index 9f8a8bdeeb9c..85762b7fed4d 100644
--- a/clang/test/Preprocessor/aarch64-target-features.c
+++ b/clang/test/Preprocessor/aarch64-target-features.c
@@ -196,7 +196,7 @@
// CHECK-8_6-NOT: __ARM_FEATURE_SHA3 1
// CHECK-8_6-NOT: __ARM_FEATURE_SM4 1
-// RUN: %clang -target aarch64-none-linux-gnu -march=armv8.6-a+sve -x c -E -dM %s -o - | FileCheck --check-prefix=CHECK-SVE-8_6 %s
+// RUN: %clang -target aarch64-none-linux-gnu -march=armv8.6-a+sve+f32mm -x c -E -dM %s -o - | FileCheck --check-prefix=CHECK-SVE-8_6 %s
// CHECK-SVE-8_6: __ARM_FEATURE_SVE 1
// CHECK-SVE-8_6: __ARM_FEATURE_SVE_BF16 1
// CHECK-SVE-8_6: __ARM_FEATURE_SVE_MATMUL_FP32 1
diff --git a/clang/test/Sema/aarch64-sme-func-attrs.c b/clang/test/Sema/aarch64-sme-func-attrs.c
index 47dbeca206a9..bfc8768c3f36 100644
--- a/clang/test/Sema/aarch64-sme-func-attrs.c
+++ b/clang/test/Sema/aarch64-sme-func-attrs.c
@@ -483,14 +483,16 @@ void just_fine(void) {}
__arm_locally_streaming
__attribute__((target_version("sme2")))
-void just_fine_locally_streaming(void) {}
+void incompatible_locally_streaming(void) {}
+// expected-error@-1 {{attribute 'target_version' multiversioning cannot be combined with attribute '__arm_locally_streaming'}}
+// expected-cpp-error@-2 {{attribute 'target_version' multiversioning cannot be combined with attribute '__arm_locally_streaming'}}
__attribute__((target_version("default")))
-void just_fine_locally_streaming(void) {}
+void incompatible_locally_streaming(void) {}
void fmv_caller() {
cannot_work_version();
cannot_work_clones();
just_fine();
- just_fine_locally_streaming();
+ incompatible_locally_streaming();
}
diff --git a/clang/test/Sema/attr-target-clones-aarch64.c b/clang/test/Sema/attr-target-clones-aarch64.c
index 0ce277f41884..bc3fceab8282 100644
--- a/clang/test/Sema/attr-target-clones-aarch64.c
+++ b/clang/test/Sema/attr-target-clones-aarch64.c
@@ -27,7 +27,7 @@ int __attribute__((target_clones("rng", "fp16fml+fp", "default"))) redecl4(void)
int __attribute__((target_clones("dgh+memtag+rpres+ls64_v", "ebf16+dpb+sha1", "default"))) redecl4(void) { return 1; }
int __attribute__((target_version("flagm2"))) redef2(void) { return 1; }
-// expected-error@+2 {{multiversioning attributes cannot be combined}}
+// expected-error@+2 {{multiversioned function redeclarations require identical target attributes}}
// expected-note@-2 {{previous declaration is here}}
int __attribute__((target_clones("flagm2", "default"))) redef2(void) { return 1; }
diff --git a/clang/test/Sema/attr-target-version.c b/clang/test/Sema/attr-target-version.c
index e2940c434c2f..cd5be459456e 100644
--- a/clang/test/Sema/attr-target-version.c
+++ b/clang/test/Sema/attr-target-version.c
@@ -68,13 +68,15 @@ int __attribute__((target_version(""))) unsup1(void) { return 1; }
void __attribute__((target_version("crc32"))) unsup2(void) {}
void __attribute__((target_version("default+fp16"))) koo(void) {}
+//expected-error@-1 {{function multiversioning doesn't support feature 'default'}}
void __attribute__((target_version("default+default+default"))) loo(void) {}
+//expected-error@-1 {{function multiversioning doesn't support feature 'default'}}
void __attribute__((target_version("rdm+rng+crc"))) redef(void) {}
//expected-error@+2 {{redefinition of 'redef'}}
//expected-note@-2 {{previous definition is here}}
void __attribute__((target_version("rdm+rng+crc"))) redef(void) {}
-int __attribute__((target_version("sm4"))) def(void);
+int def(void);
void __attribute__((target_version("dit"))) nodef(void);
void __attribute__((target_version("ls64"))) nodef(void);
void __attribute__((target_version("aes"))) ovl(void);
@@ -83,7 +85,6 @@ int bar() {
// expected-error@+2 {{reference to overloaded function could not be resolved; did you mean to call it?}}
// expected-note@-3 {{possible target for call}}
ovl++;
- // expected-error@+1 {{no matching function for call to 'nodef'}}
nodef();
return def();
}
@@ -92,8 +93,6 @@ int __attribute__((target_version("sha1"))) def(void) { return 1; }
int __attribute__((target_version("sve"))) prot();
// expected-error@-1 {{multiversioned function must have a prototype}}
-// expected-note@+1 {{function multiversioning caused by this declaration}}
-int __attribute__((target_version("fcma"))) prot();
int __attribute__((target_version("pmull"))) rtype(int);
// expected-error@+1 {{multiversioned function declaration has a different return type}}
@@ -104,6 +103,7 @@ int __attribute__((target_version("sha2"))) combine(void) { return 1; }
int __attribute__((aarch64_vector_pcs, target_version("sha3"))) combine(void) { return 2; }
int __attribute__((target_version("fp+aes+pmull+rcpc"))) unspec_args() { return -1; }
+// expected-error@-1 {{multiversioned function must have a prototype}}
// expected-error@+1 {{multiversioned function must have a prototype}}
int __attribute__((target_version("default"))) unspec_args() { return 0; }
int cargs() { return unspec_args(); }
diff --git a/clang/test/Sema/builtin-popcountg.c b/clang/test/Sema/builtin-popcountg.c
deleted file mode 100644
index 9d095927d24e..000000000000
--- a/clang/test/Sema/builtin-popcountg.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// RUN: %clang_cc1 -std=c23 -triple=x86_64-pc-linux-gnu -fsyntax-only -verify -Wpedantic %s
-
-typedef int int2 __attribute__((ext_vector_type(2)));
-
-void test_builtin_popcountg(short s, int i, __int128 i128, _BitInt(128) bi128,
- double d, int2 i2) {
- __builtin_popcountg();
- // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
- __builtin_popcountg(i, i);
- // expected-error@-1 {{too many arguments to function call, expected 1, have 2}}
- __builtin_popcountg(s);
- // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'short')}}
- __builtin_popcountg(i);
- // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'int')}}
- __builtin_popcountg(i128);
- // expected-error@-1 {{1st argument must be a type of unsigned integer (was '__int128')}}
- __builtin_popcountg(bi128);
- // expected-error@-1 {{1st argument must be a type of unsigned integer (was '_BitInt(128)')}}
- __builtin_popcountg(d);
- // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'double')}}
- __builtin_popcountg(i2);
- // expected-error@-1 {{1st argument must be a type of unsigned integer (was 'int2' (vector of 2 'int' values))}}
-}
diff --git a/clang/test/Sema/constant-builtins-2.c b/clang/test/Sema/constant-builtins-2.c
index 6dd1d88759c7..a60a1f16a458 100644
--- a/clang/test/Sema/constant-builtins-2.c
+++ b/clang/test/Sema/constant-builtins-2.c
@@ -218,6 +218,64 @@ char clz6[__builtin_clzll(0xFFLL) == BITSIZE(long long) - 8 ? 1 : -1];
char clz7[__builtin_clzs(0x1) == BITSIZE(short) - 1 ? 1 : -1];
char clz8[__builtin_clzs(0xf) == BITSIZE(short) - 4 ? 1 : -1];
char clz9[__builtin_clzs(0xfff) == BITSIZE(short) - 12 ? 1 : -1];
+int clz10 = __builtin_clzg((unsigned char)0); // expected-error {{not a compile-time constant}}
+char clz11[__builtin_clzg((unsigned char)0, 42) == 42 ? 1 : -1];
+char clz12[__builtin_clzg((unsigned char)0x1) == BITSIZE(char) - 1 ? 1 : -1];
+char clz13[__builtin_clzg((unsigned char)0x1, 42) == BITSIZE(char) - 1 ? 1 : -1];
+char clz14[__builtin_clzg((unsigned char)0xf) == BITSIZE(char) - 4 ? 1 : -1];
+char clz15[__builtin_clzg((unsigned char)0xf, 42) == BITSIZE(char) - 4 ? 1 : -1];
+char clz16[__builtin_clzg((unsigned char)(1 << (BITSIZE(char) - 1))) == 0 ? 1 : -1];
+char clz17[__builtin_clzg((unsigned char)(1 << (BITSIZE(char) - 1)), 42) == 0 ? 1 : -1];
+int clz18 = __builtin_clzg((unsigned short)0); // expected-error {{not a compile-time constant}}
+char clz19[__builtin_clzg((unsigned short)0, 42) == 42 ? 1 : -1];
+char clz20[__builtin_clzg((unsigned short)0x1) == BITSIZE(short) - 1 ? 1 : -1];
+char clz21[__builtin_clzg((unsigned short)0x1, 42) == BITSIZE(short) - 1 ? 1 : -1];
+char clz22[__builtin_clzg((unsigned short)0xf) == BITSIZE(short) - 4 ? 1 : -1];
+char clz23[__builtin_clzg((unsigned short)0xf, 42) == BITSIZE(short) - 4 ? 1 : -1];
+char clz24[__builtin_clzg((unsigned short)(1 << (BITSIZE(short) - 1))) == 0 ? 1 : -1];
+char clz25[__builtin_clzg((unsigned short)(1 << (BITSIZE(short) - 1)), 42) == 0 ? 1 : -1];
+int clz26 = __builtin_clzg(0U); // expected-error {{not a compile-time constant}}
+char clz27[__builtin_clzg(0U, 42) == 42 ? 1 : -1];
+char clz28[__builtin_clzg(0x1U) == BITSIZE(int) - 1 ? 1 : -1];
+char clz29[__builtin_clzg(0x1U, 42) == BITSIZE(int) - 1 ? 1 : -1];
+char clz30[__builtin_clzg(0xfU) == BITSIZE(int) - 4 ? 1 : -1];
+char clz31[__builtin_clzg(0xfU, 42) == BITSIZE(int) - 4 ? 1 : -1];
+char clz32[__builtin_clzg(1U << (BITSIZE(int) - 1)) == 0 ? 1 : -1];
+char clz33[__builtin_clzg(1U << (BITSIZE(int) - 1), 42) == 0 ? 1 : -1];
+int clz34 = __builtin_clzg(0UL); // expected-error {{not a compile-time constant}}
+char clz35[__builtin_clzg(0UL, 42) == 42 ? 1 : -1];
+char clz36[__builtin_clzg(0x1UL) == BITSIZE(long) - 1 ? 1 : -1];
+char clz37[__builtin_clzg(0x1UL, 42) == BITSIZE(long) - 1 ? 1 : -1];
+char clz38[__builtin_clzg(0xfUL) == BITSIZE(long) - 4 ? 1 : -1];
+char clz39[__builtin_clzg(0xfUL, 42) == BITSIZE(long) - 4 ? 1 : -1];
+char clz40[__builtin_clzg(1UL << (BITSIZE(long) - 1)) == 0 ? 1 : -1];
+char clz41[__builtin_clzg(1UL << (BITSIZE(long) - 1), 42) == 0 ? 1 : -1];
+int clz42 = __builtin_clzg(0ULL); // expected-error {{not a compile-time constant}}
+char clz43[__builtin_clzg(0ULL, 42) == 42 ? 1 : -1];
+char clz44[__builtin_clzg(0x1ULL) == BITSIZE(long long) - 1 ? 1 : -1];
+char clz45[__builtin_clzg(0x1ULL, 42) == BITSIZE(long long) - 1 ? 1 : -1];
+char clz46[__builtin_clzg(0xfULL) == BITSIZE(long long) - 4 ? 1 : -1];
+char clz47[__builtin_clzg(0xfULL, 42) == BITSIZE(long long) - 4 ? 1 : -1];
+char clz48[__builtin_clzg(1ULL << (BITSIZE(long long) - 1)) == 0 ? 1 : -1];
+char clz49[__builtin_clzg(1ULL << (BITSIZE(long long) - 1), 42) == 0 ? 1 : -1];
+#ifdef __SIZEOF_INT128__
+int clz50 = __builtin_clzg((unsigned __int128)0); // expected-error {{not a compile-time constant}}
+char clz51[__builtin_clzg((unsigned __int128)0, 42) == 42 ? 1 : -1];
+char clz52[__builtin_clzg((unsigned __int128)0x1) == BITSIZE(__int128) - 1 ? 1 : -1];
+char clz53[__builtin_clzg((unsigned __int128)0x1, 42) == BITSIZE(__int128) - 1 ? 1 : -1];
+char clz54[__builtin_clzg((unsigned __int128)0xf) == BITSIZE(__int128) - 4 ? 1 : -1];
+char clz55[__builtin_clzg((unsigned __int128)0xf, 42) == BITSIZE(__int128) - 4 ? 1 : -1];
+char clz56[__builtin_clzg((unsigned __int128)(1 << (BITSIZE(__int128) - 1))) == 0 ? 1 : -1];
+char clz57[__builtin_clzg((unsigned __int128)(1 << (BITSIZE(__int128) - 1)), 42) == 0 ? 1 : -1];
+#endif
+int clz58 = __builtin_clzg((unsigned _BitInt(128))0); // expected-error {{not a compile-time constant}}
+char clz59[__builtin_clzg((unsigned _BitInt(128))0, 42) == 42 ? 1 : -1];
+char clz60[__builtin_clzg((unsigned _BitInt(128))0x1) == BITSIZE(_BitInt(128)) - 1 ? 1 : -1];
+char clz61[__builtin_clzg((unsigned _BitInt(128))0x1, 42) == BITSIZE(_BitInt(128)) - 1 ? 1 : -1];
+char clz62[__builtin_clzg((unsigned _BitInt(128))0xf) == BITSIZE(_BitInt(128)) - 4 ? 1 : -1];
+char clz63[__builtin_clzg((unsigned _BitInt(128))0xf, 42) == BITSIZE(_BitInt(128)) - 4 ? 1 : -1];
+char clz64[__builtin_clzg((unsigned _BitInt(128))(1 << (BITSIZE(_BitInt(128)) - 1))) == 0 ? 1 : -1];
+char clz65[__builtin_clzg((unsigned _BitInt(128))(1 << (BITSIZE(_BitInt(128)) - 1)), 42) == 0 ? 1 : -1];
char ctz1[__builtin_ctz(1) == 0 ? 1 : -1];
char ctz2[__builtin_ctz(8) == 3 ? 1 : -1];
@@ -226,6 +284,64 @@ int ctz4 = __builtin_ctz(0); // expected-error {{not a compile-time constant}}
char ctz5[__builtin_ctzl(0x10L) == 4 ? 1 : -1];
char ctz6[__builtin_ctzll(0x100LL) == 8 ? 1 : -1];
char ctz7[__builtin_ctzs(1 << (BITSIZE(short) - 1)) == BITSIZE(short) - 1 ? 1 : -1];
+int ctz8 = __builtin_ctzg((unsigned char)0); // expected-error {{not a compile-time constant}}
+char ctz9[__builtin_ctzg((unsigned char)0, 42) == 42 ? 1 : -1];
+char ctz10[__builtin_ctzg((unsigned char)0x1) == 0 ? 1 : -1];
+char ctz11[__builtin_ctzg((unsigned char)0x1, 42) == 0 ? 1 : -1];
+char ctz12[__builtin_ctzg((unsigned char)0x10) == 4 ? 1 : -1];
+char ctz13[__builtin_ctzg((unsigned char)0x10, 42) == 4 ? 1 : -1];
+char ctz14[__builtin_ctzg((unsigned char)(1 << (BITSIZE(char) - 1))) == BITSIZE(char) - 1 ? 1 : -1];
+char ctz15[__builtin_ctzg((unsigned char)(1 << (BITSIZE(char) - 1)), 42) == BITSIZE(char) - 1 ? 1 : -1];
+int ctz16 = __builtin_ctzg((unsigned short)0); // expected-error {{not a compile-time constant}}
+char ctz17[__builtin_ctzg((unsigned short)0, 42) == 42 ? 1 : -1];
+char ctz18[__builtin_ctzg((unsigned short)0x1) == 0 ? 1 : -1];
+char ctz19[__builtin_ctzg((unsigned short)0x1, 42) == 0 ? 1 : -1];
+char ctz20[__builtin_ctzg((unsigned short)0x10) == 4 ? 1 : -1];
+char ctz21[__builtin_ctzg((unsigned short)0x10, 42) == 4 ? 1 : -1];
+char ctz22[__builtin_ctzg((unsigned short)(1 << (BITSIZE(short) - 1))) == BITSIZE(short) - 1 ? 1 : -1];
+char ctz23[__builtin_ctzg((unsigned short)(1 << (BITSIZE(short) - 1)), 42) == BITSIZE(short) - 1 ? 1 : -1];
+int ctz24 = __builtin_ctzg(0U); // expected-error {{not a compile-time constant}}
+char ctz25[__builtin_ctzg(0U, 42) == 42 ? 1 : -1];
+char ctz26[__builtin_ctzg(0x1U) == 0 ? 1 : -1];
+char ctz27[__builtin_ctzg(0x1U, 42) == 0 ? 1 : -1];
+char ctz28[__builtin_ctzg(0x10U) == 4 ? 1 : -1];
+char ctz29[__builtin_ctzg(0x10U, 42) == 4 ? 1 : -1];
+char ctz30[__builtin_ctzg(1U << (BITSIZE(int) - 1)) == BITSIZE(int) - 1 ? 1 : -1];
+char ctz31[__builtin_ctzg(1U << (BITSIZE(int) - 1), 42) == BITSIZE(int) - 1 ? 1 : -1];
+int ctz32 = __builtin_ctzg(0UL); // expected-error {{not a compile-time constant}}
+char ctz33[__builtin_ctzg(0UL, 42) == 42 ? 1 : -1];
+char ctz34[__builtin_ctzg(0x1UL) == 0 ? 1 : -1];
+char ctz35[__builtin_ctzg(0x1UL, 42) == 0 ? 1 : -1];
+char ctz36[__builtin_ctzg(0x10UL) == 4 ? 1 : -1];
+char ctz37[__builtin_ctzg(0x10UL, 42) == 4 ? 1 : -1];
+char ctz38[__builtin_ctzg(1UL << (BITSIZE(long) - 1)) == BITSIZE(long) - 1 ? 1 : -1];
+char ctz39[__builtin_ctzg(1UL << (BITSIZE(long) - 1), 42) == BITSIZE(long) - 1 ? 1 : -1];
+int ctz40 = __builtin_ctzg(0ULL); // expected-error {{not a compile-time constant}}
+char ctz41[__builtin_ctzg(0ULL, 42) == 42 ? 1 : -1];
+char ctz42[__builtin_ctzg(0x1ULL) == 0 ? 1 : -1];
+char ctz43[__builtin_ctzg(0x1ULL, 42) == 0 ? 1 : -1];
+char ctz44[__builtin_ctzg(0x10ULL) == 4 ? 1 : -1];
+char ctz45[__builtin_ctzg(0x10ULL, 42) == 4 ? 1 : -1];
+char ctz46[__builtin_ctzg(1ULL << (BITSIZE(long long) - 1)) == BITSIZE(long long) - 1 ? 1 : -1];
+char ctz47[__builtin_ctzg(1ULL << (BITSIZE(long long) - 1), 42) == BITSIZE(long long) - 1 ? 1 : -1];
+#ifdef __SIZEOF_INT128__
+int ctz48 = __builtin_ctzg((unsigned __int128)0); // expected-error {{not a compile-time constant}}
+char ctz49[__builtin_ctzg((unsigned __int128)0, 42) == 42 ? 1 : -1];
+char ctz50[__builtin_ctzg((unsigned __int128)0x1) == 0 ? 1 : -1];
+char ctz51[__builtin_ctzg((unsigned __int128)0x1, 42) == 0 ? 1 : -1];
+char ctz52[__builtin_ctzg((unsigned __int128)0x10) == 4 ? 1 : -1];
+char ctz53[__builtin_ctzg((unsigned __int128)0x10, 42) == 4 ? 1 : -1];
+char ctz54[__builtin_ctzg((unsigned __int128)1 << (BITSIZE(__int128) - 1)) == BITSIZE(__int128) - 1 ? 1 : -1];
+char ctz55[__builtin_ctzg((unsigned __int128)1 << (BITSIZE(__int128) - 1), 42) == BITSIZE(__int128) - 1 ? 1 : -1];
+#endif
+int ctz56 = __builtin_ctzg((unsigned _BitInt(128))0); // expected-error {{not a compile-time constant}}
+char ctz57[__builtin_ctzg((unsigned _BitInt(128))0, 42) == 42 ? 1 : -1];
+char ctz58[__builtin_ctzg((unsigned _BitInt(128))0x1) == 0 ? 1 : -1];
+char ctz59[__builtin_ctzg((unsigned _BitInt(128))0x1, 42) == 0 ? 1 : -1];
+char ctz60[__builtin_ctzg((unsigned _BitInt(128))0x10) == 4 ? 1 : -1];
+char ctz61[__builtin_ctzg((unsigned _BitInt(128))0x10, 42) == 4 ? 1 : -1];
+char ctz62[__builtin_ctzg((unsigned _BitInt(128))1 << (BITSIZE(_BitInt(128)) - 1)) == BITSIZE(_BitInt(128)) - 1 ? 1 : -1];
+char ctz63[__builtin_ctzg((unsigned _BitInt(128))1 << (BITSIZE(_BitInt(128)) - 1), 42) == BITSIZE(_BitInt(128)) - 1 ? 1 : -1];
char popcount1[__builtin_popcount(0) == 0 ? 1 : -1];
char popcount2[__builtin_popcount(0xF0F0) == 8 ? 1 : -1];
diff --git a/clang/test/Sema/constant-builtins-all-args-evaluated.cpp b/clang/test/Sema/constant-builtins-all-args-evaluated.cpp
new file mode 100644
index 000000000000..db55e24cecda
--- /dev/null
+++ b/clang/test/Sema/constant-builtins-all-args-evaluated.cpp
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
+
+constexpr int test_clzg_0() {
+ int x = 0;
+ (void)__builtin_clzg(0U, ++x);
+ return x;
+}
+
+static_assert(test_clzg_0() == 1);
+
+constexpr int test_clzg_1() {
+ int x = 0;
+ (void)__builtin_clzg(1U, ++x);
+ return x;
+}
+
+static_assert(test_clzg_1() == 1);
+
+constexpr int test_ctzg_0() {
+ int x = 0;
+ (void)__builtin_ctzg(0U, ++x);
+ return x;
+}
+
+static_assert(test_ctzg_0() == 1);
+
+constexpr int test_ctzg_1() {
+ int x = 0;
+ (void)__builtin_ctzg(1U, ++x);
+ return x;
+}
+
+static_assert(test_ctzg_1() == 1);
diff --git a/clang/test/Sema/count-builtins.c b/clang/test/Sema/count-builtins.c
new file mode 100644
index 000000000000..79fa812f3f20
--- /dev/null
+++ b/clang/test/Sema/count-builtins.c
@@ -0,0 +1,87 @@
+// RUN: %clang_cc1 -std=c23 -triple=x86_64-pc-linux-gnu -fsyntax-only -verify -Wpedantic %s
+
+typedef int int2 __attribute__((ext_vector_type(2)));
+
+void test_builtin_popcountg(short s, int i, __int128 i128, _BitInt(128) bi128,
+ double d, int2 i2) {
+ __builtin_popcountg();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+ __builtin_popcountg(i, i);
+ // expected-error@-1 {{too many arguments to function call, expected 1, have 2}}
+ __builtin_popcountg(s);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'short')}}
+ __builtin_popcountg(i);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int')}}
+ __builtin_popcountg(i128);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was '__int128')}}
+ __builtin_popcountg(bi128);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was '_BitInt(128)')}}
+ __builtin_popcountg(d);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'double')}}
+ __builtin_popcountg(i2);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int2' (vector of 2 'int' values))}}
+}
+
+void test_builtin_clzg(short s, int i, unsigned int ui, __int128 i128,
+ _BitInt(128) bi128, double d, int2 i2) {
+ __builtin_clzg();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+ __builtin_clzg(i, i, i);
+ // expected-error@-1 {{too many arguments to function call, expected at most 2, have 3}}
+ __builtin_clzg(s);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'short')}}
+ __builtin_clzg(i);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int')}}
+ __builtin_clzg(i128);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was '__int128')}}
+ __builtin_clzg(bi128);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was '_BitInt(128)')}}
+ __builtin_clzg(d);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'double')}}
+ __builtin_clzg(i2);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int2' (vector of 2 'int' values))}}
+ __builtin_clzg(i2);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int2' (vector of 2 'int' values))}}
+ __builtin_clzg(ui, ui);
+ // expected-error@-1 {{2nd argument must be an 'int' (was 'unsigned int')}}
+ __builtin_clzg(ui, i128);
+ // expected-error@-1 {{2nd argument must be an 'int' (was '__int128')}}
+ __builtin_clzg(ui, bi128);
+ // expected-error@-1 {{2nd argument must be an 'int' (was '_BitInt(128)')}}
+ __builtin_clzg(ui, d);
+ // expected-error@-1 {{2nd argument must be an 'int' (was 'double')}}
+ __builtin_clzg(ui, i2);
+ // expected-error@-1 {{2nd argument must be an 'int' (was 'int2' (vector of 2 'int' values))}}
+}
+
+void test_builtin_ctzg(short s, int i, unsigned int ui, __int128 i128,
+ _BitInt(128) bi128, double d, int2 i2) {
+ __builtin_ctzg();
+ // expected-error@-1 {{too few arguments to function call, expected 1, have 0}}
+ __builtin_ctzg(i, i, i);
+ // expected-error@-1 {{too many arguments to function call, expected at most 2, have 3}}
+ __builtin_ctzg(s);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'short')}}
+ __builtin_ctzg(i);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int')}}
+ __builtin_ctzg(i128);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was '__int128')}}
+ __builtin_ctzg(bi128);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was '_BitInt(128)')}}
+ __builtin_ctzg(d);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'double')}}
+ __builtin_ctzg(i2);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int2' (vector of 2 'int' values))}}
+ __builtin_ctzg(i2);
+ // expected-error@-1 {{1st argument must be an unsigned integer (was 'int2' (vector of 2 'int' values))}}
+ __builtin_ctzg(ui, ui);
+ // expected-error@-1 {{2nd argument must be an 'int' (was 'unsigned int')}}
+ __builtin_ctzg(ui, i128);
+ // expected-error@-1 {{2nd argument must be an 'int' (was '__int128')}}
+ __builtin_ctzg(ui, bi128);
+ // expected-error@-1 {{2nd argument must be an 'int' (was '_BitInt(128)')}}
+ __builtin_ctzg(ui, d);
+ // expected-error@-1 {{2nd argument must be an 'int' (was 'double')}}
+ __builtin_ctzg(ui, i2);
+ // expected-error@-1 {{2nd argument must be an 'int' (was 'int2' (vector of 2 'int' values))}}
+}
diff --git a/clang/test/Sema/flexible-array-in-union.c b/clang/test/Sema/flexible-array-in-union.c
index 5fabfbe0b1ea..dd5e8069665f 100644
--- a/clang/test/Sema/flexible-array-in-union.c
+++ b/clang/test/Sema/flexible-array-in-union.c
@@ -1,13 +1,188 @@
-// RUN: %clang_cc1 %s -verify=c -fsyntax-only
-// RUN: %clang_cc1 %s -verify -fsyntax-only -x c++
-// RUN: %clang_cc1 %s -verify -fsyntax-only -fms-compatibility
-// RUN: %clang_cc1 %s -verify -fsyntax-only -fms-compatibility -x c++
+// RUN: %clang_cc1 %s -verify=stock,c -fsyntax-only
+// RUN: %clang_cc1 %s -verify=stock,cpp -fsyntax-only -x c++
+// RUN: %clang_cc1 %s -verify=stock,cpp -fsyntax-only -fms-compatibility -x c++
+// RUN: %clang_cc1 %s -verify=stock,c,gnu -fsyntax-only -Wgnu-flexible-array-union-member -Wgnu-empty-struct
+// RUN: %clang_cc1 %s -verify=stock,c,microsoft -fsyntax-only -fms-compatibility -Wmicrosoft
// The test checks that an attempt to initialize union with flexible array
// member with an initializer list doesn't crash clang.
-union { char x[]; } r = {0}; // c-error {{flexible array member 'x' in a union is not allowed}}
+union { char x[]; } r = {0}; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+struct _name1 {
+ int a;
+ union {
+ int b;
+ char x[]; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+ };
+} name1 = {
+ 10,
+ 42, /* initializes "b" */
+};
-// expected-no-diagnostics
+struct _name1i {
+ int a;
+ union {
+ int b;
+ char x[]; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+ };
+} name1i = {
+ .a = 10,
+ .b = 42,
+};
+
+/* Initialization of flexible array in a union is never allowed. */
+struct _name2 {
+ int a;
+ union {
+ int b;
+ char x[]; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+ };
+} name2 = {
+ 12,
+ 13,
+ { 'c' }, /* c-warning {{excess elements in struct initializer}}
+ cpp-error {{excess elements in struct initializer}}
+ */
+};
+
+/* Initialization of flexible array in a union is never allowed. */
+struct _name2i {
+ int a;
+ union {
+ int b;
+ char x[]; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ stock-note {{initialized flexible array member 'x' is here}}
+ */
+ };
+} name2i = {
+ .a = 12,
+ .b = 13, /* stock-note {{previous initialization is here}} */
+ .x = { 'c' }, /* stock-error {{initialization of flexible array member is not allowed}}
+ c-warning {{initializer overrides prior initialization of this subobject}}
+ cpp-error {{initializer partially overrides prior initialization of this subobject}}
+ */
+};
+
+/* Flexible array initialization always allowed when not in a union,
+ and when struct has another member.
+ */
+struct _okay {
+ int a;
+ char x[];
+} okay = {
+ 22,
+ { 'x', 'y', 'z' },
+};
+
+struct _okayi {
+ int a;
+ char x[];
+} okayi = {
+ .a = 22,
+ .x = { 'x', 'y', 'z' },
+};
+
+struct _okay0 {
+ int a;
+ char x[];
+} okay0 = { };
+
+struct _flex_extension {
+ char x[]; /* gnu-warning {{flexible array member 'x' in otherwise empty struct is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in otherwise empty struct is a Microsoft extension}}
+ */
+} flex_extension = {
+ { 'x', 'y', 'z' },
+};
+
+struct _flex_extensioni {
+ char x[]; /* gnu-warning {{flexible array member 'x' in otherwise empty struct is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in otherwise empty struct is a Microsoft extension}}
+ */
+} flex_extensioni = {
+ .x = { 'x', 'y', 'z' },
+};
+
+struct already_hidden {
+ int a;
+ union {
+ int b;
+ struct {
+ struct { } __empty; // gnu-warning {{empty struct is a GNU extension}}
+ char x[];
+ };
+ };
+};
+struct still_zero_sized {
+ struct { } __unused; // gnu-warning {{empty struct is a GNU extension}}
+ int x[];
+};
+
+struct warn1 {
+ int a;
+ union {
+ int b;
+ char x[]; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+ };
+};
+
+struct warn2 {
+ int x[]; /* gnu-warning {{flexible array member 'x' in otherwise empty struct is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in otherwise empty struct is a Microsoft extension}}
+ */
+};
+
+union warn3 {
+ short x[]; /* gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+};
+
+struct quiet1 {
+ int a;
+ short x[];
+};
+
+struct _not_at_end {
+ union { short x[]; }; /* stock-warning-re {{field '' with variable sized type '{{.*}}' not at the end of a struct or class is a GNU extension}}
+ gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+ int y;
+} not_at_end = {{}, 3};
+
+struct _not_at_end_s {
+ struct { int a; short x[]; }; /* stock-warning-re {{field '' with variable sized type '{{.*}}' not at the end of a struct or class is a GNU extension}} */
+ int y;
+} not_at_end_s = {{}, 3};
+
+struct {
+ int a;
+ union { /* stock-warning-re {{field '' with variable sized type '{{.*}}' not at the end of a struct or class is a GNU extension}} */
+ short x[]; /* stock-note {{initialized flexible array member 'x' is here}}
+ gnu-warning {{flexible array member 'x' in a union is a GNU extension}}
+ microsoft-warning {{flexible array member 'x' in a union is a Microsoft extension}}
+ */
+ int b;
+ };
+ int c;
+ int d;
+} i_f = { 4,
+ {5}, /* stock-error {{initialization of flexible array member is not allowed}} */
+ {},
+ 6};
+
+// expected-no-diagnostics
diff --git a/clang/test/Sema/format-strings-signedness-fixit.c b/clang/test/Sema/format-strings-signedness-fixit.c
new file mode 100644
index 000000000000..b4e6e975657a
--- /dev/null
+++ b/clang/test/Sema/format-strings-signedness-fixit.c
@@ -0,0 +1,98 @@
+// RUN: cp %s %t
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -Wformat -Wformat-signedness -fixit %t
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -fsyntax-only -Wformat -Wformat-signedness -Werror %t
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -E -o - %t | FileCheck %s
+
+#include <limits.h>
+
+int printf(const char *restrict format, ...);
+
+void test_printf_int(int x)
+{
+ printf("%u", x);
+}
+
+void test_printf_unsigned(unsigned x)
+{
+ printf("%d", x);
+}
+
+void test_printf_long(long x)
+{
+ printf("%lu", x);
+}
+
+void test_printf_unsigned_long(unsigned long x)
+{
+ printf("%ld", x);
+}
+
+void test_printf_long_long(long long x)
+{
+ printf("%llu", x);
+}
+
+void test_printf_unsigned_long_long(unsigned long long x)
+{
+ printf("%lld", x);
+}
+
+enum enum_int {
+ minus_1 = -1
+};
+
+void test_printf_enum_int(enum enum_int x)
+{
+ printf("%u", x);
+}
+
+enum enum_unsigned {
+ zero = 0
+};
+
+void test_printf_enum_unsigned(enum enum_unsigned x)
+{
+ printf("%d", x);
+}
+
+enum enum_long {
+ minus_one = -1,
+ int_val = INT_MAX,
+ unsigned_val = (unsigned)INT_MIN
+};
+
+void test_printf_enum_long(enum enum_long x)
+{
+ printf("%lu", x);
+}
+
+enum enum_unsigned_long {
+ uint_max_plus = (unsigned long)UINT_MAX+1,
+};
+
+void test_printf_enum_unsigned_long(enum enum_unsigned_long x)
+{
+ printf("%ld", x);
+}
+
+// Validate the fixes.
+// CHECK: void test_printf_int(int x)
+// CHECK: printf("%d", x);
+// CHECK: void test_printf_unsigned(unsigned x)
+// CHECK: printf("%u", x);
+// CHECK: void test_printf_long(long x)
+// CHECK: printf("%ld", x);
+// CHECK: void test_printf_unsigned_long(unsigned long x)
+// CHECK: printf("%lu", x);
+// CHECK: void test_printf_long_long(long long x)
+// CHECK: printf("%lld", x);
+// CHECK: void test_printf_unsigned_long_long(unsigned long long x)
+// CHECK: printf("%llu", x);
+// CHECK: void test_printf_enum_int(enum enum_int x)
+// CHECK: printf("%d", x);
+// CHECK: void test_printf_enum_unsigned(enum enum_unsigned x)
+// CHECK: printf("%u", x);
+// CHECK: void test_printf_enum_long(enum enum_long x)
+// CHECK: printf("%ld", x);
+// CHECK: void test_printf_enum_unsigned_long(enum enum_unsigned_long x)
+// CHECK: printf("%lu", x);
diff --git a/clang/test/Sema/format-strings-signedness.c b/clang/test/Sema/format-strings-signedness.c
new file mode 100644
index 000000000000..d5a8140d9ef8
--- /dev/null
+++ b/clang/test/Sema/format-strings-signedness.c
@@ -0,0 +1,222 @@
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -std=c11 -fsyntax-only -verify -Wformat -Wformat-signedness %s
+// RUN: %clang_cc1 -triple=x86_64-pc-win32 -std=c11 -fsyntax-only -verify -Wformat -Wformat-signedness %s
+
+// Verify that -Wformat-signedness alone (without -Wformat) triggers the
+// warnings. Note that in gcc this will not trigger the signedness warnings,
+// as -Wformat is off by default in gcc.
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -std=c11 -fsyntax-only -verify -Wformat-signedness %s
+// RUN: %clang_cc1 -triple=x86_64-pc-win32 -std=c11 -fsyntax-only -verify -Wformat-signedness %s
+
+// Verify that -Wformat-signedness warnings are not reported with only -Wformat
+// (gcc compat).
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -std=c11 -fsyntax-only -Wformat -verify=okay %s
+
+// Verify that -Wformat-signedness warnings are not reported with -Wno-format (gcc compat).
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -std=c11 -fsyntax-only -Wformat-signedness -Wno-format -verify=okay %s
+// RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -std=c11 -fsyntax-only -Wno-format -Wformat-signedness -verify=okay %s
+// okay-no-diagnostics
+
+int printf(const char *restrict format, ...);
+int scanf(const char * restrict, ...);
+
+void test_printf_bool(_Bool x)
+{
+ printf("%d", x); // no-warning
+ printf("%u", x); // no-warning
+ printf("%x", x); // no-warning
+}
+
+void test_printf_char(char x)
+{
+ printf("%c", x); // no-warning
+}
+
+void test_printf_unsigned_char(unsigned char x)
+{
+ printf("%c", x); // no-warning
+}
+
+void test_printf_int(int x)
+{
+ printf("%d", x); // no-warning
+ printf("%u", x); // expected-warning{{format specifies type 'unsigned int' but the argument has type 'int'}}
+ printf("%x", x); // expected-warning{{format specifies type 'unsigned int' but the argument has type 'int'}}
+}
+
+void test_printf_unsigned(unsigned x)
+{
+ printf("%d", x); // expected-warning{{format specifies type 'int' but the argument has type 'unsigned int'}}
+ printf("%u", x); // no-warning
+ printf("%x", x); // no-warning
+}
+
+void test_printf_long(long x)
+{
+ printf("%ld", x); // no-warning
+ printf("%lu", x); // expected-warning{{format specifies type 'unsigned long' but the argument has type 'long'}}
+ printf("%lx", x); // expected-warning{{format specifies type 'unsigned long' but the argument has type 'long'}}
+}
+
+void test_printf_unsigned_long(unsigned long x)
+{
+ printf("%ld", x); // expected-warning{{format specifies type 'long' but the argument has type 'unsigned long'}}
+ printf("%lu", x); // no-warning
+ printf("%lx", x); // no-warning
+}
+
+void test_printf_long_long(long long x)
+{
+ printf("%lld", x); // no-warning
+ printf("%llu", x); // expected-warning{{format specifies type 'unsigned long long' but the argument has type 'long long'}}
+ printf("%llx", x); // expected-warning{{format specifies type 'unsigned long long' but the argument has type 'long long'}}
+}
+
+void test_printf_unsigned_long_long(unsigned long long x)
+{
+ printf("%lld", x); // expected-warning{{format specifies type 'long long' but the argument has type 'unsigned long long'}}
+ printf("%llu", x); // no-warning
+ printf("%llx", x); // no-warning
+}
+
+enum enum_int {
+ minus_1 = -1
+};
+
+void test_printf_enum_int(enum enum_int x)
+{
+ printf("%d", x); // no-warning
+ printf("%u", x); // expected-warning{{format specifies type 'unsigned int' but the argument has underlying type 'int'}}
+ printf("%x", x); // expected-warning{{format specifies type 'unsigned int' but the argument has underlying type 'int'}}
+}
+
+#ifndef _WIN32 // Disabled because enums have a different underlying type on _WIN32
+enum enum_unsigned {
+ zero = 0
+};
+
+void test_printf_enum_unsigned(enum enum_unsigned x)
+{
+ printf("%d", x); // expected-warning{{format specifies type 'int' but the argument has underlying type 'unsigned int'}}
+ printf("%u", x); // no-warning
+ printf("%x", x); // no-warning
+}
+
+enum enum_long {
+ minus_one = -1,
+ int_val = __INT_MAX__, // INT_MAX
+ unsigned_val = (unsigned)(-__INT_MAX__ -1) // (unsigned)INT_MIN
+};
+
+void test_printf_enum_long(enum enum_long x)
+{
+ printf("%ld", x); // no-warning
+ printf("%lu", x); // expected-warning{{format specifies type 'unsigned long' but the argument has underlying type 'long'}}
+ printf("%lx", x); // expected-warning{{format specifies type 'unsigned long' but the argument has underlying type 'long'}}
+}
+
+enum enum_unsigned_long {
+ uint_max_plus = (unsigned long)(__INT_MAX__ *2U +1U)+1, // (unsigned long)UINT_MAX+1
+};
+
+void test_printf_enum_unsigned_long(enum enum_unsigned_long x)
+{
+ printf("%ld", x); // expected-warning{{format specifies type 'long' but the argument has underlying type 'unsigned long'}}
+ printf("%lu", x); // no-warning
+ printf("%lx", x); // no-warning
+}
+#endif
+
+void test_scanf_char(char *y) {
+ scanf("%c", y); // no-warning
+}
+
+void test_scanf_unsigned_char(unsigned char *y) {
+ scanf("%c", y); // no-warning
+}
+
+void test_scanf_int(int *x) {
+ scanf("%d", x); // no-warning
+ scanf("%u", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'int *'}}
+ scanf("%x", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'int *'}}
+}
+
+void test_scanf_unsigned(unsigned *x) {
+ scanf("%d", x); // expected-warning{{format specifies type 'int *' but the argument has type 'unsigned int *'}}
+ scanf("%u", x); // no-warning
+ scanf("%x", x); // no-warning
+}
+
+void test_scanf_long(long *x) {
+ scanf("%ld", x); // no-warning
+ scanf("%lu", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'long *'}}
+ scanf("%lx", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'long *'}}
+}
+
+void test_scanf_unsigned_long(unsigned long *x) {
+ scanf("%ld", x); // expected-warning{{format specifies type 'long *' but the argument has type 'unsigned long *'}}
+ scanf("%lu", x); // no-warning
+ scanf("%lx", x); // no-warning
+}
+
+void test_scanf_longlong(long long *x) {
+ scanf("%lld", x); // no-warning
+ scanf("%llu", x); // expected-warning{{format specifies type 'unsigned long long *' but the argument has type 'long long *'}}
+ scanf("%llx", x); // expected-warning{{format specifies type 'unsigned long long *' but the argument has type 'long long *'}}
+}
+
+void test_scanf_unsigned_longlong(unsigned long long *x) {
+ scanf("%lld", x); // expected-warning{{format specifies type 'long long *' but the argument has type 'unsigned long long *'}}
+ scanf("%llu", x); // no-warning
+ scanf("%llx", x); // no-warning
+}
+
+void test_scanf_enum_int(enum enum_int *x) {
+ scanf("%d", x); // no-warning
+ scanf("%u", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'enum enum_int *'}}
+ scanf("%x", x); // expected-warning{{format specifies type 'unsigned int *' but the argument has type 'enum enum_int *'}}
+}
+
+#ifndef _WIN32 // Disabled because enums have a different underlying type on _WIN32
+void test_scanf_enum_unsigned(enum enum_unsigned *x) {
+ scanf("%d", x); // expected-warning{{format specifies type 'int *' but the argument has type 'enum enum_unsigned *'}}
+ scanf("%u", x); // no-warning
+ scanf("%x", x); // no-warning
+}
+
+void test_scanf_enum_long(enum enum_long *x) {
+ scanf("%ld", x); // no-warning
+ scanf("%lu", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'enum enum_long *'}}
+ scanf("%lx", x); // expected-warning{{format specifies type 'unsigned long *' but the argument has type 'enum enum_long *'}}
+}
+
+void test_scanf_enum_unsigned_long(enum enum_unsigned_long *x) {
+ scanf("%ld", x); // expected-warning{{format specifies type 'long *' but the argument has type 'enum enum_unsigned_long *'}}
+ scanf("%lu", x); // no-warning
+ scanf("%lx", x); // no-warning
+}
+#endif
+
+// Verify that we get no warnings from <inttypes.h>
+
+typedef short int int16_t;
+typedef unsigned short int uint16_t;
+
+void test_printf_priX16(int16_t x) {
+ printf("PRId16: %" "d" /*PRId16*/ "\n", x); // no-warning
+ printf("PRIi16: %" "i" /*PRIi16*/ "\n", x); // no-warning
+}
+
+void test_printf_unsigned_priX16(uint16_t x) {
+ printf("PRIo16: %" "o" /*PRIo16*/ "\n", x); // no-warning
+ printf("PRIu16: %" "u" /*PRIu16*/ "\n", x); // no-warning
+ printf("PRIx16: %" "x" /*PRIx16*/ "\n", x); // no-warning
+ printf("PRIX16: %" "X" /*PRIX16*/ "\n", x); // no-warning
+}
+
+// Verify that we can suppress a -Wformat-signedness warning by ignoring
+// -Wformat (gcc compat).
+void test_suppress(int x)
+{
+#pragma GCC diagnostic ignored "-Wformat"
+ printf("%u", x);
+}
diff --git a/clang/test/Sema/transparent-union.c b/clang/test/Sema/transparent-union.c
index c134a7a9b1c4..f02c2298b51c 100644
--- a/clang/test/Sema/transparent-union.c
+++ b/clang/test/Sema/transparent-union.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsyntax-only -verify -Wgnu-flexible-array-union-member %s
typedef union {
int *ip;
float *fp;
@@ -131,7 +131,7 @@ union pr15134v2 {
union pr30520v { void b; } __attribute__((transparent_union)); // expected-error {{field has incomplete type 'void'}}
-union pr30520a { int b[]; } __attribute__((transparent_union)); // expected-error {{flexible array member 'b' in a union is not allowed}}
+union pr30520a { int b[]; } __attribute__((transparent_union)); // expected-warning {{flexible array member 'b' in a union is a GNU extension}}
// expected-note@+1 2 {{forward declaration of 'struct stb'}}
union pr30520s { struct stb b; } __attribute__((transparent_union)); // expected-error {{field has incomplete type 'struct stb'}}
diff --git a/clang/test/Sema/warn-cast-function-type-strict.c b/clang/test/Sema/warn-cast-function-type-strict.c
index 8c88f275d2b3..b0a70cf324b7 100644
--- a/clang/test/Sema/warn-cast-function-type-strict.c
+++ b/clang/test/Sema/warn-cast-function-type-strict.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 %s -fsyntax-only -Wcast-function-type -verify
-// RUN: %clang_cc1 %s -fsyntax-only -Wcast-function-type-strict -verify
+// RUN: %clang_cc1 %s -fsyntax-only -Wcast-function-type -verify=expected,strict
+// RUN: %clang_cc1 %s -fsyntax-only -Wcast-function-type-strict -verify=expected,strict
// RUN: %clang_cc1 %s -fsyntax-only -Wextra -Wno-ignored-qualifiers -verify
int t(int array[static 12]);
@@ -32,13 +32,13 @@ f10 *j;
void foo(void) {
a = (f1 *)x;
b = (f2 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f2 *' (aka 'int (*)(void *)') converts to incompatible function type}} */
- c = (f3 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f3 *' (aka 'int (*)()') converts to incompatible function type}} */
+ c = (f3 *)x; /* strict-warning {{cast from 'int (*)(long)' to 'f3 *' (aka 'int (*)()') converts to incompatible function type}} */
d = (f4 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f4 *' (aka 'void (*)()') converts to incompatible function type}} */
- e = (f5 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f5 *' (aka 'void (*)(void)') converts to incompatible function type}} */
+ e = (f5 *)x; /* strict-warning {{cast from 'int (*)(long)' to 'f5 *' (aka 'void (*)(void)') converts to incompatible function type}} */
f = (f6 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f6 *' (aka 'int (*)(long, int)') converts to incompatible function type}} */
- g = (f7 *)x; /* expected-warning {{cast from 'int (*)(long)' to 'f7 *' (aka 'int (*)(long, ...)') converts to incompatible function type}} */
+ g = (f7 *)x; /* strict-warning {{cast from 'int (*)(long)' to 'f7 *' (aka 'int (*)(long, ...)') converts to incompatible function type}} */
h = (f8 *)t;
i = (f9 *)u;
// FIXME: return type qualifier should not be included in the function type . Warning should be absent after this issue is fixed. https://github.com/llvm/llvm-project/issues/39494 .
- j = (f10 *)v; /* expected-warning {{cast from 'const int (*)(int)' to 'f10 *' (aka 'int (*)(int)') converts to incompatible function type}} */
+ j = (f10 *)v; /* strict-warning {{cast from 'const int (*)(int)' to 'f10 *' (aka 'int (*)(int)') converts to incompatible function type}} */
}
diff --git a/clang/test/SemaCXX/attr-target-version.cpp b/clang/test/SemaCXX/attr-target-version.cpp
index 0bd710c4e282..b3385f043590 100644
--- a/clang/test/SemaCXX/attr-target-version.cpp
+++ b/clang/test/SemaCXX/attr-target-version.cpp
@@ -9,7 +9,6 @@ void __attribute__((target_version("rcpc3"))) no_def(void);
void __attribute__((target_version("mops"))) no_def(void);
void __attribute__((target_version("rdma"))) no_def(void);
-// expected-error@+1 {{no matching function for call to 'no_def'}}
void foo(void) { no_def(); }
constexpr int __attribute__((target_version("sve2"))) diff_const(void) { return 1; }
@@ -41,6 +40,7 @@ inline int __attribute__((target_version("sme"))) diff_inline(void) { return 1;
int __attribute__((target_version("fp16"))) diff_inline(void) { return 2; }
inline int __attribute__((target_version("sme"))) diff_inline1(void) { return 1; }
+//expected-error@+1 {{multiversioned function declaration has a different inline specification}}
int __attribute__((target_version("default"))) diff_inline1(void) { return 2; }
int __attribute__((target_version("fcma"))) diff_type1(void) { return 1; }
@@ -59,8 +59,7 @@ int __attribute__((target_version("sve2-sha3"))) diff_type3(void) noexcept(true)
template <typename T> int __attribute__((target_version("default"))) temp(T) { return 1; }
template <typename T> int __attribute__((target_version("simd"))) temp1(T) { return 1; }
-// expected-error@+1 {{attribute 'target_version' multiversioned functions do not yet support function templates}}
-template <typename T> int __attribute__((target_version("sha3"))) temp1(T) { return 2; }
+// expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support function templates}}
extern "C" {
int __attribute__((target_version("aes"))) extc(void) { return 1; }
@@ -70,17 +69,23 @@ int __attribute__((target_version("lse"))) extc(void) { return 1; }
auto __attribute__((target_version("default"))) ret1(void) { return 1; }
auto __attribute__((target_version("dpb"))) ret2(void) { return 1; }
+// expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support deduced return types}}
auto __attribute__((target_version("dpb2"))) ret3(void) -> int { return 1; }
class Cls {
__attribute__((target_version("rng"))) Cls();
+ // expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support constructors}}
__attribute__((target_version("sve-i8mm"))) ~Cls();
+ // expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support destructors}}
Cls &__attribute__((target_version("f32mm"))) operator=(const Cls &) = default;
+ // expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support defaulted functions}}
Cls &__attribute__((target_version("ssbs"))) operator=(Cls &&) = delete;
+ // expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support deleted functions}}
virtual void __attribute__((target_version("default"))) vfunc();
virtual void __attribute__((target_version("sm4"))) vfunc1();
+ // expected-error@-1 {{attribute 'target_version' multiversioned functions do not yet support virtual functions}}
};
__attribute__((target_version("sha3"))) void Decl();
diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
index 3ce26c8fcd98..ce403285b0f5 100644
--- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
+++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
@@ -247,3 +247,15 @@ using Bar = Foo<U>; // expected-note {{could not match 'Foo<type-parameter-0-0>'
Bar s = {1}; // expected-error {{no viable constructor or deduction guide for deduction of template arguments}}
} // namespace test18
+
+// GH85406, verify no crash on invalid alias templates.
+namespace test19 {
+template <typename T>
+class Foo {};
+
+template <typename T>
+template <typename K>
+using Bar2 = Foo<K>; // expected-error {{extraneous template parameter list in alias template declaration}}
+
+Bar2 b = 1; // expected-error {{no viable constructor or deduction guide for deduction of template arguments}}
+} // namespace test19
diff --git a/clang/test/SemaCXX/cxx2a-template-lambdas.cpp b/clang/test/SemaCXX/cxx2a-template-lambdas.cpp
index 7ac481369891..fff524e77d3b 100644
--- a/clang/test/SemaCXX/cxx2a-template-lambdas.cpp
+++ b/clang/test/SemaCXX/cxx2a-template-lambdas.cpp
@@ -1,10 +1,14 @@
-// RUN: %clang_cc1 -std=c++2a -verify %s
+// RUN: %clang_cc1 -std=c++03 -verify -Dstatic_assert=_Static_assert -Wno-c++11-extensions -Wno-c++14-extensions -Wno-c++17-extensions -Wno-c++20-extensions %s
+// RUN: %clang_cc1 -std=c++11 -verify=expected,cxx11,cxx11-cxx14 -Wno-c++20-extensions -Wno-c++17-extensions -Wno-c++14-extensions %s
+// RUN: %clang_cc1 -std=c++14 -verify=expected,cxx11-cxx14,cxx14 -Wno-c++20-extensions -Wno-c++17-extensions %s
+// RUN: %clang_cc1 -std=c++17 -verify -Wno-c++20-extensions %s
+// RUN: %clang_cc1 -std=c++20 -verify %s
template<typename, typename>
-constexpr bool is_same = false;
+inline const bool is_same = false;
template<typename T>
-constexpr bool is_same<T, T> = true;
+inline const bool is_same<T, T> = true;
template<typename T>
struct DummyTemplate { };
@@ -23,7 +27,7 @@ void func() {
L1.operator()<6>(); // expected-note {{in instantiation}}
auto L2 = []<template<typename> class T, class U>(T<U> &&arg) {
- static_assert(is_same<T<U>, DummyTemplate<float>>); // // expected-error {{static assertion failed}}
+ static_assert(is_same<T<U>, DummyTemplate<float> >); // // expected-error {{static assertion failed}}
};
L2(DummyTemplate<float>());
L2(DummyTemplate<double>()); // expected-note {{in instantiation}}
@@ -36,15 +40,20 @@ struct ShadowMe {
}
};
+#if __cplusplus >= 201102L
template<typename T>
constexpr T outer() {
- return []<T x>() { return x; }.template operator()<123>(); // expected-error {{no matching member function}} \
- expected-note {{candidate template ignored}}
+ // FIXME: The C++11 error seems wrong
+ return []<T x>() { return x; }.template operator()<123>(); // expected-error {{no matching member function}} \
+ expected-note {{candidate template ignored}} \
+ cxx11-note {{non-literal type '<dependent type>' cannot be used in a constant expression}} \
+ cxx14-note {{non-literal type}}
}
-static_assert(outer<int>() == 123);
+static_assert(outer<int>() == 123); // cxx11-cxx14-error {{not an integral constant expression}} cxx11-cxx14-note {{in call}}
template int *outer<int *>(); // expected-note {{in instantiation}}
+#endif
-
+#if __cplusplus >= 202002L
namespace GH62611 {
template <auto A = [](auto x){}>
struct C {
@@ -87,3 +96,4 @@ void foo() {
}
}
+#endif
diff --git a/clang/test/SemaCXX/lambda-expressions.cpp b/clang/test/SemaCXX/lambda-expressions.cpp
index 389002ab0e34..151d74f21d64 100644
--- a/clang/test/SemaCXX/lambda-expressions.cpp
+++ b/clang/test/SemaCXX/lambda-expressions.cpp
@@ -1,6 +1,7 @@
-// RUN: %clang_cc1 -std=c++11 -Wno-unused-value -fsyntax-only -verify=expected,expected-cxx14,cxx11 -fblocks %s
-// RUN: %clang_cc1 -std=c++14 -Wno-unused-value -fsyntax-only -verify -verify=expected-cxx14 -fblocks %s
-// RUN: %clang_cc1 -std=c++17 -Wno-unused-value -verify -ast-dump -fblocks %s | FileCheck %s
+// RUN: %clang_cc1 -std=c++11 -Wno-unused-value -fsyntax-only -verify=expected,not-cxx03,cxx03-cxx11,cxx11,expected-cxx14 -fblocks %s
+// RUN: %clang_cc1 -std=c++03 -Wno-unused-value -fsyntax-only -verify=expected,cxx03,cxx03-cxx11,expected-cxx14 -fblocks %s -Ddecltype=__decltype -Dstatic_assert=_Static_assert -Wno-c++11-extensions
+// RUN: %clang_cc1 -std=c++14 -Wno-unused-value -fsyntax-only -verify=expected,not-cxx03,expected-cxx14 -fblocks %s
+// RUN: %clang_cc1 -std=c++17 -Wno-unused-value -verify=expected,not-cxx03 -ast-dump -fblocks %s | FileCheck %s
namespace std { class type_info; };
@@ -93,14 +94,14 @@ namespace ImplicitCapture {
[] { return ref_i; }; // expected-error {{variable 'ref_i' cannot be implicitly captured in a lambda with no capture-default specified}} expected-note {{lambda expression begins here}} expected-note 2 {{capture 'ref_i' by}} expected-note 2 {{default capture by}}
static int j;
- int &ref_j = j;
- [] { return ref_j; }; // ok
+ int &ref_j = j; // cxx03-note {{declared here}}
+ [] { return ref_j; }; // cxx03-error {{variable 'ref_j' cannot be implicitly captured in a lambda with no capture-default specified}} cxx03-note 4 {{capture}} cxx03-note {{lambda expression begins here}}
}
}
namespace SpecialMembers {
void f() {
- auto a = []{}; // expected-note 2{{here}} expected-note 2{{candidate}}
+ auto a = []{}; // expected-note 2{{here}} expected-note {{candidate}} not-cxx03-note {{candidate}}
decltype(a) b; // expected-error {{no matching constructor}}
decltype(a) c = a;
decltype(a) d = static_cast<decltype(a)&&>(a);
@@ -213,7 +214,7 @@ namespace VariadicPackExpansion {
};
template<typename...Ts> void local_class() {
- sink {
+ sink s(
[] (Ts t) {
struct S : Ts {
void f(Ts t) {
@@ -226,7 +227,7 @@ namespace VariadicPackExpansion {
s.f(t);
return s;
} (Ts()).g() ...
- };
+ );
};
struct X {}; struct Y {};
template void local_class<X, Y>();
@@ -296,7 +297,7 @@ namespace PR16708 {
namespace TypeDeduction {
struct S {};
void f() {
- const S s {};
+ const S s = S();
S &&t = [&] { return s; } ();
#if __cplusplus > 201103L
S &&u = [&] () -> auto { return s; } ();
@@ -308,7 +309,7 @@ namespace TypeDeduction {
namespace lambdas_in_NSDMIs {
template<class T>
struct L {
- T t{};
+ T t = T();
T t2 = ([](int a) { return [](int b) { return b; };})(t)(t);
};
L<int> l;
@@ -345,6 +346,7 @@ namespace CaptureIncomplete {
}
}
+#if __cplusplus >= 201103L
namespace CaptureAbstract {
struct S {
virtual void f() = 0; // expected-note {{unimplemented}}
@@ -362,6 +364,7 @@ namespace CaptureAbstract {
[=] { return s.n; }; // expected-error {{abstract}}
}
}
+#endif
namespace PR18128 {
auto l = [=]{}; // expected-error {{non-local lambda expression cannot have a capture-default}}
@@ -372,6 +375,8 @@ namespace PR18128 {
// expected-error@-1 {{non-local lambda expression cannot have a capture-default}}
// expected-error@-2 {{invalid use of non-static data member 'n'}}
// expected-cxx14-error@-3 {{a lambda expression may not appear inside of a constant expression}}
+ // cxx03-error@-4 {{function declaration cannot have variably modified type}}
+ // cxx03-warning@-5 {{variable length arrays in C++ are a Clang extension}}
int g(int k = ([=]{ return n; }(), 0));
// expected-error@-1 {{non-local lambda expression cannot have a capture-default}}
// expected-error@-2 {{invalid use of non-static data member 'n'}}
@@ -434,13 +439,13 @@ struct A {
template <typename F>
void g(F f) {
- auto a = A<decltype(f())>{};
+ auto a = A<decltype(f())>();
// expected-note@-1 {{in instantiation of template class 'PR20731::A<void>' requested here}}
auto xf = [a, f]() {};
int x = sizeof(xf);
};
void f() {
- g([] {});
+ g([] {}); // cxx03-warning {{template argument uses local type}}
// expected-note-re@-1 {{in instantiation of function template specialization 'PR20731::g<(lambda at {{.*}}>' requested here}}
}
@@ -491,8 +496,8 @@ namespace PR21857 {
fun() = default;
using Fn::operator();
};
- template<typename Fn> fun<Fn> wrap(Fn fn);
- auto x = wrap([](){});
+ template<typename Fn> fun<Fn> wrap(Fn fn); // cxx03-warning {{template argument uses unnamed type}}
+ auto x = wrap([](){}); // cxx03-warning {{template argument uses unnamed type}} cxx03-note 2 {{unnamed type used in template argument was declared here}}
}
namespace PR13987 {
@@ -559,8 +564,8 @@ struct B {
int x;
A a = [&] { int y = x; };
A b = [&] { [&] { [&] { int y = x; }; }; };
- A d = [&](auto param) { int y = x; }; // cxx11-error {{'auto' not allowed in lambda parameter}}
- A e = [&](auto param) { [&] { [&](auto param2) { int y = x; }; }; }; // cxx11-error 2 {{'auto' not allowed in lambda parameter}}
+ A d = [&](auto param) { int y = x; }; // cxx03-cxx11-error {{'auto' not allowed in lambda parameter}}
+ A e = [&](auto param) { [&] { [&](auto param2) { int y = x; }; }; }; // cxx03-cxx11-error 2 {{'auto' not allowed in lambda parameter}}
};
B<int> b;
@@ -588,9 +593,9 @@ struct S1 {
};
void foo1() {
- auto s0 = S1{[name=]() {}}; // expected-error 2 {{expected expression}}
- auto s1 = S1{[name=name]() {}}; // expected-error {{use of undeclared identifier 'name'; did you mean 'name1'?}}
- // cxx11-warning@-1 {{initialized lambda captures are a C++14 extension}}
+ auto s0 = S1([name=]() {}); // expected-error {{expected expression}}
+ auto s1 = S1([name=name]() {}); // expected-error {{use of undeclared identifier 'name'; did you mean 'name1'?}}
+ // cxx03-cxx11-warning@-1 {{initialized lambda captures are a C++14 extension}}
}
}
@@ -606,7 +611,7 @@ namespace PR25627_dont_odr_use_local_consts {
namespace ConversionOperatorDoesNotHaveDeducedReturnType {
auto x = [](int){};
- auto y = [](auto &v) -> void { v.n = 0; }; // cxx11-error {{'auto' not allowed in lambda parameter}} cxx11-note {{candidate function not viable}} cxx11-note {{conversion candidate}}
+ auto y = [](auto &v) -> void { v.n = 0; }; // cxx03-cxx11-error {{'auto' not allowed in lambda parameter}} cxx03-cxx11-note {{candidate function not viable}} cxx03-cxx11-note {{conversion candidate}}
using T = decltype(x);
using U = decltype(y);
using ExpectedTypeT = void (*)(int);
@@ -626,14 +631,15 @@ namespace ConversionOperatorDoesNotHaveDeducedReturnType {
template<typename T>
friend constexpr U::operator ExpectedTypeU<T>() const noexcept;
#else
- friend auto T::operator()(int) const; // cxx11-error {{'auto' return without trailing return type; deduced return types are a C++14 extension}}
+ friend auto T::operator()(int) const; // cxx11-error {{'auto' return without trailing return type; deduced return types are a C++14 extension}} \
+ cxx03-error {{'auto' not allowed in function return type}}
friend T::operator ExpectedTypeT() const;
template<typename T>
- friend void U::operator()(T&) const; // cxx11-error {{friend declaration of 'operator()' does not match any declaration}}
+ friend void U::operator()(T&) const; // cxx03-cxx11-error {{friend declaration of 'operator()' does not match any declaration}}
// FIXME: This should not match, as above.
template<typename T>
- friend U::operator ExpectedTypeU<T>() const; // cxx11-error {{friend declaration of 'operator void (*)(type-parameter-0-0 &)' does not match any declaration}}
+ friend U::operator ExpectedTypeU<T>() const; // cxx03-cxx11-error {{friend declaration of 'operator void (*)(type-parameter-0-0 &)' does not match any declaration}}
#endif
private:
@@ -641,7 +647,7 @@ namespace ConversionOperatorDoesNotHaveDeducedReturnType {
};
// Should be OK in C++14 and later: lambda's call operator is a friend.
- void use(X &x) { y(x); } // cxx11-error {{no matching function for call to object}}
+ void use(X &x) { y(x); } // cxx03-cxx11-error {{no matching function for call to object}}
// This used to crash in return type deduction for the conversion opreator.
struct A { int n; void f() { +[](decltype(n)) {}; } };
@@ -682,8 +688,8 @@ namespace GH60518 {
// function parameters that are used in enable_if
struct StringLiteral {
template <int N>
-StringLiteral(const char (&array)[N])
- __attribute__((enable_if(__builtin_strlen(array) == 2,
+StringLiteral(const char (&array)[N]) // cxx03-note {{declared here}}
+ __attribute__((enable_if(__builtin_strlen(array) == 2, // cxx03-error {{'enable_if' attribute expression never produces a constant expression}} cxx03-note {{read of variable}}
"invalid string literal")));
};
@@ -695,7 +701,7 @@ StringLiteral(const char (&array)[N]) [[clang::annotate_type("test", array)]];
}
void Func1() {
- [[maybe_unused]] auto y = [&](decltype(StringLiteral("xx"))) {};
+ [[maybe_unused]] auto y = [&](decltype(StringLiteral("xx"))) {}; // cxx03-note {{in instantiation of function template specialization}}
[[maybe_unused]] auto z = [&](decltype(cpp_attribute::StringLiteral("xx"))) {};
}
@@ -718,6 +724,7 @@ static_assert([]() constexpr {
// Call operator attributes refering to a variable should
// be properly handled after D124351
+#if __cplusplus >= 201103L
constexpr int i = 2;
void foo() {
(void)[=][[gnu::aligned(i)]] () {}; // expected-warning{{C++23 extension}}
@@ -725,15 +732,18 @@ void foo() {
// CHECK-NEXT: ConstantExpr
// CHECK-NEXT: value: Int 2
}
+#endif
void GH48527() {
auto a = []()__attribute__((b(({ return 0; })))){}; // expected-warning {{unknown attribute 'b' ignored}}
}
+#if __cplusplus >= 201103L
void GH67492() {
constexpr auto test = 42;
auto lambda = (test, []() noexcept(true) {});
}
+#endif
// FIXME: This currently causes clang to crash in C++11 mode.
#if __cplusplus >= 201402L
diff --git a/clang/test/SemaCXX/lambda-implicit-this-capture.cpp b/clang/test/SemaCXX/lambda-implicit-this-capture.cpp
index 7e0e347a8fee..eb1f9e880aec 100644
--- a/clang/test/SemaCXX/lambda-implicit-this-capture.cpp
+++ b/clang/test/SemaCXX/lambda-implicit-this-capture.cpp
@@ -1,3 +1,4 @@
+// RUN: %clang_cc1 -std=c++03 -verify=cxx11 %s -Wno-c++11-extensions
// RUN: %clang_cc1 -std=c++11 -verify=cxx11 %s
// RUN: %clang_cc1 -std=c++2a -verify=cxx2a %s
// RUN: %clang_cc1 -std=c++2a -verify=cxx2a-no-deprecated %s -Wno-deprecated
diff --git a/clang/test/SemaCXX/lambda-invalid-capture.cpp b/clang/test/SemaCXX/lambda-invalid-capture.cpp
index 236753871d70..5be8c8c5078f 100644
--- a/clang/test/SemaCXX/lambda-invalid-capture.cpp
+++ b/clang/test/SemaCXX/lambda-invalid-capture.cpp
@@ -1,3 +1,4 @@
+// RUN: %clang_cc1 -fsyntax-only -verify -std=c++03 -Wno-c++11-extensions %s
// RUN: %clang_cc1 -fsyntax-only -verify %s
// Don't crash.
diff --git a/clang/test/SemaCXX/namespace-alias.cpp b/clang/test/SemaCXX/namespace-alias.cpp
index 281ee9962e8b..591957a657c0 100644
--- a/clang/test/SemaCXX/namespace-alias.cpp
+++ b/clang/test/SemaCXX/namespace-alias.cpp
@@ -47,6 +47,8 @@ namespace I {
namespace A1 { int i; }
namespace A2 = A1;
+
+ namespace A3::extra::specifiers = A2; // expected-error {{alias must be a single identifier}}
}
int f() {
diff --git a/clang/test/SemaCXX/new-delete.cpp b/clang/test/SemaCXX/new-delete.cpp
index 4f78b7c71a91..1a99c6aac604 100644
--- a/clang/test/SemaCXX/new-delete.cpp
+++ b/clang/test/SemaCXX/new-delete.cpp
@@ -171,12 +171,7 @@ void good_deletes()
void bad_deletes()
{
delete 0; // expected-error {{cannot delete expression of type 'int'}}
- delete [0] (int*)0;
-#if __cplusplus <= 199711L
- // expected-error@-2 {{expected expression}}
-#else
- // expected-error@-4 {{expected variable name or 'this' in lambda capture list}}
-#endif
+ delete [0] (int*)0; // expected-error {{expected variable name or 'this' in lambda capture list}}
delete (void*)0; // expected-warning {{cannot delete expression with pointer-to-'void' type 'void *'}}
delete (T*)0; // expected-warning {{deleting pointer to incomplete type}}
::S::delete (int*)0; // expected-error {{expected unqualified-id}}
diff --git a/clang/test/SemaCXX/warn-cast-function-type-strict.cpp b/clang/test/SemaCXX/warn-cast-function-type-strict.cpp
index b3164afde5a0..8887b3c4c5d5 100644
--- a/clang/test/SemaCXX/warn-cast-function-type-strict.cpp
+++ b/clang/test/SemaCXX/warn-cast-function-type-strict.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 %s -fblocks -fsyntax-only -Wcast-function-type -verify
-// RUN: %clang_cc1 %s -fblocks -fsyntax-only -Wcast-function-type-strict -verify
+// RUN: %clang_cc1 %s -fblocks -fsyntax-only -Wcast-function-type -verify=expected,strict
+// RUN: %clang_cc1 %s -fblocks -fsyntax-only -Wcast-function-type-strict -verify=expected,strict
// RUN: %clang_cc1 %s -fblocks -fsyntax-only -Wextra -verify
int x(long);
@@ -33,11 +33,11 @@ void foo() {
a = (f1 *)x;
b = (f2 *)x; // expected-warning {{cast from 'int (*)(long)' to 'f2 *' (aka 'int (*)(void *)') converts to incompatible function type}}
b = reinterpret_cast<f2 *>(x); // expected-warning {{cast from 'int (*)(long)' to 'f2 *' (aka 'int (*)(void *)') converts to incompatible function type}}
- c = (f3 *)x; // expected-warning {{cast from 'int (*)(long)' to 'f3 *' (aka 'int (*)(...)') converts to incompatible function type}}
+ c = (f3 *)x; // strict-warning {{cast from 'int (*)(long)' to 'f3 *' (aka 'int (*)(...)') converts to incompatible function type}}
d = (f4 *)x; // expected-warning {{cast from 'int (*)(long)' to 'f4 *' (aka 'void (*)(...)') converts to incompatible function type}}
- e = (f5 *)x; // expected-warning {{cast from 'int (*)(long)' to 'f5 *' (aka 'void (*)()') converts to incompatible function type}}
+ e = (f5 *)x; // strict-warning {{cast from 'int (*)(long)' to 'f5 *' (aka 'void (*)()') converts to incompatible function type}}
f = (f6 *)x; // expected-warning {{cast from 'int (*)(long)' to 'f6 *' (aka 'int (*)(long, int)') converts to incompatible function type}}
- g = (f7 *)x; // expected-warning {{cast from 'int (*)(long)' to 'f7 *' (aka 'int (*)(long, ...)') converts to incompatible function type}}
+ g = (f7 *)x; // strict-warning {{cast from 'int (*)(long)' to 'f7 *' (aka 'int (*)(long, ...)') converts to incompatible function type}}
mf p1 = (mf)&S::foo; // expected-warning {{cast from 'void (S::*)(int *)' to 'mf' (aka 'void (S::*)(int)') converts to incompatible function type}}
diff --git a/clang/test/SemaCXX/warn-exit-time-destructors.cpp b/clang/test/SemaCXX/warn-exit-time-destructors.cpp
index 2f14243cb48c..55ae37d7368f 100644
--- a/clang/test/SemaCXX/warn-exit-time-destructors.cpp
+++ b/clang/test/SemaCXX/warn-exit-time-destructors.cpp
@@ -51,6 +51,15 @@ struct A { ~A(); };
}
namespace test5 {
+ struct A { ~A(); };
+ [[clang::always_destroy]] A a; // no warning
+
+ void func() {
+ [[clang::always_destroy]] static A a; // no warning
+ }
+}
+
+namespace test6 {
#if __cplusplus >= 202002L
#define CPP20_CONSTEXPR constexpr
#else
@@ -68,3 +77,4 @@ namespace test5 {
T t; // expected-warning {{exit-time destructor}}
#undef CPP20_CONSTEXPR
}
+
diff --git a/clang/test/SemaHLSL/BuiltIns/half-float-only-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/half-float-only-errors.hlsl
new file mode 100644
index 000000000000..98c02c38675f
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/half-float-only-errors.hlsl
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_ceil
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_cos
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_exp
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_exp2
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_floor
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_log
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_log2
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_log10
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_sin
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_sqrt
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_roundeven
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify -DTEST_FUNC=__builtin_elementwise_trunc
+
+double2 test_double_builtin(double2 p0) {
+ return TEST_FUNC(p0);
+ // expected-error@-1 {{passing 'double2' (aka 'vector<double, 2>') to parameter of incompatible type '__attribute__((__vector_size__(2 * sizeof(float)))) float' (vector of 2 'float' values)}}
+}
diff --git a/clang/test/SemaHLSL/BuiltIns/pow-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/pow-errors.hlsl
new file mode 100644
index 000000000000..949028aacf24
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/pow-errors.hlsl
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify
+
+double2 test_double_builtin(double2 p0, double2 p1) {
+ return __builtin_elementwise_pow(p0,p1);
+ // expected-error@-1 {{passing 'double2' (aka 'vector<double, 2>') to parameter of incompatible type '__attribute__((__vector_size__(2 * sizeof(float)))) float' (vector of 2 'float' values)}}
+}
diff --git a/clang/test/SemaHLSL/BuiltIns/reversebits-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/reversebits-errors.hlsl
new file mode 100644
index 000000000000..6e66db6d1cca
--- /dev/null
+++ b/clang/test/SemaHLSL/BuiltIns/reversebits-errors.hlsl
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -verify
+
+
+double2 test_int_builtin(double2 p0) {
+ return __builtin_elementwise_bitreverse(p0);
+ // expected-error@-1 {{1st argument must be a vector of integers (was 'double2' (aka 'vector<double, 2>'))}}
+}
+
+int2 test_int_builtin(int2 p0) {
+ return __builtin_elementwise_bitreverse(p0);
+ // expected-error@-1 {{passing 'int2' (aka 'vector<int, 2>') to parameter of incompatible type '__attribute__((__vector_size__(2 * sizeof(unsigned int)))) unsigned int' (vector of 2 'unsigned int' values)}}
+}
diff --git a/clang/test/SemaObjC/attr-objc-NSObject.m b/clang/test/SemaObjC/attr-objc-NSObject.m
new file mode 100644
index 000000000000..76a01dcef016
--- /dev/null
+++ b/clang/test/SemaObjC/attr-objc-NSObject.m
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -verify -Wno-objc-root-class -fsyntax-only %s
+
+@interface NSArray<__covariant ObjectType>
+- (void)containsObject:(ObjectType)anObject; // expected-note {{passing argument to parameter 'anObject' here}}
+- (void)description;
+@end
+
+typedef __attribute__((NSObject)) struct Foo *FooRef;
+typedef struct Bar *BarRef;
+
+void good() {
+ FooRef object;
+ NSArray<FooRef> *array;
+ [array containsObject:object];
+ [object description];
+}
+
+void bad() {
+ BarRef object;
+ NSArray<BarRef> *array; // expected-error {{type argument 'BarRef' (aka 'struct Bar *') is neither an Objective-C object nor a block type}}
+ [array containsObject:object]; // expected-warning {{incompatible pointer types sending 'BarRef' (aka 'struct Bar *') to parameter of type 'id'}}
+ [object description]; // expected-warning {{receiver type 'BarRef' (aka 'struct Bar *') is not 'id' or interface pointer, consider casting it to 'id'}}
+}
diff --git a/clang/test/SemaTemplate/concepts-GH86757.cpp b/clang/test/SemaTemplate/concepts-GH86757.cpp
new file mode 100644
index 000000000000..3122381b2035
--- /dev/null
+++ b/clang/test/SemaTemplate/concepts-GH86757.cpp
@@ -0,0 +1,13 @@
+// RUN: %clang_cc1 -std=c++20 -Wfatal-errors -verify %s
+
+template <typename> int a;
+template <typename... b> concept c = a<b...>;
+template <typename> concept e = c<>;
+
+// must be a fatal error to trigger the crash
+undefined; // expected-error {{a type specifier is required for all declarations}}
+
+template <typename d> concept g = e<d>;
+template <g> struct h
+template <g d>
+struct h<d>;
diff --git a/clang/test/SemaTemplate/concepts-friends.cpp b/clang/test/SemaTemplate/concepts-friends.cpp
index 255b0858917f..91b797034ed6 100644
--- a/clang/test/SemaTemplate/concepts-friends.cpp
+++ b/clang/test/SemaTemplate/concepts-friends.cpp
@@ -478,3 +478,29 @@ template <Concept> class Foo {
};
} // namespace FriendOfFriend
+
+namespace GH86769 {
+
+template <typename T>
+concept X = true;
+
+template <X T> struct Y {
+ Y(T) {}
+ template <X U> friend struct Y;
+ template <X U> friend struct Y;
+ template <X U> friend struct Y;
+};
+
+template <class T>
+struct Z {
+ // FIXME: This is ill-formed per C++11 N3337 [temp.param]p12:
+ // A default template argument shall not be specified in a friend class
+ // template declaration.
+ template <X U = void> friend struct Y;
+};
+
+template struct Y<int>;
+template struct Z<int>;
+Y y(1);
+
+}
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index b7ea0d003a52..787cc809e253 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c++20 -verify %s
+// RUN: %clang_cc1 -std=c++20 -ferror-limit 0 -verify %s
namespace PR47043 {
template<typename T> concept True = true;
@@ -1114,3 +1114,11 @@ void foo() {
}
} // namespace GH64808
+
+namespace GH86757_1 {
+template <typename...> concept b = false;
+template <typename> concept c = b<>;
+template <typename d> concept f = c< d >;
+template <f> struct e; // expected-note {{}}
+template <f d> struct e<d>; // expected-error {{class template partial specialization is not more specialized than the primary template}}
+}
diff --git a/clang/test/SemaTemplate/ctad.cpp b/clang/test/SemaTemplate/ctad.cpp
index 388ed7d4cced..ec144d4f44ba 100644
--- a/clang/test/SemaTemplate/ctad.cpp
+++ b/clang/test/SemaTemplate/ctad.cpp
@@ -53,4 +53,4 @@ X x;
template<class T, class B> struct Y { Y(T); };
template<class T, class B=void> struct Y ;
Y y(1);
-};
+}
diff --git a/clang/test/SemaTemplate/deduction-guide.cpp b/clang/test/SemaTemplate/deduction-guide.cpp
index 16c7083df29d..0caef78fedbf 100644
--- a/clang/test/SemaTemplate/deduction-guide.cpp
+++ b/clang/test/SemaTemplate/deduction-guide.cpp
@@ -239,3 +239,12 @@ F s(0);
// CHECK: |-InjectedClassNameType {{.*}} 'F<>' dependent
// CHECK: | `-CXXRecord {{.*}} 'F'
// CHECK: `-TemplateTypeParmType {{.*}} 'type-parameter-0-1' dependent depth 0 index 1
+
+template<typename T>
+struct G { T t; };
+
+G g = {1};
+// CHECK-LABEL: Dumping <deduction guide for G>:
+// CHECK: FunctionTemplateDecl
+// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for G> 'auto (T) -> G<T>' aggregate
+// CHECK: `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for G> 'auto (int) -> G<int>' implicit_instantiation aggregate
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index e122cea50f72..ed401135ad84 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -205,6 +205,11 @@ static cl::list<std::string> FileNames(cl::Positional,
cl::desc("[@<file>] [<file> ...]"),
cl::cat(ClangFormatCategory));
+static cl::opt<bool> FailOnIncompleteFormat(
+ "fail-on-incomplete-format",
+ cl::desc("If set, fail with exit code 1 on incomplete format."),
+ cl::init(false), cl::cat(ClangFormatCategory));
+
namespace clang {
namespace format {
@@ -399,7 +404,7 @@ class ClangFormatDiagConsumer : public DiagnosticConsumer {
};
// Returns true on error.
-static bool format(StringRef FileName) {
+static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
const bool IsSTDIN = FileName == "-";
if (!OutputXML && Inplace && IsSTDIN) {
errs() << "error: cannot use -i when reading from stdin.\n";
@@ -535,7 +540,7 @@ static bool format(StringRef FileName) {
Rewrite.getEditBuffer(ID).write(outs());
}
}
- return false;
+ return ErrorOnIncompleteFormat && !Status.FormatComplete;
}
} // namespace format
@@ -699,7 +704,7 @@ int main(int argc, const char **argv) {
}
if (FileNames.empty())
- return clang::format::format("-");
+ return clang::format::format("-", FailOnIncompleteFormat);
if (FileNames.size() > 1 &&
(!Offsets.empty() || !Lengths.empty() || !LineRanges.empty())) {
@@ -717,7 +722,7 @@ int main(int argc, const char **argv) {
errs() << "Formatting [" << FileNo++ << "/" << FileNames.size() << "] "
<< FileName << "\n";
}
- Error |= clang::format::format(FileName);
+ Error |= clang::format::format(FileName, FailOnIncompleteFormat);
}
return Error ? 1 : 0;
}
diff --git a/clang/tools/clang-format/clang-format-diff.py b/clang/tools/clang-format/clang-format-diff.py
index 0a2c24743678..3a74b90e7315 100755
--- a/clang/tools/clang-format/clang-format-diff.py
+++ b/clang/tools/clang-format/clang-format-diff.py
@@ -138,6 +138,7 @@ def main():
)
# Reformat files containing changes in place.
+ has_diff = False
for filename, lines in lines_by_file.items():
if args.i and args.verbose:
print("Formatting {}".format(filename))
@@ -169,7 +170,7 @@ def main():
stdout, stderr = p.communicate()
if p.returncode != 0:
- sys.exit(p.returncode)
+ return p.returncode
if not args.i:
with open(filename) as f:
@@ -185,9 +186,12 @@ def main():
)
diff_string = "".join(diff)
if len(diff_string) > 0:
+ has_diff = True
sys.stdout.write(diff_string)
- sys.exit(1)
+
+ if has_diff:
+ return 1
if __name__ == "__main__":
- main()
+ sys.exit(main())
diff --git a/clang/tools/clang-installapi/ClangInstallAPI.cpp b/clang/tools/clang-installapi/ClangInstallAPI.cpp
index 54e82d78d4d2..13061cfa36ee 100644
--- a/clang/tools/clang-installapi/ClangInstallAPI.cpp
+++ b/clang/tools/clang-installapi/ClangInstallAPI.cpp
@@ -123,7 +123,7 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
}
}
- if (Ctx.Verifier->getState() == DylibVerifier::Result::Invalid)
+ if (Ctx.Verifier->verifyRemainingSymbols() == DylibVerifier::Result::Invalid)
return EXIT_FAILURE;
// After symbols have been collected, prepare to write output.
diff --git a/clang/tools/clang-installapi/InstallAPIOpts.td b/clang/tools/clang-installapi/InstallAPIOpts.td
index 87f4c3327e84..010f2507a1d1 100644
--- a/clang/tools/clang-installapi/InstallAPIOpts.td
+++ b/clang/tools/clang-installapi/InstallAPIOpts.td
@@ -29,3 +29,49 @@ def verify_mode_EQ : Joined<["--"], "verify-mode=">,
HelpText<"Specify the severity and extend of the validation. Valid modes are ErrorsOnly, ErrorsAndWarnings, and Pedantic.">;
def demangle : Flag<["--", "-"], "demangle">,
HelpText<"Demangle symbols when printing warnings and errors">;
+def dsym: Joined<["--"], "dsym=">,
+ MetaVarName<"<path>">, HelpText<"Specify dSYM path for enriched diagnostics.">;
+
+// Additional input options.
+def extra_project_header : Separate<["-"], "extra-project-header">,
+ MetaVarName<"<path>">,
+ HelpText<"Add additional project header location for parsing">;
+def extra_project_header_EQ : Joined<["--"], "extra-project-header=">,
+ Alias<extra_project_header>;
+def exclude_project_header : Separate<["-"], "exclude-project-header">,
+ MetaVarName<"<glob>">,
+ HelpText<"Exclude project header from parsing">;
+def exclude_project_header_EQ : Joined<["--"], "exclude-project-header=">,
+ Alias<exclude_project_header>;
+def extra_public_header : Separate<["-"], "extra-public-header">,
+ MetaVarName<"<path>">,
+ HelpText<"Add additional public header location for parsing">;
+def extra_public_header_EQ : Joined<["--"], "extra-public-header=">,
+ Alias<extra_public_header>;
+def extra_private_header : Separate<["-"], "extra-private-header">,
+ MetaVarName<"<path>">,
+ HelpText<"Add additional private header location for parsing">;
+def extra_private_header_EQ : Joined<["--"], "extra-private-header=">,
+ Alias<extra_private_header>;
+def exclude_public_header : Separate<["-"], "exclude-public-header">,
+ MetaVarName<"<glob>">,
+ HelpText<"Exclude public header from parsing">;
+def exclude_public_header_EQ : Joined<["--"], "exclude-public-header=">,
+ Alias<exclude_public_header>;
+def exclude_private_header : Separate<["-"], "exclude-private-header">,
+ MetaVarName<"<glob>">,
+ HelpText<"Exclude private header from parsing">;
+def exclude_private_header_EQ : Joined<["--"], "exclude-private-header=">,
+ Alias<exclude_private_header>;
+def public_umbrella_header : Separate<["-"], "public-umbrella-header">,
+ MetaVarName<"<path>">, HelpText<"Specify the public umbrella header location">;
+def public_umbrella_header_EQ : Joined<["--"], "public-umbrella-header=">,
+ Alias<public_umbrella_header>;
+def private_umbrella_header : Separate<["-"], "private-umbrella-header">,
+ MetaVarName<"<path>">, HelpText<"Specify the private umbrella header location">;
+def private_umbrella_header_EQ : Joined<["--"], "private-umbrella-header=">,
+ Alias<private_umbrella_header>;
+def project_umbrella_header : Separate<["-"], "project-umbrella-header">,
+ MetaVarName<"<path>">, HelpText<"Specify the project umbrella header location">;
+def project_umbrella_header_EQ : Joined<["--"], "project-umbrella-header=">,
+ Alias<project_umbrella_header>;
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
index b8696bb7896d..c4f39b7c8417 100644
--- a/clang/tools/clang-installapi/Options.cpp
+++ b/clang/tools/clang-installapi/Options.cpp
@@ -10,6 +10,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/InstallAPI/FileList.h"
+#include "clang/InstallAPI/HeaderFile.h"
#include "clang/InstallAPI/InstallAPIDiagnostic.h"
#include "llvm/Support/Program.h"
#include "llvm/TargetParser/Host.h"
@@ -181,6 +182,26 @@ bool Options::processFrontendOptions(InputArgList &Args) {
return true;
}
+bool Options::addFilePaths(InputArgList &Args, PathSeq &Headers,
+ OptSpecifier ID) {
+ for (const StringRef Path : Args.getAllArgValues(ID)) {
+ if ((bool)FM->getDirectory(Path, /*CacheFailure=*/false)) {
+ auto InputHeadersOrErr = enumerateFiles(*FM, Path);
+ if (!InputHeadersOrErr) {
+ Diags->Report(diag::err_cannot_open_file)
+ << Path << toString(InputHeadersOrErr.takeError());
+ return false;
+ }
+ // Sort headers to ensure deterministic behavior.
+ sort(*InputHeadersOrErr);
+ for (std::string &H : *InputHeadersOrErr)
+ Headers.emplace_back(std::move(H));
+ } else
+ Headers.emplace_back(Path);
+ }
+ return true;
+}
+
std::vector<const char *>
Options::processAndFilterOutInstallAPIOptions(ArrayRef<const char *> Args) {
std::unique_ptr<llvm::opt::OptTable> Table;
@@ -220,6 +241,48 @@ Options::processAndFilterOutInstallAPIOptions(ArrayRef<const char *> Args) {
if (const Arg *A = ParsedArgs.getLastArg(OPT_verify_against))
DriverOpts.DylibToVerify = A->getValue();
+ if (const Arg *A = ParsedArgs.getLastArg(OPT_dsym))
+ DriverOpts.DSYMPath = A->getValue();
+
+ // Handle exclude & extra header directories or files.
+ auto handleAdditionalInputArgs = [&](PathSeq &Headers,
+ clang::installapi::ID OptID) {
+ if (ParsedArgs.hasArgNoClaim(OptID))
+ Headers.clear();
+ return addFilePaths(ParsedArgs, Headers, OptID);
+ };
+
+ if (!handleAdditionalInputArgs(DriverOpts.ExtraPublicHeaders,
+ OPT_extra_public_header))
+ return {};
+
+ if (!handleAdditionalInputArgs(DriverOpts.ExtraPrivateHeaders,
+ OPT_extra_private_header))
+ return {};
+ if (!handleAdditionalInputArgs(DriverOpts.ExtraProjectHeaders,
+ OPT_extra_project_header))
+ return {};
+
+ if (!handleAdditionalInputArgs(DriverOpts.ExcludePublicHeaders,
+ OPT_exclude_public_header))
+ return {};
+ if (!handleAdditionalInputArgs(DriverOpts.ExcludePrivateHeaders,
+ OPT_exclude_private_header))
+ return {};
+ if (!handleAdditionalInputArgs(DriverOpts.ExcludeProjectHeaders,
+ OPT_exclude_project_header))
+ return {};
+
+ // Handle umbrella headers.
+ if (const Arg *A = ParsedArgs.getLastArg(OPT_public_umbrella_header))
+ DriverOpts.PublicUmbrellaHeader = A->getValue();
+
+ if (const Arg *A = ParsedArgs.getLastArg(OPT_private_umbrella_header))
+ DriverOpts.PrivateUmbrellaHeader = A->getValue();
+
+ if (const Arg *A = ParsedArgs.getLastArg(OPT_project_umbrella_header))
+ DriverOpts.ProjectUmbrellaHeader = A->getValue();
+
/// Any unclaimed arguments should be forwarded to the clang driver.
std::vector<const char *> ClangDriverArgs(ParsedArgs.size());
for (const Arg *A : ParsedArgs) {
@@ -273,6 +336,15 @@ Options::Options(DiagnosticsEngine &Diag, FileManager *FM,
}
}
+static const Regex Rule("(.+)/(.+)\\.framework/");
+static StringRef getFrameworkNameFromInstallName(StringRef InstallName) {
+ SmallVector<StringRef, 3> Match;
+ Rule.match(InstallName, &Match);
+ if (Match.empty())
+ return "";
+ return Match.back();
+}
+
InstallAPIContext Options::createContext() {
InstallAPIContext Ctx;
Ctx.FM = FM;
@@ -289,6 +361,11 @@ InstallAPIContext Options::createContext() {
Ctx.OutputLoc = DriverOpts.OutputPath;
Ctx.LangMode = FEOpts.LangMode;
+ // Attempt to find umbrella headers by capturing framework name.
+ StringRef FrameworkName;
+ if (!LinkerOpts.IsDylib)
+ FrameworkName = getFrameworkNameFromInstallName(LinkerOpts.InstallName);
+
// Process inputs.
for (const std::string &ListPath : DriverOpts.FileLists) {
auto Buffer = FM->getBufferForFile(ListPath);
@@ -302,6 +379,128 @@ InstallAPIContext Options::createContext() {
return Ctx;
}
}
+ // After initial input has been processed, add any extra headers.
+ auto HandleExtraHeaders = [&](PathSeq &Headers, HeaderType Type) -> bool {
+ assert(Type != HeaderType::Unknown && "Missing header type.");
+ for (const StringRef Path : Headers) {
+ if (!FM->getOptionalFileRef(Path)) {
+ Diags->Report(diag::err_no_such_header_file) << Path << (unsigned)Type;
+ return false;
+ }
+ SmallString<PATH_MAX> FullPath(Path);
+ FM->makeAbsolutePath(FullPath);
+
+ auto IncludeName = createIncludeHeaderName(FullPath);
+ Ctx.InputHeaders.emplace_back(
+ FullPath, Type, IncludeName.has_value() ? *IncludeName : "");
+ Ctx.InputHeaders.back().setExtra();
+ }
+ return true;
+ };
+
+ if (!HandleExtraHeaders(DriverOpts.ExtraPublicHeaders, HeaderType::Public) ||
+ !HandleExtraHeaders(DriverOpts.ExtraPrivateHeaders,
+ HeaderType::Private) ||
+ !HandleExtraHeaders(DriverOpts.ExtraProjectHeaders, HeaderType::Project))
+ return Ctx;
+
+ // After all headers have been added, consider excluded headers.
+ std::vector<std::unique_ptr<HeaderGlob>> ExcludedHeaderGlobs;
+ std::set<FileEntryRef> ExcludedHeaderFiles;
+ auto ParseGlobs = [&](const PathSeq &Paths, HeaderType Type) {
+ assert(Type != HeaderType::Unknown && "Missing header type.");
+ for (const StringRef Path : Paths) {
+ auto Glob = HeaderGlob::create(Path, Type);
+ if (Glob)
+ ExcludedHeaderGlobs.emplace_back(std::move(Glob.get()));
+ else {
+ consumeError(Glob.takeError());
+ if (auto File = FM->getFileRef(Path))
+ ExcludedHeaderFiles.emplace(*File);
+ else {
+ Diags->Report(diag::err_no_such_header_file)
+ << Path << (unsigned)Type;
+ return false;
+ }
+ }
+ }
+ return true;
+ };
+
+ if (!ParseGlobs(DriverOpts.ExcludePublicHeaders, HeaderType::Public) ||
+ !ParseGlobs(DriverOpts.ExcludePrivateHeaders, HeaderType::Private) ||
+ !ParseGlobs(DriverOpts.ExcludeProjectHeaders, HeaderType::Project))
+ return Ctx;
+
+ for (HeaderFile &Header : Ctx.InputHeaders) {
+ for (auto &Glob : ExcludedHeaderGlobs)
+ if (Glob->match(Header))
+ Header.setExcluded();
+ }
+ if (!ExcludedHeaderFiles.empty()) {
+ for (HeaderFile &Header : Ctx.InputHeaders) {
+ auto FileRef = FM->getFileRef(Header.getPath());
+ if (!FileRef)
+ continue;
+ if (ExcludedHeaderFiles.count(*FileRef))
+ Header.setExcluded();
+ }
+ }
+ // Report if glob was ignored.
+ for (const auto &Glob : ExcludedHeaderGlobs)
+ if (!Glob->didMatch())
+ Diags->Report(diag::warn_glob_did_not_match) << Glob->str();
+
+ // Mark any explicit or inferred umbrella headers. If one exists, move
+ // that to the beginning of the input headers.
+ auto MarkandMoveUmbrellaInHeaders = [&](llvm::Regex &Regex,
+ HeaderType Type) -> bool {
+ auto It = find_if(Ctx.InputHeaders, [&Regex, Type](const HeaderFile &H) {
+ return (H.getType() == Type) && Regex.match(H.getPath());
+ });
+
+ if (It == Ctx.InputHeaders.end())
+ return false;
+ It->setUmbrellaHeader();
+
+ // Because there can be an umbrella header per header type,
+ // find the first non umbrella header to swap position with.
+ auto BeginPos = find_if(Ctx.InputHeaders, [](const HeaderFile &H) {
+ return !H.isUmbrellaHeader();
+ });
+ if (BeginPos != Ctx.InputHeaders.end() && BeginPos < It)
+ std::swap(*BeginPos, *It);
+ return true;
+ };
+
+ auto FindUmbrellaHeader = [&](StringRef HeaderPath, HeaderType Type) -> bool {
+ assert(Type != HeaderType::Unknown && "Missing header type.");
+ if (!HeaderPath.empty()) {
+ auto EscapedString = Regex::escape(HeaderPath);
+ Regex UmbrellaRegex(EscapedString);
+ if (!MarkandMoveUmbrellaInHeaders(UmbrellaRegex, Type)) {
+ Diags->Report(diag::err_no_such_umbrella_header_file)
+ << HeaderPath << (unsigned)Type;
+ return false;
+ }
+ } else if (!FrameworkName.empty() && (Type != HeaderType::Project)) {
+ auto UmbrellaName = "/" + Regex::escape(FrameworkName);
+ if (Type == HeaderType::Public)
+ UmbrellaName += "\\.h";
+ else
+ UmbrellaName += "[_]?Private\\.h";
+ Regex UmbrellaRegex(UmbrellaName);
+ MarkandMoveUmbrellaInHeaders(UmbrellaRegex, Type);
+ }
+ return true;
+ };
+ if (!FindUmbrellaHeader(DriverOpts.PublicUmbrellaHeader,
+ HeaderType::Public) ||
+ !FindUmbrellaHeader(DriverOpts.PrivateUmbrellaHeader,
+ HeaderType::Private) ||
+ !FindUmbrellaHeader(DriverOpts.ProjectUmbrellaHeader,
+ HeaderType::Project))
+ return Ctx;
// Parse binary dylib and initialize verifier.
if (DriverOpts.DylibToVerify.empty()) {
@@ -326,7 +525,8 @@ InstallAPIContext Options::createContext() {
}
Ctx.Verifier = std::make_unique<DylibVerifier>(
- std::move(*Slices), Diags, DriverOpts.VerifyMode, DriverOpts.Demangle);
+ std::move(*Slices), Diags, DriverOpts.VerifyMode, DriverOpts.Demangle,
+ DriverOpts.DSYMPath);
return Ctx;
}
diff --git a/clang/tools/clang-installapi/Options.h b/clang/tools/clang-installapi/Options.h
index 2beeafc86bb0..82e04b49d125 100644
--- a/clang/tools/clang-installapi/Options.h
+++ b/clang/tools/clang-installapi/Options.h
@@ -31,6 +31,33 @@ struct DriverOptions {
/// \brief Path to input file lists (JSON).
llvm::MachO::PathSeq FileLists;
+ /// \brief Path to public umbrella header.
+ std::string PublicUmbrellaHeader;
+
+ /// \brief Path to private umbrella header.
+ std::string PrivateUmbrellaHeader;
+
+ /// \brief Path to project umbrella header.
+ std::string ProjectUmbrellaHeader;
+
+ /// \brief Paths of extra public headers.
+ PathSeq ExtraPublicHeaders;
+
+ /// \brief Paths of extra private headers.
+ PathSeq ExtraPrivateHeaders;
+
+ /// \brief Paths of extra project headers.
+ PathSeq ExtraProjectHeaders;
+
+ /// \brief List of excluded public headers.
+ PathSeq ExcludePublicHeaders;
+
+ /// \brief List of excluded private headers.
+ PathSeq ExcludePrivateHeaders;
+
+ /// \brief List of excluded project headers.
+ PathSeq ExcludeProjectHeaders;
+
/// \brief Mappings of target triples & tapi targets to build for.
std::map<llvm::MachO::Target, llvm::Triple> Targets;
@@ -40,6 +67,9 @@ struct DriverOptions {
/// \brief Output path.
std::string OutputPath;
+ /// \brief DSYM path.
+ std::string DSYMPath;
+
/// \brief File encoding to print.
FileType OutFT = FileType::TBD_V5;
@@ -103,6 +133,9 @@ public:
std::vector<std::string> &getClangFrontendArgs() { return FrontendArgs; }
private:
+ bool addFilePaths(llvm::opt::InputArgList &Args, PathSeq &Headers,
+ llvm::opt::OptSpecifier ID);
+
DiagnosticsEngine *Diags;
FileManager *FM;
std::vector<std::string> FrontendArgs;
diff --git a/clang/tools/libclang/CXType.cpp b/clang/tools/libclang/CXType.cpp
index 292d524f00ab..991767dc4c49 100644
--- a/clang/tools/libclang/CXType.cpp
+++ b/clang/tools/libclang/CXType.cpp
@@ -680,6 +680,7 @@ CXCallingConv clang_getFunctionTypeCallingConv(CXType X) {
TCALLINGCONV(PreserveAll);
TCALLINGCONV(M68kRTD);
TCALLINGCONV(PreserveNone);
+ TCALLINGCONV(RISCVVectorCall);
case CC_SpirFunction: return CXCallingConv_Unexposed;
case CC_AMDGPUKernelCall: return CXCallingConv_Unexposed;
case CC_OpenCLKernel: return CXCallingConv_Unexposed;
diff --git a/clang/unittests/AST/DeclPrinterTest.cpp b/clang/unittests/AST/DeclPrinterTest.cpp
index e024c41e03b4..f2b027a25621 100644
--- a/clang/unittests/AST/DeclPrinterTest.cpp
+++ b/clang/unittests/AST/DeclPrinterTest.cpp
@@ -1386,6 +1386,79 @@ TEST(DeclPrinter, TestTemplateArgumentList16) {
ASSERT_TRUE(PrintedDeclCXX11Matches(Code, "NT2", "int NT2 = 5"));
}
+TEST(DeclPrinter, TestCXXRecordDecl17) {
+ ASSERT_TRUE(PrintedDeclCXX98Matches(
+ "template<typename T> struct Z {};"
+ "struct X {};"
+ "Z<X> A;",
+ "A", "Z<X> A",
+ [](PrintingPolicy &Policy) { Policy.SuppressTagKeyword = false; }));
+}
+
+TEST(DeclPrinter, TestCXXRecordDecl18) {
+ ASSERT_TRUE(PrintedDeclCXX98Matches(
+ "template<typename T> struct Z {};"
+ "struct X {};"
+ "Z<X> A;"
+ "template <typename T1, int>"
+ "struct Y{};"
+ "Y<Z<X>, 2> B;",
+ "B", "Y<Z<X>, 2> B",
+ [](PrintingPolicy &Policy) { Policy.SuppressTagKeyword = false; }));
+}
+
+TEST(DeclPrinter, TestCXXRecordDecl19) {
+ ASSERT_TRUE(PrintedDeclCXX98Matches(
+ "template<typename T> struct Z {};"
+ "struct X {};"
+ "Z<X> A;"
+ "template <typename T1, int>"
+ "struct Y{};"
+ "Y<Z<X>, 2> B;",
+ "B", "Y<Z<X>, 2> B",
+ [](PrintingPolicy &Policy) { Policy.SuppressTagKeyword = true; }));
+}
+
+TEST(DeclPrinter, TestCXXRecordDecl20) {
+ ASSERT_TRUE(PrintedDeclCXX98Matches(
+ "template <typename T, int N> class Inner;"
+ "template <typename T, int N>"
+ "class Inner{Inner(T val){}};"
+ "template <class InnerClass, int N> class Outer {"
+ "public:"
+ "struct NestedStruct {"
+ "int nestedValue;"
+ "NestedStruct(int val) : nestedValue(val) {}"
+ "};"
+ "InnerClass innerInstance;"
+ "Outer(const InnerClass &inner) : innerInstance(inner) {}"
+ "};"
+ "Outer<Inner<int, 10>, 5>::NestedStruct nestedInstance(100);",
+ "nestedInstance",
+ "Outer<Inner<int, 10>, 5>::NestedStruct nestedInstance(100)",
+ [](PrintingPolicy &Policy) { Policy.SuppressTagKeyword = false; }));
+}
+
+TEST(DeclPrinter, TestCXXRecordDecl21) {
+ ASSERT_TRUE(PrintedDeclCXX98Matches(
+ "template <typename T, int N> class Inner;"
+ "template <typename T, int N>"
+ "class Inner{Inner(T val){}};"
+ "template <class InnerClass, int N> class Outer {"
+ "public:"
+ "struct NestedStruct {"
+ "int nestedValue;"
+ "NestedStruct(int val) : nestedValue(val) {}"
+ "};"
+ "InnerClass innerInstance;"
+ "Outer(const InnerClass &inner) : innerInstance(inner) {}"
+ "};"
+ "Outer<Inner<int, 10>, 5>::NestedStruct nestedInstance(100);",
+ "nestedInstance",
+ "Outer<Inner<int, 10>, 5>::NestedStruct nestedInstance(100)",
+ [](PrintingPolicy &Policy) { Policy.SuppressTagKeyword = true; }));
+}
+
TEST(DeclPrinter, TestFunctionParamUglified) {
llvm::StringLiteral Code = R"cpp(
class __c;
diff --git a/clang/unittests/AST/DeclTest.cpp b/clang/unittests/AST/DeclTest.cpp
index cef0f8711416..2530ce74eb6a 100644
--- a/clang/unittests/AST/DeclTest.cpp
+++ b/clang/unittests/AST/DeclTest.cpp
@@ -429,7 +429,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator new"),
Ctx));
ASSERT_TRUE(SizedOperatorNew->getOwningModule());
- EXPECT_TRUE(SizedOperatorNew->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(SizedOperatorNew->isFromExplicitGlobalModule());
// void* operator new(std::size_t, std::align_val_t);
auto *SizedAlignedOperatorNew = selectFirst<FunctionDecl>(
@@ -441,7 +441,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator new"),
Ctx));
ASSERT_TRUE(SizedAlignedOperatorNew->getOwningModule());
- EXPECT_TRUE(SizedAlignedOperatorNew->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(SizedAlignedOperatorNew->isFromExplicitGlobalModule());
// void* operator new[](std::size_t);
auto *SizedArrayOperatorNew = selectFirst<FunctionDecl>(
@@ -451,7 +451,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator new[]"),
Ctx));
ASSERT_TRUE(SizedArrayOperatorNew->getOwningModule());
- EXPECT_TRUE(SizedArrayOperatorNew->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(SizedArrayOperatorNew->isFromExplicitGlobalModule());
// void* operator new[](std::size_t, std::align_val_t);
auto *SizedAlignedArrayOperatorNew = selectFirst<FunctionDecl>(
@@ -464,7 +464,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
Ctx));
ASSERT_TRUE(SizedAlignedArrayOperatorNew->getOwningModule());
EXPECT_TRUE(
- SizedAlignedArrayOperatorNew->getOwningModule()->isGlobalModule());
+ SizedAlignedArrayOperatorNew->isFromExplicitGlobalModule());
// void operator delete(void*) noexcept;
auto *Delete = selectFirst<FunctionDecl>(
@@ -475,7 +475,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator delete"),
Ctx));
ASSERT_TRUE(Delete->getOwningModule());
- EXPECT_TRUE(Delete->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(Delete->isFromExplicitGlobalModule());
// void operator delete(void*, std::align_val_t) noexcept;
auto *AlignedDelete = selectFirst<FunctionDecl>(
@@ -487,7 +487,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator delete"),
Ctx));
ASSERT_TRUE(AlignedDelete->getOwningModule());
- EXPECT_TRUE(AlignedDelete->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(AlignedDelete->isFromExplicitGlobalModule());
// Sized deallocation is not enabled by default. So we skip it here.
@@ -500,7 +500,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator delete[]"),
Ctx));
ASSERT_TRUE(ArrayDelete->getOwningModule());
- EXPECT_TRUE(ArrayDelete->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(ArrayDelete->isFromExplicitGlobalModule());
// void operator delete[](void*, std::align_val_t) noexcept;
auto *AlignedArrayDelete = selectFirst<FunctionDecl>(
@@ -512,7 +512,7 @@ TEST(Decl, ImplicitlyDeclaredAllocationFunctionsInModules) {
.bind("operator delete[]"),
Ctx));
ASSERT_TRUE(AlignedArrayDelete->getOwningModule());
- EXPECT_TRUE(AlignedArrayDelete->getOwningModule()->isGlobalModule());
+ EXPECT_TRUE(AlignedArrayDelete->isFromExplicitGlobalModule());
}
TEST(Decl, TemplateArgumentDefaulted) {
diff --git a/clang/unittests/Analysis/FlowSensitive/DeterminismTest.cpp b/clang/unittests/Analysis/FlowSensitive/DeterminismTest.cpp
index e794bd4943f2..a2cbfb1ff582 100644
--- a/clang/unittests/Analysis/FlowSensitive/DeterminismTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/DeterminismTest.cpp
@@ -30,7 +30,9 @@ namespace clang::dataflow {
// flow-condition at function exit.
std::string analyzeAndPrintExitCondition(llvm::StringRef Code) {
DataflowAnalysisContext DACtx(std::make_unique<WatchedLiteralsSolver>());
- clang::TestAST AST(Code);
+ TestInputs Inputs(Code);
+ Inputs.Language = TestLanguage::Lang_CXX17;
+ clang::TestAST AST(Inputs);
const auto *Target =
cast<FunctionDecl>(test::findValueDecl(AST.context(), "target"));
Environment InitEnv(DACtx, *Target);
diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
index 1d3b268976a7..ca055a462a28 100644
--- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp
@@ -17,6 +17,7 @@
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/LangStandard.h"
+#include "clang/Testing/TestAST.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Testing/Support/Error.h"
@@ -135,12 +136,32 @@ const Formula &getFormula(const ValueDecl &D, const Environment &Env) {
}
TEST(TransferTest, CNotSupported) {
- std::string Code = R"(
- void target() {}
- )";
- ASSERT_THAT_ERROR(checkDataflowWithNoopAnalysis(
- Code, [](const auto &, auto &) {}, {BuiltinOptions{}},
- LangStandard::lang_c89),
+ TestInputs Inputs("void target() {}");
+ Inputs.Language = TestLanguage::Lang_C89;
+ clang::TestAST AST(Inputs);
+ const auto *Target =
+ cast<FunctionDecl>(test::findValueDecl(AST.context(), "target"));
+ ASSERT_THAT_ERROR(AdornedCFG::build(*Target).takeError(),
+ llvm::FailedWithMessage("Can only analyze C++"));
+}
+
+TEST(TransferTest, ObjectiveCNotSupported) {
+ TestInputs Inputs("void target() {}");
+ Inputs.Language = TestLanguage::Lang_OBJC;
+ clang::TestAST AST(Inputs);
+ const auto *Target =
+ cast<FunctionDecl>(test::findValueDecl(AST.context(), "target"));
+ ASSERT_THAT_ERROR(AdornedCFG::build(*Target).takeError(),
+ llvm::FailedWithMessage("Can only analyze C++"));
+}
+
+TEST(TransferTest, ObjectiveCXXNotSupported) {
+ TestInputs Inputs("void target() {}");
+ Inputs.Language = TestLanguage::Lang_OBJCXX;
+ clang::TestAST AST(Inputs);
+ const auto *Target =
+ cast<FunctionDecl>(test::findValueDecl(AST.context(), "target"));
+ ASSERT_THAT_ERROR(AdornedCFG::build(*Target).takeError(),
llvm::FailedWithMessage("Can only analyze C++"));
}
diff --git a/clang/unittests/Analysis/FlowSensitive/UncheckedOptionalAccessModelTest.cpp b/clang/unittests/Analysis/FlowSensitive/UncheckedOptionalAccessModelTest.cpp
index 9430730004db..f16472ef1714 100644
--- a/clang/unittests/Analysis/FlowSensitive/UncheckedOptionalAccessModelTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/UncheckedOptionalAccessModelTest.cpp
@@ -2798,6 +2798,59 @@ TEST_P(UncheckedOptionalAccessTest, OptionalValueOptional) {
)");
}
+TEST_P(UncheckedOptionalAccessTest, NestedOptionalAssignValue) {
+ ExpectDiagnosticsFor(
+ R"(
+ #include "unchecked_optional_access_test.h"
+
+ using OptionalInt = $ns::$optional<int>;
+
+ void target($ns::$optional<OptionalInt> opt) {
+ if (!opt) return;
+
+ // Accessing the outer optional is OK now.
+ *opt;
+
+ // But accessing the nested optional is still unsafe because we haven't
+ // checked it.
+ **opt; // [[unsafe]]
+
+ *opt = 1;
+
+ // Accessing the nested optional is safe after assigning a value to it.
+ **opt;
+ }
+ )");
+}
+
+TEST_P(UncheckedOptionalAccessTest, NestedOptionalAssignOptional) {
+ ExpectDiagnosticsFor(
+ R"(
+ #include "unchecked_optional_access_test.h"
+
+ using OptionalInt = $ns::$optional<int>;
+
+ void target($ns::$optional<OptionalInt> opt) {
+ if (!opt) return;
+
+ // Accessing the outer optional is OK now.
+ *opt;
+
+ // But accessing the nested optional is still unsafe because we haven't
+ // checked it.
+ **opt; // [[unsafe]]
+
+ // Assign from `optional<short>` so that we trigger conversion assignment
+ // instead of move assignment.
+ *opt = $ns::$optional<short>();
+
+ // Accessing the nested optional is still unsafe after assigning an empty
+ // optional to it.
+ **opt; // [[unsafe]]
+ }
+ )");
+}
+
// Tests that structs can be nested. We use an optional field because its easy
// to use in a test, but the type of the field shouldn't matter.
TEST_P(UncheckedOptionalAccessTest, OptionalValueStruct) {
@@ -3443,6 +3496,22 @@ TEST_P(UncheckedOptionalAccessTest, ClassDerivedPrivatelyFromOptional) {
ast_matchers::hasName("Method"));
}
+TEST_P(UncheckedOptionalAccessTest, ClassDerivedFromOptionalValueConstructor) {
+ ExpectDiagnosticsFor(R"(
+ #include "unchecked_optional_access_test.h"
+
+ struct Derived : public $ns::$optional<int> {
+ Derived(int);
+ };
+
+ void target(Derived opt) {
+ *opt; // [[unsafe]]
+ opt = 1;
+ *opt;
+ }
+ )");
+}
+
// FIXME: Add support for:
// - constructors (copy, move)
// - assignment operators (default, copy, move)
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index bea989c8c306..33dec7dae319 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -3621,8 +3621,8 @@ TEST_F(FormatTest, FormatsClasses) {
" : public aaaaaaaaaaaaaaaaaaa<aaaaaaaaaaaaaaaaaaaaa,\n"
" aaaaaaaaaaaaaaaaaaaaaa> {};");
verifyFormat("template <class R, class C>\n"
- "struct Aaaaaaaaaaaaaaaaa<R (C::*)(int) const>\n"
- " : Aaaaaaaaaaaaaaaaa<R (C::*)(int)> {};");
+ "struct Aaaaaaaaaaaaaaaaa<R (C:: *)(int) const>\n"
+ " : Aaaaaaaaaaaaaaaaa<R (C:: *)(int)> {};");
verifyFormat("class ::A::B {};");
}
@@ -11034,10 +11034,10 @@ TEST_F(FormatTest, UnderstandsBinaryOperators) {
}
TEST_F(FormatTest, UnderstandsPointersToMembers) {
- verifyFormat("int A::*x;");
- verifyFormat("int (S::*func)(void *);");
- verifyFormat("void f() { int (S::*func)(void *); }");
- verifyFormat("typedef bool *(Class::*Member)() const;");
+ verifyFormat("int A:: *x;");
+ verifyFormat("int (S:: *func)(void *);");
+ verifyFormat("void f() { int (S:: *func)(void *); }");
+ verifyFormat("typedef bool *(Class:: *Member)() const;");
verifyFormat("void f() {\n"
" (a->*f)();\n"
" a->*x;\n"
@@ -11052,9 +11052,19 @@ TEST_F(FormatTest, UnderstandsPointersToMembers) {
verifyFormat(
"(aaaaaaaaaa->*bbbbbbb)(\n"
" aaaaaaaaaaaaaaaaaaaaaaaaaaa(aaaaaaaaaaaaaaaaaaaaaaaaaaa));");
+
FormatStyle Style = getLLVMStyle();
+ EXPECT_EQ(Style.PointerAlignment, FormatStyle::PAS_Right);
+ verifyFormat("typedef bool *(Class:: *Member)() const;", Style);
+ verifyFormat("void f(int A:: *p) { int A:: *v = &A::B; }", Style);
+
Style.PointerAlignment = FormatStyle::PAS_Left;
- verifyFormat("typedef bool* (Class::*Member)() const;", Style);
+ verifyFormat("typedef bool* (Class::* Member)() const;", Style);
+ verifyFormat("void f(int A::* p) { int A::* v = &A::B; }", Style);
+
+ Style.PointerAlignment = FormatStyle::PAS_Middle;
+ verifyFormat("typedef bool * (Class:: * Member)() const;", Style);
+ verifyFormat("void f(int A:: * p) { int A:: * v = &A::B; }", Style);
}
TEST_F(FormatTest, UnderstandsUnaryOperators) {
@@ -12065,6 +12075,7 @@ TEST_F(FormatTest, UnderstandsSquareAttributes) {
verifyFormat("SomeType s [[gnu::unused]] (InitValue);");
verifyFormat("SomeType s [[using gnu: unused]] (InitValue);");
verifyFormat("[[gsl::suppress(\"clang-tidy-check-name\")]] void f() {}");
+ verifyFormat("[[suppress(type.5)]] int uninitialized_on_purpose;");
verifyFormat("void f() [[deprecated(\"so sorry\")]];");
verifyFormat("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n"
" [[unused]] aaaaaaaaaaaaaaaaaaaaaaa(int i);");
@@ -12386,7 +12397,7 @@ TEST_F(FormatTest, FormatsFunctionTypes) {
verifyFormat("int (*func)(void *);");
verifyFormat("void f() { int (*func)(void *); }");
verifyFormat("template <class CallbackClass>\n"
- "using MyCallback = void (CallbackClass::*)(SomeObject *Data);");
+ "using Callback = void (CallbackClass:: *)(SomeObject *Data);");
verifyGoogleFormat("A<void*(int*, SomeType*)>;");
verifyGoogleFormat("void* (*a)(int);");
@@ -19056,6 +19067,11 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
verifyFormat("int a(int x);\n"
"double b();",
Alignment);
+ verifyFormat("int a(const Test & = Test());\n"
+ "int a1(int &foo, const Test & = Test());\n"
+ "int a2(int &foo, const Test &name = Test());\n"
+ "double b();",
+ Alignment);
verifyFormat("struct Test {\n"
" Test(const Test &) = default;\n"
" ~Test() = default;\n"
@@ -19092,6 +19108,13 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
" int x,\n"
" bool y);",
Alignment);
+ // Set ColumnLimit low so that we break the argument list in multiple lines.
+ Alignment.ColumnLimit = 35;
+ verifyFormat("int a3(SomeTypeName1 &x,\n"
+ " SomeTypeName2 &y,\n"
+ " const Test & = Test());\n"
+ "double b();",
+ Alignment);
Alignment.ColumnLimit = OldColumnLimit;
// Ensure function pointers don't screw up recursive alignment
verifyFormat("int a(int x, void (*fp)(int y));\n"
@@ -19149,13 +19172,13 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
"int bbbbbbb = 0;",
Alignment);
// http://llvm.org/PR68079
- verifyFormat("using Fn = int (A::*)();\n"
- "using RFn = int (A::*)() &;\n"
- "using RRFn = int (A::*)() &&;",
+ verifyFormat("using Fn = int (A:: *)();\n"
+ "using RFn = int (A:: *)() &;\n"
+ "using RRFn = int (A:: *)() &&;",
Alignment);
- verifyFormat("using Fn = int (A::*)();\n"
- "using RFn = int *(A::*)() &;\n"
- "using RRFn = double (A::*)() &&;",
+ verifyFormat("using Fn = int (A:: *)();\n"
+ "using RFn = int *(A:: *)() &;\n"
+ "using RRFn = double (A:: *)() &&;",
Alignment);
// PAS_Right
@@ -19277,6 +19300,10 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
"int foobar;",
AlignmentLeft);
+ verifyFormat("int a(SomeType& foo, const Test& = Test());\n"
+ "double b();",
+ AlignmentLeft);
+
// PAS_Middle
FormatStyle AlignmentMiddle = Alignment;
AlignmentMiddle.PointerAlignment = FormatStyle::PAS_Middle;
@@ -19337,6 +19364,10 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) {
"int foobar;",
AlignmentMiddle);
+ verifyFormat("int a(SomeType & foo, const Test & = Test());\n"
+ "double b();",
+ AlignmentMiddle);
+
Alignment.AlignConsecutiveAssignments.Enabled = false;
Alignment.AlignEscapedNewlines = FormatStyle::ENAS_DontAlign;
verifyFormat("#define A \\\n"
@@ -21090,7 +21121,14 @@ TEST_F(FormatTest, CatchAlignArrayOfStructuresRightAlignment) {
" [0] = {1, 1},\n"
" [1] { 1, 1, },\n"
" [2] { 1, 1, },\n"
- "};");
+ "};",
+ Style);
+ verifyNoCrash("test arr[] = {\n"
+ "#define FOO(i) {i, i},\n"
+ "SOME_GENERATOR(FOO)\n"
+ "{2, 2}\n"
+ "};",
+ Style);
verifyFormat("return GradForUnaryCwise(g, {\n"
" {{\"sign\"}, \"Sign\", "
@@ -21343,7 +21381,14 @@ TEST_F(FormatTest, CatchAlignArrayOfStructuresLeftAlignment) {
" [0] = {1, 1},\n"
" [1] { 1, 1, },\n"
" [2] { 1, 1, },\n"
- "};");
+ "};",
+ Style);
+ verifyNoCrash("test arr[] = {\n"
+ "#define FOO(i) {i, i},\n"
+ "SOME_GENERATOR(FOO)\n"
+ "{2, 2}\n"
+ "};",
+ Style);
verifyFormat("return GradForUnaryCwise(g, {\n"
" {{\"sign\"}, \"Sign\", {\"x\", "
diff --git a/clang/unittests/Format/FormatTestTableGen.cpp b/clang/unittests/Format/FormatTestTableGen.cpp
index c96866f0840f..8ca6bf97e5a6 100644
--- a/clang/unittests/Format/FormatTestTableGen.cpp
+++ b/clang/unittests/Format/FormatTestTableGen.cpp
@@ -411,6 +411,38 @@ TEST_F(FormatTestTableGen, DAGArgBreakAll) {
Style);
}
+TEST_F(FormatTestTableGen, DAGArgAlignment) {
+ FormatStyle Style = getGoogleStyle(FormatStyle::LK_TableGen);
+ Style.ColumnLimit = 60;
+ Style.TableGenBreakInsideDAGArg = FormatStyle::DAS_BreakAll;
+ Style.TableGenBreakingDAGArgOperators = {"ins", "outs"};
+ verifyFormat("def Def : Parent {\n"
+ " let dagarg = (ins\n"
+ " a:$src1,\n"
+ " aa:$src2,\n"
+ " aaa:$src3\n"
+ " )\n"
+ "}\n",
+ Style);
+ verifyFormat("def Def : Parent {\n"
+ " let dagarg = (not a:$src1, aa:$src2, aaa:$src2)\n"
+ "}\n",
+ Style);
+ Style.AlignConsecutiveTableGenBreakingDAGArgColons.Enabled = true;
+ verifyFormat("def Def : Parent {\n"
+ " let dagarg = (ins\n"
+ " a :$src1,\n"
+ " aa :$src2,\n"
+ " aaa:$src3\n"
+ " )\n"
+ "}\n",
+ Style);
+ verifyFormat("def Def : Parent {\n"
+ " let dagarg = (not a:$src1, aa:$src2, aaa:$src2)\n"
+ "}\n",
+ Style);
+}
+
TEST_F(FormatTestTableGen, CondOperatorAlignment) {
FormatStyle Style = getGoogleStyle(FormatStyle::LK_TableGen);
Style.ColumnLimit = 60;
diff --git a/clang/unittests/Format/QualifierFixerTest.cpp b/clang/unittests/Format/QualifierFixerTest.cpp
index 43476aea6633..792d8f3c3a98 100644
--- a/clang/unittests/Format/QualifierFixerTest.cpp
+++ b/clang/unittests/Format/QualifierFixerTest.cpp
@@ -305,7 +305,7 @@ TEST_F(QualifierFixerTest, RightQualifier) {
verifyFormat("Foo inline static const;", "Foo inline const static;", Style);
verifyFormat("Foo inline static const;", Style);
- verifyFormat("Foo<T volatile>::Bar<Type const, 5> const volatile A::*;",
+ verifyFormat("Foo<T volatile>::Bar<Type const, 5> const volatile A:: *;",
"volatile const Foo<volatile T>::Bar<const Type, 5> A::*;",
Style);
@@ -523,14 +523,15 @@ TEST_F(QualifierFixerTest, RightQualifier) {
verifyFormat("const INTPTR a;", Style);
// Pointers to members
- verifyFormat("int S::*a;", Style);
- verifyFormat("int const S::*a;", "const int S:: *a;", Style);
- verifyFormat("int const S::*const a;", "const int S::* const a;", Style);
- verifyFormat("int A::*const A::*p1;", Style);
- verifyFormat("float (C::*p)(int);", Style);
- verifyFormat("float (C::*const p)(int);", Style);
- verifyFormat("float (C::*p)(int) const;", Style);
- verifyFormat("float const (C::*p)(int);", "const float (C::*p)(int);", Style);
+ verifyFormat("int S:: *a;", Style);
+ verifyFormat("int const S:: *a;", "const int S:: *a;", Style);
+ verifyFormat("int const S:: *const a;", "const int S::* const a;", Style);
+ verifyFormat("int A:: *const A:: *p1;", Style);
+ verifyFormat("float (C:: *p)(int);", Style);
+ verifyFormat("float (C:: *const p)(int);", Style);
+ verifyFormat("float (C:: *p)(int) const;", Style);
+ verifyFormat("float const (C:: *p)(int);", "const float (C::*p)(int);",
+ Style);
}
TEST_F(QualifierFixerTest, LeftQualifier) {
@@ -830,14 +831,15 @@ TEST_F(QualifierFixerTest, LeftQualifier) {
verifyFormat("INTPTR const a;", Style);
// Pointers to members
- verifyFormat("int S::*a;", Style);
- verifyFormat("const int S::*a;", "int const S:: *a;", Style);
- verifyFormat("const int S::*const a;", "int const S::* const a;", Style);
- verifyFormat("int A::*const A::*p1;", Style);
- verifyFormat("float (C::*p)(int);", Style);
- verifyFormat("float (C::*const p)(int);", Style);
- verifyFormat("float (C::*p)(int) const;", Style);
- verifyFormat("const float (C::*p)(int);", "float const (C::*p)(int);", Style);
+ verifyFormat("int S:: *a;", Style);
+ verifyFormat("const int S:: *a;", "int const S:: *a;", Style);
+ verifyFormat("const int S:: *const a;", "int const S::* const a;", Style);
+ verifyFormat("int A:: *const A:: *p1;", Style);
+ verifyFormat("float (C:: *p)(int);", Style);
+ verifyFormat("float (C:: *const p)(int);", Style);
+ verifyFormat("float (C:: *p)(int) const;", Style);
+ verifyFormat("const float (C:: *p)(int);", "float const (C::*p)(int);",
+ Style);
}
TEST_F(QualifierFixerTest, ConstVolatileQualifiersOrder) {
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 1aa855b34198..2539d3d76ef0 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -2424,6 +2424,22 @@ TEST_F(TokenAnnotatorTest, UnderstandTableGenTokens) {
EXPECT_TOKEN(Tokens[1], tok::identifier, TT_Unknown); // other
EXPECT_TOKEN(Tokens[5], tok::comma, TT_TableGenDAGArgListComma);
EXPECT_TOKEN(Tokens[9], tok::r_paren, TT_TableGenDAGArgCloser);
+
+ // If TableGenBreakingDAGArgOperators is enabled, it uses
+ // TT_TableGenDAGArgListColonToAlign to annotate the colon to align.
+ Style.AlignConsecutiveTableGenBreakingDAGArgColons.Enabled = true;
+ Tokens = AnnotateValue("(ins type1:$src1, type2:$src2)");
+ ASSERT_EQ(Tokens.size(), 10u) << Tokens;
+ EXPECT_TOKEN(Tokens[1], tok::identifier,
+ TT_TableGenDAGArgOperatorToBreak); // ins
+ EXPECT_TOKEN(Tokens[3], tok::colon, TT_TableGenDAGArgListColonToAlign);
+ EXPECT_TOKEN(Tokens[7], tok::colon, TT_TableGenDAGArgListColonToAlign);
+
+ Tokens = AnnotateValue("(other type1:$src1, type2:$src2)");
+ ASSERT_EQ(Tokens.size(), 10u) << Tokens;
+ EXPECT_TOKEN(Tokens[1], tok::identifier, TT_Unknown); // other
+ EXPECT_TOKEN(Tokens[3], tok::colon, TT_TableGenDAGArgListColon);
+ EXPECT_TOKEN(Tokens[7], tok::colon, TT_TableGenDAGArgListColon);
}
TEST_F(TokenAnnotatorTest, UnderstandConstructors) {
diff --git a/clang/unittests/Interpreter/CMakeLists.txt b/clang/unittests/Interpreter/CMakeLists.txt
index b56e1e21015d..e5a77e77de75 100644
--- a/clang/unittests/Interpreter/CMakeLists.txt
+++ b/clang/unittests/Interpreter/CMakeLists.txt
@@ -1,6 +1,7 @@
set(LLVM_LINK_COMPONENTS
${LLVM_TARGETS_TO_BUILD}
Core
+ MC
OrcJIT
Support
TargetParser
diff --git a/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp b/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
index b7708616fd24..1ba865a79ed7 100644
--- a/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterExtensionsTest.cpp
@@ -18,14 +18,22 @@
#include "clang/Sema/Sema.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/Threading.h"
#include "llvm/Testing/Support/Error.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+
#include <system_error>
+#if defined(_AIX)
+#define CLANG_INTERPRETER_PLATFORM_CANNOT_CREATE_LLJIT
+#endif
+
using namespace clang;
namespace {
@@ -37,10 +45,23 @@ static bool HostSupportsJit() {
return false;
}
+// Some tests require a arm-registered-target
+static bool IsARMTargetRegistered() {
+ llvm::Triple TT;
+ TT.setArch(llvm::Triple::arm);
+ TT.setVendor(llvm::Triple::UnknownVendor);
+ TT.setOS(llvm::Triple::UnknownOS);
+
+ std::string UnusedErr;
+ return llvm::TargetRegistry::lookupTarget(TT.str(), UnusedErr);
+}
+
struct LLVMInitRAII {
LLVMInitRAII() {
- llvm::InitializeNativeTarget();
- llvm::InitializeNativeTargetAsmPrinter();
+ llvm::InitializeAllTargets();
+ llvm::InitializeAllTargetInfos();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmPrinters();
}
~LLVMInitRAII() { llvm::llvm_shutdown(); }
} LLVMInit;
@@ -51,12 +72,30 @@ public:
llvm::Error &Err)
: Interpreter(std::move(CI), Err) {}
- llvm::Error testCreateExecutor() { return Interpreter::CreateExecutor(); }
+ llvm::Error testCreateJITBuilderError() {
+ JB = nullptr;
+ return Interpreter::CreateExecutor();
+ }
+
+ llvm::Error testCreateExecutor() {
+ JB = std::make_unique<llvm::orc::LLJITBuilder>();
+ return Interpreter::CreateExecutor();
+ }
void resetExecutor() { Interpreter::ResetExecutor(); }
+
+private:
+ llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+ CreateJITBuilder(CompilerInstance &CI) override {
+ if (JB)
+ return std::move(JB);
+ return llvm::make_error<llvm::StringError>("TestError", std::error_code());
+ }
+
+ std::unique_ptr<llvm::orc::LLJITBuilder> JB;
};
-#ifdef _AIX
+#ifdef CLANG_INTERPRETER_PLATFORM_CANNOT_CREATE_LLJIT
TEST(InterpreterExtensionsTest, DISABLED_ExecutorCreateReset) {
#else
TEST(InterpreterExtensionsTest, ExecutorCreateReset) {
@@ -69,6 +108,8 @@ TEST(InterpreterExtensionsTest, ExecutorCreateReset) {
llvm::Error ErrOut = llvm::Error::success();
TestCreateResetExecutor Interp(cantFail(CB.CreateCpp()), ErrOut);
cantFail(std::move(ErrOut));
+ EXPECT_THAT_ERROR(Interp.testCreateJITBuilderError(),
+ llvm::FailedWithMessage("TestError"));
cantFail(Interp.testCreateExecutor());
Interp.resetExecutor();
cantFail(Interp.testCreateExecutor());
@@ -126,4 +167,96 @@ TEST(InterpreterExtensionsTest, FindRuntimeInterface) {
EXPECT_EQ(1U, Interp.RuntimeIBPtr->TransformerQueries);
}
+class CustomJBInterpreter : public Interpreter {
+ using CustomJITBuilderCreatorFunction =
+ std::function<llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>()>;
+ CustomJITBuilderCreatorFunction JBCreator = nullptr;
+
+public:
+ CustomJBInterpreter(std::unique_ptr<CompilerInstance> CI, llvm::Error &ErrOut)
+ : Interpreter(std::move(CI), ErrOut) {}
+
+ ~CustomJBInterpreter() override {
+ // Skip cleanUp() because it would trigger LLJIT default dtors
+ Interpreter::ResetExecutor();
+ }
+
+ void setCustomJITBuilderCreator(CustomJITBuilderCreatorFunction Fn) {
+ JBCreator = std::move(Fn);
+ }
+
+ llvm::Expected<std::unique_ptr<llvm::orc::LLJITBuilder>>
+ CreateJITBuilder(CompilerInstance &CI) override {
+ if (JBCreator)
+ return JBCreator();
+ return Interpreter::CreateJITBuilder(CI);
+ }
+
+ llvm::Error CreateExecutor() { return Interpreter::CreateExecutor(); }
+};
+
+#ifdef CLANG_INTERPRETER_PLATFORM_CANNOT_CREATE_LLJIT
+TEST(InterpreterExtensionsTest, DISABLED_DefaultCrossJIT) {
+#else
+TEST(InterpreterExtensionsTest, DefaultCrossJIT) {
+#endif
+ if (!IsARMTargetRegistered())
+ GTEST_SKIP();
+
+ IncrementalCompilerBuilder CB;
+ CB.SetTargetTriple("armv6-none-eabi");
+ auto CI = cantFail(CB.CreateCpp());
+ llvm::Error ErrOut = llvm::Error::success();
+ CustomJBInterpreter Interp(std::move(CI), ErrOut);
+ cantFail(std::move(ErrOut));
+ cantFail(Interp.CreateExecutor());
+}
+
+#ifdef CLANG_INTERPRETER_PLATFORM_CANNOT_CREATE_LLJIT
+TEST(InterpreterExtensionsTest, DISABLED_CustomCrossJIT) {
+#else
+TEST(InterpreterExtensionsTest, CustomCrossJIT) {
+#endif
+ if (!IsARMTargetRegistered())
+ GTEST_SKIP();
+
+ std::string TargetTriple = "armv6-none-eabi";
+
+ IncrementalCompilerBuilder CB;
+ CB.SetTargetTriple(TargetTriple);
+ auto CI = cantFail(CB.CreateCpp());
+ llvm::Error ErrOut = llvm::Error::success();
+ CustomJBInterpreter Interp(std::move(CI), ErrOut);
+ cantFail(std::move(ErrOut));
+
+ using namespace llvm::orc;
+ LLJIT *JIT = nullptr;
+ std::vector<std::unique_ptr<llvm::MemoryBuffer>> Objs;
+ Interp.setCustomJITBuilderCreator([&]() {
+ auto JTMB = JITTargetMachineBuilder(llvm::Triple(TargetTriple));
+ JTMB.setCPU("cortex-m0plus");
+ auto JB = std::make_unique<LLJITBuilder>();
+ JB->setJITTargetMachineBuilder(JTMB);
+ JB->setPlatformSetUp(setUpInactivePlatform);
+ JB->setNotifyCreatedCallback([&](LLJIT &J) {
+ ObjectLayer &ObjLayer = J.getObjLinkingLayer();
+ auto *JITLinkObjLayer = llvm::dyn_cast<ObjectLinkingLayer>(&ObjLayer);
+ JITLinkObjLayer->setReturnObjectBuffer(
+ [&Objs](std::unique_ptr<llvm::MemoryBuffer> MB) {
+ Objs.push_back(std::move(MB));
+ });
+ JIT = &J;
+ return llvm::Error::success();
+ });
+ return JB;
+ });
+
+ EXPECT_EQ(0U, Objs.size());
+ cantFail(Interp.CreateExecutor());
+ cantFail(Interp.ParseAndExecute("int a = 1;"));
+ ExecutorAddr Addr = cantFail(JIT->lookup("a"));
+ EXPECT_NE(0U, Addr.getValue());
+ EXPECT_EQ(1U, Objs.size());
+}
+
} // end anonymous namespace
diff --git a/clang/unittests/Interpreter/InterpreterTest.cpp b/clang/unittests/Interpreter/InterpreterTest.cpp
index e76c0677db5e..69bc2da24288 100644
--- a/clang/unittests/Interpreter/InterpreterTest.cpp
+++ b/clang/unittests/Interpreter/InterpreterTest.cpp
@@ -340,6 +340,12 @@ TEST(InterpreterTest, Value) {
EXPECT_EQ(V1.getKind(), Value::K_Int);
EXPECT_FALSE(V1.isManuallyAlloc());
+ Value V1b;
+ llvm::cantFail(Interp->ParseAndExecute("char c = 42;"));
+ llvm::cantFail(Interp->ParseAndExecute("c", &V1b));
+ EXPECT_TRUE(V1b.getKind() == Value::K_Char_S ||
+ V1b.getKind() == Value::K_Char_U);
+
Value V2;
llvm::cantFail(Interp->ParseAndExecute("double y = 3.14;"));
llvm::cantFail(Interp->ParseAndExecute("y", &V2));
diff --git a/clang/unittests/Lex/PPDependencyDirectivesTest.cpp b/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
index 6ff87f720a55..0c396720ece6 100644
--- a/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
+++ b/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
@@ -117,11 +117,6 @@ TEST_F(PPDependencyDirectivesTest, MacroGuard) {
};
auto PPOpts = std::make_shared<PreprocessorOptions>();
- PPOpts->DependencyDirectivesForFile = [&](FileEntryRef File)
- -> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
- return getDependencyDirectives(File);
- };
-
TrivialModuleLoader ModLoader;
HeaderSearch HeaderInfo(std::make_shared<HeaderSearchOptions>(), SourceMgr,
Diags, LangOpts, Target.get());
@@ -130,6 +125,12 @@ TEST_F(PPDependencyDirectivesTest, MacroGuard) {
/*OwnsHeaderSearch =*/false);
PP.Initialize(*Target);
+ PP.setDependencyDirectivesFn(
+ [&](FileEntryRef File)
+ -> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
+ return getDependencyDirectives(File);
+ });
+
SmallVector<StringRef> IncludedFiles;
PP.addPPCallbacks(std::make_unique<IncludeCollector>(PP, IncludedFiles));
PP.EnterMainSourceFile();
diff --git a/clang/unittests/StaticAnalyzer/CMakeLists.txt b/clang/unittests/StaticAnalyzer/CMakeLists.txt
index 775f0f8486b8..ff34d5747cc8 100644
--- a/clang/unittests/StaticAnalyzer/CMakeLists.txt
+++ b/clang/unittests/StaticAnalyzer/CMakeLists.txt
@@ -11,6 +11,8 @@ add_clang_unittest(StaticAnalysisTests
CallEventTest.cpp
ConflictingEvalCallsTest.cpp
FalsePositiveRefutationBRVisitorTest.cpp
+ IsCLibraryFunctionTest.cpp
+ MemRegionDescriptiveNameTest.cpp
NoStateChangeFuncVisitorTest.cpp
ParamRegionTest.cpp
RangeSetTest.cpp
diff --git a/clang/unittests/StaticAnalyzer/IsCLibraryFunctionTest.cpp b/clang/unittests/StaticAnalyzer/IsCLibraryFunctionTest.cpp
new file mode 100644
index 000000000000..d6dfcaac6f3b
--- /dev/null
+++ b/clang/unittests/StaticAnalyzer/IsCLibraryFunctionTest.cpp
@@ -0,0 +1,84 @@
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Tooling/Tooling.h"
+#include "gtest/gtest.h"
+
+#include <memory>
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+class IsCLibraryFunctionTest : public testing::Test {
+ std::unique_ptr<ASTUnit> ASTUnitP;
+ const FunctionDecl *Result = nullptr;
+
+public:
+ const FunctionDecl *getFunctionDecl() const { return Result; }
+
+ testing::AssertionResult buildAST(StringRef Code) {
+ ASTUnitP = tooling::buildASTFromCode(Code);
+ if (!ASTUnitP)
+ return testing::AssertionFailure() << "AST construction failed";
+
+ ASTContext &Context = ASTUnitP->getASTContext();
+ if (Context.getDiagnostics().hasErrorOccurred())
+ return testing::AssertionFailure() << "Compilation error";
+
+ auto Matches = ast_matchers::match(functionDecl().bind("fn"), Context);
+ if (Matches.empty())
+ return testing::AssertionFailure() << "No function declaration found";
+
+ if (Matches.size() > 1)
+ return testing::AssertionFailure()
+ << "Multiple function declarations found";
+
+ Result = Matches[0].getNodeAs<FunctionDecl>("fn");
+ return testing::AssertionSuccess();
+ }
+};
+
+TEST_F(IsCLibraryFunctionTest, AcceptsGlobal) {
+ ASSERT_TRUE(buildAST(R"cpp(void fun();)cpp"));
+ EXPECT_TRUE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, AcceptsExternCGlobal) {
+ ASSERT_TRUE(buildAST(R"cpp(extern "C" { void fun(); })cpp"));
+ EXPECT_TRUE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, RejectsNoInlineNoExternalLinkage) {
+ // Functions that are neither inlined nor externally visible cannot be C
+ // library functions.
+ ASSERT_TRUE(buildAST(R"cpp(static void fun();)cpp"));
+ EXPECT_FALSE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, RejectsAnonymousNamespace) {
+ ASSERT_TRUE(buildAST(R"cpp(namespace { void fun(); })cpp"));
+ EXPECT_FALSE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, AcceptsStdNamespace) {
+ ASSERT_TRUE(buildAST(R"cpp(namespace std { void fun(); })cpp"));
+ EXPECT_TRUE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, RejectsOtherNamespaces) {
+ ASSERT_TRUE(buildAST(R"cpp(namespace stdx { void fun(); })cpp"));
+ EXPECT_FALSE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, RejectsClassStatic) {
+ ASSERT_TRUE(buildAST(R"cpp(class A { static void fun(); };)cpp"));
+ EXPECT_FALSE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
+
+TEST_F(IsCLibraryFunctionTest, RejectsClassMember) {
+ ASSERT_TRUE(buildAST(R"cpp(class A { void fun(); };)cpp"));
+ EXPECT_FALSE(CheckerContext::isCLibraryFunction(getFunctionDecl()));
+}
diff --git a/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp b/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp
new file mode 100644
index 000000000000..ba0c4d25e13b
--- /dev/null
+++ b/clang/unittests/StaticAnalyzer/MemRegionDescriptiveNameTest.cpp
@@ -0,0 +1,145 @@
+//===- MemRegionDescriptiveNameTest.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "gtest/gtest.h"
+#include <fstream>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class DescriptiveNameChecker : public Checker<check::PreCall> {
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+ if (!HandlerFn.matches(Call))
+ return;
+
+ const MemRegion *ArgReg = Call.getArgSVal(0).getAsRegion();
+ assert(ArgReg && "expecting a location as the first argument");
+
+ auto DescriptiveName = ArgReg->getDescriptiveName(/*UseQuotes=*/false);
+ if (ExplodedNode *Node = C.generateNonFatalErrorNode(C.getState())) {
+ auto Report =
+ std::make_unique<PathSensitiveBugReport>(Bug, DescriptiveName, Node);
+ C.emitReport(std::move(Report));
+ }
+ }
+
+private:
+ const BugType Bug{this, "DescriptiveNameBug"};
+ const CallDescription HandlerFn = {{"reportDescriptiveName"}, 1};
+};
+
+void addDescriptiveNameChecker(AnalysisASTConsumer &AnalysisConsumer,
+ AnalyzerOptions &AnOpts) {
+ AnOpts.CheckersAndPackages = {{"DescriptiveNameChecker", true}};
+ AnalysisConsumer.AddCheckerRegistrationFn([](CheckerRegistry &Registry) {
+ Registry.addChecker<DescriptiveNameChecker>("DescriptiveNameChecker",
+ "Desc", "DocsURI");
+ });
+}
+
+bool runChecker(StringRef Code, std::string &Output) {
+ return runCheckerOnCode<addDescriptiveNameChecker>(Code.str(), Output,
+ /*OnlyEmitWarnings=*/true);
+}
+
+TEST(MemRegionDescriptiveNameTest, ConcreteIntElementRegionIndex) {
+ StringRef Code = R"cpp(
+void reportDescriptiveName(int *p);
+const unsigned int index = 1;
+extern int array[3];
+void top() {
+ reportDescriptiveName(&array[index]);
+})cpp";
+
+ std::string Output;
+ ASSERT_TRUE(runChecker(Code, Output));
+ EXPECT_EQ(Output, "DescriptiveNameChecker: array[1]\n");
+}
+
+TEST(MemRegionDescriptiveNameTest, SymbolicElementRegionIndex) {
+ StringRef Code = R"cpp(
+void reportDescriptiveName(int *p);
+extern unsigned int index;
+extern int array[3];
+void top() {
+ reportDescriptiveName(&array[index]);
+})cpp";
+
+ std::string Output;
+ ASSERT_TRUE(runChecker(Code, Output));
+ EXPECT_EQ(Output, "DescriptiveNameChecker: array[index]\n");
+}
+
+TEST(MemRegionDescriptiveNameTest, SymbolicElementRegionIndexSymbolValFails) {
+ StringRef Code = R"cpp(
+void reportDescriptiveName(int *p);
+extern int* ptr;
+extern int array[3];
+void top() {
+ reportDescriptiveName(&array[(long)ptr]);
+})cpp";
+
+ std::string Output;
+ ASSERT_TRUE(runChecker(Code, Output));
+ EXPECT_EQ(Output, "DescriptiveNameChecker: \n");
+}
+
+TEST(MemRegionDescriptiveNameTest, SymbolicElementRegionIndexOrigRegionFails) {
+ StringRef Code = R"cpp(
+void reportDescriptiveName(int *p);
+extern int getInt(void);
+extern int array[3];
+void top() {
+ reportDescriptiveName(&array[getInt()]);
+})cpp";
+
+ std::string Output;
+ ASSERT_TRUE(runChecker(Code, Output));
+ EXPECT_EQ(Output, "DescriptiveNameChecker: \n");
+}
+
+TEST(MemRegionDescriptiveNameTest, SymbolicElementRegionIndexDescrNameFails) {
+ StringRef Code = R"cpp(
+void reportDescriptiveName(int *p);
+extern int *ptr;
+extern int array[3];
+void top() {
+ reportDescriptiveName(&array[*ptr]);
+})cpp";
+
+ std::string Output;
+ ASSERT_TRUE(runChecker(Code, Output));
+ EXPECT_EQ(Output, "DescriptiveNameChecker: \n");
+}
+
+TEST(MemRegionDescriptiveNameTest,
+ SymbolicElementRegionIndexIncorrectSymbolName) {
+ StringRef Code = R"cpp(
+void reportDescriptiveName(int *p);
+extern int x, y;
+extern int array[3];
+void top() {
+ y = x;
+ reportDescriptiveName(&array[y]);
+})cpp";
+
+ std::string Output;
+ ASSERT_TRUE(runChecker(Code, Output));
+ // FIXME: Should return array[y], but returns array[x] (OriginRegion).
+ EXPECT_EQ(Output, "DescriptiveNameChecker: array[x]\n");
+}
+
+} // namespace
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp
index 3a90eee5f1c9..aa20c758d84a 100644
--- a/clang/utils/TableGen/MveEmitter.cpp
+++ b/clang/utils/TableGen/MveEmitter.cpp
@@ -575,7 +575,7 @@ public:
// Emit code to generate this result as a Value *.
std::string asValue() override {
if (AddressType)
- return "(" + varname() + ".getPointer())";
+ return "(" + varname() + ".emitRawPointer(*this))";
return Result::asValue();
}
bool hasIntegerValue() const override { return Immediate; }
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 8513174c88bf..5e41ef9f9d26 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -334,10 +334,6 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
OS << "#include <stdint.h>\n";
OS << "#include <stddef.h>\n\n";
- OS << "#ifndef __riscv_vector\n";
- OS << "#error \"Vector intrinsics require the vector extension.\"\n";
- OS << "#endif\n\n";
-
OS << "#ifdef __cplusplus\n";
OS << "extern \"C\" {\n";
OS << "#endif\n\n";
diff --git a/clang/utils/analyzer/exploded-graph-rewriter.py b/clang/utils/analyzer/exploded-graph-rewriter.py
index c7c6315a0a27..5eaa7738103f 100755
--- a/clang/utils/analyzer/exploded-graph-rewriter.py
+++ b/clang/utils/analyzer/exploded-graph-rewriter.py
@@ -479,11 +479,19 @@ class ExplodedGraph:
# A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based
# syntax highlighing.
class DotDumpVisitor:
- def __init__(self, do_diffs, dark_mode, gray_mode, topo_mode, dump_dot_only):
+ def __init__(
+ self, do_diffs, dark_mode, gray_mode, topo_mode, dump_html_only, dump_dot_only
+ ):
+ assert not (dump_html_only and dump_dot_only), (
+ "Option dump_html_only and dump_dot_only are conflict, "
+ "they cannot be true at the same time."
+ )
+
self._do_diffs = do_diffs
self._dark_mode = dark_mode
self._gray_mode = gray_mode
self._topo_mode = topo_mode
+ self._dump_html_only = dump_html_only
self._dump_dot_only = dump_dot_only
self._output = []
@@ -998,6 +1006,8 @@ class DotDumpVisitor:
'<html><body bgcolor="%s">%s</body></html>'
% ("#1a1a1a" if self._dark_mode else "white", svg),
)
+ if self._dump_html_only:
+ return
if sys.platform == "win32":
os.startfile(filename)
elif sys.platform == "darwin":
@@ -1176,7 +1186,17 @@ def main():
default=False,
help="black-and-white mode",
)
- parser.add_argument(
+ dump_conflict = parser.add_mutually_exclusive_group()
+ dump_conflict.add_argument(
+ "--dump-html-only",
+ action="store_const",
+ dest="dump_html_only",
+ const=True,
+ default=False,
+ help="dump the rewritten egraph to a temporary HTML file, "
+ "but do not open it immediately as by default",
+ )
+ dump_conflict.add_argument(
"--dump-dot-only",
action="store_const",
dest="dump_dot_only",
@@ -1206,7 +1226,12 @@ def main():
explorer = BasicExplorer()
visitor = DotDumpVisitor(
- args.diff, args.dark, args.gray, args.topology, args.dump_dot_only
+ args.diff,
+ args.dark,
+ args.gray,
+ args.topology,
+ args.dump_html_only,
+ args.dump_dot_only,
)
for trimmer in trimmers:
diff --git a/clang/www/analyzer/alpha_checks.html b/clang/www/analyzer/alpha_checks.html
index 7bbe4a20288f..f040d1957b0f 100644
--- a/clang/www/analyzer/alpha_checks.html
+++ b/clang/www/analyzer/alpha_checks.html
@@ -307,26 +307,6 @@ void test(int x) {
<tbody>
-<tr><td><a id="alpha.cplusplus.ArrayDelete"><div class="namedescr expandable"><span class="name">
-alpha.cplusplus.ArrayDelete</span><span class="lang">
-(C++)</span><div class="descr">
-Reports destructions of arrays of polymorphic objects that are destructed as
-their base class
-</div></div></a></td>
-<td><div class="exampleContainer expandable">
-<div class="example"><pre>
-Base *create() {
- Base *x = new Derived[10]; // note: Casting from 'Derived' to 'Base' here
- return x;
-}
-
-void sink(Base *x) {
- delete[] x; // warn: Deleting an array of 'Derived' objects as their base class 'Base' undefined
-}
-
-</pre></div></div></td></tr>
-
-
<tr><td><a id="alpha.cplusplus.DeleteWithNonVirtualDtor"><div class="namedescr expandable"><span class="name">
alpha.cplusplus.DeleteWithNonVirtualDtor</span><span class="lang">
(C++)</span><div class="descr">
diff --git a/clang/www/analyzer/available_checks.html b/clang/www/analyzer/available_checks.html
index 501a2b306dfa..c23865e57e87 100644
--- a/clang/www/analyzer/available_checks.html
+++ b/clang/www/analyzer/available_checks.html
@@ -369,6 +369,33 @@ int test() {
<thead><tr><td>Name, Description</td><td>Example</td></tr></thead>
<tbody>
+
+<tr><td><a id="cplusplus.ArrayDelete"><div class="namedescr expandable"><span class="name">
+cplusplus.ArrayDelete</span><span class="lang">
+(C++)</span><div class="descr">
+Reports destructions of arrays of polymorphic objects that are destructed as
+their base class. If the type of an array-pointer is different from the type of
+its underlying objects, calling <code>delete[]</code> is undefined.
+</div></div></a></td>
+<td><div class="exampleContainer expandable">
+<div class="example"><pre>
+class Base {
+public:
+ virtual ~Base() {}
+};
+class Derived : public Base {};
+
+Base *create() {
+ Base *x = new Derived[10]; // note: Casting from 'Derived' to 'Base' here
+ return x;
+}
+
+void sink(Base *x) {
+ delete[] x; // warn: Deleting an array of 'Derived' objects as their base class 'Base' undefined
+}
+</pre></div></div></td></tr>
+
+
<tr><td><a id="cplusplus.NewDelete"><div class="namedescr expandable"><span class="name">
cplusplus.NewDelete</span><span class="lang">
(C++)</span><div class="descr">
diff --git a/clang/www/c_dr_status.html b/clang/www/c_dr_status.html
index fa2ceb1be58b..a41c4f717067 100644
--- a/clang/www/c_dr_status.html
+++ b/clang/www/c_dr_status.html
@@ -577,7 +577,7 @@ conformance.</p>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_087.html">87</a></td>
<td>NAD</td>
<td>Order of evaluation</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr id="88">
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_088.html">88</a></td>
@@ -1686,7 +1686,7 @@ conformance.</p>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_290.htm">290</a></td>
<td>C99</td>
<td>FLT_EVAL_METHOD and extra precision and/or range</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="none" align="center">No</td>
</tr>
<tr id="291">
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_291.htm">291</a></td>
diff --git a/clang/www/c_status.html b/clang/www/c_status.html
index b1f5ab4cbc4f..028234a8961d 100644
--- a/clang/www/c_status.html
+++ b/clang/www/c_status.html
@@ -105,7 +105,7 @@ conformance.</p>
<tr>
<td>restricted character set support via digraphs and &lt;iso646.h&gt;</td>
<td>Unknown</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr>
<td>more precise aliasing rules via effective type</td>
@@ -132,11 +132,6 @@ conformance.</p>
<td>Unknown</td>
<td class="full" align="center">Yes</td>
</tr>
- <tr>
- <td>more precise aliasing rules via effective type</td>
- <td>Unknown</td>
- <td class="unknown" align="center">Unknown</td>
- </tr>
<tr id="complex">
<td rowspan="6">complex and imaginary support in &lt;complex.h&gt;</td>
</tr>
@@ -173,7 +168,7 @@ conformance.</p>
<tr>
<td>increase minimum translation limits</td>
<td>N590</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Clang 3.2</td>
</tr>
<tr>
<td>additional floating-point characteristics in &lt;float.h&gt;</td>
@@ -260,7 +255,7 @@ conformance.</p>
<tr>
<td>new block scopes for selection and iteration statements</td>
<td>Unknown</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr>
<td>integer constant type rules</td>
@@ -315,7 +310,13 @@ conformance.</p>
<tr>
<td>additional predefined macro names</td>
<td>Unknown</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
+ <!-- It is unknown which paper brought in this change, which was listed in
+ the C99 front matter. After hunting around for what these changes are,
+ I found a mention in the C99 rationale document that implementers who
+ wish to add their own predefined macros must not start them with
+ __STDC_, which was a new restriction in C99. As best I can tell, that
+ is what this particular feature is about. -->
</tr>
<tr>
<td>_Pragma preprocessing operator</td>
@@ -330,11 +331,11 @@ conformance.</p>
</tr>
<tr>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n631.htm">N631</a></td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n696.ps">N696</a></td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr>
<td>__func__ predefined identifier</td>
@@ -369,7 +370,7 @@ conformance.</p>
<tr>
<td>relaxed restrictions on portable header names</td>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n772.htm">N772</a></td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr>
<td>return without an expression not permitted in function that returns a value</td>
@@ -401,7 +402,7 @@ conformance.</p>
<tr>
<td>Clarification of expressions</td>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n1282.pdf">N1282</a></td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Yes</td>
</tr>
<tr>
<td>Extending the lifetime of temporary objects (factored approach)</td>
@@ -471,7 +472,7 @@ conformance.</p>
<tr>
<td>Constant expressions</td>
<td><a href="https://www.open-std.org/jtc1/sc22/wg14/www/docs/n1365.htm">N1365</a></td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Clang 16</td>
</tr>
<tr>
<td>Contractions and expression evaluation methods</td>
diff --git a/clang/www/cxx_status.html b/clang/www/cxx_status.html
index 1e36b90356c3..c1d95dadbb27 100755
--- a/clang/www/cxx_status.html
+++ b/clang/www/cxx_status.html
@@ -163,7 +163,47 @@ C++23, informally referred to as C++26.</p>
<td><a href="https://wg21.link/P2864R2">P2864R2</a></td>
<td class="full" align="center">Clang 18</td>
</tr>
-
+ <!-- Winter 2024 papers (Tokyo) -->
+ <tr>
+ <td>Disallow Binding a Returned Glvalue to a Temporary</td>
+ <td><a href="https://wg21.link/P2748R5">P2748R5</a></td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td>Clarifying rules for brace elision in aggregate initialization</td>
+ <td><a href="https://wg21.link/P3106R1">P3106R1</a> (<a href="#dr">DR</a>)</td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td>Attributes for Structured Bindings</td>
+ <td><a href="https://wg21.link/P0609R3">P0609R3</a></td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td>Module Declarations Shouldn’t be Macros</td>
+ <td><a href="https://wg21.link/P3034R1">P3034R1</a> (<a href="#dr">DR</a>)</td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td>Trivial infinite loops are not Undefined Behavior</td>
+ <td><a href="https://wg21.link/P2809R3">P2809R3</a> (<a href="#dr">DR</a>)</td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td>Erroneous behaviour for uninitialized reads</td>
+ <td><a href="https://wg21.link/P2795R5">P2795R5</a></td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td><tt>= delete("should have a reason");</tt></td>
+ <td><a href="https://wg21.link/P2573R2">P2573R2</a></td>
+ <td class="none" align="center">No</td>
+ </tr>
+ <tr>
+ <td>Variadic friends</td>
+ <td><a href="https://wg21.link/P2893R3">P2893R3</a></td>
+ <td class="none" align="center">No</td>
+ </tr>
</table>
</details>
diff --git a/cmake/Modules/GetDarwinLinkerVersion.cmake b/cmake/Modules/GetDarwinLinkerVersion.cmake
new file mode 100644
index 000000000000..c27e50128586
--- /dev/null
+++ b/cmake/Modules/GetDarwinLinkerVersion.cmake
@@ -0,0 +1,19 @@
+# Get the linker version on Darwin
+function(get_darwin_linker_version variable)
+ set(LINK_VERSION)
+ set(LD_V_OUTPUT)
+ execute_process(
+ COMMAND sh -c "${CMAKE_LINKER} -v 2>&1 | head -1"
+ RESULT_VARIABLE HAD_ERROR
+ OUTPUT_VARIABLE LD_V_OUTPUT
+ )
+ if (HAD_ERROR)
+ message(FATAL_ERROR "${CMAKE_LINKER} failed with status ${HAD_ERROR}")
+ endif()
+ if ("${LD_V_OUTPUT}" MATCHES ".*ld64-([0-9.]+).*")
+ string(REGEX REPLACE ".*ld64-([0-9.]+).*" "\\1" LINK_VERSION ${LD_V_OUTPUT})
+ elseif ("${LD_V_OUTPUT}" MATCHES "[^0-9]*([0-9.]+).*")
+ string(REGEX REPLACE "[^0-9]*([0-9.]+).*" "\\1" LINK_VERSION ${LD_V_OUTPUT})
+ endif()
+ set(${variable} ${LINK_VERSION} PARENT_SCOPE)
+endfunction()
diff --git a/compiler-rt/CMakeLists.txt b/compiler-rt/CMakeLists.txt
index d562a6206c00..f4e92f14db85 100644
--- a/compiler-rt/CMakeLists.txt
+++ b/compiler-rt/CMakeLists.txt
@@ -36,6 +36,7 @@ include(SetPlatformToolchainTools)
include(base-config-ix)
include(CompilerRTUtils)
include(CMakeDependentOption)
+include(GetDarwinLinkerVersion)
option(COMPILER_RT_BUILD_BUILTINS "Build builtins" ON)
mark_as_advanced(COMPILER_RT_BUILD_BUILTINS)
@@ -444,6 +445,15 @@ else()
set(SANITIZER_USE_SYMBOLS FALSE)
endif()
+# Get the linker version while configuring compiler-rt and explicitly pass it
+# in cflags during testing. This fixes the compiler/linker version mismatch
+# issue when running a clang built with a newer Xcode in an older Xcode
+set(COMPILER_RT_DARWIN_LINKER_VERSION)
+if (APPLE AND NOT CMAKE_LINKER MATCHES ".*lld.*")
+ get_darwin_linker_version(COMPILER_RT_DARWIN_LINKER_VERSION)
+ message(STATUS "Host linker version: ${COMPILER_RT_DARWIN_LINKER_VERSION}")
+endif()
+
# Build sanitizer runtimes with debug info.
if(MSVC)
# Use /Z7 instead of /Zi for the asan runtime. This avoids the LNK4099
diff --git a/compiler-rt/cmake/Modules/CompilerRTCompile.cmake b/compiler-rt/cmake/Modules/CompilerRTCompile.cmake
index d5b2e7970f11..1629db18f1c2 100644
--- a/compiler-rt/cmake/Modules/CompilerRTCompile.cmake
+++ b/compiler-rt/cmake/Modules/CompilerRTCompile.cmake
@@ -46,7 +46,7 @@ function(sanitizer_test_compile obj_list source arch)
# Write out architecture-specific flags into TARGET_CFLAGS variable.
get_target_flags_for_arch(${arch} TARGET_CFLAGS)
set(COMPILE_DEPS ${TEST_COMPILE_DEPS})
- if(NOT COMPILER_RT_STANDALONE_BUILD)
+ if(NOT COMPILER_RT_STANDALONE_BUILD OR COMPILER_RT_TEST_STANDALONE_BUILD_LIBS)
list(APPEND COMPILE_DEPS ${TEST_DEPS})
endif()
clang_compile(${output_obj} ${source}
diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake
index 4f47142850a5..46a6fdf8728f 100644
--- a/compiler-rt/cmake/config-ix.cmake
+++ b/compiler-rt/cmake/config-ix.cmake
@@ -461,7 +461,7 @@ if(APPLE)
set(ORC_SUPPORTED_OS osx)
endif()
- set(DEFAULT_SANITIZER_MIN_OSX_VERSION 10.10)
+ set(DEFAULT_SANITIZER_MIN_OSX_VERSION 10.13)
set(DARWIN_osx_MIN_VER_FLAG "-mmacosx-version-min")
if(NOT SANITIZER_MIN_OSX_VERSION)
string(REGEX MATCH "${DARWIN_osx_MIN_VER_FLAG}=([.0-9]+)"
diff --git a/compiler-rt/include/sanitizer/linux_syscall_hooks.h b/compiler-rt/include/sanitizer/linux_syscall_hooks.h
index 3f3f1e78dfb8..5f262455cb94 100644
--- a/compiler-rt/include/sanitizer/linux_syscall_hooks.h
+++ b/compiler-rt/include/sanitizer/linux_syscall_hooks.h
@@ -1856,6 +1856,15 @@
__sanitizer_syscall_pre_impl_sigaltstack((long)ss, (long)oss)
#define __sanitizer_syscall_post_sigaltstack(res, ss, oss) \
__sanitizer_syscall_post_impl_sigaltstack(res, (long)ss, (long)oss)
+#define __sanitizer_syscall_pre_futex(uaddr, futex_op, val, timeout, uaddr2, \
+ val3) \
+ __sanitizer_syscall_pre_impl_futex((long)uaddr, (long)futex_op, (long)val, \
+ (long)timeout, (long)uaddr2, (long)val3)
+#define __sanitizer_syscall_post_futex(res, uaddr, futex_op, val, timeout, \
+ uaddr2, val3) \
+ __sanitizer_syscall_post_impl_futex(res, (long)uaddr, (long)futex_op, \
+ (long)val, (long)timeout, (long)uaddr2, \
+ (long)val3)
// And now a few syscalls we don't handle yet.
#define __sanitizer_syscall_pre_afs_syscall(...)
@@ -1875,7 +1884,6 @@
#define __sanitizer_syscall_pre_fchown32(...)
#define __sanitizer_syscall_pre_ftime(...)
#define __sanitizer_syscall_pre_ftruncate64(...)
-#define __sanitizer_syscall_pre_futex(...)
#define __sanitizer_syscall_pre_getegid32(...)
#define __sanitizer_syscall_pre_geteuid32(...)
#define __sanitizer_syscall_pre_getgid32(...)
@@ -1954,7 +1962,6 @@
#define __sanitizer_syscall_post_fchown32(res, ...)
#define __sanitizer_syscall_post_ftime(res, ...)
#define __sanitizer_syscall_post_ftruncate64(res, ...)
-#define __sanitizer_syscall_post_futex(res, ...)
#define __sanitizer_syscall_post_getegid32(res, ...)
#define __sanitizer_syscall_post_geteuid32(res, ...)
#define __sanitizer_syscall_post_getgid32(res, ...)
@@ -3093,6 +3100,11 @@ void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
long oldact, long sz);
void __sanitizer_syscall_pre_impl_sigaltstack(long ss, long oss);
void __sanitizer_syscall_post_impl_sigaltstack(long res, long ss, long oss);
+void __sanitizer_syscall_pre_impl_futex(long uaddr, long futex_op, long val,
+ long timeout, long uaddr2, long val3);
+void __sanitizer_syscall_post_impl_futex(long res, long uaddr, long futex_op,
+ long val, long timeout, long uaddr2,
+ long val3);
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/compiler-rt/lib/asan/CMakeLists.txt b/compiler-rt/lib/asan/CMakeLists.txt
index f83ae82d4293..601750f72175 100644
--- a/compiler-rt/lib/asan/CMakeLists.txt
+++ b/compiler-rt/lib/asan/CMakeLists.txt
@@ -85,6 +85,9 @@ SET(ASAN_HEADERS
include_directories(..)
set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+
+append_list_if(MSVC /Zl ASAN_CFLAGS)
+
set(ASAN_COMMON_DEFINITIONS ${COMPILER_RT_ASAN_SHADOW_SCALE_DEFINITION})
append_rtti_flag(OFF ASAN_CFLAGS)
diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp
index da47317c0e57..6d1360e10497 100644
--- a/compiler-rt/lib/asan/asan_interceptors.cpp
+++ b/compiler-rt/lib/asan/asan_interceptors.cpp
@@ -761,8 +761,8 @@ INTERCEPTOR(int, pthread_atfork, void (*prepare)(), void (*parent)(),
#endif
#if ASAN_INTERCEPT_VFORK
-DEFINE_REAL(int, vfork)
-DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
+DEFINE_REAL(int, vfork,)
+DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork,)
#endif
// ---------------------- InitializeAsanInterceptors ---------------- {{{1
diff --git a/compiler-rt/lib/asan/tests/asan_noinst_test.cpp b/compiler-rt/lib/asan/tests/asan_noinst_test.cpp
index 4c103609c83b..df7de2d7d15e 100644
--- a/compiler-rt/lib/asan/tests/asan_noinst_test.cpp
+++ b/compiler-rt/lib/asan/tests/asan_noinst_test.cpp
@@ -45,7 +45,8 @@ TEST(AddressSanitizer, InternalSimpleDeathTest) {
EXPECT_DEATH(exit(1), "");
}
-static void MallocStress(size_t n) {
+static void *MallocStress(void *NumOfItrPtr) {
+ size_t n = *((size_t *)NumOfItrPtr);
u32 seed = my_rand();
BufferedStackTrace stack1;
stack1.trace_buffer[0] = 0xa123;
@@ -90,20 +91,21 @@ static void MallocStress(size_t n) {
}
for (size_t i = 0; i < vec.size(); i++)
__asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
+ return nullptr;
}
-
TEST(AddressSanitizer, NoInstMallocTest) {
- MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
+ const size_t kNumIterations = (ASAN_LOW_MEMORY) ? 300000 : 1000000;
+ MallocStress((void *)&kNumIterations);
}
TEST(AddressSanitizer, ThreadedMallocStressTest) {
const int kNumThreads = 4;
- const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
+ const size_t kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
pthread_t t[kNumThreads];
for (int i = 0; i < kNumThreads; i++) {
- PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
- (void*)kNumIterations);
+ PTHREAD_CREATE(&t[i], 0, (void *(*)(void *x))MallocStress,
+ (void *)&kNumIterations);
}
for (int i = 0; i < kNumThreads; i++) {
PTHREAD_JOIN(t[i], 0);
diff --git a/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp b/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
index 75d91ed09ce1..9af09e2a4bed 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
@@ -184,7 +184,7 @@ INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
INTERCEPTOR_ALIAS(void, cfree, void *ptr);
-INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
+INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo,);
INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
# endif
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index d519ac3a459b..08ae435b8214 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -336,8 +336,8 @@ INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
DEFINE_REAL_PTHREAD_FUNCTIONS
-DEFINE_REAL(int, vfork)
-DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
+DEFINE_REAL(int, vfork,)
+DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork,)
// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
diff --git a/compiler-rt/lib/interception/interception_win.cpp b/compiler-rt/lib/interception/interception_win.cpp
index a04175ba1e4b..a638e66eccee 100644
--- a/compiler-rt/lib/interception/interception_win.cpp
+++ b/compiler-rt/lib/interception/interception_win.cpp
@@ -479,6 +479,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u8*)address) {
case 0x90: // 90 : nop
+ case 0xC3: // C3 : ret (for small/empty function interception
+ case 0xCC: // CC : int 3 i.e. registering weak functions)
return 1;
case 0x50: // push eax / rax
@@ -502,7 +504,6 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
// Cannot overwrite control-instruction. Return 0 to indicate failure.
case 0xE9: // E9 XX XX XX XX : jmp <label>
case 0xE8: // E8 XX XX XX XX : call <func>
- case 0xC3: // C3 : ret
case 0xEB: // EB XX : jmp XX (short jump)
case 0x70: // 7Y YY : jy XX (short conditional jump)
case 0x71:
@@ -545,6 +546,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
return 7;
}
+ switch (0x000000FF & *(u32 *)address) {
+ case 0xc2: // C2 XX XX : ret XX (needed for registering weak functions)
+ return 3;
+ }
+
# if SANITIZER_WINDOWS_x64
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
@@ -605,6 +611,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xc18b4c: // 4C 8B C1 : mov r8, rcx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xca2b48: // 48 2b ca : sub rcx, rdx
+ case 0xca3b48: // 48 3b ca : cmp rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 3d 0b c0 : or r8, r8
case 0xc08b41: // 41 8b c0 : mov eax, r8d
@@ -624,6 +631,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x058b48: // 48 8b 05 XX XX XX XX :
// mov rax, QWORD PTR [rip + XXXXXXXX]
+ case 0x058d48: // 48 8d 05 XX XX XX XX :
+ // lea rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
index 6e0b2bf2ef5b..9abf24063325 100644
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -255,7 +255,7 @@ static NOINLINE void clear_mallinfo(T *sret) {
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
// Interceptors use NRVO and assume that sret will be pre-allocated in
// caller frame.
-INTERCEPTOR(__sanitizer_struct_mallinfo, mallinfo) {
+INTERCEPTOR(__sanitizer_struct_mallinfo, mallinfo,) {
__sanitizer_struct_mallinfo sret;
clear_mallinfo(&sret);
return sret;
diff --git a/compiler-rt/lib/msan/msan_linux.cpp b/compiler-rt/lib/msan/msan_linux.cpp
index cd2d9f5c720c..c68aec60ae13 100644
--- a/compiler-rt/lib/msan/msan_linux.cpp
+++ b/compiler-rt/lib/msan/msan_linux.cpp
@@ -175,7 +175,7 @@ bool InitShadowWithReExec(bool init_origins) {
// Start with dry run: check layout is ok, but don't print warnings because
// warning messages will cause tests to fail (even if we successfully re-exec
// after the warning).
- bool success = InitShadow(__msan_get_track_origins(), true);
+ bool success = InitShadow(init_origins, true);
if (!success) {
# if SANITIZER_LINUX
// Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
@@ -197,7 +197,7 @@ bool InitShadowWithReExec(bool init_origins) {
// The earlier dry run didn't actually map or protect anything. Run again in
// non-dry run mode.
- return success && InitShadow(__msan_get_track_origins(), false);
+ return success && InitShadow(init_origins, false);
}
static void MsanAtExit(void) {
diff --git a/compiler-rt/lib/msan/tests/CMakeLists.txt b/compiler-rt/lib/msan/tests/CMakeLists.txt
index 4f09f1e6a691..e0771dd5a1a7 100644
--- a/compiler-rt/lib/msan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/msan/tests/CMakeLists.txt
@@ -69,9 +69,9 @@ macro(msan_compile obj_list source arch kind cflags)
sanitizer_test_compile(
${obj_list} ${source} ${arch}
KIND ${kind}
- COMPILE_DEPS ${MSAN_UNITTEST_HEADERS}
+ COMPILE_DEPS ${MSAN_UNITTEST_HEADERS} libcxx_msan_${arch}-build
DEPS msan
- CFLAGS -isystem ${CMAKE_CURRENT_BINARY_DIR}/../libcxx_msan_${arch}/include/c++/v1
+ CFLAGS -isystem ${MSAN_LIBCXX_DIR}/../include/c++/v1
${MSAN_UNITTEST_INSTRUMENTED_CFLAGS} ${cflags}
)
endmacro()
@@ -120,7 +120,7 @@ macro(add_msan_tests_for_arch arch kind cflags)
set(MSAN_TEST_DEPS ${MSAN_TEST_OBJECTS} libcxx_msan_${arch}-build
${MSAN_LOADABLE_SO}
"${MSAN_LIBCXX_DIR}/libc++.a" "${MSAN_LIBCXX_DIR}/libc++abi.a")
- list(APPEND MSAN_TEST_DEPS msan)
+ list(APPEND MSAN_TEST_DEPS msan libcxx_msan_${arch}-build)
get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
add_compiler_rt_test(MsanUnitTests "Msan-${arch}${kind}-Test" ${arch}
OBJECTS ${MSAN_TEST_OBJECTS} "${MSAN_LIBCXX_DIR}/libc++.a" "${MSAN_LIBCXX_DIR}/libc++abi.a"
diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
index f762524c333a..f2b4ac72ae15 100644
--- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -218,6 +218,8 @@ include_directories(..)
set(SANITIZER_COMMON_DEFINITIONS
HAVE_RPC_XDR_H=${HAVE_RPC_XDR_H})
+# note: L not I, this is nodefaultlibs for msvc
+append_list_if(MSVC /Zl SANITIZER_COMMON_CFLAGS)
set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})
# Too many existing bugs, needs cleanup.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 34a64f26478f..6e73065d7f53 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -639,7 +639,8 @@ class SizeClassAllocator64 {
static_assert(kRegionSize >= SizeClassMap::kMaxSize,
"Region size exceed largest size");
// kRegionSize must be <= 2^36, see CompactPtrT.
- COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ COMPILER_CHECK((kRegionSize) <=
+ (1ULL << (sizeof(CompactPtrT) * 8 + kCompactPtrScale)));
// Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 18;
// Call mmap for metadata memory with at least this size.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index a1be676730a7..1df61e79f7d8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -7650,9 +7650,9 @@ static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, pp_size * sizeof(char *));
}
-INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
+INTERCEPTOR(struct __sanitizer_protoent *, getprotoent,) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotoent,);
struct __sanitizer_protoent *p = REAL(getprotoent)();
if (p)
write_protoent(ctx, p);
@@ -7739,9 +7739,9 @@ INTERCEPTOR(int, getprotobynumber_r, int num,
#endif
#if SANITIZER_INTERCEPT_NETENT
-INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
+INTERCEPTOR(struct __sanitizer_netent *, getnetent,) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getnetent);
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetent,);
struct __sanitizer_netent *n = REAL(getnetent)();
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
@@ -9903,9 +9903,9 @@ INTERCEPTOR(char *, fdevname_r, int fd, char *buf, SIZE_T len) {
#endif
#if SANITIZER_INTERCEPT_GETUSERSHELL
-INTERCEPTOR(char *, getusershell) {
+INTERCEPTOR(char *, getusershell,) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
+ COMMON_INTERCEPTOR_ENTER(ctx, getusershell,);
char *res = REAL(getusershell)();
if (res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
index b3161690f3ce..14615f9668de 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -38,6 +38,10 @@
// Called before fork syscall.
// COMMON_SYSCALL_POST_FORK(long res)
// Called after fork syscall.
+// COMMON_SYSCALL_BLOCKING_START()
+// Called before blocking syscall.
+// COMMON_SYSCALL_BLOCKING_END()
+// Called after blocking syscall.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
@@ -85,6 +89,16 @@
{}
# endif
+# ifndef COMMON_SYSCALL_BLOCKING_START
+# define COMMON_SYSCALL_BLOCKING_START() \
+ {}
+# endif
+
+# ifndef COMMON_SYSCALL_BLOCKING_END
+# define COMMON_SYSCALL_BLOCKING_END() \
+ {}
+# endif
+
// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
extern "C" {
@@ -3176,6 +3190,18 @@ POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {
}
}
}
+
+PRE_SYSCALL(futex)
+(void *uaddr, long futex_op, long val, void *timeout, void *uaddr2, long val3) {
+ COMMON_SYSCALL_BLOCKING_START();
+}
+
+POST_SYSCALL(futex)
+(long res, void *uaddr, long futex_op, long val, void *timeout, void *uaddr2,
+ long val3) {
+ COMMON_SYSCALL_BLOCKING_END();
+}
+
} // extern "C"
# undef PRE_SYSCALL
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
index 62c1cf4abe42..24511720bd99 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
@@ -54,7 +54,7 @@ static int AppendNumber(char **buff, const char *buff_end, u64 absolute_value,
uptr num_buffer[kMaxLen];
int pos = 0;
do {
- RAW_CHECK_MSG((uptr)pos < kMaxLen, "AppendNumber buffer overflow");
+ RAW_CHECK_MSG((uptr)pos < kMaxLen, "AppendNumber buffer overflow",);
num_buffer[pos++] = absolute_value % base;
absolute_value /= base;
} while (absolute_value > 0);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index 10361a320344..e39cb891575e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -30,10 +30,15 @@ class StackTracePrinter {
virtual void RenderFrame(InternalScopedString *buffer, const char *format,
int frame_no, uptr address, const AddressInfo *info,
- bool vs_style,
- const char *strip_path_prefix = "") = 0;
+ bool vs_style, const char *strip_path_prefix = "") {
+ // Should be pure virtual, but we can't depend on __cxa_pure_virtual.
+ UNIMPLEMENTED();
+ }
- virtual bool RenderNeedsSymbolization(const char *format) = 0;
+ virtual bool RenderNeedsSymbolization(const char *format) {
+ // Should be pure virtual, but we can't depend on __cxa_pure_virtual.
+ UNIMPLEMENTED();
+ }
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
@@ -44,7 +49,10 @@ class StackTracePrinter {
const char *strip_path_prefix);
virtual void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI,
- const char *strip_path_prefix = "") = 0;
+ const char *strip_path_prefix = "") {
+ // Should be pure virtual, but we can't depend on __cxa_pure_virtual.
+ UNIMPLEMENTED();
+ }
private:
// To be called from StackTracePrinter::GetOrInit
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index f4dd90aac665..e7bc90cd0960 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -18,6 +18,7 @@
#include "local_cache.h"
#include "mem_map.h"
#include "memtag.h"
+#include "mutex.h"
#include "options.h"
#include "quarantine.h"
#include "report.h"
@@ -178,17 +179,17 @@ public:
Quarantine.init(
static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
-
- mapAndInitializeRingBuffer();
}
- void enableRingBuffer() {
+ void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
AllocationRingBuffer *RB = getRingBuffer();
if (RB)
RB->Depot->enable();
+ RingBufferInitLock.unlock();
}
- void disableRingBuffer() {
+ void disableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
+ RingBufferInitLock.lock();
AllocationRingBuffer *RB = getRingBuffer();
if (RB)
RB->Depot->disable();
@@ -915,9 +916,11 @@ public:
DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
return;
}
- if (Track)
+
+ if (Track) {
+ initRingBufferMaybe();
Primary.Options.set(OptionBit::TrackAllocationStacks);
- else
+ } else
Primary.Options.clear(OptionBit::TrackAllocationStacks);
}
@@ -1092,6 +1095,9 @@ private:
0,
"invalid alignment");
+ // Lock to initialize the RingBuffer
+ HybridMutex RingBufferInitLock;
+
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
// and immediately followed by Size elements of type Entry.
atomic_uptr RingBufferAddress = {};
@@ -1546,11 +1552,16 @@ private:
RBEntryStart)[N];
}
- void mapAndInitializeRingBuffer() {
- if (getFlags()->allocation_ring_buffer_size <= 0)
+ void initRingBufferMaybe() {
+ ScopedLock L(RingBufferInitLock);
+ if (getRingBuffer() != nullptr)
return;
- u32 AllocationRingBufferSize =
- static_cast<u32>(getFlags()->allocation_ring_buffer_size);
+
+ int ring_buffer_size = getFlags()->allocation_ring_buffer_size;
+ if (ring_buffer_size <= 0)
+ return;
+
+ u32 AllocationRingBufferSize = static_cast<u32>(ring_buffer_size);
// We store alloc and free stacks for each entry.
constexpr u32 kStacksPerRingBufferEntry = 2;
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
index 0788c4198e53..2144f1b63f89 100644
--- a/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -34,11 +34,10 @@ static_assert(ZX_HANDLE_INVALID == 0, "");
static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
uptr Size) {
- char Error[128];
- formatString(Error, sizeof(Error),
- "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ ScopedString Error;
+ Error.append("SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
Size >> 10, zx_status_get_string(Status));
- outputRaw(Error);
+ outputRaw(Error.data());
die();
}
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
index 0566ab065526..28e5a11a37f2 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -22,11 +22,10 @@ namespace scudo {
static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
uptr Size) {
- char Error[128];
- formatString(Error, sizeof(Error),
- "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ ScopedString Error;
+ Error.append("SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
Size >> 10, _zx_status_get_string(Status));
- outputRaw(Error);
+ outputRaw(Error.data());
die();
}
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index f5e4ab57b4df..abce4bff321c 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -372,10 +372,11 @@ public:
PushedBlocks += Region->FreeListInfo.PushedBlocks;
}
}
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
- "allocations; remains %zu\n",
+ "allocations; remains %zu; ReleaseToOsIntervalMs = %d\n",
TotalMapped >> 20, 0U, PoppedBlocks,
- PoppedBlocks - PushedBlocks);
+ PoppedBlocks - PushedBlocks, IntervalMs >= 0 ? IntervalMs : -1);
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
diff --git a/compiler-rt/lib/scudo/standalone/report_linux.cpp b/compiler-rt/lib/scudo/standalone/report_linux.cpp
index 6a983036e6cd..dfddef3324bd 100644
--- a/compiler-rt/lib/scudo/standalone/report_linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/report_linux.cpp
@@ -24,33 +24,30 @@ namespace scudo {
// Fatal internal map() error (potentially OOM related).
void NORETURN reportMapError(uptr SizeIfOOM) {
- char Error[128] = "Scudo ERROR: internal map failure\n";
+ ScopedString Error;
+ Error.append("Scudo ERROR: internal map failure");
if (SizeIfOOM) {
- formatString(
- Error, sizeof(Error),
- "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
- SizeIfOOM >> 10);
+ Error.append(" (NO MEMORY) requesting %zuKB", SizeIfOOM >> 10);
}
- reportRawError(Error);
+ Error.append("\n");
+ reportRawError(Error.data());
}
void NORETURN reportUnmapError(uptr Addr, uptr Size) {
- char Error[128];
- formatString(Error, sizeof(Error),
- "Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
+ ScopedString Error;
+ Error.append("Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
"Size %zu\n",
strerror(errno), Addr, Size);
- reportRawError(Error);
+ reportRawError(Error.data());
}
void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
- char Error[128];
- formatString(
- Error, sizeof(Error),
+ ScopedString Error;
+ Error.append(
"Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
"Size %zu Prot %x\n",
strerror(errno), Addr, Size, Prot);
- reportRawError(Error);
+ reportRawError(Error.data());
}
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 202c55cc1a92..674af5071775 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -179,10 +179,12 @@ public:
uptr Fractional;
computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
&Fractional);
- Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
- "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
- EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
- atomic_load_relaxed(&MaxEntrySize));
+ const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ Str->append(
+ "Stats: MapAllocatorCache: EntriesCount: %d, "
+ "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
+ EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+ atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
"(%zu.%02zu%%)\n",
SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index cf3cabf7085b..98cd9707a646 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -112,7 +112,7 @@ public:
if (TabMask == 0)
return false;
uptr TabSize = TabMask + 1;
- if (!isPowerOfTwo(TabSize))
+ if (TabSize == 0 || !isPowerOfTwo(TabSize))
return false;
uptr TabBytes = sizeof(atomic_u32) * TabSize;
diff --git a/compiler-rt/lib/scudo/standalone/string_utils.cpp b/compiler-rt/lib/scudo/standalone/string_utils.cpp
index d4e4e3becd0e..e584bd806e57 100644
--- a/compiler-rt/lib/scudo/standalone/string_utils.cpp
+++ b/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -14,30 +14,21 @@
namespace scudo {
-static int appendChar(char **Buffer, const char *BufferEnd, char C) {
- if (*Buffer < BufferEnd) {
- **Buffer = C;
- (*Buffer)++;
- }
- return 1;
-}
-
// Appends number in a given Base to buffer. If its length is less than
// |MinNumberLength|, it is padded with leading zeroes or spaces, depending
// on the value of |PadWithZero|.
-static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
- u8 Base, u8 MinNumberLength, bool PadWithZero,
- bool Negative, bool Upper) {
+void ScopedString::appendNumber(u64 AbsoluteValue, u8 Base, u8 MinNumberLength,
+ bool PadWithZero, bool Negative, bool Upper) {
constexpr uptr MaxLen = 30;
RAW_CHECK(Base == 10 || Base == 16);
RAW_CHECK(Base == 10 || !Negative);
RAW_CHECK(AbsoluteValue || !Negative);
RAW_CHECK(MinNumberLength < MaxLen);
- int Res = 0;
if (Negative && MinNumberLength)
--MinNumberLength;
- if (Negative && PadWithZero)
- Res += appendChar(Buffer, BufferEnd, '-');
+ if (Negative && PadWithZero) {
+ String.push_back('-');
+ }
uptr NumBuffer[MaxLen];
int Pos = 0;
do {
@@ -55,34 +46,32 @@ static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
Pos--;
for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
char c = (PadWithZero || Pos == 0) ? '0' : ' ';
- Res += appendChar(Buffer, BufferEnd, c);
+ String.push_back(c);
}
if (Negative && !PadWithZero)
- Res += appendChar(Buffer, BufferEnd, '-');
+ String.push_back('-');
for (; Pos >= 0; Pos--) {
char Digit = static_cast<char>(NumBuffer[Pos]);
Digit = static_cast<char>((Digit < 10) ? '0' + Digit
: (Upper ? 'A' : 'a') + Digit - 10);
- Res += appendChar(Buffer, BufferEnd, Digit);
+ String.push_back(Digit);
}
- return Res;
}
-static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
- u8 Base, u8 MinNumberLength, bool PadWithZero,
- bool Upper) {
- return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
- PadWithZero, /*Negative=*/false, Upper);
+void ScopedString::appendUnsigned(u64 Num, u8 Base, u8 MinNumberLength,
+ bool PadWithZero, bool Upper) {
+ appendNumber(Num, Base, MinNumberLength, PadWithZero, /*Negative=*/false,
+ Upper);
}
-static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
- u8 MinNumberLength, bool PadWithZero) {
+void ScopedString::appendSignedDecimal(s64 Num, u8 MinNumberLength,
+ bool PadWithZero) {
const bool Negative = (Num < 0);
const u64 UnsignedNum = (Num == INT64_MIN)
? static_cast<u64>(INT64_MAX) + 1
: static_cast<u64>(Negative ? -Num : Num);
- return appendNumber(Buffer, BufferEnd, UnsignedNum, 10, MinNumberLength,
- PadWithZero, Negative, /*Upper=*/false);
+ appendNumber(UnsignedNum, 10, MinNumberLength, PadWithZero, Negative,
+ /*Upper=*/false);
}
// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
@@ -90,44 +79,45 @@ static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
// Width == 0 - no Width requested
// Width < 0 - left-justify S within and pad it to -Width chars, if necessary
// Width > 0 - right-justify S, not implemented yet
-static int appendString(char **Buffer, const char *BufferEnd, int Width,
- int MaxChars, const char *S) {
+void ScopedString::appendString(int Width, int MaxChars, const char *S) {
if (!S)
S = "<null>";
- int Res = 0;
+ int NumChars = 0;
for (; *S; S++) {
- if (MaxChars >= 0 && Res >= MaxChars)
+ if (MaxChars >= 0 && NumChars >= MaxChars)
break;
- Res += appendChar(Buffer, BufferEnd, *S);
+ String.push_back(*S);
+ NumChars++;
+ }
+ if (Width < 0) {
+ // Only left justification supported.
+ Width = -Width - NumChars;
+ while (Width-- > 0)
+ String.push_back(' ');
}
- // Only the left justified strings are supported.
- while (Width < -Res)
- Res += appendChar(Buffer, BufferEnd, ' ');
- return Res;
}
-static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
- int Res = 0;
- Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
- Res += appendUnsigned(Buffer, BufferEnd, ptr_value, 16,
- SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
- /*Upper=*/false);
- return Res;
+void ScopedString::appendPointer(u64 ptr_value) {
+ appendString(0, -1, "0x");
+ appendUnsigned(ptr_value, 16, SCUDO_POINTER_FORMAT_LENGTH,
+ /*PadWithZero=*/true,
+ /*Upper=*/false);
}
-static int formatString(char *Buffer, uptr BufferLength, const char *Format,
- va_list Args) {
+void ScopedString::vappend(const char *Format, va_list &Args) {
+ // Since the string contains the '\0' terminator, put our size before it
+ // so that push_back calls work correctly.
+ DCHECK(String.size() > 0);
+ String.resize(String.size() - 1);
+
static const char *PrintfFormatsHelp =
- "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "Supported formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\n";
RAW_CHECK(Format);
- RAW_CHECK(BufferLength > 0);
- const char *BufferEnd = &Buffer[BufferLength - 1];
const char *Cur = Format;
- int Res = 0;
for (; *Cur; Cur++) {
if (*Cur != '%') {
- Res += appendChar(&Buffer, BufferEnd, *Cur);
+ String.push_back(*Cur);
continue;
}
Cur++;
@@ -162,7 +152,7 @@ static int formatString(char *Buffer, uptr BufferLength, const char *Format,
DVal = HaveLL ? va_arg(Args, s64)
: HaveZ ? va_arg(Args, sptr)
: va_arg(Args, int);
- Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ appendSignedDecimal(DVal, Width, PadWithZero);
break;
}
case 'u':
@@ -172,27 +162,25 @@ static int formatString(char *Buffer, uptr BufferLength, const char *Format,
: HaveZ ? va_arg(Args, uptr)
: va_arg(Args, unsigned);
const bool Upper = (*Cur == 'X');
- Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
- Width, PadWithZero, Upper);
+ appendUnsigned(UVal, (*Cur == 'u') ? 10 : 16, Width, PadWithZero, Upper);
break;
}
case 'p': {
RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
- Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
+ appendPointer(va_arg(Args, uptr));
break;
}
case 's': {
RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
// Only left-justified Width is supported.
CHECK(!HaveWidth || LeftJustified);
- Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
- Precision, va_arg(Args, char *));
+ appendString(LeftJustified ? -Width : Width, Precision,
+ va_arg(Args, char *));
break;
}
case 'c': {
RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
- Res +=
- appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
+ String.push_back(static_cast<char>(va_arg(Args, int)));
break;
}
// In Scudo, `s64`/`u64` are supposed to use `lld` and `llu` respectively.
@@ -207,19 +195,17 @@ static int formatString(char *Buffer, uptr BufferLength, const char *Format,
if (*Cur == 'd') {
DVal = va_arg(Args, s64);
- Res +=
- appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ appendSignedDecimal(DVal, Width, PadWithZero);
} else {
UVal = va_arg(Args, u64);
- Res += appendUnsigned(&Buffer, BufferEnd, UVal, 10, Width, PadWithZero,
- false);
+ appendUnsigned(UVal, 10, Width, PadWithZero, false);
}
break;
}
case '%': {
RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
- Res += appendChar(&Buffer, BufferEnd, '%');
+ String.push_back('%');
break;
}
default: {
@@ -227,35 +213,13 @@ static int formatString(char *Buffer, uptr BufferLength, const char *Format,
}
}
}
- RAW_CHECK(Buffer <= BufferEnd);
- appendChar(&Buffer, BufferEnd + 1, '\0');
- return Res;
-}
-
-int formatString(char *Buffer, uptr BufferLength, const char *Format, ...) {
- va_list Args;
- va_start(Args, Format);
- int Res = formatString(Buffer, BufferLength, Format, Args);
- va_end(Args);
- return Res;
-}
-
-void ScopedString::vappend(const char *Format, va_list Args) {
- va_list ArgsCopy;
- va_copy(ArgsCopy, Args);
- // formatString doesn't currently support a null buffer or zero buffer length,
- // so in order to get the resulting formatted string length, we use a one-char
- // buffer.
- char C[1];
- const uptr AdditionalLength =
- static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
- const uptr Length = length();
- String.resize(Length + AdditionalLength);
- const uptr FormattedLength = static_cast<uptr>(formatString(
- String.data() + Length, String.size() - Length, Format, ArgsCopy));
- RAW_CHECK(data()[length()] == '\0');
- RAW_CHECK(FormattedLength + 1 == AdditionalLength);
- va_end(ArgsCopy);
+ String.push_back('\0');
+ if (String.back() != '\0') {
+ // String truncated, make sure the string is terminated properly.
+ // This can happen if there is no more memory when trying to resize
+ // the string.
+ String.back() = '\0';
+ }
}
void ScopedString::append(const char *Format, ...) {
diff --git a/compiler-rt/lib/scudo/standalone/string_utils.h b/compiler-rt/lib/scudo/standalone/string_utils.h
index a4cab5268ede..6e00b6377973 100644
--- a/compiler-rt/lib/scudo/standalone/string_utils.h
+++ b/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -25,17 +25,24 @@ public:
String.clear();
String.push_back('\0');
}
- void vappend(const char *Format, va_list Args);
+ void vappend(const char *Format, va_list &Args);
void append(const char *Format, ...) FORMAT(2, 3);
void output() const { outputRaw(String.data()); }
void reserve(size_t Size) { String.reserve(Size + 1); }
+ uptr capacity() { return String.capacity() - 1; }
private:
+ void appendNumber(u64 AbsoluteValue, u8 Base, u8 MinNumberLength,
+ bool PadWithZero, bool Negative, bool Upper);
+ void appendUnsigned(u64 Num, u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Upper);
+ void appendSignedDecimal(s64 Num, u8 MinNumberLength, bool PadWithZero);
+ void appendString(int Width, int MaxChars, const char *S);
+ void appendPointer(u64 ptr_value);
+
Vector<char> String;
};
-int formatString(char *Buffer, uptr BufferLength, const char *Format, ...)
- FORMAT(3, 4);
void Printf(const char *Format, ...) FORMAT(1, 2);
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 6a311adc55e4..1a36155bcd42 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -867,32 +867,86 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
}
}
+SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferDefaultDisabled) {
+ // The RingBuffer is not initialized until tracking is enabled for the
+ // first time.
+ auto *Allocator = this->Allocator.get();
+ EXPECT_EQ(0u, Allocator->getRingBufferSize());
+ EXPECT_EQ(nullptr, Allocator->getRingBufferAddress());
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferInitOnce) {
+ auto *Allocator = this->Allocator.get();
+ Allocator->setTrackAllocationStacks(true);
+
+ auto RingBufferSize = Allocator->getRingBufferSize();
+ ASSERT_GT(RingBufferSize, 0u);
+ auto *RingBufferAddress = Allocator->getRingBufferAddress();
+ EXPECT_NE(nullptr, RingBufferAddress);
+
+ // Enable tracking again to verify that the initialization only happens once.
+ Allocator->setTrackAllocationStacks(true);
+ ASSERT_EQ(RingBufferSize, Allocator->getRingBufferSize());
+ EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
+}
+
SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
auto *Allocator = this->Allocator.get();
- auto Size = Allocator->getRingBufferSize();
- ASSERT_GT(Size, 0u);
- EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
+ Allocator->setTrackAllocationStacks(true);
+
+ auto RingBufferSize = Allocator->getRingBufferSize();
+ ASSERT_GT(RingBufferSize, 0u);
+ EXPECT_EQ(Allocator->getRingBufferAddress()[RingBufferSize - 1], '\0');
}
SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
auto *Allocator = this->Allocator.get();
- auto *Addr = Allocator->getRingBufferAddress();
- EXPECT_NE(Addr, nullptr);
- EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
+ Allocator->setTrackAllocationStacks(true);
+
+ auto *RingBufferAddress = Allocator->getRingBufferAddress();
+ EXPECT_NE(RingBufferAddress, nullptr);
+ EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotDefaultDisabled) {
+ // The StackDepot is not initialized until tracking is enabled for the
+ // first time.
+ auto *Allocator = this->Allocator.get();
+ EXPECT_EQ(0u, Allocator->getStackDepotSize());
+ EXPECT_EQ(nullptr, Allocator->getStackDepotAddress());
+}
+
+SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotInitOnce) {
+ auto *Allocator = this->Allocator.get();
+ Allocator->setTrackAllocationStacks(true);
+
+ auto StackDepotSize = Allocator->getStackDepotSize();
+ EXPECT_GT(StackDepotSize, 0u);
+ auto *StackDepotAddress = Allocator->getStackDepotAddress();
+ EXPECT_NE(nullptr, StackDepotAddress);
+
+ // Enable tracking again to verify that the initialization only happens once.
+ Allocator->setTrackAllocationStacks(true);
+ EXPECT_EQ(StackDepotSize, Allocator->getStackDepotSize());
+ EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}
SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
auto *Allocator = this->Allocator.get();
- auto Size = Allocator->getStackDepotSize();
- ASSERT_GT(Size, 0u);
- EXPECT_EQ(Allocator->getStackDepotAddress()[Size - 1], '\0');
+ Allocator->setTrackAllocationStacks(true);
+
+ auto StackDepotSize = Allocator->getStackDepotSize();
+ EXPECT_GT(StackDepotSize, 0u);
+ EXPECT_EQ(Allocator->getStackDepotAddress()[StackDepotSize - 1], '\0');
}
SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
auto *Allocator = this->Allocator.get();
- auto *Addr = Allocator->getStackDepotAddress();
- EXPECT_NE(Addr, nullptr);
- EXPECT_EQ(Addr, Allocator->getStackDepotAddress());
+ Allocator->setTrackAllocationStacks(true);
+
+ auto *StackDepotAddress = Allocator->getStackDepotAddress();
+ EXPECT_NE(StackDepotAddress, nullptr);
+ EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}
SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
diff --git a/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp b/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp
index 7a69ffd9762c..abb81803f65e 100644
--- a/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/strings_test.cpp
@@ -66,6 +66,10 @@ TEST(ScudoStringsTest, Precision) {
Str.append("%-6s", "12345");
EXPECT_EQ(Str.length(), strlen(Str.data()));
EXPECT_STREQ("12345 ", Str.data());
+ Str.clear();
+ Str.append("%-8s", "12345");
+ EXPECT_EQ(Str.length(), strlen(Str.data()));
+ EXPECT_STREQ("12345 ", Str.data());
}
static void fillString(scudo::ScopedString &Str, scudo::uptr Size) {
@@ -123,3 +127,42 @@ TEST(ScudoStringsTest, Padding) {
testAgainstLibc<int>("%03d - %03d", 12, 1234);
testAgainstLibc<int>("%03d - %03d", -12, -1234);
}
+
+#if defined(__linux__)
+
+#include <sys/resource.h>
+
+TEST(ScudoStringsTest, CapacityIncreaseFails) {
+ scudo::ScopedString Str;
+
+ rlimit Limit = {};
+ EXPECT_EQ(0, getrlimit(RLIMIT_AS, &Limit));
+
+ rlimit EmptyLimit = {.rlim_cur = 0, .rlim_max = Limit.rlim_max};
+ EXPECT_EQ(0, setrlimit(RLIMIT_AS, &EmptyLimit));
+
+ // qemu does not honor the setrlimit, so verify before proceeding.
+ scudo::MemMapT MemMap;
+ if (MemMap.map(/*Addr=*/0U, scudo::getPageSizeCached(), "scudo:test",
+ MAP_ALLOWNOMEM)) {
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ setrlimit(RLIMIT_AS, &Limit);
+ GTEST_SKIP() << "Limiting address space does not prevent mmap.";
+ }
+
+ // Test requires that the default length is at least 6 characters.
+ scudo::uptr MaxSize = Str.capacity();
+ EXPECT_LE(6u, MaxSize);
+
+ for (size_t i = 0; i < MaxSize - 5; i++) {
+ Str.append("B");
+ }
+
+ // Attempt to append past the end of the current capacity.
+ Str.append("%d", 12345678);
+ EXPECT_EQ(MaxSize, Str.capacity());
+ EXPECT_STREQ("B12345", &Str.data()[MaxSize - 6]);
+
+ EXPECT_EQ(0, setrlimit(RLIMIT_AS, &Limit));
+}
+#endif
diff --git a/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp b/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp
index dc23c2a34713..b612676b7bd7 100644
--- a/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/vector_test.cpp
@@ -41,3 +41,47 @@ TEST(ScudoVectorTest, ResizeReduction) {
V.resize(1);
EXPECT_EQ(V.size(), 1U);
}
+
+#if defined(__linux__)
+
+#include <sys/resource.h>
+
+// Verify that if the reallocate fails, nothing new is added.
+TEST(ScudoVectorTest, ReallocateFails) {
+ scudo::Vector<char> V;
+ scudo::uptr capacity = V.capacity();
+
+ // Get the current address space size.
+ rlimit Limit = {};
+ EXPECT_EQ(0, getrlimit(RLIMIT_AS, &Limit));
+
+ rlimit EmptyLimit = {.rlim_cur = 0, .rlim_max = Limit.rlim_max};
+ EXPECT_EQ(0, setrlimit(RLIMIT_AS, &EmptyLimit));
+
+ // qemu does not honor the setrlimit, so verify before proceeding.
+ scudo::MemMapT MemMap;
+ if (MemMap.map(/*Addr=*/0U, scudo::getPageSizeCached(), "scudo:test",
+ MAP_ALLOWNOMEM)) {
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ setrlimit(RLIMIT_AS, &Limit);
+ GTEST_SKIP() << "Limiting address space does not prevent mmap.";
+ }
+
+ V.resize(capacity);
+ // Set the last element so we can check it later.
+ V.back() = '\0';
+
+ // The reallocate should fail, so the capacity should not change.
+ V.reserve(capacity + 1000);
+ EXPECT_EQ(capacity, V.capacity());
+
+ // Now try to do a push back and verify that the size does not change.
+ scudo::uptr Size = V.size();
+ V.push_back('2');
+ EXPECT_EQ(Size, V.size());
+ // Verify that the last element in the vector did not change.
+ EXPECT_EQ('\0', V.back());
+
+ EXPECT_EQ(0, setrlimit(RLIMIT_AS, &Limit));
+}
+#endif
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
index b2108a01900b..72773f2f72b1 100644
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -41,9 +41,9 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
return true;
}
if (atomic_load_relaxed(&Precedence) == 0)
- atomic_store_relaxed(&Precedence,
- static_cast<uptr>(getMonotonicTimeFast() >>
- FIRST_32_SECOND_64(16, 0)));
+ atomic_store_relaxed(
+ &Precedence,
+ static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
return false;
}
inline void lock() NO_THREAD_SAFETY_ANALYSIS {
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
index c0f1ba0eddfa..ca10cc281d77 100644
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -35,7 +35,9 @@ public:
DCHECK_LE(Size, capacity());
if (Size == capacity()) {
const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
- reallocate(NewCapacity);
+ if (!reallocate(NewCapacity)) {
+ return;
+ }
}
memcpy(&Data[Size++], &Element, sizeof(T));
}
@@ -51,14 +53,17 @@ public:
const T *data() const { return Data; }
T *data() { return Data; }
constexpr uptr capacity() const { return CapacityBytes / sizeof(T); }
- void reserve(uptr NewSize) {
+ bool reserve(uptr NewSize) {
// Never downsize internal buffer.
if (NewSize > capacity())
- reallocate(NewSize);
+ return reallocate(NewSize);
+ return true;
}
void resize(uptr NewSize) {
if (NewSize > Size) {
- reserve(NewSize);
+ if (!reserve(NewSize)) {
+ return;
+ }
memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
}
Size = NewSize;
@@ -86,13 +91,16 @@ protected:
}
private:
- void reallocate(uptr NewCapacity) {
+ bool reallocate(uptr NewCapacity) {
DCHECK_GT(NewCapacity, 0);
DCHECK_LE(Size, NewCapacity);
MemMapT NewExternalBuffer;
NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
- NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector");
+ if (!NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector",
+ MAP_ALLOWNOMEM)) {
+ return false;
+ }
T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());
memcpy(NewExternalData, Data, Size * sizeof(T));
@@ -101,6 +109,7 @@ private:
Data = NewExternalData;
CapacityBytes = NewCapacity;
ExternalBuffer = NewExternalBuffer;
+ return true;
}
T *Data = nullptr;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 2bebe651b994..94adea777caf 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_glibc_version.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
@@ -126,7 +127,6 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-const int SIGPROF = 27;
#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
@@ -1341,7 +1341,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
MutexPreLock(thr, pc, (uptr)m);
- int res = REAL(pthread_mutex_lock)(m);
+ int res = BLOCK_REAL(pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == errno_EOWNERDEAD)
@@ -1386,7 +1386,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
__sanitizer_clockid_t clock, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
MutexPreLock(thr, pc, (uptr)m);
- int res = REAL(pthread_mutex_clocklock)(m, clock, abstime);
+ int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == errno_EOWNERDEAD)
@@ -1404,7 +1404,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
MutexPreLock(thr, pc, (uptr)m);
- int res = REAL(__pthread_mutex_lock)(m);
+ int res = BLOCK_REAL(__pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == errno_EOWNERDEAD)
@@ -1447,7 +1447,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
MutexPreLock(thr, pc, (uptr)m);
- int res = REAL(pthread_spin_lock)(m);
+ int res = BLOCK_REAL(pthread_spin_lock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m);
}
@@ -1522,7 +1522,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
MutexPreLock(thr, pc, (uptr)m);
- int res = REAL(pthread_rwlock_wrlock)(m);
+ int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
if (res == 0) {
MutexPostLock(thr, pc, (uptr)m);
}
@@ -1614,47 +1614,40 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
FdAccess(thr, pc, fd);
return REAL(__fxstat)(version, fd, buf);
}
-#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif
+#if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_GLIBC
- SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
- if (fd > 0)
- FdAccess(thr, pc, fd);
- return REAL(__fxstat)(0, fd, buf);
-#else
SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(fstat)(fd, buf);
-#endif
-}
-
-#if SANITIZER_GLIBC
-TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
- if (fd > 0)
- FdAccess(thr, pc, fd);
- return REAL(__fxstat64)(version, fd, buf);
}
-#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+# define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
#else
-#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+# define TSAN_MAYBE_INTERCEPT_FSTAT
#endif
-#if SANITIZER_GLIBC
+#if __GLIBC_PREREQ(2, 33)
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- return REAL(__fxstat64)(0, fd, buf);
+ return REAL(fstat64)(fd, buf);
}
-#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+# define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
-#define TSAN_MAYBE_INTERCEPT_FSTAT64
+# define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif
TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
@@ -2169,8 +2162,7 @@ static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
return false;
#endif
return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- sig == SIGPROF;
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
}
void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
@@ -2675,6 +2667,25 @@ static USED void syscall_fd_release(uptr pc, int fd) {
FdRelease(thr, pc, fd);
}
+static USED void sycall_blocking_start() {
+ DPrintf("sycall_blocking_start()\n");
+ ThreadState *thr = cur_thread();
+ EnterBlockingFunc(thr);
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
+}
+
+static USED void sycall_blocking_end() {
+ DPrintf("sycall_blocking_end()\n");
+ ThreadState *thr = cur_thread();
+ thr->ignore_interceptors--;
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+}
+
static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
static void syscall_post_fork(uptr pc, int pid) {
@@ -2729,6 +2740,9 @@ static void syscall_post_fork(uptr pc, int pid) {
#define COMMON_SYSCALL_POST_FORK(res) \
syscall_post_fork(GET_CALLER_PC(), res)
+#define COMMON_SYSCALL_BLOCKING_START() sycall_blocking_start()
+#define COMMON_SYSCALL_BLOCKING_END() sycall_blocking_end()
+
#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
@@ -2965,10 +2979,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_once);
- TSAN_INTERCEPT(fstat);
TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT;
TSAN_MAYBE_INTERCEPT_FSTAT64;
- TSAN_MAYBE_INTERCEPT___FXSTAT64;
TSAN_INTERCEPT(open);
TSAN_MAYBE_INTERCEPT_OPEN64;
TSAN_INTERCEPT(creat);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index 77488f843285..5316a7862e44 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -160,6 +160,10 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
}
Free(thr->tctx->sync);
+#if !SANITIZER_GO
+ thr->is_inited = true;
+#endif
+
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -200,15 +204,11 @@ void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
}
void ThreadContext::OnStarted(void *arg) {
- thr = static_cast<ThreadState *>(arg);
DPrintf("#%d: ThreadStart\n", tid);
- new (thr) ThreadState(tid);
+ thr = new (arg) ThreadState(tid);
if (common_flags()->detect_deadlocks)
thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
thr->tctx = this;
-#if !SANITIZER_GO
- thr->is_inited = true;
-#endif
}
void ThreadFinish(ThreadState *thr) {
diff --git a/compiler-rt/lib/tsan/tests/CMakeLists.txt b/compiler-rt/lib/tsan/tests/CMakeLists.txt
index ad8cc9b0eb05..1bc08bbf7450 100644
--- a/compiler-rt/lib/tsan/tests/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/tests/CMakeLists.txt
@@ -67,7 +67,7 @@ endforeach()
set(TSAN_DEPS tsan)
# TSan uses C++ standard library headers.
if (TARGET cxx-headers OR HAVE_LIBCXX)
- set(TSAN_DEPS cxx-headers)
+ list(APPEND TSAN_DEPS cxx-headers)
endif()
# add_tsan_unittest(<name>
diff --git a/compiler-rt/lib/ubsan/CMakeLists.txt b/compiler-rt/lib/ubsan/CMakeLists.txt
index 3f1e12ed9ac6..db0b33f1276e 100644
--- a/compiler-rt/lib/ubsan/CMakeLists.txt
+++ b/compiler-rt/lib/ubsan/CMakeLists.txt
@@ -41,6 +41,7 @@ set(UBSAN_HEADERS
include_directories(..)
set(UBSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+append_list_if(MSVC /Zl UBSAN_CFLAGS)
append_rtti_flag(OFF UBSAN_CFLAGS)
append_list_if(SANITIZER_CAN_USE_CXXABI -DUBSAN_CAN_USE_CXXABI UBSAN_CFLAGS)
diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py
index bd9b926c1505..0ac20a9831d9 100644
--- a/compiler-rt/test/lit.common.cfg.py
+++ b/compiler-rt/test/lit.common.cfg.py
@@ -882,6 +882,10 @@ if config.use_lld and config.has_lld and not config.use_lto:
elif config.use_lld and (not config.has_lld):
config.unsupported = True
+if config.host_os == "Darwin":
+ if getattr(config, "darwin_linker_version", None):
+ extra_cflags += ["-mlinker-version=" + config.darwin_linker_version]
+
# Append any extra flags passed in lit_config
append_target_cflags = lit_config.params.get("append_target_cflags", None)
if append_target_cflags:
diff --git a/compiler-rt/test/lit.common.configured.in b/compiler-rt/test/lit.common.configured.in
index db5d7c598b73..fff5dc6cc750 100644
--- a/compiler-rt/test/lit.common.configured.in
+++ b/compiler-rt/test/lit.common.configured.in
@@ -51,6 +51,7 @@ set_default("expensive_checks", @LLVM_ENABLE_EXPENSIVE_CHECKS_PYBOOL@)
set_default("test_standalone_build_libs", @COMPILER_RT_TEST_STANDALONE_BUILD_LIBS_PYBOOL@)
set_default("has_compiler_rt_libatomic", @COMPILER_RT_BUILD_STANDALONE_LIBATOMIC_PYBOOL@)
set_default("aarch64_sme", @COMPILER_RT_HAS_AARCH64_SME_PYBOOL@)
+set_default("darwin_linker_version", "@COMPILER_RT_DARWIN_LINKER_VERSION@")
# True iff the test suite supports ignoring the test compiler's runtime library path
# and using `config.compiler_rt_libdir` instead. This only matters when the runtime
# library paths differ.
diff --git a/compiler-rt/test/tsan/Linux/signal_in_futex_wait.cpp b/compiler-rt/test/tsan/Linux/signal_in_futex_wait.cpp
new file mode 100644
index 000000000000..3c8804aae3d0
--- /dev/null
+++ b/compiler-rt/test/tsan/Linux/signal_in_futex_wait.cpp
@@ -0,0 +1,113 @@
+// RUN: %clang_tsan %s -lstdc++ -o %t && %run %t 2>&1 | FileCheck %s
+
+#include "../test.h"
+#include <errno.h>
+#include <linux/futex.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/syscall.h>
+
+#include <cassert>
+#include <stdexcept>
+#include <thread>
+
+#include <sanitizer/linux_syscall_hooks.h>
+
+int futex(int *uaddr, int futex_op, int val, const struct timespec *timeout,
+ int *uaddr2, int val3) {
+ __sanitizer_syscall_pre_futex(uaddr, futex_op, val, timeout, uaddr2, val3);
+ int result = syscall(SYS_futex, uaddr, futex_op, val, timeout, uaddr2, val3);
+ __sanitizer_syscall_post_futex(result, uaddr, futex_op, val, timeout, uaddr2,
+ val3);
+ return result;
+}
+
+// Simple mutex implementation using futex.
+class Mutex {
+public:
+ Mutex() : value(0) {}
+
+ void lock() {
+ int c;
+ while ((c = __sync_val_compare_and_swap(&value, 0, 1)) != 0) {
+ if (c != 1)
+ continue;
+ int r = futex(&value, FUTEX_WAIT_PRIVATE, 1, nullptr, nullptr, 0);
+ if (r == -1 && errno != EAGAIN) {
+ fprintf(stderr, "futex wait error\n");
+ abort();
+ }
+ }
+ }
+
+ void unlock() {
+ value = 0;
+ int r = futex(&value, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
+ if (r == -1) {
+ fprintf(stderr, "futex wake error\n");
+ abort();
+ }
+ }
+
+private:
+ int value;
+};
+
+Mutex mutex;
+
+void *Thread(void *x) {
+ // Waiting for the futex.
+ mutex.lock();
+ // Finished waiting.
+ return nullptr;
+}
+
+static void SigprofHandler(int signal, siginfo_t *info, void *context) {
+ // Unlock the futex.
+ mutex.unlock();
+}
+
+void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = SigprofHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, 0) != 0) {
+ fprintf(stderr, "failed to install signal handler\n");
+ abort();
+ }
+}
+
+int main() {
+ alarm(60); // Kill the test if it hangs.
+
+ // Install the signal handler
+ InstallSignalHandler();
+
+ // Lock the futex at first so the other thread will wait for it.
+ mutex.lock();
+
+ // Create the thread to wait for the futex.
+ pthread_t thread;
+ pthread_create(&thread, NULL, Thread, NULL);
+
+ // Just waiting a bit to make sure the thread is at the FUTEX_WAIT_PRIVATE
+ // syscall.
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+ // Send the signal to the other thread, which will send the futex wake
+ // syscall.
+ int r = pthread_kill(thread, SIGPROF);
+ assert(r == 0);
+
+ // Futex should be notified and the thread should be able to continue.
+ pthread_join(thread, NULL);
+
+ // Exiting successfully.
+ fprintf(stderr, "PASS\n");
+ return 0;
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer:
+// CHECK: PASS
diff --git a/compiler-rt/test/tsan/signal_errno.cpp b/compiler-rt/test/tsan/signal_errno.cpp
index 99d4b6d84ca4..7e1fd4b0c5a5 100644
--- a/compiler-rt/test/tsan/signal_errno.cpp
+++ b/compiler-rt/test/tsan/signal_errno.cpp
@@ -18,7 +18,7 @@ static void MyHandler(int, siginfo_t *s, void *c) {
static void* sendsignal(void *p) {
barrier_wait(&barrier);
- pthread_kill(mainth, SIGALRM);
+ pthread_kill(mainth, SIGPROF);
return 0;
}
@@ -37,7 +37,7 @@ int main() {
mainth = pthread_self();
struct sigaction act = {};
act.sa_sigaction = &MyHandler;
- sigaction(SIGALRM, &act, 0);
+ sigaction(SIGPROF, &act, 0);
pthread_t th;
pthread_create(&th, 0, sendsignal, 0);
loop();
@@ -46,7 +46,7 @@ int main() {
}
// CHECK: WARNING: ThreadSanitizer: signal handler spoils errno
-// CHECK: Signal 14 handler invoked at:
+// CHECK: Signal 27 handler invoked at:
// CHECK: #0 MyHandler(int, {{(__)?}}siginfo{{(_t)?}}*, void*) {{.*}}signal_errno.cpp
// CHECK: main
// CHECK: SUMMARY: ThreadSanitizer: signal handler spoils errno{{.*}}MyHandler
diff --git a/compiler-rt/test/tsan/signal_in_mutex_lock.cpp b/compiler-rt/test/tsan/signal_in_mutex_lock.cpp
new file mode 100644
index 000000000000..ec99e2319840
--- /dev/null
+++ b/compiler-rt/test/tsan/signal_in_mutex_lock.cpp
@@ -0,0 +1,71 @@
+// RUN: %clang_tsan %s -lstdc++ -o %t && %run %t 2>&1 | FileCheck %s
+
+#include "test.h"
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+
+#include <cassert>
+#include <condition_variable>
+#include <mutex>
+
+std::mutex sampler_mutex; // dummy mutex to lock in the thread we spawn.
+std::mutex done_mutex; // guards the cv and done variables.
+std::condition_variable cv;
+bool done = false;
+
+void *ThreadFunc(void *x) {
+ while (true) {
+ // Lock the mutex
+ std::lock_guard<std::mutex> guard(sampler_mutex);
+ // Mutex is released at the end
+ }
+
+ return nullptr;
+}
+
+static void SigprofHandler(int signal, siginfo_t *info, void *context) {
+ // Assuming we did some work, change the variable to let the main thread
+ // know that we are done.
+ {
+ std::unique_lock<std::mutex> lck(done_mutex);
+ done = true;
+ cv.notify_one();
+ }
+}
+
+int main() {
+ alarm(60); // Kill the test if it hangs.
+
+ // Install the signal handler
+ struct sigaction sa;
+ sa.sa_sigaction = SigprofHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, 0) != 0) {
+ fprintf(stderr, "failed to install signal handler\n");
+ abort();
+ }
+
+ // Spawn a thread that will just loop and get the mutex lock:
+ pthread_t thread;
+ pthread_create(&thread, NULL, ThreadFunc, NULL);
+
+ // Lock the mutex before sending the signal
+ std::lock_guard<std::mutex> guard(sampler_mutex);
+ // From now on thread 1 will be waiting for the lock
+
+ // Send the SIGPROF signal to thread.
+ int r = pthread_kill(thread, SIGPROF);
+ assert(r == 0);
+
+ // Wait until signal handler sends the data.
+ std::unique_lock lk(done_mutex);
+ cv.wait(lk, [] { return done; });
+
+ // We got the done variable from the signal handler. Exiting successfully.
+ fprintf(stderr, "PASS\n");
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer:
+// CHECK: PASS
diff --git a/compiler-rt/test/tsan/signal_reset.cpp b/compiler-rt/test/tsan/signal_reset.cpp
index d76b7e5f3b5f..82758d882382 100644
--- a/compiler-rt/test/tsan/signal_reset.cpp
+++ b/compiler-rt/test/tsan/signal_reset.cpp
@@ -28,12 +28,12 @@ static void* reset(void *p) {
struct sigaction act = {};
for (int i = 0; i < 1000000; i++) {
act.sa_handler = &handler;
- if (sigaction(SIGALRM, &act, 0)) {
+ if (sigaction(SIGPROF, &act, 0)) {
perror("sigaction");
exit(1);
}
act.sa_handler = SIG_IGN;
- if (sigaction(SIGALRM, &act, 0)) {
+ if (sigaction(SIGPROF, &act, 0)) {
perror("sigaction");
exit(1);
}
@@ -44,7 +44,7 @@ static void* reset(void *p) {
int main() {
struct sigaction act = {};
act.sa_handler = SIG_IGN;
- if (sigaction(SIGALRM, &act, 0)) {
+ if (sigaction(SIGPROF, &act, 0)) {
perror("sigaction");
exit(1);
}
@@ -53,7 +53,7 @@ int main() {
t.it_value.tv_sec = 0;
t.it_value.tv_usec = 10;
t.it_interval = t.it_value;
- if (setitimer(ITIMER_REAL, &t, 0)) {
+ if (setitimer(ITIMER_PROF, &t, 0)) {
perror("setitimer");
exit(1);
}
diff --git a/compiler-rt/test/tsan/signal_sync.cpp b/compiler-rt/test/tsan/signal_sync.cpp
index 878b3f3b88b9..b529a1859f52 100644
--- a/compiler-rt/test/tsan/signal_sync.cpp
+++ b/compiler-rt/test/tsan/signal_sync.cpp
@@ -30,7 +30,7 @@ int main() {
struct sigaction act = {};
act.sa_handler = &handler;
- if (sigaction(SIGVTALRM, &act, 0)) {
+ if (sigaction(SIGPROF, &act, 0)) {
perror("sigaction");
exit(1);
}
@@ -39,7 +39,7 @@ int main() {
t.it_value.tv_sec = 0;
t.it_value.tv_usec = 10;
t.it_interval = t.it_value;
- if (setitimer(ITIMER_VIRTUAL, &t, 0)) {
+ if (setitimer(ITIMER_PROF, &t, 0)) {
perror("setitimer");
exit(1);
}
diff --git a/compiler-rt/test/tsan/signal_thread.cpp b/compiler-rt/test/tsan/signal_thread.cpp
index 7bba8159bf38..aa91d1ddeb10 100644
--- a/compiler-rt/test/tsan/signal_thread.cpp
+++ b/compiler-rt/test/tsan/signal_thread.cpp
@@ -24,7 +24,7 @@ static void* thr(void *p) {
int main() {
struct sigaction act = {};
act.sa_handler = &handler;
- if (sigaction(SIGVTALRM, &act, 0)) {
+ if (sigaction(SIGPROF, &act, 0)) {
perror("sigaction");
exit(1);
}
@@ -33,7 +33,7 @@ int main() {
t.it_value.tv_sec = 0;
t.it_value.tv_usec = 10;
t.it_interval = t.it_value;
- if (setitimer(ITIMER_VIRTUAL, &t, 0)) {
+ if (setitimer(ITIMER_PROF, &t, 0)) {
perror("setitimer");
exit(1);
}
diff --git a/compiler-rt/test/tsan/signal_thread2.cpp b/compiler-rt/test/tsan/signal_thread2.cpp
index 5236628e13b6..9bde4f70b39d 100644
--- a/compiler-rt/test/tsan/signal_thread2.cpp
+++ b/compiler-rt/test/tsan/signal_thread2.cpp
@@ -40,7 +40,7 @@ static void *thr(void *p) {
int main() {
struct sigaction act = {};
act.sa_handler = &handler;
- if (sigaction(SIGALRM, &act, 0)) {
+ if (sigaction(SIGPROF, &act, 0)) {
perror("sigaction");
exit(1);
}
@@ -49,7 +49,7 @@ int main() {
t.it_value.tv_sec = 0;
t.it_value.tv_usec = 10;
t.it_interval = t.it_value;
- if (setitimer(ITIMER_REAL, &t, 0)) {
+ if (setitimer(ITIMER_PROF, &t, 0)) {
perror("setitimer");
exit(1);
}
diff --git a/flang/include/flang/Common/Version.h b/flang/include/flang/Common/Version.h
index b1bd2416a618..3257d4a4f645 100644
--- a/flang/include/flang/Common/Version.h
+++ b/flang/include/flang/Common/Version.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_FLANG_COMMON_VERSION_H
-#define LLVM_FLANG_COMMON_VERSION_H
+#ifndef FORTRAN_COMMON_VERSION_H
+#define FORTRAN_COMMON_VERSION_H
#include "flang/Version.inc"
#include "llvm/ADT/StringRef.h"
@@ -53,4 +53,4 @@ std::string getFlangFullVersion();
std::string getFlangToolFullVersion(llvm::StringRef ToolName);
} // namespace Fortran::common
-#endif // LLVM_FLANG_COMMON_VERSION_H
+#endif // FORTRAN_COMMON_VERSION_H
diff --git a/flang/include/flang/Runtime/api-attrs.h b/flang/include/flang/Common/api-attrs.h
index fc3eb42e1b73..4d069c6097dd 100644
--- a/flang/include/flang/Runtime/api-attrs.h
+++ b/flang/include/flang/Common/api-attrs.h
@@ -1,4 +1,4 @@
-/*===-- include/flang/Runtime/api-attrs.h ---------------------------*- C -*-=//
+/*===-- include/flang/Common/api-attrs.h ---------------------------*- C -*-=//
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
@@ -102,7 +102,7 @@
* to appear as part of a C++ decl-specifier.
*/
#ifndef RT_CONST_VAR_ATTRS
-#if defined(__CUDACC__) || defined(__CUDA__)
+#if (defined(__CUDACC__) || defined(__CUDA__)) && defined(__CUDA_ARCH__)
#define RT_CONST_VAR_ATTRS __constant__
#else
#define RT_CONST_VAR_ATTRS
diff --git a/flang/include/flang/Common/idioms.h b/flang/include/flang/Common/idioms.h
index f6c9cbbc0f7c..99f383ec75b9 100644
--- a/flang/include/flang/Common/idioms.h
+++ b/flang/include/flang/Common/idioms.h
@@ -24,6 +24,7 @@
#endif
#include "enum-class.h"
+#include "variant.h"
#include "visit.h"
#include <array>
#include <functional>
@@ -33,7 +34,6 @@
#include <string>
#include <tuple>
#include <type_traits>
-#include <variant>
#if __GNUC__ == 7
// Avoid a deduction bug in GNU 7.x headers by forcing the answer.
@@ -87,6 +87,8 @@ template <typename... LAMBDAS> visitors(LAMBDAS... x) -> visitors<LAMBDAS...>;
// To disable, compile with '-DCHECK=(void)'
#ifndef CHECK
#define CHECK(x) ((x) || (DIE("CHECK(" #x ") failed"), false))
+// Same as above, but with a custom error message.
+#define CHECK_MSG(x, y) ((x) || (DIE("CHECK(" #x ") failed: " #y), false))
#endif
// User-defined type traits that default to false:
diff --git a/flang/include/flang/Common/optional.h b/flang/include/flang/Common/optional.h
index b5623b84dbd3..c0f4278009f4 100644
--- a/flang/include/flang/Common/optional.h
+++ b/flang/include/flang/Common/optional.h
@@ -26,7 +26,7 @@
#ifndef FORTRAN_COMMON_OPTIONAL_H
#define FORTRAN_COMMON_OPTIONAL_H
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
#include <optional>
#include <type_traits>
diff --git a/flang/include/flang/Common/real.h b/flang/include/flang/Common/real.h
index 50aab7d89a59..49c400b368a2 100644
--- a/flang/include/flang/Common/real.h
+++ b/flang/include/flang/Common/real.h
@@ -13,6 +13,7 @@
// The various representations are distinguished by their binary precisions
// (number of explicit significand bits and any implicit MSB in the fraction).
+#include "flang/Common/api-attrs.h"
#include <cinttypes>
namespace Fortran::common {
@@ -119,6 +120,7 @@ private:
}
public:
+ RT_OFFLOAD_VAR_GROUP_BEGIN
static constexpr int binaryPrecision{BINARY_PRECISION};
static constexpr int bits{BitsForBinaryPrecision(binaryPrecision)};
static constexpr bool isImplicitMSB{binaryPrecision != 64 /*x87*/};
@@ -138,6 +140,7 @@ public:
static constexpr int maxHexadecimalConversionDigits{
MaxHexadecimalConversionDigits(binaryPrecision)};
+ RT_OFFLOAD_VAR_GROUP_END
static_assert(binaryPrecision > 0);
static_assert(exponentBits > 1);
diff --git a/flang/include/flang/Common/reference-wrapper.h b/flang/include/flang/Common/reference-wrapper.h
index 66f924662d96..2983754108f9 100644
--- a/flang/include/flang/Common/reference-wrapper.h
+++ b/flang/include/flang/Common/reference-wrapper.h
@@ -25,7 +25,7 @@
#ifndef FORTRAN_COMMON_REFERENCE_WRAPPER_H
#define FORTRAN_COMMON_REFERENCE_WRAPPER_H
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
#include <functional>
#include <type_traits>
diff --git a/flang/include/flang/Common/restorer.h b/flang/include/flang/Common/restorer.h
index 4d5f5e4e2c81..0f1bc48620d3 100644
--- a/flang/include/flang/Common/restorer.h
+++ b/flang/include/flang/Common/restorer.h
@@ -19,11 +19,13 @@
#ifndef FORTRAN_COMMON_RESTORER_H_
#define FORTRAN_COMMON_RESTORER_H_
#include "idioms.h"
+#include "flang/Common/api-attrs.h"
namespace Fortran::common {
template <typename A> class Restorer {
public:
- explicit Restorer(A &p, A original) : p_{p}, original_{std::move(original)} {}
- ~Restorer() { p_ = std::move(original_); }
+ explicit RT_API_ATTRS Restorer(A &p, A original)
+ : p_{p}, original_{std::move(original)} {}
+ RT_API_ATTRS ~Restorer() { p_ = std::move(original_); }
// Inhibit any recreation of this restorer that would result in two restorers
// trying to restore the same reference.
@@ -38,13 +40,14 @@ private:
};
template <typename A, typename B>
-common::IfNoLvalue<Restorer<A>, B> ScopedSet(A &to, B &&from) {
+RT_API_ATTRS common::IfNoLvalue<Restorer<A>, B> ScopedSet(A &to, B &&from) {
A original{std::move(to)};
to = std::move(from);
return Restorer<A>{to, std::move(original)};
}
template <typename A, typename B>
-common::IfNoLvalue<Restorer<A>, B> ScopedSet(A &to, const B &from) {
+RT_API_ATTRS common::IfNoLvalue<Restorer<A>, B> ScopedSet(
+ A &to, const B &from) {
A original{std::move(to)};
to = from;
return Restorer<A>{to, std::move(original)};
diff --git a/flang/include/flang/Common/template.h b/flang/include/flang/Common/template.h
index 2ab3b8bce1df..51d09fb42ce3 100644
--- a/flang/include/flang/Common/template.h
+++ b/flang/include/flang/Common/template.h
@@ -9,12 +9,12 @@
#ifndef FORTRAN_COMMON_TEMPLATE_H_
#define FORTRAN_COMMON_TEMPLATE_H_
+#include "variant.h"
#include "flang/Common/idioms.h"
#include <functional>
#include <optional>
#include <tuple>
#include <type_traits>
-#include <variant>
#include <vector>
// Utility templates for metaprogramming and for composing the
diff --git a/flang/include/flang/Common/uint128.h b/flang/include/flang/Common/uint128.h
index 03e44eb6997d..821c8c3b08a5 100644
--- a/flang/include/flang/Common/uint128.h
+++ b/flang/include/flang/Common/uint128.h
@@ -20,6 +20,7 @@
#endif
#include "leading-zero-bit-count.h"
+#include "flang/Common/api-attrs.h"
#include <cstdint>
#include <type_traits>
@@ -260,7 +261,9 @@ private:
return LeadingZeroBitCount(high_);
}
}
+ RT_VAR_GROUP_BEGIN
static constexpr std::uint64_t topBit{std::uint64_t{1} << 63};
+ RT_VAR_GROUP_END
#if FLANG_LITTLE_ENDIAN
std::uint64_t low_{0}, high_{0};
#elif FLANG_BIG_ENDIAN
diff --git a/flang/include/flang/Common/unwrap.h b/flang/include/flang/Common/unwrap.h
index edb343d77b53..84582174e4b3 100644
--- a/flang/include/flang/Common/unwrap.h
+++ b/flang/include/flang/Common/unwrap.h
@@ -12,11 +12,11 @@
#include "indirection.h"
#include "reference-counted.h"
#include "reference.h"
+#include "variant.h"
#include "visit.h"
#include <memory>
#include <optional>
#include <type_traits>
-#include <variant>
// Given a nest of variants, optionals, &/or pointers, Unwrap<>() isolates
// a packaged value of a specific type if it is present and returns a pointer
diff --git a/flang/include/flang/Common/variant.h b/flang/include/flang/Common/variant.h
new file mode 100644
index 000000000000..1af85876afac
--- /dev/null
+++ b/flang/include/flang/Common/variant.h
@@ -0,0 +1,30 @@
+//===-- include/flang/Common/variant.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// A single way to expose C++ variant class in files that can be used
+// in F18 runtime build. With inclusion of this file std::variant
+// and the related names become available, though, they may correspond
+// to alternative definitions (e.g. from cuda::std namespace).
+
+#ifndef FORTRAN_COMMON_VARIANT_H
+#define FORTRAN_COMMON_VARIANT_H
+
+#if RT_USE_LIBCUDACXX
+#include <cuda/std/variant>
+namespace std {
+using cuda::std::get;
+using cuda::std::monostate;
+using cuda::std::variant;
+using cuda::std::variant_size_v;
+using cuda::std::visit;
+} // namespace std
+#else // !RT_USE_LIBCUDACXX
+#include <variant>
+#endif // !RT_USE_LIBCUDACXX
+
+#endif // FORTRAN_COMMON_VARIANT_H
diff --git a/flang/include/flang/Common/visit.h b/flang/include/flang/Common/visit.h
index f733b726189c..4d0897301e01 100644
--- a/flang/include/flang/Common/visit.h
+++ b/flang/include/flang/Common/visit.h
@@ -21,15 +21,16 @@
#ifndef FORTRAN_COMMON_VISIT_H_
#define FORTRAN_COMMON_VISIT_H_
+#include "variant.h"
+#include "flang/Common/api-attrs.h"
#include <type_traits>
-#include <variant>
namespace Fortran::common {
namespace log2visit {
template <std::size_t LOW, std::size_t HIGH, typename RESULT, typename VISITOR,
typename... VARIANT>
-inline RESULT Log2VisitHelper(
+inline RT_API_ATTRS RESULT Log2VisitHelper(
VISITOR &&visitor, std::size_t which, VARIANT &&...u) {
if constexpr (LOW + 7 >= HIGH) {
switch (which - LOW) {
@@ -61,7 +62,7 @@ inline RESULT Log2VisitHelper(
}
template <typename VISITOR, typename... VARIANT>
-inline auto visit(VISITOR &&visitor, VARIANT &&...u)
+inline RT_API_ATTRS auto visit(VISITOR &&visitor, VARIANT &&...u)
-> decltype(visitor(std::get<0>(std::forward<VARIANT>(u))...)) {
using Result = decltype(visitor(std::get<0>(std::forward<VARIANT>(u))...));
if constexpr (sizeof...(u) == 1) {
diff --git a/flang/include/flang/Decimal/binary-floating-point.h b/flang/include/flang/Decimal/binary-floating-point.h
index d1992819f85a..4919c1f9d240 100644
--- a/flang/include/flang/Decimal/binary-floating-point.h
+++ b/flang/include/flang/Decimal/binary-floating-point.h
@@ -12,6 +12,7 @@
// Access and manipulate the fields of an IEEE-754 binary
// floating-point value via a generalized template.
+#include "flang/Common/api-attrs.h"
#include "flang/Common/real.h"
#include "flang/Common/uint128.h"
#include <cinttypes>
@@ -47,9 +48,11 @@ public:
using RawType = common::HostUnsignedIntType<bits>;
static_assert(CHAR_BIT * sizeof(RawType) >= bits);
+ RT_OFFLOAD_VAR_GROUP_BEGIN
static constexpr RawType significandMask{(RawType{1} << significandBits) - 1};
- constexpr BinaryFloatingPointNumber() {} // zero
+ constexpr RT_API_ATTRS BinaryFloatingPointNumber() {} // zero
+ RT_OFFLOAD_VAR_GROUP_END
constexpr BinaryFloatingPointNumber(
const BinaryFloatingPointNumber &that) = default;
constexpr BinaryFloatingPointNumber(
@@ -58,26 +61,30 @@ public:
const BinaryFloatingPointNumber &that) = default;
constexpr BinaryFloatingPointNumber &operator=(
BinaryFloatingPointNumber &&that) = default;
- constexpr explicit BinaryFloatingPointNumber(RawType raw) : raw_{raw} {}
+ constexpr explicit RT_API_ATTRS BinaryFloatingPointNumber(RawType raw)
+ : raw_{raw} {}
- RawType raw() const { return raw_; }
+ RT_API_ATTRS RawType raw() const { return raw_; }
- template <typename A> explicit constexpr BinaryFloatingPointNumber(A x) {
+ template <typename A>
+ explicit constexpr RT_API_ATTRS BinaryFloatingPointNumber(A x) {
static_assert(sizeof raw_ <= sizeof x);
std::memcpy(reinterpret_cast<void *>(&raw_),
reinterpret_cast<const void *>(&x), sizeof raw_);
}
- constexpr int BiasedExponent() const {
+ constexpr RT_API_ATTRS int BiasedExponent() const {
return static_cast<int>(
(raw_ >> significandBits) & ((1 << exponentBits) - 1));
}
- constexpr int UnbiasedExponent() const {
+ constexpr RT_API_ATTRS int UnbiasedExponent() const {
int biased{BiasedExponent()};
return biased - exponentBias + (biased == 0);
}
- constexpr RawType Significand() const { return raw_ & significandMask; }
- constexpr RawType Fraction() const {
+ constexpr RT_API_ATTRS RawType Significand() const {
+ return raw_ & significandMask;
+ }
+ constexpr RT_API_ATTRS RawType Fraction() const {
RawType sig{Significand()};
if (isImplicitMSB && BiasedExponent() > 0) {
sig |= RawType{1} << significandBits;
@@ -85,10 +92,10 @@ public:
return sig;
}
- constexpr bool IsZero() const {
+ constexpr RT_API_ATTRS bool IsZero() const {
return (raw_ & ((RawType{1} << (bits - 1)) - 1)) == 0;
}
- constexpr bool IsNaN() const {
+ constexpr RT_API_ATTRS bool IsNaN() const {
auto expo{BiasedExponent()};
auto sig{Significand()};
if constexpr (bits == 80) { // x87
@@ -102,7 +109,7 @@ public:
return expo == maxExponent && sig != 0;
}
}
- constexpr bool IsInfinite() const {
+ constexpr RT_API_ATTRS bool IsInfinite() const {
if constexpr (bits == 80) { // x87
return BiasedExponent() == maxExponent &&
Significand() == ((significandMask >> 1) + 1);
@@ -110,27 +117,30 @@ public:
return BiasedExponent() == maxExponent && Significand() == 0;
}
}
- constexpr bool IsMaximalFiniteMagnitude() const {
+ constexpr RT_API_ATTRS bool IsMaximalFiniteMagnitude() const {
return BiasedExponent() == maxExponent - 1 &&
Significand() == significandMask;
}
- constexpr bool IsNegative() const { return ((raw_ >> (bits - 1)) & 1) != 0; }
+ constexpr RT_API_ATTRS bool IsNegative() const {
+ return ((raw_ >> (bits - 1)) & 1) != 0;
+ }
- constexpr void Negate() { raw_ ^= RawType{1} << (bits - 1); }
+ constexpr RT_API_ATTRS void Negate() { raw_ ^= RawType{1} << (bits - 1); }
// For calculating the nearest neighbors of a floating-point value
- constexpr void Previous() {
+ constexpr RT_API_ATTRS void Previous() {
RemoveExplicitMSB();
--raw_;
InsertExplicitMSB();
}
- constexpr void Next() {
+ constexpr RT_API_ATTRS void Next() {
RemoveExplicitMSB();
++raw_;
InsertExplicitMSB();
}
- static constexpr BinaryFloatingPointNumber Infinity(bool isNegative) {
+ static constexpr RT_API_ATTRS BinaryFloatingPointNumber Infinity(
+ bool isNegative) {
RawType result{RawType{maxExponent} << significandBits};
if (isNegative) {
result |= RawType{1} << (bits - 1);
@@ -139,7 +149,8 @@ public:
}
// Returns true when the result is exact
- constexpr bool RoundToBits(int keepBits, enum FortranRounding mode) {
+ constexpr RT_API_ATTRS bool RoundToBits(
+ int keepBits, enum FortranRounding mode) {
if (IsNaN() || IsInfinite() || keepBits >= binaryPrecision) {
return true;
}
@@ -180,12 +191,12 @@ public:
}
private:
- constexpr void RemoveExplicitMSB() {
+ constexpr RT_API_ATTRS void RemoveExplicitMSB() {
if constexpr (!isImplicitMSB) {
raw_ = (raw_ & (significandMask >> 1)) | ((raw_ & ~significandMask) >> 1);
}
}
- constexpr void InsertExplicitMSB() {
+ constexpr RT_API_ATTRS void InsertExplicitMSB() {
if constexpr (!isImplicitMSB) {
constexpr RawType mask{significandMask >> 1};
raw_ = (raw_ & mask) | ((raw_ & ~mask) << 1);
diff --git a/flang/include/flang/Decimal/decimal.h b/flang/include/flang/Decimal/decimal.h
index f0997fb63df0..443163d058e2 100644
--- a/flang/include/flang/Decimal/decimal.h
+++ b/flang/include/flang/Decimal/decimal.h
@@ -12,6 +12,7 @@
#ifndef FORTRAN_DECIMAL_DECIMAL_H_
#define FORTRAN_DECIMAL_DECIMAL_H_
+#include "flang/Common/api-attrs.h"
#include <stddef.h>
#ifdef __cplusplus
@@ -65,27 +66,27 @@ enum DecimalConversionFlags {
#ifdef __cplusplus
template <int PREC>
-ConversionToDecimalResult ConvertToDecimal(char *, size_t,
+RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal(char *, size_t,
DecimalConversionFlags, int digits, enum FortranRounding rounding,
BinaryFloatingPointNumber<PREC> x);
-extern template ConversionToDecimalResult ConvertToDecimal<8>(char *, size_t,
- enum DecimalConversionFlags, int, enum FortranRounding,
+extern template RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal<8>(
+ char *, size_t, enum DecimalConversionFlags, int, enum FortranRounding,
BinaryFloatingPointNumber<8>);
-extern template ConversionToDecimalResult ConvertToDecimal<11>(char *, size_t,
- enum DecimalConversionFlags, int, enum FortranRounding,
+extern template RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal<11>(
+ char *, size_t, enum DecimalConversionFlags, int, enum FortranRounding,
BinaryFloatingPointNumber<11>);
-extern template ConversionToDecimalResult ConvertToDecimal<24>(char *, size_t,
- enum DecimalConversionFlags, int, enum FortranRounding,
+extern template RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal<24>(
+ char *, size_t, enum DecimalConversionFlags, int, enum FortranRounding,
BinaryFloatingPointNumber<24>);
-extern template ConversionToDecimalResult ConvertToDecimal<53>(char *, size_t,
- enum DecimalConversionFlags, int, enum FortranRounding,
+extern template RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal<53>(
+ char *, size_t, enum DecimalConversionFlags, int, enum FortranRounding,
BinaryFloatingPointNumber<53>);
-extern template ConversionToDecimalResult ConvertToDecimal<64>(char *, size_t,
- enum DecimalConversionFlags, int, enum FortranRounding,
+extern template RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal<64>(
+ char *, size_t, enum DecimalConversionFlags, int, enum FortranRounding,
BinaryFloatingPointNumber<64>);
-extern template ConversionToDecimalResult ConvertToDecimal<113>(char *, size_t,
- enum DecimalConversionFlags, int, enum FortranRounding,
+extern template RT_API_ATTRS ConversionToDecimalResult ConvertToDecimal<113>(
+ char *, size_t, enum DecimalConversionFlags, int, enum FortranRounding,
BinaryFloatingPointNumber<113>);
template <int PREC> struct ConversionToBinaryResult {
@@ -94,20 +95,20 @@ template <int PREC> struct ConversionToBinaryResult {
};
template <int PREC>
-ConversionToBinaryResult<PREC> ConvertToBinary(const char *&,
+RT_API_ATTRS ConversionToBinaryResult<PREC> ConvertToBinary(const char *&,
enum FortranRounding = RoundNearest, const char *end = nullptr);
-extern template ConversionToBinaryResult<8> ConvertToBinary<8>(
+extern template RT_API_ATTRS ConversionToBinaryResult<8> ConvertToBinary<8>(
const char *&, enum FortranRounding, const char *end);
-extern template ConversionToBinaryResult<11> ConvertToBinary<11>(
+extern template RT_API_ATTRS ConversionToBinaryResult<11> ConvertToBinary<11>(
const char *&, enum FortranRounding, const char *end);
-extern template ConversionToBinaryResult<24> ConvertToBinary<24>(
+extern template RT_API_ATTRS ConversionToBinaryResult<24> ConvertToBinary<24>(
const char *&, enum FortranRounding, const char *end);
-extern template ConversionToBinaryResult<53> ConvertToBinary<53>(
+extern template RT_API_ATTRS ConversionToBinaryResult<53> ConvertToBinary<53>(
const char *&, enum FortranRounding, const char *end);
-extern template ConversionToBinaryResult<64> ConvertToBinary<64>(
+extern template RT_API_ATTRS ConversionToBinaryResult<64> ConvertToBinary<64>(
const char *&, enum FortranRounding, const char *end);
-extern template ConversionToBinaryResult<113> ConvertToBinary<113>(
+extern template RT_API_ATTRS ConversionToBinaryResult<113> ConvertToBinary<113>(
const char *&, enum FortranRounding, const char *end);
} // namespace Fortran::decimal
extern "C" {
@@ -116,21 +117,21 @@ extern "C" {
#define NS(x) x
#endif /* C++ */
-struct NS(ConversionToDecimalResult)
+RT_API_ATTRS struct NS(ConversionToDecimalResult)
ConvertFloatToDecimal(char *, size_t, enum NS(DecimalConversionFlags),
int digits, enum NS(FortranRounding), float);
-struct NS(ConversionToDecimalResult)
+RT_API_ATTRS struct NS(ConversionToDecimalResult)
ConvertDoubleToDecimal(char *, size_t, enum NS(DecimalConversionFlags),
int digits, enum NS(FortranRounding), double);
-struct NS(ConversionToDecimalResult)
+RT_API_ATTRS struct NS(ConversionToDecimalResult)
ConvertLongDoubleToDecimal(char *, size_t, enum NS(DecimalConversionFlags),
int digits, enum NS(FortranRounding), long double);
-enum NS(ConversionResultFlags)
+RT_API_ATTRS enum NS(ConversionResultFlags)
ConvertDecimalToFloat(const char **, float *, enum NS(FortranRounding));
-enum NS(ConversionResultFlags)
+RT_API_ATTRS enum NS(ConversionResultFlags)
ConvertDecimalToDouble(const char **, double *, enum NS(FortranRounding));
-enum NS(ConversionResultFlags) ConvertDecimalToLongDouble(
+RT_API_ATTRS enum NS(ConversionResultFlags) ConvertDecimalToLongDouble(
const char **, long double *, enum NS(FortranRounding));
#undef NS
#ifdef __cplusplus
diff --git a/flang/include/flang/Frontend/CodeGenOptions.h b/flang/include/flang/Frontend/CodeGenOptions.h
index 0c318e4023af..b0bbace82c04 100644
--- a/flang/include/flang/Frontend/CodeGenOptions.h
+++ b/flang/include/flang/Frontend/CodeGenOptions.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_CODEGENOPTIONS_H
-#define LLVM_CLANG_BASIC_CODEGENOPTIONS_H
+#ifndef FORTRAN_FRONTEND_CODEGENOPTIONS_H
+#define FORTRAN_FRONTEND_CODEGENOPTIONS_H
#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Frontend/Driver/CodeGenOptions.h"
@@ -141,4 +141,4 @@ public:
} // end namespace Fortran::frontend
-#endif
+#endif // FORTRAN_FRONTEND_CODEGENOPTIONS_H
diff --git a/flang/include/flang/Frontend/LangOptions.h b/flang/include/flang/Frontend/LangOptions.h
index 7adf2eec9ca3..7ab219581886 100644
--- a/flang/include/flang/Frontend/LangOptions.h
+++ b/flang/include/flang/Frontend/LangOptions.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_FLANG_FRONTEND_LANGOPTIONS_H
-#define LLVM_FLANG_FRONTEND_LANGOPTIONS_H
+#ifndef FORTRAN_FRONTEND_LANGOPTIONS_H
+#define FORTRAN_FRONTEND_LANGOPTIONS_H
#include <string>
@@ -63,4 +63,4 @@ public:
} // end namespace Fortran::frontend
-#endif
+#endif // FORTRAN_FRONTEND_LANGOPTIONS_H
diff --git a/flang/include/flang/ISO_Fortran_binding_wrapper.h b/flang/include/flang/ISO_Fortran_binding_wrapper.h
index 83c974365e34..37289bdbabd0 100644
--- a/flang/include/flang/ISO_Fortran_binding_wrapper.h
+++ b/flang/include/flang/ISO_Fortran_binding_wrapper.h
@@ -13,7 +13,7 @@
/* A thin wrapper around flang/include/ISO_Fortran_binding.h
* This header file must be included when ISO_Fortran_binding.h
* definitions/declarations are needed in Flang compiler/runtime
- * sources. The inclusion of Runtime/api-attrs.h below sets up
+ * sources. The inclusion of Common/api-attrs.h below sets up
* proper values for the macros used in ISO_Fortran_binding.h
* for the device offload builds.
* flang/include/ISO_Fortran_binding.h is made a standalone
@@ -23,7 +23,7 @@
/* clang-format off */
#include <stddef.h>
-#include "Runtime/api-attrs.h"
+#include "Common/api-attrs.h"
#ifdef __cplusplus
namespace Fortran {
namespace ISO {
diff --git a/flang/include/flang/Lower/ConvertVariable.h b/flang/include/flang/Lower/ConvertVariable.h
index ab30e317d1d9..d70d3268acac 100644
--- a/flang/include/flang/Lower/ConvertVariable.h
+++ b/flang/include/flang/Lower/ConvertVariable.h
@@ -161,9 +161,9 @@ void genDeclareSymbol(Fortran::lower::AbstractConverter &converter,
fir::FortranVariableFlagsEnum::None,
bool force = false);
-/// For the given Cray pointee symbol return the corresponding
-/// Cray pointer symbol. Assert if the pointer symbol cannot be found.
-Fortran::semantics::SymbolRef getCrayPointer(Fortran::semantics::SymbolRef sym);
+/// Given the Fortran type of a Cray pointee, return the fir.box type used to
+/// track the cray pointee as Fortran pointer.
+mlir::Type getCrayPointeeBoxType(mlir::Type);
} // namespace lower
} // namespace Fortran
diff --git a/flang/include/flang/Lower/OpenMP.h b/flang/include/flang/Lower/OpenMP.h
index 3b22a652d1fc..6e150ef4e8e8 100644
--- a/flang/include/flang/Lower/OpenMP.h
+++ b/flang/include/flang/Lower/OpenMP.h
@@ -19,7 +19,6 @@
#include <utility>
namespace mlir {
-class Value;
class Operation;
class Location;
namespace omp {
@@ -30,7 +29,6 @@ enum class DeclareTargetCaptureClause : uint32_t;
namespace fir {
class FirOpBuilder;
-class ConvertOp;
} // namespace fir
namespace Fortran {
@@ -84,16 +82,6 @@ void genOpenMPSymbolProperties(AbstractConverter &converter,
int64_t getCollapseValue(const Fortran::parser::OmpClauseList &clauseList);
void genThreadprivateOp(AbstractConverter &, const pft::Variable &);
void genDeclareTargetIntGlobal(AbstractConverter &, const pft::Variable &);
-void genOpenMPReduction(AbstractConverter &,
- Fortran::semantics::SemanticsContext &,
- const Fortran::parser::OmpClauseList &clauseList);
-
-mlir::Operation *findReductionChain(mlir::Value, mlir::Value * = nullptr);
-fir::ConvertOp getConvertFromReductionOp(mlir::Operation *, mlir::Value);
-void updateReduction(mlir::Operation *, fir::FirOpBuilder &, mlir::Value,
- mlir::Value, fir::ConvertOp * = nullptr);
-void removeStoreOp(mlir::Operation *, mlir::Value);
-
bool isOpenMPTargetConstruct(const parser::OpenMPConstruct &);
bool isOpenMPDeviceDeclareTarget(Fortran::lower::AbstractConverter &,
Fortran::semantics::SemanticsContext &,
diff --git a/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
new file mode 100644
index 000000000000..06a44f188565
--- /dev/null
+++ b/flang/include/flang/Optimizer/CodeGen/FIROpPatterns.h
@@ -0,0 +1,248 @@
+//===-- FIROpPatterns.h -- FIR operation conversion patterns ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_OPTIMIZER_CODEGEN_FIROPPATTERNS_H
+#define FORTRAN_OPTIMIZER_CODEGEN_FIROPPATTERNS_H
+
+#include "flang/Optimizer/CodeGen/TypeConverter.h"
+#include "mlir/Conversion/LLVMCommon/Pattern.h"
+#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+
+namespace fir {
+
+struct FIRToLLVMPassOptions;
+
+static constexpr unsigned defaultAddressSpace = 0u;
+
+class ConvertFIRToLLVMPattern : public mlir::ConvertToLLVMPattern {
+public:
+ ConvertFIRToLLVMPattern(llvm::StringRef rootOpName,
+ mlir::MLIRContext *context,
+ const fir::LLVMTypeConverter &typeConverter,
+ const fir::FIRToLLVMPassOptions &options,
+ mlir::PatternBenefit benefit = 1);
+
+protected:
+ mlir::Type convertType(mlir::Type ty) const {
+ return lowerTy().convertType(ty);
+ }
+
+ // Convert FIR type to LLVM without turning fir.box<T> into memory
+ // reference.
+ mlir::Type convertObjectType(mlir::Type firType) const;
+
+ mlir::LLVM::ConstantOp
+ genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
+ int value) const;
+
+ mlir::LLVM::ConstantOp
+ genConstantOffset(mlir::Location loc,
+ mlir::ConversionPatternRewriter &rewriter,
+ int offset) const;
+
+ /// Perform an extension or truncation as needed on an integer value. Lowering
+ /// to the specific target may involve some sign-extending or truncation of
+ /// values, particularly to fit them from abstract box types to the
+ /// appropriate reified structures.
+ mlir::Value integerCast(mlir::Location loc,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Type ty, mlir::Value val) const;
+ struct TypePair {
+ mlir::Type fir;
+ mlir::Type llvm;
+ };
+
+ TypePair getBoxTypePair(mlir::Type firBoxTy) const;
+
+ /// Construct code sequence to extract the specific value from a `fir.box`.
+ mlir::Value getValueFromBox(mlir::Location loc, TypePair boxTy,
+ mlir::Value box, mlir::Type resultTy,
+ mlir::ConversionPatternRewriter &rewriter,
+ int boxValue) const;
+
+ /// Method to construct code sequence to get the triple for dimension `dim`
+ /// from a box.
+ llvm::SmallVector<mlir::Value, 3>
+ getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
+ TypePair boxTy, mlir::Value box, mlir::Value dim,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ llvm::SmallVector<mlir::Value, 3>
+ getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
+ TypePair boxTy, mlir::Value box, int dim,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ mlir::Value
+ loadDimFieldFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
+ mlir::Value dim, int off, mlir::Type ty,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ mlir::Value
+ getDimFieldFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
+ int dim, int off, mlir::Type ty,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ mlir::Value getStrideFromBox(mlir::Location loc, TypePair boxTy,
+ mlir::Value box, unsigned dim,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ /// Read base address from a fir.box. Returned address has type ty.
+ mlir::Value
+ getBaseAddrFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ mlir::Value
+ getElementSizeFromBox(mlir::Location loc, mlir::Type resultTy, TypePair boxTy,
+ mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ // Get the element type given an LLVM type that is of the form
+ // (array|struct|vector)+ and the provided indexes.
+ mlir::Type getBoxEleTy(mlir::Type type,
+ llvm::ArrayRef<std::int64_t> indexes) const;
+
+ // Return LLVM type of the object described by a fir.box of \p boxType.
+ mlir::Type getLlvmObjectTypeFromBoxType(mlir::Type boxType) const;
+
+ /// Read the address of the type descriptor from a box.
+ mlir::Value
+ loadTypeDescAddress(mlir::Location loc, TypePair boxTy, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ // Load the attribute from the \p box and perform a check against \p maskValue
+ // The final comparison is implemented as `(attribute & maskValue) != 0`.
+ mlir::Value genBoxAttributeCheck(mlir::Location loc, TypePair boxTy,
+ mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter,
+ unsigned maskValue) const;
+
+ template <typename... ARGS>
+ mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Value base, ARGS... args) const {
+ llvm::SmallVector<mlir::LLVM::GEPArg> cv = {args...};
+ auto llvmPtrTy =
+ mlir::LLVM::LLVMPointerType::get(ty.getContext(), /*addressSpace=*/0);
+ return rewriter.create<mlir::LLVM::GEPOp>(loc, llvmPtrTy, ty, base, cv);
+ }
+
+ // Find the Block in which the alloca should be inserted.
+ // The order to recursively find the proper block:
+ // 1. An OpenMP Op that will be outlined.
+ // 2. A LLVMFuncOp
+ // 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
+ mlir::Block *getBlockForAllocaInsert(mlir::Operation *op) const;
+
+ // Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
+ // allocation address space provided for the architecture in the DataLayout
+  // specification. If the address space is different from the device's
+ // program address space we perform a cast. In the case of most architectures
+ // the program and allocation address space will be the default of 0 and no
+ // cast will be emitted.
+ mlir::Value
+ genAllocaAndAddrCastWithType(mlir::Location loc, mlir::Type llvmObjectTy,
+ unsigned alignment,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
+ const fir::LLVMTypeConverter &lowerTy() const {
+ return *static_cast<const fir::LLVMTypeConverter *>(
+ this->getTypeConverter());
+ }
+
+ void attachTBAATag(mlir::LLVM::AliasAnalysisOpInterface op,
+ mlir::Type baseFIRType, mlir::Type accessFIRType,
+ mlir::LLVM::GEPOp gep) const {
+ lowerTy().attachTBAATag(op, baseFIRType, accessFIRType, gep);
+ }
+
+ unsigned
+ getAllocaAddressSpace(mlir::ConversionPatternRewriter &rewriter) const;
+
+ unsigned
+ getProgramAddressSpace(mlir::ConversionPatternRewriter &rewriter) const;
+
+ const fir::FIRToLLVMPassOptions &options;
+
+ using ConvertToLLVMPattern::match;
+ using ConvertToLLVMPattern::matchAndRewrite;
+};
+
+template <typename SourceOp>
+class FIROpConversion : public ConvertFIRToLLVMPattern {
+public:
+ using OpAdaptor = typename SourceOp::Adaptor;
+
+ explicit FIROpConversion(const LLVMTypeConverter &typeConverter,
+ const fir::FIRToLLVMPassOptions &options,
+ mlir::PatternBenefit benefit = 1)
+ : ConvertFIRToLLVMPattern(SourceOp::getOperationName(),
+ &typeConverter.getContext(), typeConverter,
+ options, benefit) {}
+
+ /// Wrappers around the RewritePattern methods that pass the derived op type.
+ void rewrite(mlir::Operation *op, mlir::ArrayRef<mlir::Value> operands,
+ mlir::ConversionPatternRewriter &rewriter) const final {
+ rewrite(mlir::cast<SourceOp>(op),
+ OpAdaptor(operands, mlir::cast<SourceOp>(op)), rewriter);
+ }
+ mlir::LogicalResult match(mlir::Operation *op) const final {
+ return match(mlir::cast<SourceOp>(op));
+ }
+ mlir::LogicalResult
+ matchAndRewrite(mlir::Operation *op, mlir::ArrayRef<mlir::Value> operands,
+ mlir::ConversionPatternRewriter &rewriter) const final {
+ return matchAndRewrite(mlir::cast<SourceOp>(op),
+ OpAdaptor(operands, mlir::cast<SourceOp>(op)),
+ rewriter);
+ }
+
+ /// Rewrite and Match methods that operate on the SourceOp type. These must be
+ /// overridden by the derived pattern class.
+ virtual mlir::LogicalResult match(SourceOp op) const {
+ llvm_unreachable("must override match or matchAndRewrite");
+ }
+ virtual void rewrite(SourceOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ llvm_unreachable("must override rewrite or matchAndRewrite");
+ }
+ virtual mlir::LogicalResult
+ matchAndRewrite(SourceOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ if (mlir::failed(match(op)))
+ return mlir::failure();
+ rewrite(op, adaptor, rewriter);
+ return mlir::success();
+ }
+
+private:
+ using ConvertFIRToLLVMPattern::matchAndRewrite;
+ using ConvertToLLVMPattern::match;
+};
+
+/// FIR conversion pattern template
+template <typename FromOp>
+class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
+public:
+ using FIROpConversion<FromOp>::FIROpConversion;
+ using OpAdaptor = typename FromOp::Adaptor;
+
+ mlir::LogicalResult
+ matchAndRewrite(FromOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const final {
+ mlir::Type ty = this->convertType(op.getType());
+ return doRewrite(op, ty, adaptor, rewriter);
+ }
+
+ virtual mlir::LogicalResult
+ doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const = 0;
+};
+
+} // namespace fir
+
+#endif // FORTRAN_OPTIMIZER_CODEGEN_FIROPPATTERNS_H
diff --git a/flang/include/flang/Optimizer/CodeGen/TypeConverter.h b/flang/include/flang/Optimizer/CodeGen/TypeConverter.h
index 396c13639255..79b3bfe4e80e 100644
--- a/flang/include/flang/Optimizer/CodeGen/TypeConverter.h
+++ b/flang/include/flang/Optimizer/CodeGen/TypeConverter.h
@@ -94,8 +94,8 @@ public:
// to LLVM IR dialect here.
//
// fir.complex<T> | std.complex<T> --> llvm<"{t,t}">
- template <typename C> mlir::Type convertComplexType(C cmplx) const {
- LLVM_DEBUG(llvm::dbgs() << "type convert: " << cmplx << '\n');
+ template <typename C>
+ mlir::Type convertComplexType(C cmplx) const {
auto eleTy = cmplx.getElementType();
return convertType(specifics->complexMemoryType(eleTy));
}
diff --git a/flang/include/flang/Optimizer/Dialect/FIRAttr.td b/flang/include/flang/Optimizer/Dialect/FIRAttr.td
index 2ac4af9e66aa..f8b3fb861cc6 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRAttr.td
+++ b/flang/include/flang/Optimizer/Dialect/FIRAttr.td
@@ -137,4 +137,20 @@ def fir_CUDAClusterDimsAttr : fir_Attr<"CUDAClusterDims"> {
let assemblyFormat = "`<` struct(params) `>`";
}
+def fir_CUDADataTransferKind : I32EnumAttr<
+ "CUDADataTransferKind", "CUDA Fortran data transfer kind",
+ [
+ I32EnumAttrCase<"DeviceHost", 0, "device_host">,
+ I32EnumAttrCase<"HostDevice", 1, "host_device">,
+ I32EnumAttrCase<"DeviceDevice", 2, "device_device">,
+ ]> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::fir";
+}
+
+def fir_CUDADataTransferKindAttr :
+ EnumAttr<FIROpsDialect, fir_CUDADataTransferKind, "cuda_transfer"> {
+ let assemblyFormat = [{ ```<` $value `>` }];
+}
+
#endif // FIR_DIALECT_FIR_ATTRS
diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.td b/flang/include/flang/Optimizer/Dialect/FIROps.td
index b991ec76fdd9..dff1cdb20cbf 100644
--- a/flang/include/flang/Optimizer/Dialect/FIROps.td
+++ b/flang/include/flang/Optimizer/Dialect/FIROps.td
@@ -3165,4 +3165,29 @@ def fir_CUDAKernelOp : fir_Op<"cuda_kernel", [AttrSizedOperandSegments,
let hasVerifier = 1;
}
+def fir_CUDADataTransferOp : fir_Op<"cuda_data_transfer", []> {
+ let summary = "Represent a data transfer between host and device memory";
+
+ let description = [{
+    CUDA Fortran allows data transfer to be done via intrinsic assignment
+    between a host and a device variable. This operation is used to materialize
+    the data transfer between the lhs and rhs memory references.
+ The kind of transfer is specified in the attribute.
+
+ ```
+ adev = a ! transfer host to device
+ a = adev ! transfer device to host
+ bdev = adev ! transfer device to device
+ ```
+ }];
+
+  let arguments = (ins Arg<AnyReferenceLike, "", [MemRead]>:$src,
+                       Arg<AnyReferenceLike, "", [MemWrite]>:$dst,
+                       fir_CUDADataTransferKindAttr:$transfer_kind);
+
+ let assemblyFormat = [{
+ $src `to` $dst attr-dict `:` type(operands)
+ }];
+}
+
#endif
diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h
index b2c3d9290937..06c168a5de61 100644
--- a/flang/include/flang/Parser/dump-parse-tree.h
+++ b/flang/include/flang/Parser/dump-parse-tree.h
@@ -207,6 +207,7 @@ public:
NODE(CompilerDirective, LoopCount)
NODE(CompilerDirective, AssumeAligned)
NODE(CompilerDirective, NameValue)
+ NODE(CompilerDirective, Unrecognized)
NODE(parser, ComplexLiteralConstant)
NODE(parser, ComplexPart)
NODE(parser, ComponentArraySpec)
diff --git a/flang/include/flang/Parser/parse-tree-visitor.h b/flang/include/flang/Parser/parse-tree-visitor.h
index 79ea29f4b7f3..81d01dbdd65c 100644
--- a/flang/include/flang/Parser/parse-tree-visitor.h
+++ b/flang/include/flang/Parser/parse-tree-visitor.h
@@ -861,6 +861,18 @@ template <typename M> void Walk(CompilerDirective &x, M &mutator) {
}
}
template <typename V>
+void Walk(const CompilerDirective::Unrecognized &x, V &visitor) {
+ if (visitor.Pre(x)) {
+ visitor.Post(x);
+ }
+}
+template <typename M>
+void Walk(CompilerDirective::Unrecognized &x, M &mutator) {
+ if (mutator.Pre(x)) {
+ mutator.Post(x);
+ }
+}
+template <typename V>
void Walk(const OmpLinearClause::WithModifier &x, V &visitor) {
if (visitor.Pre(x)) {
Walk(x.modifier, visitor);
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index c96abfba491d..26b2e5f4e34b 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -3298,7 +3298,8 @@ struct StmtFunctionStmt {
// Compiler directives
// !DIR$ IGNORE_TKR [ [(tkrdmac...)] name ]...
// !DIR$ LOOP COUNT (n1[, n2]...)
-// !DIR$ name...
+// !DIR$ name[=value] [, name[=value]]...
+// !DIR$ <anything else>
struct CompilerDirective {
UNION_CLASS_BOILERPLATE(CompilerDirective);
struct IgnoreTKR {
@@ -3316,9 +3317,10 @@ struct CompilerDirective {
TUPLE_CLASS_BOILERPLATE(NameValue);
std::tuple<Name, std::optional<std::uint64_t>> t;
};
+ EMPTY_CLASS(Unrecognized);
CharBlock source;
std::variant<std::list<IgnoreTKR>, LoopCount, std::list<AssumeAligned>,
- std::list<NameValue>>
+ std::list<NameValue>, Unrecognized>
u;
};
diff --git a/flang/include/flang/Parser/tools.h b/flang/include/flang/Parser/tools.h
index 1e347fab6461..f1ead11734fa 100644
--- a/flang/include/flang/Parser/tools.h
+++ b/flang/include/flang/Parser/tools.h
@@ -40,6 +40,7 @@ const Name &GetFirstName(const ProcedureDesignator &);
const Name &GetFirstName(const Call &);
const Name &GetFirstName(const FunctionReference &);
const Name &GetFirstName(const Variable &);
+const Name &GetFirstName(const EntityDecl &);
// When a parse tree node is an instance of a specific type wrapped in
// layers of packaging, return a pointer to that object.
diff --git a/flang/include/flang/Runtime/entry-names.h b/flang/include/flang/Runtime/entry-names.h
index a233edf8e987..68582b92b549 100644
--- a/flang/include/flang/Runtime/entry-names.h
+++ b/flang/include/flang/Runtime/entry-names.h
@@ -19,7 +19,7 @@
#ifndef FORTRAN_RUNTIME_ENTRY_NAMES_H
#define FORTRAN_RUNTIME_ENTRY_NAMES_H
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
#ifndef RTNAME
#define NAME_WITH_PREFIX_AND_REVISION(prefix, revision, name) \
diff --git a/flang/include/flang/Runtime/io-api.h b/flang/include/flang/Runtime/io-api.h
index 556cc20c5a12..1b6c4f5d6a65 100644
--- a/flang/include/flang/Runtime/io-api.h
+++ b/flang/include/flang/Runtime/io-api.h
@@ -51,13 +51,21 @@ constexpr InquiryKeywordHash HashInquiryKeyword(const char *p) {
return hash;
}
-const char *InquiryKeywordHashDecode(
+RT_API_ATTRS const char *InquiryKeywordHashDecode(
char *buffer, std::size_t, InquiryKeywordHash);
extern "C" {
#define IONAME(name) RTNAME(io##name)
+#ifndef IODECL
+#define IODECL(name) RT_API_ATTRS IONAME(name)
+#endif
+
+#ifndef IODEF
+#define IODEF(name) RT_API_ATTRS IONAME(name)
+#endif
+
// These functions initiate data transfer statements (READ, WRITE, PRINT).
// Example: PRINT *, 666 is implemented as the series of calls:
// Cookie cookie{BeginExternalListOutput(DefaultOutputUnit,
@@ -139,7 +147,7 @@ enum Iostat IONAME(CheckUnitNumberInRange128)(common::int128_t unit,
const char *sourceFile = nullptr, int sourceLine = 0);
// External synchronous I/O initiation
-Cookie IONAME(BeginExternalListOutput)(ExternalUnit = DefaultOutputUnit,
+Cookie IODECL(BeginExternalListOutput)(ExternalUnit = DefaultOutputUnit,
const char *sourceFile = nullptr, int sourceLine = 0);
Cookie IONAME(BeginExternalListInput)(ExternalUnit = DefaultInputUnit,
const char *sourceFile = nullptr, int sourceLine = 0);
@@ -253,7 +261,7 @@ bool IONAME(InputDescriptor)(Cookie, const Descriptor &);
// Formatted (including list directed) I/O data items
bool IONAME(OutputInteger8)(Cookie, std::int8_t);
bool IONAME(OutputInteger16)(Cookie, std::int16_t);
-bool IONAME(OutputInteger32)(Cookie, std::int32_t);
+bool IODECL(OutputInteger32)(Cookie, std::int32_t);
bool IONAME(OutputInteger64)(Cookie, std::int64_t);
bool IONAME(OutputInteger128)(Cookie, common::int128_t);
bool IONAME(InputInteger)(Cookie, std::int64_t &, int kind = 8);
@@ -357,7 +365,7 @@ bool IONAME(InquireInteger64)(
// returned is guaranteed to only be one of the problems that the
// EnableHandlers() call has indicated should be handled in compiled code
// rather than by terminating the image.
-enum Iostat IONAME(EndIoStatement)(Cookie);
+enum Iostat IODECL(EndIoStatement)(Cookie);
} // extern "C"
} // namespace Fortran::runtime::io
diff --git a/flang/include/flang/Runtime/iostat.h b/flang/include/flang/Runtime/iostat.h
index afce509cf1f5..6ce7c82b424e 100644
--- a/flang/include/flang/Runtime/iostat.h
+++ b/flang/include/flang/Runtime/iostat.h
@@ -11,6 +11,7 @@
#ifndef FORTRAN_RUNTIME_IOSTAT_H_
#define FORTRAN_RUNTIME_IOSTAT_H_
+#include "flang/Common/api-attrs.h"
#include "flang/Runtime/magic-numbers.h"
namespace Fortran::runtime::io {
@@ -88,7 +89,7 @@ enum Iostat {
IostatNonExternalDefinedUnformattedIo,
};
-const char *IostatErrorString(int);
+RT_API_ATTRS const char *IostatErrorString(int);
} // namespace Fortran::runtime::io
#endif // FORTRAN_RUNTIME_IOSTAT_H_
diff --git a/flang/include/flang/Runtime/memory.h b/flang/include/flang/Runtime/memory.h
index e24c509f4e90..98412a989f89 100644
--- a/flang/include/flang/Runtime/memory.h
+++ b/flang/include/flang/Runtime/memory.h
@@ -12,7 +12,7 @@
#ifndef FORTRAN_RUNTIME_MEMORY_H_
#define FORTRAN_RUNTIME_MEMORY_H_
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
#include <cassert>
#include <memory>
#include <type_traits>
@@ -79,6 +79,8 @@ public:
return p;
}
+ RT_DIAG_PUSH
+ RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
// Replace the pointer.
RT_API_ATTRS void reset(pointer_type p = pointer_type{}) {
std::swap(ptr_, p);
@@ -90,6 +92,7 @@ public:
// Exchange the pointer with another object.
RT_API_ATTRS void swap(OwningPtr &other) { std::swap(ptr_, other.ptr_); }
+ RT_DIAG_POP
// Get the stored pointer.
RT_API_ATTRS pointer_type get() const { return ptr_; }
@@ -128,9 +131,12 @@ inline RT_API_ATTRS bool operator!=(std::nullptr_t, const OwningPtr<X> &x) {
template <typename A> class SizedNew {
public:
- explicit SizedNew(const Terminator &terminator) : terminator_{terminator} {}
+ explicit RT_API_ATTRS SizedNew(const Terminator &terminator)
+ : terminator_{terminator} {}
+
template <typename... X>
- [[nodiscard]] OwningPtr<A> operator()(std::size_t bytes, X &&...x) {
+ [[nodiscard]] RT_API_ATTRS OwningPtr<A> operator()(
+ std::size_t bytes, X &&...x) {
return OwningPtr<A>{new (AllocateMemoryOrCrash(terminator_, bytes))
A{std::forward<X>(x)...}};
}
@@ -141,7 +147,8 @@ private:
template <typename A> struct New : public SizedNew<A> {
using SizedNew<A>::SizedNew;
- template <typename... X> [[nodiscard]] OwningPtr<A> operator()(X &&...x) {
+ template <typename... X>
+ [[nodiscard]] RT_API_ATTRS OwningPtr<A> operator()(X &&...x) {
return SizedNew<A>::operator()(sizeof(A), std::forward<X>(x)...);
}
};
diff --git a/flang/include/flang/Runtime/reduce.h b/flang/include/flang/Runtime/reduce.h
new file mode 100644
index 000000000000..975aa6dea305
--- /dev/null
+++ b/flang/include/flang/Runtime/reduce.h
@@ -0,0 +1,257 @@
+//===-- include/flang/Runtime/reduce.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Defines the API for implementations of the transformational intrinsic
+// function REDUCE(); see F'2023 16.9.173.
+//
+// Similar to the definition of the APIs for SUM(), &c., in reduction.h,
+// there are typed functions here like ReduceInteger4() for total reductions
+// to scalars and void functions like ReduceInteger4Dim() for partial
+// reductions to smaller arrays.
+
+#ifndef FORTRAN_RUNTIME_REDUCE_H_
+#define FORTRAN_RUNTIME_REDUCE_H_
+
+#include "flang/Common/float128.h"
+#include "flang/Common/uint128.h"
+#include "flang/Runtime/cpp-type.h"
+#include "flang/Runtime/entry-names.h"
+#include <complex>
+#include <cstdint>
+
+namespace Fortran::runtime {
+
+class Descriptor;
+
+template <typename T> using ReductionOperation = T (*)(const T *, const T *);
+template <typename CHAR>
+using ReductionCharOperation = void (*)(CHAR *hiddenResult,
+ std::size_t resultLen, const CHAR *x, const CHAR *y, std::size_t xLen,
+ std::size_t yLen);
+using ReductionDerivedTypeOperation = void (*)(
+ void *hiddenResult, const void *x, const void *y);
+
+extern "C" {
+
+std::int8_t RTDECL(ReduceInteger1)(const Descriptor &,
+ ReductionOperation<std::int8_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int8_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceInteger1Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int8_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int8_t *identity = nullptr,
+ bool ordered = true);
+std::int16_t RTDECL(ReduceInteger2)(const Descriptor &,
+ ReductionOperation<std::int16_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int16_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceInteger2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int16_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int16_t *identity = nullptr,
+ bool ordered = true);
+std::int32_t RTDECL(ReduceInteger4)(const Descriptor &,
+ ReductionOperation<std::int32_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int32_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceInteger4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int32_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int32_t *identity = nullptr,
+ bool ordered = true);
+std::int64_t RTDECL(ReduceInteger8)(const Descriptor &,
+ ReductionOperation<std::int64_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int64_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceInteger8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int64_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int64_t *identity = nullptr,
+ bool ordered = true);
+#ifdef __SIZEOF_INT128__
+common::int128_t RTDECL(ReduceInteger16)(const Descriptor &,
+ ReductionOperation<common::int128_t>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const common::int128_t *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceInteger16Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<common::int128_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr,
+ const common::int128_t *identity = nullptr, bool ordered = true);
+#endif
+
+// REAL/COMPLEX(2 & 3) return 32-bit float results for the caller to downconvert
+float RTDECL(ReduceReal2)(const Descriptor &, ReductionOperation<float>,
+ const char *source, int line, int dim = 0, const Descriptor *mask = nullptr,
+ const float *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceReal2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<float>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const float *identity = nullptr,
+ bool ordered = true);
+float RTDECL(ReduceReal3)(const Descriptor &, ReductionOperation<float>,
+ const char *source, int line, int dim = 0, const Descriptor *mask = nullptr,
+ const float *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceReal3Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<float>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const float *identity = nullptr,
+ bool ordered = true);
+float RTDECL(ReduceReal4)(const Descriptor &, ReductionOperation<float>,
+ const char *source, int line, int dim = 0, const Descriptor *mask = nullptr,
+ const float *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceReal4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<float>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const float *identity = nullptr,
+ bool ordered = true);
+double RTDECL(ReduceReal8)(const Descriptor &, ReductionOperation<double>,
+ const char *source, int line, int dim = 0, const Descriptor *mask = nullptr,
+ const double *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceReal8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<double>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const double *identity = nullptr,
+ bool ordered = true);
+#if LDBL_MANT_DIG == 64
+long double RTDECL(ReduceReal10)(const Descriptor &,
+ ReductionOperation<long double>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const long double *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceReal10Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<long double>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const long double *identity = nullptr,
+ bool ordered = true);
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+CppFloat128Type RTDECL(ReduceReal16)(const Descriptor &,
+ ReductionOperation<CppFloat128Type>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const CppFloat128Type *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceReal16Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<CppFloat128Type>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const CppFloat128Type *identity = nullptr,
+ bool ordered = true);
+#endif
+
+void RTDECL(CppReduceComplex2)(std::complex<float> &, const Descriptor &,
+ ReductionOperation<std::complex<float>>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const std::complex<float> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<float>>, const char *source, int line,
+ int dim, const Descriptor *mask = nullptr,
+ const std::complex<float> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex3)(std::complex<float> &, const Descriptor &,
+ ReductionOperation<std::complex<float>>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const std::complex<float> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex3Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<float>>, const char *source, int line,
+ int dim, const Descriptor *mask = nullptr,
+ const std::complex<float> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex4)(std::complex<float> &, const Descriptor &,
+ ReductionOperation<std::complex<float>>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const std::complex<float> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<float>>, const char *source, int line,
+ int dim, const Descriptor *mask = nullptr,
+ const std::complex<float> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex8)(std::complex<double> &, const Descriptor &,
+ ReductionOperation<std::complex<double>>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const std::complex<double> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<double>>, const char *source, int line,
+ int dim, const Descriptor *mask = nullptr,
+ const std::complex<double> *identity = nullptr, bool ordered = true);
+#if LDBL_MANT_DIG == 64
+void RTDECL(CppReduceComplex10)(std::complex<long double> &, const Descriptor &,
+ ReductionOperation<std::complex<long double>>, const char *source, int line,
+ int dim = 0, const Descriptor *mask = nullptr,
+ const std::complex<long double> *identity = nullptr, bool ordered = true);
+void RTDECL(CppReduceComplex10Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<long double>>, const char *source, int line,
+ int dim, const Descriptor *mask = nullptr,
+ const std::complex<long double> *identity = nullptr, bool ordered = true);
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+void RTDECL(CppReduceComplex16)(std::complex<CppFloat128Type> &,
+ const Descriptor &, ReductionOperation<std::complex<CppFloat128Type>>,
+ const char *source, int line, int dim = 0, const Descriptor *mask = nullptr,
+ const std::complex<CppFloat128Type> *identity = nullptr,
+ bool ordered = true);
+void RTDECL(CppReduceComplex16Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<CppFloat128Type>>, const char *source,
+ int line, int dim, const Descriptor *mask = nullptr,
+ const std::complex<CppFloat128Type> *identity = nullptr,
+ bool ordered = true);
+#endif
+
+bool RTDECL(ReduceLogical1)(const Descriptor &, ReductionOperation<std::int8_t>,
+ const char *source, int line, int dim = 0, const Descriptor *mask = nullptr,
+ const std::int8_t *identity = nullptr, bool ordered = true);
+void RTDECL(ReduceLogical1Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int8_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int8_t *identity = nullptr,
+ bool ordered = true);
+bool RTDECL(ReduceLogical2)(const Descriptor &,
+ ReductionOperation<std::int16_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int16_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceLogical2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int16_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int16_t *identity = nullptr,
+ bool ordered = true);
+bool RTDECL(ReduceLogical4)(const Descriptor &,
+ ReductionOperation<std::int32_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int32_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceLogical4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int32_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int32_t *identity = nullptr,
+ bool ordered = true);
+bool RTDECL(ReduceLogical8)(const Descriptor &,
+ ReductionOperation<std::int64_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const std::int64_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceLogical8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int64_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const std::int64_t *identity = nullptr,
+ bool ordered = true);
+
+void RTDECL(ReduceChar1)(char *result, const Descriptor &array,
+ ReductionCharOperation<char>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const char *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceCharacter1Dim)(Descriptor &result, const Descriptor &array,
+ ReductionCharOperation<char>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const char *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceChar2)(char16_t *result, const Descriptor &array,
+ ReductionCharOperation<char16_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const char16_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceCharacter2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionCharOperation<char16_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const char16_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceChar4)(char32_t *result, const Descriptor &array,
+ ReductionCharOperation<char32_t>, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const char32_t *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceCharacter4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionCharOperation<char32_t>, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const char32_t *identity = nullptr,
+ bool ordered = true);
+
+void RTDECL(ReduceDerivedType)(char *result, const Descriptor &array,
+ ReductionDerivedTypeOperation, const char *source, int line, int dim = 0,
+ const Descriptor *mask = nullptr, const char *identity = nullptr,
+ bool ordered = true);
+void RTDECL(ReduceDerivedTypeDim)(Descriptor &result, const Descriptor &array,
+ ReductionDerivedTypeOperation, const char *source, int line, int dim,
+ const Descriptor *mask = nullptr, const char *identity = nullptr,
+ bool ordered = true);
+
+} // extern "C"
+} // namespace Fortran::runtime
+#endif // FORTRAN_RUNTIME_REDUCE_H_
diff --git a/flang/include/flang/Runtime/reduction.h b/flang/include/flang/Runtime/reduction.h
index 5b6077658575..97986c12e8a1 100644
--- a/flang/include/flang/Runtime/reduction.h
+++ b/flang/include/flang/Runtime/reduction.h
@@ -89,9 +89,11 @@ void RTDECL(CppSumComplex4)(std::complex<float> &, const Descriptor &,
void RTDECL(CppSumComplex8)(std::complex<double> &, const Descriptor &,
const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
+#if LDBL_MANT_DIG == 64
void RTDECL(CppSumComplex10)(std::complex<long double> &, const Descriptor &,
const char *source, int line, int dim = 0,
const Descriptor *mask = nullptr);
+#endif
#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
void RTDECL(CppSumComplex16)(std::complex<CppFloat128Type> &,
const Descriptor &, const char *source, int line, int dim = 0,
diff --git a/flang/include/flang/Runtime/type-code.h b/flang/include/flang/Runtime/type-code.h
index f7419249c2ba..8e7314e0af1e 100644
--- a/flang/include/flang/Runtime/type-code.h
+++ b/flang/include/flang/Runtime/type-code.h
@@ -12,7 +12,6 @@
#include "flang/Common/Fortran.h"
#include "flang/Common/optional.h"
#include "flang/ISO_Fortran_binding_wrapper.h"
-#include <optional>
#include <utility>
namespace Fortran::runtime {
diff --git a/flang/include/flang/Semantics/tools.h b/flang/include/flang/Semantics/tools.h
index dc3cd6c894a2..f0eb82eebefa 100644
--- a/flang/include/flang/Semantics/tools.h
+++ b/flang/include/flang/Semantics/tools.h
@@ -53,7 +53,8 @@ const Symbol *FindPointerComponent(const Symbol &);
const Symbol *FindInterface(const Symbol &);
const Symbol *FindSubprogram(const Symbol &);
const Symbol *FindFunctionResult(const Symbol &);
-const Symbol *FindOverriddenBinding(const Symbol &);
+const Symbol *FindOverriddenBinding(
+ const Symbol &, bool &isInaccessibleDeferred);
const Symbol *FindGlobal(const Symbol &);
const DeclTypeSpec *FindParentTypeSpec(const DerivedTypeSpec &);
@@ -282,6 +283,9 @@ const Symbol *FindExternallyVisibleObject(
// specific procedure of the same name, return it instead.
const Symbol &BypassGeneric(const Symbol &);
+// Given a cray pointee symbol, returns the related cray pointer symbol.
+const Symbol &GetCrayPointer(const Symbol &crayPointee);
+
using SomeExpr = evaluate::Expr<evaluate::SomeType>;
bool ExprHasTypeCategory(
diff --git a/flang/lib/Evaluate/check-expression.cpp b/flang/lib/Evaluate/check-expression.cpp
index 0e7d97900328..7d721399072c 100644
--- a/flang/lib/Evaluate/check-expression.cpp
+++ b/flang/lib/Evaluate/check-expression.cpp
@@ -358,10 +358,14 @@ bool IsInitialProcedureTarget(const semantics::Symbol &symbol) {
const auto &ultimate{symbol.GetUltimate()};
return common::visit(
common::visitors{
- [](const semantics::SubprogramDetails &subp) {
- return !subp.isDummy();
+ [&](const semantics::SubprogramDetails &subp) {
+ return !subp.isDummy() && !subp.stmtFunction() &&
+ symbol.owner().kind() != semantics::Scope::Kind::MainProgram &&
+ symbol.owner().kind() != semantics::Scope::Kind::Subprogram;
+ },
+ [](const semantics::SubprogramNameDetails &x) {
+ return x.kind() != semantics::SubprogramKind::Internal;
},
- [](const semantics::SubprogramNameDetails &) { return true; },
[&](const semantics::ProcEntityDetails &proc) {
return !semantics::IsPointer(ultimate) && !proc.isDummy();
},
diff --git a/flang/lib/Evaluate/constant.cpp b/flang/lib/Evaluate/constant.cpp
index a3bdefb76a41..990339958399 100644
--- a/flang/lib/Evaluate/constant.cpp
+++ b/flang/lib/Evaluate/constant.cpp
@@ -160,7 +160,7 @@ template <typename RESULT, typename ELEMENT>
auto ConstantBase<RESULT, ELEMENT>::Reshape(
const ConstantSubscripts &dims) const -> std::vector<Element> {
std::optional<uint64_t> optN{TotalElementCount(dims)};
- CHECK(optN);
+ CHECK_MSG(optN, "Overflow in TotalElementCount");
uint64_t n{*optN};
CHECK(!empty() || n == 0);
std::vector<Element> elements;
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index c3cb9ba6a47e..91b898eb513e 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -3490,7 +3490,8 @@ private:
if (Fortran::evaluate::UnwrapExpr<Fortran::evaluate::NullPointer>(
assign.rhs)) {
// rhs is null(). rhs being null(pptr) is handled in genNull.
- auto boxTy{Fortran::lower::getUntypedBoxProcType(&getMLIRContext())};
+ auto boxTy{
+ Fortran::lower::getUntypedBoxProcType(builder->getContext())};
hlfir::Entity rhs(
fir::factory::createNullBoxProc(*builder, loc, boxTy));
builder->createStoreWithConvert(loc, rhs, lhs);
@@ -3706,15 +3707,39 @@ private:
return false;
}
+ static void genCUDADataTransfer(fir::FirOpBuilder &builder,
+ mlir::Location loc, bool lhsIsDevice,
+ hlfir::Entity &lhs, bool rhsIsDevice,
+ hlfir::Entity &rhs) {
+ if (rhs.isBoxAddressOrValue() || lhs.isBoxAddressOrValue())
+ TODO(loc, "CUDA data transfler with descriptors");
+ if (lhsIsDevice && !rhsIsDevice) {
+ auto transferKindAttr = fir::CUDADataTransferKindAttr::get(
+ builder.getContext(), fir::CUDADataTransferKind::HostDevice);
+ // device = host
+ if (!rhs.isVariable()) {
+ auto associate = hlfir::genAssociateExpr(
+ loc, builder, rhs, rhs.getType(), ".cuf_host_tmp");
+ builder.create<fir::CUDADataTransferOp>(loc, associate.getBase(), lhs,
+ transferKindAttr);
+ builder.create<hlfir::EndAssociateOp>(loc, associate);
+ } else {
+ builder.create<fir::CUDADataTransferOp>(loc, rhs, lhs,
+ transferKindAttr);
+ }
+ return;
+ }
+ TODO(loc, "Assignement with CUDA Fortran variables");
+ }
+
void genDataAssignment(
const Fortran::evaluate::Assignment &assign,
const Fortran::evaluate::ProcedureRef *userDefinedAssignment) {
mlir::Location loc = getCurrentLocation();
fir::FirOpBuilder &builder = getFirOpBuilder();
- if (Fortran::evaluate::HasCUDAAttrs(assign.lhs) ||
- Fortran::evaluate::HasCUDAAttrs(assign.rhs))
- TODO(loc, "Assignement with CUDA Fortran variables");
+ bool lhsIsDevice = Fortran::evaluate::HasCUDAAttrs(assign.lhs);
+ bool rhsIsDevice = Fortran::evaluate::HasCUDAAttrs(assign.rhs);
// Gather some information about the assignment that will impact how it is
// lowered.
@@ -3772,9 +3797,13 @@ private:
Fortran::lower::StatementContext localStmtCtx;
hlfir::Entity rhs = evaluateRhs(localStmtCtx);
hlfir::Entity lhs = evaluateLhs(localStmtCtx);
- builder.create<hlfir::AssignOp>(loc, rhs, lhs,
- isWholeAllocatableAssignment,
- keepLhsLengthInAllocatableAssignment);
+ if (lhsIsDevice || rhsIsDevice) {
+ genCUDADataTransfer(builder, loc, lhsIsDevice, lhs, rhsIsDevice, rhs);
+ } else {
+ builder.create<hlfir::AssignOp>(loc, rhs, lhs,
+ isWholeAllocatableAssignment,
+ keepLhsLengthInAllocatableAssignment);
+ }
return;
}
// Assignments inside Forall, Where, or assignments to a vector subscripted
@@ -3995,11 +4024,12 @@ private:
sym->Rank() == 0) {
// get the corresponding Cray pointer
- auto ptrSym = Fortran::lower::getCrayPointer(*sym);
+ const Fortran::semantics::Symbol &ptrSym =
+ Fortran::semantics::GetCrayPointer(*sym);
fir::ExtendedValue ptr =
getSymbolExtendedValue(ptrSym, nullptr);
mlir::Value ptrVal = fir::getBase(ptr);
- mlir::Type ptrTy = genType(*ptrSym);
+ mlir::Type ptrTy = genType(ptrSym);
fir::ExtendedValue pte =
getSymbolExtendedValue(*sym, nullptr);
diff --git a/flang/lib/Lower/ConvertConstant.cpp b/flang/lib/Lower/ConvertConstant.cpp
index 336944d35b7e..ed389bbe4ae5 100644
--- a/flang/lib/Lower/ConvertConstant.cpp
+++ b/flang/lib/Lower/ConvertConstant.cpp
@@ -14,9 +14,12 @@
#include "flang/Evaluate/expression.h"
#include "flang/Lower/AbstractConverter.h"
#include "flang/Lower/BuiltinModules.h"
+#include "flang/Lower/ConvertExprToHLFIR.h"
#include "flang/Lower/ConvertType.h"
#include "flang/Lower/ConvertVariable.h"
#include "flang/Lower/Mangler.h"
+#include "flang/Lower/StatementContext.h"
+#include "flang/Lower/SymbolMap.h"
#include "flang/Optimizer/Builder/Complex.h"
#include "flang/Optimizer/Builder/MutableBox.h"
#include "flang/Optimizer/Builder/Todo.h"
@@ -380,10 +383,21 @@ static mlir::Value genStructureComponentInit(
}
if (Fortran::semantics::IsPointer(sym)) {
- if (Fortran::semantics::IsProcedure(sym))
- TODO(loc, "procedure pointer component initial value");
- mlir::Value initialTarget =
- Fortran::lower::genInitialDataTarget(converter, loc, componentTy, expr);
+ mlir::Value initialTarget;
+ if (Fortran::semantics::IsProcedure(sym)) {
+ if (Fortran::evaluate::UnwrapExpr<Fortran::evaluate::NullPointer>(expr))
+ initialTarget =
+ fir::factory::createNullBoxProc(builder, loc, componentTy);
+ else {
+ Fortran::lower::SymMap globalOpSymMap;
+ Fortran::lower::StatementContext stmtCtx;
+ auto box{getBase(Fortran::lower::convertExprToAddress(
+ loc, converter, expr, globalOpSymMap, stmtCtx))};
+ initialTarget = builder.createConvert(loc, componentTy, box);
+ }
+ } else
+ initialTarget = Fortran::lower::genInitialDataTarget(converter, loc,
+ componentTy, expr);
res = builder.create<fir::InsertValueOp>(
loc, recTy, res, initialTarget,
builder.getArrayAttr(field.getAttributes()));
diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp
index d157db2cde49..fb7807718ff8 100644
--- a/flang/lib/Lower/ConvertExpr.cpp
+++ b/flang/lib/Lower/ConvertExpr.cpp
@@ -862,7 +862,8 @@ public:
addr);
} else if (sym->test(Fortran::semantics::Symbol::Flag::CrayPointee)) {
// get the corresponding Cray pointer
- auto ptrSym = Fortran::lower::getCrayPointer(sym);
+ Fortran::semantics::SymbolRef ptrSym{
+ Fortran::semantics::GetCrayPointer(sym)};
ExtValue ptr = gen(ptrSym);
mlir::Value ptrVal = fir::getBase(ptr);
mlir::Type ptrTy = converter.genType(*ptrSym);
@@ -1537,8 +1538,8 @@ public:
auto baseSym = getFirstSym(aref);
if (baseSym.test(Fortran::semantics::Symbol::Flag::CrayPointee)) {
// get the corresponding Cray pointer
- auto ptrSym = Fortran::lower::getCrayPointer(baseSym);
-
+ Fortran::semantics::SymbolRef ptrSym{
+ Fortran::semantics::GetCrayPointer(baseSym)};
fir::ExtendedValue ptr = gen(ptrSym);
mlir::Value ptrVal = fir::getBase(ptr);
mlir::Type ptrTy = ptrVal.getType();
@@ -6946,7 +6947,8 @@ private:
ComponentPath &components) {
mlir::Value ptrVal = nullptr;
if (x.test(Fortran::semantics::Symbol::Flag::CrayPointee)) {
- auto ptrSym = Fortran::lower::getCrayPointer(x);
+ Fortran::semantics::SymbolRef ptrSym{
+ Fortran::semantics::GetCrayPointer(x)};
ExtValue ptr = converter.getSymbolExtendedValue(ptrSym);
ptrVal = fir::getBase(ptr);
}
diff --git a/flang/lib/Lower/ConvertExprToHLFIR.cpp b/flang/lib/Lower/ConvertExprToHLFIR.cpp
index c5bfbdf6b8c1..6e57b31d022b 100644
--- a/flang/lib/Lower/ConvertExprToHLFIR.cpp
+++ b/flang/lib/Lower/ConvertExprToHLFIR.cpp
@@ -130,7 +130,8 @@ public:
// shape is deferred and should not be loaded now to preserve
// pointer/allocatable aspects.
if (componentSym.Rank() == 0 ||
- Fortran::semantics::IsAllocatableOrObjectPointer(&componentSym))
+ Fortran::semantics::IsAllocatableOrObjectPointer(&componentSym) ||
+ Fortran::semantics::IsProcedurePointer(&componentSym))
return mlir::Value{};
fir::FirOpBuilder &builder = getBuilder();
@@ -284,7 +285,7 @@ private:
// value of the Cray pointer variable.
fir::FirOpBuilder &builder = getBuilder();
fir::FortranVariableOpInterface ptrVar =
- gen(Fortran::lower::getCrayPointer(symbolRef));
+ gen(Fortran::semantics::GetCrayPointer(symbolRef));
mlir::Value ptrAddr = ptrVar.getBase();
// Reinterpret the reference to a Cray pointer so that
@@ -306,10 +307,17 @@ private:
}
return *varDef;
}
+ llvm::errs() << *symbolRef << "\n";
TODO(getLoc(), "lowering symbol to HLFIR");
}
fir::FortranVariableOpInterface
+ gen(const Fortran::semantics::Symbol &symbol) {
+ Fortran::evaluate::SymbolRef symref{symbol};
+ return gen(symref);
+ }
+
+ fir::FortranVariableOpInterface
gen(const Fortran::evaluate::Component &component) {
if (Fortran::semantics::IsAllocatableOrPointer(component.GetLastSymbol()))
return genWholeAllocatableOrPointerComponent(component);
@@ -1760,8 +1768,22 @@ private:
if (attrs && bitEnumContainsAny(attrs.getFlags(),
fir::FortranVariableFlagsEnum::pointer)) {
- if (Fortran::semantics::IsProcedure(sym))
- TODO(loc, "procedure pointer component in structure constructor");
+ if (Fortran::semantics::IsProcedure(sym)) {
+ // Procedure pointer components.
+ if (Fortran::evaluate::UnwrapExpr<Fortran::evaluate::NullPointer>(
+ expr)) {
+ auto boxTy{
+ Fortran::lower::getUntypedBoxProcType(builder.getContext())};
+ hlfir::Entity rhs(
+ fir::factory::createNullBoxProc(builder, loc, boxTy));
+ builder.createStoreWithConvert(loc, rhs, lhs);
+ continue;
+ }
+ hlfir::Entity rhs(getBase(Fortran::lower::convertExprToAddress(
+ loc, converter, expr, symMap, stmtCtx)));
+ builder.createStoreWithConvert(loc, rhs, lhs);
+ continue;
+ }
// Pointer component construction is just a copy of the box contents.
fir::ExtendedValue lhsExv =
hlfir::translateToExtendedValue(loc, builder, lhs);
diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp
index 94d849862099..e07ae42dc749 100644
--- a/flang/lib/Lower/ConvertVariable.cpp
+++ b/flang/lib/Lower/ConvertVariable.cpp
@@ -1554,6 +1554,11 @@ fir::FortranVariableFlagsAttr Fortran::lower::translateSymbolAttributes(
mlir::MLIRContext *mlirContext, const Fortran::semantics::Symbol &sym,
fir::FortranVariableFlagsEnum extraFlags) {
fir::FortranVariableFlagsEnum flags = extraFlags;
+ if (sym.test(Fortran::semantics::Symbol::Flag::CrayPointee)) {
+ // CrayPointee are represented as pointers.
+ flags = flags | fir::FortranVariableFlagsEnum::pointer;
+ return fir::FortranVariableFlagsAttr::get(mlirContext, flags);
+ }
const auto &attrs = sym.attrs();
if (attrs.test(Fortran::semantics::Attr::ALLOCATABLE))
flags = flags | fir::FortranVariableFlagsEnum::allocatable;
@@ -1615,8 +1620,6 @@ static void genDeclareSymbol(Fortran::lower::AbstractConverter &converter,
(!Fortran::semantics::IsProcedure(sym) ||
Fortran::semantics::IsPointer(sym)) &&
!sym.detailsIf<Fortran::semantics::CommonBlockDetails>()) {
- bool isCrayPointee =
- sym.test(Fortran::semantics::Symbol::Flag::CrayPointee);
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
const mlir::Location loc = genLocation(converter, sym);
mlir::Value shapeOrShift;
@@ -1636,31 +1639,21 @@ static void genDeclareSymbol(Fortran::lower::AbstractConverter &converter,
Fortran::lower::translateSymbolCUDADataAttribute(builder.getContext(),
sym);
- if (isCrayPointee) {
- mlir::Type baseType =
- hlfir::getFortranElementOrSequenceType(base.getType());
- if (auto seqType = mlir::dyn_cast<fir::SequenceType>(baseType)) {
- // The pointer box's sequence type must be with unknown shape.
- llvm::SmallVector<int64_t> shape(seqType.getDimension(),
- fir::SequenceType::getUnknownExtent());
- baseType = fir::SequenceType::get(shape, seqType.getEleTy());
- }
- fir::BoxType ptrBoxType =
- fir::BoxType::get(fir::PointerType::get(baseType));
+ if (sym.test(Fortran::semantics::Symbol::Flag::CrayPointee)) {
+ mlir::Type ptrBoxType =
+ Fortran::lower::getCrayPointeeBoxType(base.getType());
mlir::Value boxAlloc = builder.createTemporary(loc, ptrBoxType);
// Declare a local pointer variable.
- attributes = fir::FortranVariableFlagsAttr::get(
- builder.getContext(), fir::FortranVariableFlagsEnum::pointer);
auto newBase = builder.create<hlfir::DeclareOp>(
loc, boxAlloc, name, /*shape=*/nullptr, lenParams, attributes);
- mlir::Value nullAddr =
- builder.createNullConstant(loc, ptrBoxType.getEleTy());
+ mlir::Value nullAddr = builder.createNullConstant(
+ loc, llvm::cast<fir::BaseBoxType>(ptrBoxType).getEleTy());
// If the element type is known-length character, then
// EmboxOp does not need the length parameters.
if (auto charType = mlir::dyn_cast<fir::CharacterType>(
- fir::unwrapSequenceType(baseType)))
+ hlfir::getFortranElementType(base.getType())))
if (!charType.hasDynamicLen())
lenParams.clear();
@@ -2346,16 +2339,13 @@ void Fortran::lower::createRuntimeTypeInfoGlobal(
defineGlobal(converter, var, globalName, linkage);
}
-Fortran::semantics::SymbolRef
-Fortran::lower::getCrayPointer(Fortran::semantics::SymbolRef sym) {
- assert(!sym->GetUltimate().owner().crayPointers().empty() &&
- "empty Cray pointer/pointee map");
- for (const auto &[pointee, pointer] :
- sym->GetUltimate().owner().crayPointers()) {
- if (pointee == sym->name()) {
- Fortran::semantics::SymbolRef v{pointer.get()};
- return v;
- }
+mlir::Type Fortran::lower::getCrayPointeeBoxType(mlir::Type fortranType) {
+ mlir::Type baseType = hlfir::getFortranElementOrSequenceType(fortranType);
+ if (auto seqType = mlir::dyn_cast<fir::SequenceType>(baseType)) {
+ // The pointer box's sequence type must be with unknown shape.
+ llvm::SmallVector<int64_t> shape(seqType.getDimension(),
+ fir::SequenceType::getUnknownExtent());
+ baseType = fir::SequenceType::get(shape, seqType.getEleTy());
}
- llvm_unreachable("corresponding Cray pointer cannot be found");
+ return fir::BoxType::get(fir::PointerType::get(baseType));
}
diff --git a/flang/lib/Lower/HostAssociations.cpp b/flang/lib/Lower/HostAssociations.cpp
index 414673b00f44..8eb548eb2bd5 100644
--- a/flang/lib/Lower/HostAssociations.cpp
+++ b/flang/lib/Lower/HostAssociations.cpp
@@ -315,7 +315,11 @@ class CapturedAllocatableAndPointer
public:
static mlir::Type getType(Fortran::lower::AbstractConverter &converter,
const Fortran::semantics::Symbol &sym) {
- return fir::ReferenceType::get(converter.genType(sym));
+ mlir::Type baseType = converter.genType(sym);
+ if (sym.GetUltimate().test(Fortran::semantics::Symbol::Flag::CrayPointee))
+ return fir::ReferenceType::get(
+ Fortran::lower::getCrayPointeeBoxType(baseType));
+ return fir::ReferenceType::get(baseType);
}
static void instantiateHostTuple(const InstantiateHostTuple &args,
Fortran::lower::AbstractConverter &converter,
@@ -507,7 +511,8 @@ walkCaptureCategories(T visitor, Fortran::lower::AbstractConverter &converter,
if (Fortran::semantics::IsProcedure(sym))
return CapturedProcedure::visit(visitor, converter, sym, ba);
ba.analyze(sym);
- if (Fortran::semantics::IsAllocatableOrPointer(sym))
+ if (Fortran::semantics::IsAllocatableOrPointer(sym) ||
+ sym.GetUltimate().test(Fortran::semantics::Symbol::Flag::CrayPointee))
return CapturedAllocatableAndPointer::visit(visitor, converter, sym, ba);
if (ba.isArray())
return CapturedArrays::visit(visitor, converter, sym, ba);
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 7b7e4a875cd8..6e6714454f05 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -1667,15 +1667,17 @@ static void privatizeIv(Fortran::lower::AbstractConverter &converter,
ivPrivate.push_back(privateValue);
}
-static mlir::acc::LoopOp
-createLoopOp(Fortran::lower::AbstractConverter &converter,
- mlir::Location currentLocation,
- Fortran::semantics::SemanticsContext &semanticsContext,
- Fortran::lower::StatementContext &stmtCtx,
- const Fortran::parser::DoConstruct &outerDoConstruct,
- Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::AccClauseList &accClauseList,
- bool needEarlyReturnHandling = false) {
+static mlir::acc::LoopOp createLoopOp(
+ Fortran::lower::AbstractConverter &converter,
+ mlir::Location currentLocation,
+ Fortran::semantics::SemanticsContext &semanticsContext,
+ Fortran::lower::StatementContext &stmtCtx,
+ const Fortran::parser::DoConstruct &outerDoConstruct,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::AccClauseList &accClauseList,
+ std::optional<mlir::acc::CombinedConstructsType> combinedConstructs =
+ std::nullopt,
+ bool needEarlyReturnHandling = false) {
fir::FirOpBuilder &builder = converter.getFirOpBuilder();
llvm::SmallVector<mlir::Value> tileOperands, privateOperands, ivPrivate,
reductionOperands, cacheOperands, vectorOperands, workerNumOperands,
@@ -2015,6 +2017,10 @@ createLoopOp(Fortran::lower::AbstractConverter &converter,
if (!collapseDeviceTypes.empty())
loopOp.setCollapseDeviceTypeAttr(builder.getArrayAttr(collapseDeviceTypes));
+ if (combinedConstructs)
+ loopOp.setCombinedAttr(mlir::acc::CombinedConstructsTypeAttr::get(
+ builder.getContext(), *combinedConstructs));
+
return loopOp;
}
@@ -2060,7 +2066,7 @@ genACC(Fortran::lower::AbstractConverter &converter,
std::get<std::optional<Fortran::parser::DoConstruct>>(loopConstruct.t);
auto loopOp = createLoopOp(converter, currentLocation, semanticsContext,
stmtCtx, *outerDoConstruct, eval, accClauseList,
- needEarlyExitHandling);
+ /*combinedConstructs=*/{}, needEarlyExitHandling);
if (needEarlyExitHandling)
return loopOp.getResult(0);
@@ -2092,14 +2098,14 @@ static void genDataOperandOperationsWithModifier(
}
template <typename Op>
-static Op
-createComputeOp(Fortran::lower::AbstractConverter &converter,
- mlir::Location currentLocation,
- Fortran::lower::pft::Evaluation &eval,
- Fortran::semantics::SemanticsContext &semanticsContext,
- Fortran::lower::StatementContext &stmtCtx,
- const Fortran::parser::AccClauseList &accClauseList,
- bool outerCombined = false) {
+static Op createComputeOp(
+ Fortran::lower::AbstractConverter &converter,
+ mlir::Location currentLocation, Fortran::lower::pft::Evaluation &eval,
+ Fortran::semantics::SemanticsContext &semanticsContext,
+ Fortran::lower::StatementContext &stmtCtx,
+ const Fortran::parser::AccClauseList &accClauseList,
+ std::optional<mlir::acc::CombinedConstructsType> combinedConstructs =
+ std::nullopt) {
// Parallel operation operands
mlir::Value ifCond;
@@ -2292,7 +2298,7 @@ createComputeOp(Fortran::lower::AbstractConverter &converter,
} else if (const auto *privateClause =
std::get_if<Fortran::parser::AccClause::Private>(
&clause.u)) {
- if (!outerCombined)
+ if (!combinedConstructs)
genPrivatizations<mlir::acc::PrivateRecipeOp>(
privateClause->v, converter, semanticsContext, stmtCtx,
privateOperands, privatizations);
@@ -2310,7 +2316,7 @@ createComputeOp(Fortran::lower::AbstractConverter &converter,
// combined - delay it to the loop. However, a reduction clause on a
// combined construct implies a copy clause so issue an implicit copy
// instead.
- if (!outerCombined) {
+ if (!combinedConstructs) {
genReductions(reductionClause->v, converter, semanticsContext, stmtCtx,
reductionOperands, reductionRecipes);
} else {
@@ -2362,11 +2368,11 @@ createComputeOp(Fortran::lower::AbstractConverter &converter,
if constexpr (std::is_same_v<Op, mlir::acc::KernelsOp>)
computeOp = createRegionOp<Op, mlir::acc::TerminatorOp>(
builder, currentLocation, currentLocation, eval, operands,
- operandSegments, outerCombined);
+ operandSegments, /*outerCombined=*/combinedConstructs.has_value());
else
computeOp = createRegionOp<Op, mlir::acc::YieldOp>(
builder, currentLocation, currentLocation, eval, operands,
- operandSegments, outerCombined);
+ operandSegments, /*outerCombined=*/combinedConstructs.has_value());
if (addSelfAttr)
computeOp.setSelfAttrAttr(builder.getUnitAttr());
@@ -2419,6 +2425,9 @@ createComputeOp(Fortran::lower::AbstractConverter &converter,
mlir::ArrayAttr::get(builder.getContext(), firstPrivatizations));
}
+ if (combinedConstructs)
+ computeOp.setCombinedAttr(builder.getUnitAttr());
+
auto insPt = builder.saveInsertionPoint();
builder.setInsertionPointAfter(computeOp);
@@ -2734,21 +2743,24 @@ genACC(Fortran::lower::AbstractConverter &converter,
if (combinedDirective.v == llvm::acc::ACCD_kernels_loop) {
createComputeOp<mlir::acc::KernelsOp>(
converter, currentLocation, eval, semanticsContext, stmtCtx,
- accClauseList, /*outerCombined=*/true);
+ accClauseList, mlir::acc::CombinedConstructsType::KernelsLoop);
createLoopOp(converter, currentLocation, semanticsContext, stmtCtx,
- *outerDoConstruct, eval, accClauseList);
+ *outerDoConstruct, eval, accClauseList,
+ mlir::acc::CombinedConstructsType::KernelsLoop);
} else if (combinedDirective.v == llvm::acc::ACCD_parallel_loop) {
createComputeOp<mlir::acc::ParallelOp>(
converter, currentLocation, eval, semanticsContext, stmtCtx,
- accClauseList, /*outerCombined=*/true);
+ accClauseList, mlir::acc::CombinedConstructsType::ParallelLoop);
createLoopOp(converter, currentLocation, semanticsContext, stmtCtx,
- *outerDoConstruct, eval, accClauseList);
+ *outerDoConstruct, eval, accClauseList,
+ mlir::acc::CombinedConstructsType::ParallelLoop);
} else if (combinedDirective.v == llvm::acc::ACCD_serial_loop) {
- createComputeOp<mlir::acc::SerialOp>(converter, currentLocation, eval,
- semanticsContext, stmtCtx,
- accClauseList, /*outerCombined=*/true);
+ createComputeOp<mlir::acc::SerialOp>(
+ converter, currentLocation, eval, semanticsContext, stmtCtx,
+ accClauseList, mlir::acc::CombinedConstructsType::SerialLoop);
createLoopOp(converter, currentLocation, semanticsContext, stmtCtx,
- *outerDoConstruct, eval, accClauseList);
+ *outerDoConstruct, eval, accClauseList,
+ mlir::acc::CombinedConstructsType::SerialLoop);
} else {
llvm::report_fatal_error("Unknown combined construct encountered");
}
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
index 95faa0767e36..0a57a1496289 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
@@ -31,57 +31,33 @@ static void checkMapType(mlir::Location location, mlir::Type type) {
}
static mlir::omp::ScheduleModifier
-translateScheduleModifier(const omp::clause::Schedule::ModType &m) {
+translateScheduleModifier(const omp::clause::Schedule::OrderingModifier &m) {
switch (m) {
- case omp::clause::Schedule::ModType::Monotonic:
+ case omp::clause::Schedule::OrderingModifier::Monotonic:
return mlir::omp::ScheduleModifier::monotonic;
- case omp::clause::Schedule::ModType::Nonmonotonic:
+ case omp::clause::Schedule::OrderingModifier::Nonmonotonic:
return mlir::omp::ScheduleModifier::nonmonotonic;
- case omp::clause::Schedule::ModType::Simd:
- return mlir::omp::ScheduleModifier::simd;
}
return mlir::omp::ScheduleModifier::none;
}
static mlir::omp::ScheduleModifier
getScheduleModifier(const omp::clause::Schedule &clause) {
- using ScheduleModifier = omp::clause::Schedule::ScheduleModifier;
- const auto &modifier = std::get<std::optional<ScheduleModifier>>(clause.t);
- // The input may have the modifier any order, so we look for one that isn't
- // SIMD. If modifier is not set at all, fall down to the bottom and return
- // "none".
- if (modifier) {
- using ModType = omp::clause::Schedule::ModType;
- const auto &modType1 = std::get<ModType>(modifier->t);
- if (modType1 == ModType::Simd) {
- const auto &modType2 = std::get<std::optional<ModType>>(modifier->t);
- if (modType2 && *modType2 != ModType::Simd)
- return translateScheduleModifier(*modType2);
- return mlir::omp::ScheduleModifier::none;
- }
-
- return translateScheduleModifier(modType1);
- }
+ using Schedule = omp::clause::Schedule;
+ const auto &modifier =
+ std::get<std::optional<Schedule::OrderingModifier>>(clause.t);
+ if (modifier)
+ return translateScheduleModifier(*modifier);
return mlir::omp::ScheduleModifier::none;
}
static mlir::omp::ScheduleModifier
getSimdModifier(const omp::clause::Schedule &clause) {
- using ScheduleModifier = omp::clause::Schedule::ScheduleModifier;
- const auto &modifier = std::get<std::optional<ScheduleModifier>>(clause.t);
- // Either of the two possible modifiers in the input can be the SIMD modifier,
- // so look in either one, and return simd if we find one. Not found = return
- // "none".
- if (modifier) {
- using ModType = omp::clause::Schedule::ModType;
- const auto &modType1 = std::get<ModType>(modifier->t);
- if (modType1 == ModType::Simd)
- return mlir::omp::ScheduleModifier::simd;
-
- const auto &modType2 = std::get<std::optional<ModType>>(modifier->t);
- if (modType2 && *modType2 == ModType::Simd)
- return mlir::omp::ScheduleModifier::simd;
- }
+ using Schedule = omp::clause::Schedule;
+ const auto &modifier =
+ std::get<std::optional<Schedule::ChunkModifier>>(clause.t);
+ if (modifier && *modifier == Schedule::ChunkModifier::Simd)
+ return mlir::omp::ScheduleModifier::simd;
return mlir::omp::ScheduleModifier::none;
}
@@ -94,36 +70,31 @@ genAllocateClause(Fortran::lower::AbstractConverter &converter,
mlir::Location currentLocation = converter.getCurrentLocation();
Fortran::lower::StatementContext stmtCtx;
- const omp::ObjectList &objectList = std::get<omp::ObjectList>(clause.t);
- const auto &modifier =
- std::get<std::optional<omp::clause::Allocate::Modifier>>(clause.t);
-
- // If the allocate modifier is present, check if we only use the allocator
- // submodifier. ALIGN in this context is unimplemented
- const bool onlyAllocator =
- modifier &&
- std::holds_alternative<omp::clause::Allocate::Modifier::Allocator>(
- modifier->u);
+ auto &objects = std::get<omp::ObjectList>(clause.t);
- if (modifier && !onlyAllocator) {
+ using Allocate = omp::clause::Allocate;
+ // ALIGN in this context is unimplemented
+ if (std::get<std::optional<Allocate::AlignModifier>>(clause.t))
TODO(currentLocation, "OmpAllocateClause ALIGN modifier");
- }
// Check if allocate clause has allocator specified. If so, add it
// to list of allocators, otherwise, add default allocator to
// list of allocators.
- if (onlyAllocator) {
- const auto &value =
- std::get<omp::clause::Allocate::Modifier::Allocator>(modifier->u);
- mlir::Value operand =
- fir::getBase(converter.genExprValue(value.v, stmtCtx));
- allocatorOperands.append(objectList.size(), operand);
+ using SimpleModifier = Allocate::AllocatorSimpleModifier;
+ using ComplexModifier = Allocate::AllocatorComplexModifier;
+ if (auto &mod = std::get<std::optional<SimpleModifier>>(clause.t)) {
+ mlir::Value operand = fir::getBase(converter.genExprValue(*mod, stmtCtx));
+ allocatorOperands.append(objects.size(), operand);
+ } else if (auto &mod = std::get<std::optional<ComplexModifier>>(clause.t)) {
+ mlir::Value operand = fir::getBase(converter.genExprValue(mod->v, stmtCtx));
+ allocatorOperands.append(objects.size(), operand);
} else {
mlir::Value operand = firOpBuilder.createIntegerConstant(
currentLocation, firOpBuilder.getI32Type(), 1);
- allocatorOperands.append(objectList.size(), operand);
+ allocatorOperands.append(objects.size(), operand);
}
- genObjectList(objectList, converter, allocateOperands);
+
+ genObjectList(objects, converter, allocateOperands);
}
static mlir::omp::ClauseProcBindKindAttr
@@ -131,16 +102,16 @@ genProcBindKindAttr(fir::FirOpBuilder &firOpBuilder,
const omp::clause::ProcBind &clause) {
mlir::omp::ClauseProcBindKind procBindKind;
switch (clause.v) {
- case omp::clause::ProcBind::Type::Master:
+ case omp::clause::ProcBind::AffinityPolicy::Master:
procBindKind = mlir::omp::ClauseProcBindKind::Master;
break;
- case omp::clause::ProcBind::Type::Close:
+ case omp::clause::ProcBind::AffinityPolicy::Close:
procBindKind = mlir::omp::ClauseProcBindKind::Close;
break;
- case omp::clause::ProcBind::Type::Spread:
+ case omp::clause::ProcBind::AffinityPolicy::Spread:
procBindKind = mlir::omp::ClauseProcBindKind::Spread;
break;
- case omp::clause::ProcBind::Type::Primary:
+ case omp::clause::ProcBind::AffinityPolicy::Primary:
procBindKind = mlir::omp::ClauseProcBindKind::Primary;
break;
}
@@ -150,21 +121,22 @@ genProcBindKindAttr(fir::FirOpBuilder &firOpBuilder,
static mlir::omp::ClauseTaskDependAttr
genDependKindAttr(fir::FirOpBuilder &firOpBuilder,
- const omp::clause::Depend &clause) {
+ const omp::clause::Depend::TaskDependenceType kind) {
mlir::omp::ClauseTaskDepend pbKind;
- const auto &inOut = std::get<omp::clause::Depend::InOut>(clause.u);
- switch (std::get<omp::clause::Depend::Type>(inOut.t)) {
- case omp::clause::Depend::Type::In:
+ switch (kind) {
+ case omp::clause::Depend::TaskDependenceType::In:
pbKind = mlir::omp::ClauseTaskDepend::taskdependin;
break;
- case omp::clause::Depend::Type::Out:
+ case omp::clause::Depend::TaskDependenceType::Out:
pbKind = mlir::omp::ClauseTaskDepend::taskdependout;
break;
- case omp::clause::Depend::Type::Inout:
+ case omp::clause::Depend::TaskDependenceType::Inout:
pbKind = mlir::omp::ClauseTaskDepend::taskdependinout;
break;
- default:
- llvm_unreachable("unknown parser task dependence type");
+ case omp::clause::Depend::TaskDependenceType::Mutexinoutset:
+ case omp::clause::Depend::TaskDependenceType::Inoutset:
+ case omp::clause::Depend::TaskDependenceType::Depobj:
+ llvm_unreachable("unhandled parser task dependence type");
break;
}
return mlir::omp::ClauseTaskDependAttr::get(firOpBuilder.getContext(),
@@ -208,6 +180,25 @@ addUseDeviceClause(Fortran::lower::AbstractConverter &converter,
useDeviceSymbols.push_back(object.id());
}
+static void convertLoopBounds(Fortran::lower::AbstractConverter &converter,
+ mlir::Location loc,
+ llvm::SmallVectorImpl<mlir::Value> &lowerBound,
+ llvm::SmallVectorImpl<mlir::Value> &upperBound,
+ llvm::SmallVectorImpl<mlir::Value> &step,
+ std::size_t loopVarTypeSize) {
+ fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
+ // The types of lower bound, upper bound, and step are converted into the
+ // type of the loop variable if necessary.
+ mlir::Type loopVarType = getLoopVarType(converter, loopVarTypeSize);
+ for (unsigned it = 0; it < (unsigned)lowerBound.size(); it++) {
+ lowerBound[it] =
+ firOpBuilder.createConvert(loc, loopVarType, lowerBound[it]);
+ upperBound[it] =
+ firOpBuilder.createConvert(loc, loopVarType, upperBound[it]);
+ step[it] = firOpBuilder.createConvert(loc, loopVarType, step[it]);
+ }
+}
+
//===----------------------------------------------------------------------===//
// ClauseProcessor unique clauses
//===----------------------------------------------------------------------===//
@@ -217,8 +208,7 @@ bool ClauseProcessor::processCollapse(
llvm::SmallVectorImpl<mlir::Value> &lowerBound,
llvm::SmallVectorImpl<mlir::Value> &upperBound,
llvm::SmallVectorImpl<mlir::Value> &step,
- llvm::SmallVectorImpl<const Fortran::semantics::Symbol *> &iv,
- std::size_t &loopVarTypeSize) const {
+ llvm::SmallVectorImpl<const Fortran::semantics::Symbol *> &iv) const {
bool found = false;
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
@@ -236,7 +226,7 @@ bool ClauseProcessor::processCollapse(
found = true;
}
- loopVarTypeSize = 0;
+ std::size_t loopVarTypeSize = 0;
do {
Fortran::lower::pft::Evaluation *doLoop =
&doConstructEval->getFirstNestedEvaluation();
@@ -267,6 +257,9 @@ bool ClauseProcessor::processCollapse(
&*std::next(doConstructEval->getNestedEvaluations().begin());
} while (collapseValue > 0);
+ convertLoopBounds(converter, currentLocation, lowerBound, upperBound, step,
+ loopVarTypeSize);
+
return found;
}
@@ -274,16 +267,16 @@ bool ClauseProcessor::processDefault() const {
if (auto *clause = findUniqueClause<omp::clause::Default>()) {
// Private, Firstprivate, Shared, None
switch (clause->v) {
- case omp::clause::Default::Type::Shared:
- case omp::clause::Default::Type::None:
+ case omp::clause::Default::DataSharingAttribute::Shared:
+ case omp::clause::Default::DataSharingAttribute::None:
// Default clause with shared or none do not require any handling since
// Shared is the default behavior in the IR and None is only required
// for semantic checks.
break;
- case omp::clause::Default::Type::Private:
+ case omp::clause::Default::DataSharingAttribute::Private:
// TODO Support default(private)
break;
- case omp::clause::Default::Type::Firstprivate:
+ case omp::clause::Default::DataSharingAttribute::Firstprivate:
// TODO Support default(firstprivate)
break;
}
@@ -316,13 +309,13 @@ bool ClauseProcessor::processDeviceType(
if (auto *clause = findUniqueClause<omp::clause::DeviceType>()) {
// Case: declare target ... device_type(any | host | nohost)
switch (clause->v) {
- case omp::clause::DeviceType::Type::Nohost:
+ case omp::clause::DeviceType::DeviceTypeDescription::Nohost:
result = mlir::omp::DeclareTargetDeviceType::nohost;
break;
- case omp::clause::DeviceType::Type::Host:
+ case omp::clause::DeviceType::DeviceTypeDescription::Host:
result = mlir::omp::DeclareTargetDeviceType::host;
break;
- case omp::clause::DeviceType::Type::Any:
+ case omp::clause::DeviceType::DeviceTypeDescription::Any:
result = mlir::omp::DeclareTargetDeviceType::any;
break;
}
@@ -370,7 +363,9 @@ bool ClauseProcessor::processNumTeams(Fortran::lower::StatementContext &stmtCtx,
// TODO Get lower and upper bounds for num_teams when parser is updated to
// accept both.
if (auto *clause = findUniqueClause<omp::clause::NumTeams>()) {
- result = fir::getBase(converter.genExprValue(clause->v, stmtCtx));
+ // auto lowerBound = std::get<std::optional<ExprTy>>(clause->t);
+ auto &upperBound = std::get<ExprTy>(clause->t);
+ result = fir::getBase(converter.genExprValue(upperBound, stmtCtx));
return true;
}
return false;
@@ -435,24 +430,23 @@ bool ClauseProcessor::processSchedule(
if (auto *clause = findUniqueClause<omp::clause::Schedule>()) {
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
mlir::MLIRContext *context = firOpBuilder.getContext();
- const auto &scheduleType =
- std::get<omp::clause::Schedule::ScheduleType>(clause->t);
+ const auto &scheduleType = std::get<omp::clause::Schedule::Kind>(clause->t);
mlir::omp::ClauseScheduleKind scheduleKind;
switch (scheduleType) {
- case omp::clause::Schedule::ScheduleType::Static:
+ case omp::clause::Schedule::Kind::Static:
scheduleKind = mlir::omp::ClauseScheduleKind::Static;
break;
- case omp::clause::Schedule::ScheduleType::Dynamic:
+ case omp::clause::Schedule::Kind::Dynamic:
scheduleKind = mlir::omp::ClauseScheduleKind::Dynamic;
break;
- case omp::clause::Schedule::ScheduleType::Guided:
+ case omp::clause::Schedule::Kind::Guided:
scheduleKind = mlir::omp::ClauseScheduleKind::Guided;
break;
- case omp::clause::Schedule::ScheduleType::Auto:
+ case omp::clause::Schedule::Kind::Auto:
scheduleKind = mlir::omp::ClauseScheduleKind::Auto;
break;
- case omp::clause::Schedule::ScheduleType::Runtime:
+ case omp::clause::Schedule::Kind::Runtime:
scheduleKind = mlir::omp::ClauseScheduleKind::Runtime;
break;
}
@@ -728,13 +722,15 @@ bool ClauseProcessor::processDepend(
return findRepeatableClause<omp::clause::Depend>(
[&](const omp::clause::Depend &clause,
const Fortran::parser::CharBlock &) {
- assert(std::holds_alternative<omp::clause::Depend::InOut>(clause.u) &&
- "Only InOut is handled at the moment");
- const auto &inOut = std::get<omp::clause::Depend::InOut>(clause.u);
- const auto &objects = std::get<omp::ObjectList>(inOut.t);
+ using Depend = omp::clause::Depend;
+ assert(std::holds_alternative<Depend::WithLocators>(clause.u) &&
+ "Only the modern form is handled at the moment");
+ auto &modern = std::get<Depend::WithLocators>(clause.u);
+ auto kind = std::get<Depend::TaskDependenceType>(modern.t);
+ auto &objects = std::get<omp::ObjectList>(modern.t);
mlir::omp::ClauseTaskDependAttr dependTypeOperand =
- genDependKindAttr(firOpBuilder, clause);
+ genDependKindAttr(firOpBuilder, kind);
dependTypeOperands.append(objects.size(), dependTypeOperand);
for (const omp::Object &object : objects) {
@@ -823,39 +819,42 @@ bool ClauseProcessor::processMap(
const Fortran::parser::CharBlock &source) {
using Map = omp::clause::Map;
mlir::Location clauseLocation = converter.genLocation(source);
- const auto &oMapType = std::get<std::optional<Map::MapType>>(clause.t);
+ const auto &mapType = std::get<std::optional<Map::MapType>>(clause.t);
llvm::omp::OpenMPOffloadMappingFlags mapTypeBits =
llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_NONE;
// If the map type is specified, then process it else Tofrom is the
// default.
- if (oMapType) {
- const Map::MapType::Type &mapType =
- std::get<Map::MapType::Type>(oMapType->t);
- switch (mapType) {
- case Map::MapType::Type::To:
+ if (mapType) {
+ switch (*mapType) {
+ case Map::MapType::To:
mapTypeBits |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO;
break;
- case Map::MapType::Type::From:
+ case Map::MapType::From:
mapTypeBits |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
break;
- case Map::MapType::Type::Tofrom:
+ case Map::MapType::Tofrom:
mapTypeBits |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO |
llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
break;
- case Map::MapType::Type::Alloc:
- case Map::MapType::Type::Release:
+ case Map::MapType::Alloc:
+ case Map::MapType::Release:
// alloc and release is the default map_type for the Target Data
// Ops, i.e. if no bits for map_type is supplied then alloc/release
// is implicitly assumed based on the target directive. Default
// value for Target Data and Enter Data is alloc and for Exit Data
// it is release.
break;
- case Map::MapType::Type::Delete:
+ case Map::MapType::Delete:
mapTypeBits |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_DELETE;
}
- if (std::get<std::optional<Map::MapType::Always>>(oMapType->t))
- mapTypeBits |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS;
+ auto &modTypeMods =
+ std::get<std::optional<Map::MapTypeModifiers>>(clause.t);
+ if (modTypeMods) {
+ if (llvm::is_contained(*modTypeMods, Map::MapTypeModifier::Always))
+ mapTypeBits |=
+ llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS;
+ }
} else {
mapTypeBits |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO |
llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
@@ -902,17 +901,39 @@ bool ClauseProcessor::processMap(
bool ClauseProcessor::processReduction(
mlir::Location currentLocation,
- llvm::SmallVectorImpl<mlir::Value> &reductionVars,
- llvm::SmallVectorImpl<mlir::Attribute> &reductionDeclSymbols,
- llvm::SmallVectorImpl<const Fortran::semantics::Symbol *> *reductionSymbols)
- const {
+ llvm::SmallVectorImpl<mlir::Value> &outReductionVars,
+ llvm::SmallVectorImpl<mlir::Type> &outReductionTypes,
+ llvm::SmallVectorImpl<mlir::Attribute> &outReductionDeclSymbols,
+ llvm::SmallVectorImpl<const Fortran::semantics::Symbol *>
+ *outReductionSymbols) const {
return findRepeatableClause<omp::clause::Reduction>(
[&](const omp::clause::Reduction &clause,
const Fortran::parser::CharBlock &) {
+ // Use local lists of reductions to prevent variables from other
+ // already-processed reduction clauses from impacting this reduction.
+ // For example, the whole `reductionVars` array is queried to decide
+ // whether to do the reduction byref.
+ llvm::SmallVector<mlir::Value> reductionVars;
+ llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
+ llvm::SmallVector<const Fortran::semantics::Symbol *> reductionSymbols;
ReductionProcessor rp;
rp.addDeclareReduction(currentLocation, converter, clause,
reductionVars, reductionDeclSymbols,
- reductionSymbols);
+ outReductionSymbols ? &reductionSymbols
+ : nullptr);
+
+ // Copy local lists into the output.
+ llvm::copy(reductionVars, std::back_inserter(outReductionVars));
+ llvm::copy(reductionDeclSymbols,
+ std::back_inserter(outReductionDeclSymbols));
+ if (outReductionSymbols)
+ llvm::copy(reductionSymbols,
+ std::back_inserter(*outReductionSymbols));
+
+ outReductionTypes.reserve(outReductionTypes.size() +
+ reductionVars.size());
+ llvm::transform(reductionVars, std::back_inserter(outReductionTypes),
+ [](mlir::Value v) { return v.getType(); });
});
}
@@ -929,7 +950,7 @@ bool ClauseProcessor::processTo(
return findRepeatableClause<omp::clause::To>(
[&](const omp::clause::To &clause, const Fortran::parser::CharBlock &) {
// Case: declare target to(func, var1, var2)...
- gatherFuncAndVarSyms(clause.v,
+ gatherFuncAndVarSyms(std::get<ObjectList>(clause.t),
mlir::omp::DeclareTargetCaptureClause::to, result);
});
}
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.h b/flang/lib/Lower/OpenMP/ClauseProcessor.h
index ffa8a5e05593..d31d6a5c2062 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.h
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.h
@@ -46,24 +46,20 @@ namespace omp {
/// methods that relate to clauses that can impact the lowering of that
/// construct.
class ClauseProcessor {
- using ClauseTy = Fortran::parser::OmpClause;
-
public:
ClauseProcessor(Fortran::lower::AbstractConverter &converter,
Fortran::semantics::SemanticsContext &semaCtx,
const Fortran::parser::OmpClauseList &clauses)
- : converter(converter), semaCtx(semaCtx), clauses2(clauses),
- clauses(makeList(clauses, semaCtx)) {}
+ : converter(converter), semaCtx(semaCtx),
+ clauses(makeClauses(clauses, semaCtx)) {}
// 'Unique' clauses: They can appear at most once in the clause list.
- bool
- processCollapse(mlir::Location currentLocation,
- Fortran::lower::pft::Evaluation &eval,
- llvm::SmallVectorImpl<mlir::Value> &lowerBound,
- llvm::SmallVectorImpl<mlir::Value> &upperBound,
- llvm::SmallVectorImpl<mlir::Value> &step,
- llvm::SmallVectorImpl<const Fortran::semantics::Symbol *> &iv,
- std::size_t &loopVarTypeSize) const;
+ bool processCollapse(
+ mlir::Location currentLocation, Fortran::lower::pft::Evaluation &eval,
+ llvm::SmallVectorImpl<mlir::Value> &lowerBound,
+ llvm::SmallVectorImpl<mlir::Value> &upperBound,
+ llvm::SmallVectorImpl<mlir::Value> &step,
+ llvm::SmallVectorImpl<const Fortran::semantics::Symbol *> &iv) const;
bool processDefault() const;
bool processDevice(Fortran::lower::StatementContext &stmtCtx,
mlir::Value &result) const;
@@ -126,6 +122,7 @@ public:
bool
processReduction(mlir::Location currentLocation,
llvm::SmallVectorImpl<mlir::Value> &reductionVars,
+ llvm::SmallVectorImpl<mlir::Type> &reductionTypes,
llvm::SmallVectorImpl<mlir::Attribute> &reductionDeclSymbols,
llvm::SmallVectorImpl<const Fortran::semantics::Symbol *>
*reductionSymbols = nullptr) const;
@@ -157,7 +154,6 @@ public:
private:
using ClauseIterator = List<Clause>::const_iterator;
- using ClauseIterator2 = std::list<ClauseTy>::const_iterator;
/// Utility to find a clause within a range in the clause list.
template <typename T>
@@ -183,7 +179,6 @@ private:
Fortran::lower::AbstractConverter &converter;
Fortran::semantics::SemanticsContext &semaCtx;
- const Fortran::parser::OmpClauseList &clauses2;
List<Clause> clauses;
};
@@ -205,7 +200,8 @@ bool ClauseProcessor::processMotionClauses(
? llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO
: llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_FROM;
- for (const omp::Object &object : clause.v) {
+ auto &objects = std::get<ObjectList>(clause.t);
+ for (const omp::Object &object : objects) {
llvm::SmallVector<mlir::Value> bounds;
std::stringstream asFortran;
Fortran::lower::AddrAndBoundsInfo info =
@@ -239,19 +235,17 @@ bool ClauseProcessor::processMotionClauses(
template <typename... Ts>
void ClauseProcessor::processTODO(mlir::Location currentLocation,
llvm::omp::Directive directive) const {
- auto checkUnhandledClause = [&](const auto *x) {
+ auto checkUnhandledClause = [&](llvm::omp::Clause id, const auto *x) {
if (!x)
return;
TODO(currentLocation,
- "Unhandled clause " +
- llvm::StringRef(Fortran::parser::ParseTreeDumper::GetNodeName(*x))
- .upper() +
+ "Unhandled clause " + llvm::omp::getOpenMPClauseName(id).upper() +
" in " + llvm::omp::getOpenMPDirectiveName(directive).upper() +
" construct");
};
- for (ClauseIterator2 it = clauses2.v.begin(); it != clauses2.v.end(); ++it)
- (checkUnhandledClause(std::get_if<Ts>(&it->u)), ...);
+ for (ClauseIterator it = clauses.begin(); it != clauses.end(); ++it)
+ (checkUnhandledClause(it->id, std::get_if<Ts>(&it->u)), ...);
}
template <typename T>
diff --git a/flang/lib/Lower/OpenMP/ClauseT.h b/flang/lib/Lower/OpenMP/ClauseT.h
deleted file mode 100644
index 2aae29af2921..000000000000
--- a/flang/lib/Lower/OpenMP/ClauseT.h
+++ /dev/null
@@ -1,714 +0,0 @@
-//===- ClauseT -- clause template definitions -----------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-#ifndef FORTRAN_LOWER_OPENMP_CLAUSET_H
-#define FORTRAN_LOWER_OPENMP_CLAUSET_H
-
-#include "flang/Parser/parse-tree.h" // For enum reuse
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/raw_ostream.h"
-
-#include <algorithm>
-#include <iterator>
-#include <optional>
-#include <tuple>
-#include <type_traits>
-#include <utility>
-#include <variant>
-
-#include "llvm/Frontend/OpenMP/OMP.h.inc"
-
-namespace tomp {
-
-template <typename T>
-using ListT = llvm::SmallVector<T, 0>;
-
-// A specialization of ObjectT<Id, Expr> must provide the following definitions:
-// {
-// using IdType = Id;
-// using ExprType = Expr;
-//
-// auto id() const -> Id {
-// return the identifier of the object (for use in tests for
-// presence/absence of the object)
-// }
-//
-// auto ref() const -> const Expr& {
-// return the expression accessing (referencing) the object
-// }
-// }
-//
-// For example, the ObjectT instance created for "var[x+1]" would have
-// the `id()` return the identifier for `var`, and the `ref()` return the
-// representation of the array-access `var[x+1]`.
-template <typename Id, typename Expr>
-struct ObjectT;
-
-template <typename I, typename E>
-using ObjectListT = ListT<ObjectT<I, E>>;
-
-namespace clause {
-// Helper objects
-
-template <typename I, typename E>
-struct DefinedOperatorT {
- struct DefinedOpName {
- using WrapperTrait = std::true_type;
- ObjectT<I, E> v;
- };
- using IntrinsicOperator = Fortran::parser::DefinedOperator::IntrinsicOperator;
- using UnionTrait = std::true_type;
- std::variant<DefinedOpName, IntrinsicOperator> u;
-};
-
-template <typename I, typename E>
-struct ProcedureDesignatorT {
- using WrapperTrait = std::true_type;
- ObjectT<I, E> v;
-};
-
-template <typename I, typename E>
-struct ReductionOperatorT {
- using UnionTrait = std::true_type;
- std::variant<DefinedOperatorT<I, E>, ProcedureDesignatorT<I, E>> u;
-};
-
-template <typename I, typename E>
-struct AcqRelT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct AcquireT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct AdjustArgsT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct AffinityT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct AlignT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct AppendArgsT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct AtT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct BindT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct CancellationConstructTypeT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct CaptureT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct CompareT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct DepobjT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct DestroyT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct DetachT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct DoacrossT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct DynamicAllocatorsT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct ExclusiveT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct FailT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct FlushT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct FullT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct InbranchT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct InclusiveT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct IndirectT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct InitT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct MatchT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct MemoryOrderT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct MergeableT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct MessageT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct NogroupT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct NotinbranchT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct NowaitT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct OmpxAttributeT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct OmpxBareT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct ReadT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct RelaxedT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct ReleaseT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct ReverseOffloadT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct SeqCstT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct SeverityT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct SimdT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct ThreadprivateT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct ThreadsT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UnifiedAddressT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UnifiedSharedMemoryT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UnknownT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UntiedT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UpdateT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UseT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct UsesAllocatorsT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct WeakT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct WhenT {
- using EmptyTrait = std::true_type;
-};
-template <typename I, typename E>
-struct WriteT {
- using EmptyTrait = std::true_type;
-};
-
-template <typename I, typename E>
-struct AlignedT {
- using TupleTrait = std::true_type;
- std::tuple<ObjectListT<I, E>, std::optional<E>> t;
-};
-
-template <typename I, typename E>
-struct AllocateT {
- struct Modifier {
- struct Allocator {
- using WrapperTrait = std::true_type;
- E v;
- };
- struct Align {
- using WrapperTrait = std::true_type;
- E v;
- };
- struct ComplexModifier {
- using TupleTrait = std::true_type;
- std::tuple<Allocator, Align> t;
- };
- using UnionTrait = std::true_type;
- std::variant<Allocator, ComplexModifier, Align> u;
- };
- using TupleTrait = std::true_type;
- std::tuple<std::optional<Modifier>, ObjectListT<I, E>> t;
-};
-
-template <typename I, typename E>
-struct AllocatorT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct AtomicDefaultMemOrderT {
- using WrapperTrait = std::true_type;
- using OmpAtomicDefaultMemOrderType =
- Fortran::common::OmpAtomicDefaultMemOrderType;
- OmpAtomicDefaultMemOrderType v;
-};
-
-template <typename I, typename E>
-struct CollapseT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct CopyinT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct CopyprivateT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct DefaultmapT {
- using ImplicitBehavior =
- Fortran::parser::OmpDefaultmapClause::ImplicitBehavior;
- using VariableCategory =
- Fortran::parser::OmpDefaultmapClause::VariableCategory;
- using TupleTrait = std::true_type;
- std::tuple<ImplicitBehavior, std::optional<VariableCategory>> t;
-};
-
-template <typename I, typename E>
-struct DefaultT {
- using Type = Fortran::parser::OmpDefaultClause::Type;
- using WrapperTrait = std::true_type;
- Type v;
-};
-
-template <typename I, typename E>
-struct DependT {
- struct Source {
- using EmptyTrait = std::true_type;
- };
- struct Sink {
- using Length = std::tuple<DefinedOperatorT<I, E>, E>;
- using Vec = std::tuple<ObjectT<I, E>, std::optional<Length>>;
- using WrapperTrait = std::true_type;
- ListT<Vec> v;
- };
- using Type = Fortran::parser::OmpDependenceType::Type;
- struct InOut {
- using TupleTrait = std::true_type;
- std::tuple<Type, ObjectListT<I, E>> t;
- };
- using UnionTrait = std::true_type;
- std::variant<Source, Sink, InOut> u;
-};
-
-template <typename I, typename E>
-struct DeviceT {
- using DeviceModifier = Fortran::parser::OmpDeviceClause::DeviceModifier;
- using TupleTrait = std::true_type;
- std::tuple<std::optional<DeviceModifier>, E> t;
-};
-
-template <typename I, typename E>
-struct DeviceTypeT {
- using Type = Fortran::parser::OmpDeviceTypeClause::Type;
- using WrapperTrait = std::true_type;
- Type v;
-};
-
-template <typename I, typename E>
-struct DistScheduleT {
- using WrapperTrait = std::true_type;
- std::optional<E> v;
-};
-
-template <typename I, typename E>
-struct EnterT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct FilterT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct FinalT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct FirstprivateT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct FromT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct GrainsizeT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct HasDeviceAddrT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct HintT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct IfT {
- using DirectiveNameModifier =
- Fortran::parser::OmpIfClause::DirectiveNameModifier;
- using TupleTrait = std::true_type;
- std::tuple<std::optional<DirectiveNameModifier>, E> t;
-};
-
-template <typename I, typename E>
-struct InReductionT {
- using TupleTrait = std::true_type;
- std::tuple<ReductionOperatorT<I, E>, ObjectListT<I, E>> t;
-};
-
-template <typename I, typename E>
-struct IsDevicePtrT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct LastprivateT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct LinearT {
- struct Modifier {
- using Type = Fortran::parser::OmpLinearModifier::Type;
- using WrapperTrait = std::true_type;
- Type v;
- };
- using TupleTrait = std::true_type;
- std::tuple<std::optional<Modifier>, ObjectListT<I, E>, std::optional<E>> t;
-};
-
-template <typename I, typename E>
-struct LinkT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct MapT {
- struct MapType {
- struct Always {
- using EmptyTrait = std::true_type;
- };
- using Type = Fortran::parser::OmpMapType::Type;
- using TupleTrait = std::true_type;
- std::tuple<std::optional<Always>, Type> t;
- };
- using TupleTrait = std::true_type;
- std::tuple<std::optional<MapType>, ObjectListT<I, E>> t;
-};
-
-template <typename I, typename E>
-struct NocontextT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct NontemporalT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct NovariantsT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct NumTasksT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct NumTeamsT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct NumThreadsT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct OmpxDynCgroupMemT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct OrderedT {
- using WrapperTrait = std::true_type;
- std::optional<E> v;
-};
-
-template <typename I, typename E>
-struct OrderT {
- using Kind = Fortran::parser::OmpOrderModifier::Kind;
- using Type = Fortran::parser::OmpOrderClause::Type;
- using TupleTrait = std::true_type;
- std::tuple<std::optional<Kind>, Type> t;
-};
-
-template <typename I, typename E>
-struct PartialT {
- using WrapperTrait = std::true_type;
- std::optional<E> v;
-};
-
-template <typename I, typename E>
-struct PriorityT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct PrivateT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct ProcBindT {
- using Type = Fortran::parser::OmpProcBindClause::Type;
- using WrapperTrait = std::true_type;
- Type v;
-};
-
-template <typename I, typename E>
-struct ReductionT {
- using TupleTrait = std::true_type;
- std::tuple<ReductionOperatorT<I, E>, ObjectListT<I, E>> t;
-};
-
-template <typename I, typename E>
-struct SafelenT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct ScheduleT {
- using ModType = Fortran::parser::OmpScheduleModifierType::ModType;
- struct ScheduleModifier {
- using TupleTrait = std::true_type;
- std::tuple<ModType, std::optional<ModType>> t;
- };
- using ScheduleType = Fortran::parser::OmpScheduleClause::ScheduleType;
- using TupleTrait = std::true_type;
- std::tuple<std::optional<ScheduleModifier>, ScheduleType, std::optional<E>> t;
-};
-
-template <typename I, typename E>
-struct SharedT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct SimdlenT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct SizesT {
- using WrapperTrait = std::true_type;
- ListT<E> v;
-};
-
-template <typename I, typename E>
-struct TaskReductionT {
- using TupleTrait = std::true_type;
- std::tuple<ReductionOperatorT<I, E>, ObjectListT<I, E>> t;
-};
-
-template <typename I, typename E>
-struct ThreadLimitT {
- using WrapperTrait = std::true_type;
- E v;
-};
-
-template <typename I, typename E>
-struct ToT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct UniformT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct UseDeviceAddrT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-struct UseDevicePtrT {
- using WrapperTrait = std::true_type;
- ObjectListT<I, E> v;
-};
-
-template <typename I, typename E>
-using UnionOfAllClausesT = std::variant<
- AcqRelT<I, E>, AcquireT<I, E>, AdjustArgsT<I, E>, AffinityT<I, E>,
- AlignT<I, E>, AlignedT<I, E>, AllocateT<I, E>, AllocatorT<I, E>,
- AppendArgsT<I, E>, AtT<I, E>, AtomicDefaultMemOrderT<I, E>, BindT<I, E>,
- CancellationConstructTypeT<I, E>, CaptureT<I, E>, CollapseT<I, E>,
- CompareT<I, E>, CopyprivateT<I, E>, CopyinT<I, E>, DefaultT<I, E>,
- DefaultmapT<I, E>, DependT<I, E>, DepobjT<I, E>, DestroyT<I, E>,
- DetachT<I, E>, DeviceT<I, E>, DeviceTypeT<I, E>, DistScheduleT<I, E>,
- DoacrossT<I, E>, DynamicAllocatorsT<I, E>, EnterT<I, E>, ExclusiveT<I, E>,
- FailT<I, E>, FilterT<I, E>, FinalT<I, E>, FirstprivateT<I, E>, FlushT<I, E>,
- FromT<I, E>, FullT<I, E>, GrainsizeT<I, E>, HasDeviceAddrT<I, E>,
- HintT<I, E>, IfT<I, E>, InReductionT<I, E>, InbranchT<I, E>,
- InclusiveT<I, E>, IndirectT<I, E>, InitT<I, E>, IsDevicePtrT<I, E>,
- LastprivateT<I, E>, LinearT<I, E>, LinkT<I, E>, MapT<I, E>, MatchT<I, E>,
- MemoryOrderT<I, E>, MergeableT<I, E>, MessageT<I, E>, NogroupT<I, E>,
- NowaitT<I, E>, NocontextT<I, E>, NontemporalT<I, E>, NotinbranchT<I, E>,
- NovariantsT<I, E>, NumTasksT<I, E>, NumTeamsT<I, E>, NumThreadsT<I, E>,
- OmpxAttributeT<I, E>, OmpxDynCgroupMemT<I, E>, OmpxBareT<I, E>,
- OrderT<I, E>, OrderedT<I, E>, PartialT<I, E>, PriorityT<I, E>,
- PrivateT<I, E>, ProcBindT<I, E>, ReadT<I, E>, ReductionT<I, E>,
- RelaxedT<I, E>, ReleaseT<I, E>, ReverseOffloadT<I, E>, SafelenT<I, E>,
- ScheduleT<I, E>, SeqCstT<I, E>, SeverityT<I, E>, SharedT<I, E>, SimdT<I, E>,
- SimdlenT<I, E>, SizesT<I, E>, TaskReductionT<I, E>, ThreadLimitT<I, E>,
- ThreadprivateT<I, E>, ThreadsT<I, E>, ToT<I, E>, UnifiedAddressT<I, E>,
- UnifiedSharedMemoryT<I, E>, UniformT<I, E>, UnknownT<I, E>, UntiedT<I, E>,
- UpdateT<I, E>, UseT<I, E>, UseDeviceAddrT<I, E>, UseDevicePtrT<I, E>,
- UsesAllocatorsT<I, E>, WeakT<I, E>, WhenT<I, E>, WriteT<I, E>>;
-} // namespace clause
-
-template <typename Id, typename Expr>
-struct ClauseT {
- llvm::omp::Clause id; // The numeric id of the clause
- using UnionTrait = std::true_type;
- clause::UnionOfAllClausesT<Id, Expr> u;
-};
-
-} // namespace tomp
-
-#endif // FORTRAN_LOWER_OPENMP_CLAUSET_H
diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp
index 70f232a4858e..40da71c8b55f 100644
--- a/flang/lib/Lower/OpenMP/Clauses.cpp
+++ b/flang/lib/Lower/OpenMP/Clauses.cpp
@@ -189,29 +189,96 @@ getBaseObject(const Object &object,
return std::nullopt;
}
-namespace clause {
-// Helper objects
-#ifdef EMPTY_CLASS
-#undef EMPTY_CLASS
-#endif
-#define EMPTY_CLASS(cls) \
- cls make(const parser::OmpClause::cls &, semantics::SemanticsContext &) { \
+// Helper macros
+#define MAKE_EMPTY_CLASS(cls, from_cls) \
+ cls make(const parser::OmpClause::from_cls &, \
+ semantics::SemanticsContext &) { \
+ static_assert(cls::EmptyTrait::value); \
return cls{}; \
} \
[[maybe_unused]] extern int xyzzy_semicolon_absorber
-#ifdef WRAPPER_CLASS
-#undef WRAPPER_CLASS
-#endif
-#define WRAPPER_CLASS(cls, content) \
+#define MAKE_INCOMPLETE_CLASS(cls, from_cls) \
+ cls make(const parser::OmpClause::from_cls &, \
+ semantics::SemanticsContext &) { \
+ static_assert(cls::IncompleteTrait::value); \
+ return cls{}; \
+ } \
[[maybe_unused]] extern int xyzzy_semicolon_absorber
-#define GEN_FLANG_CLAUSE_PARSER_CLASSES
-#include "llvm/Frontend/OpenMP/OMP.inc"
-#undef EMPTY_CLASS
-#undef WRAPPER_CLASS
+
+#define MS(x, y) CLAUSET_SCOPED_ENUM_MEMBER_CONVERT(x, y)
+#define MU(x, y) CLAUSET_UNSCOPED_ENUM_MEMBER_CONVERT(x, y)
+
+namespace clause {
+MAKE_EMPTY_CLASS(AcqRel, AcqRel);
+MAKE_EMPTY_CLASS(Acquire, Acquire);
+MAKE_EMPTY_CLASS(Capture, Capture);
+MAKE_EMPTY_CLASS(Compare, Compare);
+MAKE_EMPTY_CLASS(DynamicAllocators, DynamicAllocators);
+MAKE_EMPTY_CLASS(Full, Full);
+MAKE_EMPTY_CLASS(Inbranch, Inbranch);
+MAKE_EMPTY_CLASS(Mergeable, Mergeable);
+MAKE_EMPTY_CLASS(Nogroup, Nogroup);
+// MAKE_EMPTY_CLASS(NoOpenmp, ); // missing-in-parser
+// MAKE_EMPTY_CLASS(NoOpenmpRoutines, ); // missing-in-parser
+// MAKE_EMPTY_CLASS(NoParallelism, ); // missing-in-parser
+MAKE_EMPTY_CLASS(Notinbranch, Notinbranch);
+MAKE_EMPTY_CLASS(Nowait, Nowait);
+MAKE_EMPTY_CLASS(OmpxAttribute, OmpxAttribute);
+MAKE_EMPTY_CLASS(OmpxBare, OmpxBare);
+MAKE_EMPTY_CLASS(Read, Read);
+MAKE_EMPTY_CLASS(Relaxed, Relaxed);
+MAKE_EMPTY_CLASS(Release, Release);
+MAKE_EMPTY_CLASS(ReverseOffload, ReverseOffload);
+MAKE_EMPTY_CLASS(SeqCst, SeqCst);
+MAKE_EMPTY_CLASS(Simd, Simd);
+MAKE_EMPTY_CLASS(Threads, Threads);
+MAKE_EMPTY_CLASS(UnifiedAddress, UnifiedAddress);
+MAKE_EMPTY_CLASS(UnifiedSharedMemory, UnifiedSharedMemory);
+MAKE_EMPTY_CLASS(Unknown, Unknown);
+MAKE_EMPTY_CLASS(Untied, Untied);
+MAKE_EMPTY_CLASS(Weak, Weak);
+MAKE_EMPTY_CLASS(Write, Write);
+
+// Artificial clauses
+MAKE_EMPTY_CLASS(CancellationConstructType, CancellationConstructType);
+MAKE_EMPTY_CLASS(Depobj, Depobj);
+MAKE_EMPTY_CLASS(Flush, Flush);
+MAKE_EMPTY_CLASS(MemoryOrder, MemoryOrder);
+MAKE_EMPTY_CLASS(Threadprivate, Threadprivate);
+
+MAKE_INCOMPLETE_CLASS(AdjustArgs, AdjustArgs);
+MAKE_INCOMPLETE_CLASS(AppendArgs, AppendArgs);
+MAKE_INCOMPLETE_CLASS(Match, Match);
+// MAKE_INCOMPLETE_CLASS(Otherwise, ); // missing-in-parser
+MAKE_INCOMPLETE_CLASS(When, When);
DefinedOperator makeDefinedOperator(const parser::DefinedOperator &inp,
semantics::SemanticsContext &semaCtx) {
+ CLAUSET_ENUM_CONVERT( //
+ convert, parser::DefinedOperator::IntrinsicOperator,
+ DefinedOperator::IntrinsicOperator,
+ // clang-format off
+ MS(Add, Add)
+ MS(AND, AND)
+ MS(Concat, Concat)
+ MS(Divide, Divide)
+ MS(EQ, EQ)
+ MS(EQV, EQV)
+ MS(GE, GE)
+ MS(GT, GT)
+ MS(NOT, NOT)
+ MS(LE, LE)
+ MS(LT, LT)
+ MS(Multiply, Multiply)
+ MS(NE, NE)
+ MS(NEQV, NEQV)
+ MS(OR, OR)
+ MS(Power, Power)
+ MS(Subtract, Subtract)
+ // clang-format on
+ );
+
return std::visit(
common::visitors{
[&](const parser::DefinedOpName &s) {
@@ -219,7 +286,7 @@ DefinedOperator makeDefinedOperator(const parser::DefinedOperator &inp,
DefinedOperator::DefinedOpName{makeObject(s.v, semaCtx)}};
},
[&](const parser::DefinedOperator::IntrinsicOperator &s) {
- return DefinedOperator{s};
+ return DefinedOperator{convert(s)};
},
},
inp.u);
@@ -252,7 +319,26 @@ ReductionOperator makeReductionOperator(const parser::OmpReductionOperator &inp,
inp.u);
}
-// Actual clauses. Each T (where OmpClause::T exists) has its "make".
+// --------------------------------------------------------------------
+// Actual clauses. Each T (where tomp::T exists in ClauseT) has its "make".
+
+// Absent: missing-in-parser
+// AcqRel: empty
+// Acquire: empty
+// AdjustArgs: incomplete
+
+Affinity make(const parser::OmpClause::Affinity &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: affinity");
+}
+
+Align make(const parser::OmpClause::Align &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: align");
+}
+
Aligned make(const parser::OmpClause::Aligned &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpAlignedClause
@@ -260,8 +346,8 @@ Aligned make(const parser::OmpClause::Aligned &inp,
auto &t1 = std::get<std::optional<parser::ScalarIntConstantExpr>>(inp.v.t);
return Aligned{{
- makeList(t0, semaCtx),
- maybeApply(makeExprFn(semaCtx), t1),
+ /*Alignment=*/maybeApply(makeExprFn(semaCtx), t1),
+ /*List=*/makeObjects(t0, semaCtx),
}};
}
@@ -272,39 +358,57 @@ Allocate make(const parser::OmpClause::Allocate &inp,
auto &t0 = std::get<std::optional<wrapped::AllocateModifier>>(inp.v.t);
auto &t1 = std::get<parser::OmpObjectList>(inp.v.t);
- auto convert = [&](auto &&s) -> Allocate::Modifier {
- using Modifier = Allocate::Modifier;
- using Allocator = Modifier::Allocator;
- using Align = Modifier::Align;
- using ComplexModifier = Modifier::ComplexModifier;
+ if (!t0) {
+ return Allocate{{/*AllocatorSimpleModifier=*/std::nullopt,
+ /*AllocatorComplexModifier=*/std::nullopt,
+ /*AlignModifier=*/std::nullopt,
+ /*List=*/makeObjects(t1, semaCtx)}};
+ }
- return std::visit(
- common::visitors{
- [&](const wrapped::AllocateModifier::Allocator &v) {
- return Modifier{Allocator{makeExpr(v.v, semaCtx)}};
- },
- [&](const wrapped::AllocateModifier::ComplexModifier &v) {
- auto &s0 = std::get<wrapped::AllocateModifier::Allocator>(v.t);
- auto &s1 = std::get<wrapped::AllocateModifier::Align>(v.t);
- return Modifier{ComplexModifier{{
- Allocator{makeExpr(s0.v, semaCtx)},
- Align{makeExpr(s1.v, semaCtx)},
- }}};
- },
- [&](const wrapped::AllocateModifier::Align &v) {
- return Modifier{Align{makeExpr(v.v, semaCtx)}};
- },
- },
- s.u);
- };
+ using Tuple = decltype(Allocate::t);
- return Allocate{{maybeApply(convert, t0), makeList(t1, semaCtx)}};
+ return Allocate{std::visit(
+ common::visitors{
+ // simple-modifier
+ [&](const wrapped::AllocateModifier::Allocator &v) -> Tuple {
+ return {/*AllocatorSimpleModifier=*/makeExpr(v.v, semaCtx),
+ /*AllocatorComplexModifier=*/std::nullopt,
+ /*AlignModifier=*/std::nullopt,
+ /*List=*/makeObjects(t1, semaCtx)};
+ },
+ // complex-modifier + align-modifier
+ [&](const wrapped::AllocateModifier::ComplexModifier &v) -> Tuple {
+ auto &s0 = std::get<wrapped::AllocateModifier::Allocator>(v.t);
+ auto &s1 = std::get<wrapped::AllocateModifier::Align>(v.t);
+ return {
+ /*AllocatorSimpleModifier=*/std::nullopt,
+ /*AllocatorComplexModifier=*/Allocator{makeExpr(s0.v, semaCtx)},
+ /*AlignModifier=*/Align{makeExpr(s1.v, semaCtx)},
+ /*List=*/makeObjects(t1, semaCtx)};
+ },
+ // align-modifier
+ [&](const wrapped::AllocateModifier::Align &v) -> Tuple {
+ return {/*AllocatorSimpleModifier=*/std::nullopt,
+ /*AllocatorComplexModifier=*/std::nullopt,
+ /*AlignModifier=*/Align{makeExpr(v.v, semaCtx)},
+ /*List=*/makeObjects(t1, semaCtx)};
+ },
+ },
+ t0->u)};
}
Allocator make(const parser::OmpClause::Allocator &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return Allocator{makeExpr(inp.v, semaCtx)};
+ return Allocator{/*Allocator=*/makeExpr(inp.v, semaCtx)};
+}
+
+// AppendArgs: incomplete
+
+At make(const parser::OmpClause::At &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: at");
}
// Never called, but needed for using "make" as a Clause visitor.
@@ -312,25 +416,65 @@ Allocator make(const parser::OmpClause::Allocator &inp,
AtomicDefaultMemOrder make(const parser::OmpClause::AtomicDefaultMemOrder &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpAtomicDefaultMemOrderClause
- return AtomicDefaultMemOrder{inp.v.v};
+ CLAUSET_ENUM_CONVERT( //
+ convert, common::OmpAtomicDefaultMemOrderType,
+ AtomicDefaultMemOrder::MemoryOrder,
+ // clang-format off
+ MS(AcqRel, AcqRel)
+ MS(Relaxed, Relaxed)
+ MS(SeqCst, SeqCst)
+ // clang-format on
+ );
+
+ return AtomicDefaultMemOrder{/*MemoryOrder=*/convert(inp.v.v)};
}
+Bind make(const parser::OmpClause::Bind &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: bind");
+}
+
+// CancellationConstructType: empty
+// Capture: empty
+
Collapse make(const parser::OmpClause::Collapse &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntConstantExpr
- return Collapse{makeExpr(inp.v, semaCtx)};
+ return Collapse{/*N=*/makeExpr(inp.v, semaCtx)};
}
+// Compare: empty
+// Contains: missing-in-parser
+
Copyin make(const parser::OmpClause::Copyin &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Copyin{makeList(inp.v, semaCtx)};
+ return Copyin{/*List=*/makeObjects(inp.v, semaCtx)};
}
Copyprivate make(const parser::OmpClause::Copyprivate &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Copyprivate{makeList(inp.v, semaCtx)};
+ return Copyprivate{/*List=*/makeObjects(inp.v, semaCtx)};
+}
+
+Default make(const parser::OmpClause::Default &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> parser::OmpDefaultClause
+ using wrapped = parser::OmpDefaultClause;
+
+ CLAUSET_ENUM_CONVERT( //
+ convert, wrapped::Type, Default::DataSharingAttribute,
+ // clang-format off
+ MS(Firstprivate, Firstprivate)
+ MS(None, None)
+ MS(Private, Private)
+ MS(Shared, Shared)
+ // clang-format on
+ );
+
+ return Default{/*DataSharingAttribute=*/convert(inp.v.v)};
}
Defaultmap make(const parser::OmpClause::Defaultmap &inp,
@@ -338,51 +482,111 @@ Defaultmap make(const parser::OmpClause::Defaultmap &inp,
// inp.v -> parser::OmpDefaultmapClause
using wrapped = parser::OmpDefaultmapClause;
+ CLAUSET_ENUM_CONVERT( //
+ convert1, wrapped::ImplicitBehavior, Defaultmap::ImplicitBehavior,
+ // clang-format off
+ MS(Alloc, Alloc)
+ MS(To, To)
+ MS(From, From)
+ MS(Tofrom, Tofrom)
+ MS(Firstprivate, Firstprivate)
+ MS(None, None)
+ MS(Default, Default)
+ // MS(, Present) missing-in-parser
+ // clang-format on
+ );
+
+ CLAUSET_ENUM_CONVERT( //
+ convert2, wrapped::VariableCategory, Defaultmap::VariableCategory,
+ // clang-format off
+ MS(Scalar, Scalar)
+ MS(Aggregate, Aggregate)
+ MS(Pointer, Pointer)
+ MS(Allocatable, Allocatable)
+ // clang-format on
+ );
+
auto &t0 = std::get<wrapped::ImplicitBehavior>(inp.v.t);
auto &t1 = std::get<std::optional<wrapped::VariableCategory>>(inp.v.t);
- return Defaultmap{{t0, t1}};
-}
-
-Default make(const parser::OmpClause::Default &inp,
- semantics::SemanticsContext &semaCtx) {
- // inp.v -> parser::OmpDefaultClause
- return Default{inp.v.v};
+ return Defaultmap{{/*ImplicitBehavior=*/convert1(t0),
+ /*VariableCategory=*/maybeApply(convert2, t1)}};
}
Depend make(const parser::OmpClause::Depend &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpDependClause
using wrapped = parser::OmpDependClause;
-
- return std::visit(
+ using Variant = decltype(Depend::u);
+ // Iteration is the equivalent of parser::OmpDependSinkVec
+ using Iteration = Doacross::Vector::value_type; // LoopIterationT
+
+ CLAUSET_ENUM_CONVERT( //
+ convert1, parser::OmpDependenceType::Type, Depend::TaskDependenceType,
+ // clang-format off
+ MS(In, In)
+ MS(Out, Out)
+ MS(Inout, Inout)
+ // MS(, Mutexinoutset) // missing-in-parser
+ // MS(, Inputset) // missing-in-parser
+ // MS(, Depobj) // missing-in-parser
+ // clang-format on
+ );
+
+ return Depend{std::visit( //
common::visitors{
- [&](const wrapped::Source &s) { return Depend{Depend::Source{}}; },
- [&](const wrapped::Sink &s) {
- auto convert = [&](const parser::OmpDependSinkVec &v) {
+ // Doacross
+ [&](const wrapped::Source &s) -> Variant {
+ return Doacross{
+ {/*DependenceType=*/Doacross::DependenceType::Source,
+ /*Vector=*/{}}};
+ },
+ // Doacross
+ [&](const wrapped::Sink &s) -> Variant {
+ using DependLength = parser::OmpDependSinkVecLength;
+ auto convert2 = [&](const parser::OmpDependSinkVec &v) {
auto &t0 = std::get<parser::Name>(v.t);
- auto &t1 =
- std::get<std::optional<parser::OmpDependSinkVecLength>>(v.t);
- auto convert1 = [&](const parser::OmpDependSinkVecLength &u) {
+ auto &t1 = std::get<std::optional<DependLength>>(v.t);
+
+ auto convert3 = [&](const DependLength &u) {
auto &s0 = std::get<parser::DefinedOperator>(u.t);
auto &s1 = std::get<parser::ScalarIntConstantExpr>(u.t);
- return Depend::Sink::Length{makeDefinedOperator(s0, semaCtx),
- makeExpr(s1, semaCtx)};
+ return Iteration::Distance{
+ {makeDefinedOperator(s0, semaCtx), makeExpr(s1, semaCtx)}};
};
- return Depend::Sink::Vec{makeObject(t0, semaCtx),
- maybeApply(convert1, t1)};
+ return Iteration{
+ {makeObject(t0, semaCtx), maybeApply(convert3, t1)}};
};
- return Depend{Depend::Sink{makeList(s.v, convert)}};
+ return Doacross{{/*DependenceType=*/Doacross::DependenceType::Sink,
+ /*Vector=*/makeList(s.v, convert2)}};
},
- [&](const wrapped::InOut &s) {
+ // Depend::WithLocators
+ [&](const wrapped::InOut &s) -> Variant {
auto &t0 = std::get<parser::OmpDependenceType>(s.t);
auto &t1 = std::get<std::list<parser::Designator>>(s.t);
- auto convert = [&](const parser::Designator &t) {
+ auto convert4 = [&](const parser::Designator &t) {
return makeObject(t, semaCtx);
};
- return Depend{Depend::InOut{{t0.v, makeList(t1, convert)}}};
+ return Depend::WithLocators{
+ {/*TaskDependenceType=*/convert1(t0.v),
+ /*Iterator=*/std::nullopt,
+ /*LocatorList=*/makeList(t1, convert4)}};
},
},
- inp.v.u);
+ inp.v.u)};
+}
+
+// Depobj: empty
+
+Destroy make(const parser::OmpClause::Destroy &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: destroy");
+}
+
+Detach make(const parser::OmpClause::Detach &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: detach");
}
Device make(const parser::OmpClause::Device &inp,
@@ -390,100 +594,188 @@ Device make(const parser::OmpClause::Device &inp,
// inp.v -> parser::OmpDeviceClause
using wrapped = parser::OmpDeviceClause;
+ CLAUSET_ENUM_CONVERT( //
+ convert, parser::OmpDeviceClause::DeviceModifier, Device::DeviceModifier,
+ // clang-format off
+ MS(Ancestor, Ancestor)
+ MS(Device_Num, DeviceNum)
+ // clang-format on
+ );
auto &t0 = std::get<std::optional<wrapped::DeviceModifier>>(inp.v.t);
auto &t1 = std::get<parser::ScalarIntExpr>(inp.v.t);
- return Device{{t0, makeExpr(t1, semaCtx)}};
+ return Device{{/*DeviceModifier=*/maybeApply(convert, t0),
+ /*DeviceDescription=*/makeExpr(t1, semaCtx)}};
}
DeviceType make(const parser::OmpClause::DeviceType &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpDeviceTypeClause
- return DeviceType{inp.v.v};
+ using wrapped = parser::OmpDeviceTypeClause;
+
+ CLAUSET_ENUM_CONVERT( //
+ convert, wrapped::Type, DeviceType::DeviceTypeDescription,
+ // clang-format off
+ MS(Any, Any)
+ MS(Host, Host)
+ MS(Nohost, Nohost)
+ // clang-format om
+ );
+ return DeviceType{/*DeviceTypeDescription=*/convert(inp.v.v)};
}
DistSchedule make(const parser::OmpClause::DistSchedule &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> std::optional<parser::ScalarIntExpr>
- return DistSchedule{maybeApply(makeExprFn(semaCtx), inp.v)};
+ return DistSchedule{{/*Kind=*/DistSchedule::Kind::Static,
+ /*ChunkSize=*/maybeApply(makeExprFn(semaCtx), inp.v)}};
}
+Doacross make(const parser::OmpClause::Doacross &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: doacross");
+}
+
+// DynamicAllocators: empty
+
Enter make(const parser::OmpClause::Enter &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Enter{makeList(inp.v, semaCtx)};
+ return Enter{makeObjects(/*List=*/inp.v, semaCtx)};
+}
+
+Exclusive make(const parser::OmpClause::Exclusive &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: exclusive");
+}
+
+Fail make(const parser::OmpClause::Fail &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: fail");
}
Filter make(const parser::OmpClause::Filter &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return Filter{makeExpr(inp.v, semaCtx)};
+ return Filter{/*ThreadNum=*/makeExpr(inp.v, semaCtx)};
}
Final make(const parser::OmpClause::Final &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarLogicalExpr
- return Final{makeExpr(inp.v, semaCtx)};
+ return Final{/*Finalize=*/makeExpr(inp.v, semaCtx)};
}
Firstprivate make(const parser::OmpClause::Firstprivate &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Firstprivate{makeList(inp.v, semaCtx)};
+ return Firstprivate{/*List=*/makeObjects(inp.v, semaCtx)};
}
+// Flush: empty
+
From make(const parser::OmpClause::From &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return From{makeList(inp.v, semaCtx)};
+ return From{{/*Expectation=*/std::nullopt, /*Mapper=*/std::nullopt,
+ /*Iterator=*/std::nullopt,
+ /*LocatorList=*/makeObjects(inp.v, semaCtx)}};
}
+// Full: empty
+
Grainsize make(const parser::OmpClause::Grainsize &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return Grainsize{makeExpr(inp.v, semaCtx)};
+ return Grainsize{{/*Prescriptiveness=*/std::nullopt,
+ /*GrainSize=*/makeExpr(inp.v, semaCtx)}};
}
HasDeviceAddr make(const parser::OmpClause::HasDeviceAddr &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return HasDeviceAddr{makeList(inp.v, semaCtx)};
+ return HasDeviceAddr{/*List=*/makeObjects(inp.v, semaCtx)};
}
Hint make(const parser::OmpClause::Hint &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ConstantExpr
- return Hint{makeExpr(inp.v, semaCtx)};
+ return Hint{/*HintExpr=*/makeExpr(inp.v, semaCtx)};
}
+// Holds: missing-in-parser
+
If make(const parser::OmpClause::If &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpIfClause
using wrapped = parser::OmpIfClause;
+ CLAUSET_ENUM_CONVERT( //
+ convert, wrapped::DirectiveNameModifier, llvm::omp::Directive,
+ // clang-format off
+ MS(Parallel, OMPD_parallel)
+ MS(Simd, OMPD_simd)
+ MS(Target, OMPD_target)
+ MS(TargetData, OMPD_target_data)
+ MS(TargetEnterData, OMPD_target_enter_data)
+ MS(TargetExitData, OMPD_target_exit_data)
+ MS(TargetUpdate, OMPD_target_update)
+ MS(Task, OMPD_task)
+ MS(Taskloop, OMPD_taskloop)
+ MS(Teams, OMPD_teams)
+ // clang-format on
+ );
auto &t0 = std::get<std::optional<wrapped::DirectiveNameModifier>>(inp.v.t);
auto &t1 = std::get<parser::ScalarLogicalExpr>(inp.v.t);
- return If{{t0, makeExpr(t1, semaCtx)}};
+ return If{{/*DirectiveNameModifier=*/maybeApply(convert, t0),
+ /*IfExpression=*/makeExpr(t1, semaCtx)}};
}
+// Inbranch: empty
+
+Inclusive make(const parser::OmpClause::Inclusive &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: inclusive");
+}
+
+Indirect make(const parser::OmpClause::Indirect &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: indirect");
+}
+
+Init make(const parser::OmpClause::Init &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: init");
+}
+
+// Initializer: missing-in-parser
+
InReduction make(const parser::OmpClause::InReduction &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpInReductionClause
auto &t0 = std::get<parser::OmpReductionOperator>(inp.v.t);
auto &t1 = std::get<parser::OmpObjectList>(inp.v.t);
return InReduction{
- {makeReductionOperator(t0, semaCtx), makeList(t1, semaCtx)}};
+ {/*ReductionIdentifiers=*/{makeReductionOperator(t0, semaCtx)},
+ /*List=*/makeObjects(t1, semaCtx)}};
}
IsDevicePtr make(const parser::OmpClause::IsDevicePtr &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return IsDevicePtr{makeList(inp.v, semaCtx)};
+ return IsDevicePtr{/*List=*/makeObjects(inp.v, semaCtx)};
}
Lastprivate make(const parser::OmpClause::Lastprivate &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Lastprivate{makeList(inp.v, semaCtx)};
+ return Lastprivate{{/*LastprivateModifier=*/std::nullopt,
+ /*List=*/makeObjects(inp.v, semaCtx)}};
}
Linear make(const parser::OmpClause::Linear &inp,
@@ -491,140 +783,244 @@ Linear make(const parser::OmpClause::Linear &inp,
// inp.v -> parser::OmpLinearClause
using wrapped = parser::OmpLinearClause;
- return std::visit(
+ CLAUSET_ENUM_CONVERT( //
+ convert, parser::OmpLinearModifier::Type, Linear::LinearModifier,
+ // clang-format off
+ MS(Ref, Ref)
+ MS(Val, Val)
+ MS(Uval, Uval)
+ // clang-format on
+ );
+
+ using Tuple = decltype(Linear::t);
+
+ return Linear{std::visit(
common::visitors{
- [&](const wrapped::WithModifier &s) {
- return Linear{{Linear::Modifier{s.modifier.v},
- makeList(s.names, makeObjectFn(semaCtx)),
- maybeApply(makeExprFn(semaCtx), s.step)}};
+ [&](const wrapped::WithModifier &s) -> Tuple {
+ return {
+ /*StepSimpleModifier=*/std::nullopt,
+ /*StepComplexModifier=*/maybeApply(makeExprFn(semaCtx), s.step),
+ /*LinearModifier=*/convert(s.modifier.v),
+ /*List=*/makeList(s.names, makeObjectFn(semaCtx))};
},
- [&](const wrapped::WithoutModifier &s) {
- return Linear{{std::nullopt,
- makeList(s.names, makeObjectFn(semaCtx)),
- maybeApply(makeExprFn(semaCtx), s.step)}};
+ [&](const wrapped::WithoutModifier &s) -> Tuple {
+ return {
+ /*StepSimpleModifier=*/maybeApply(makeExprFn(semaCtx), s.step),
+ /*StepComplexModifier=*/std::nullopt,
+ /*LinearModifier=*/std::nullopt,
+ /*List=*/makeList(s.names, makeObjectFn(semaCtx))};
},
},
- inp.v.u);
+ inp.v.u)};
}
Link make(const parser::OmpClause::Link &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Link{makeList(inp.v, semaCtx)};
+ return Link{/*List=*/makeObjects(inp.v, semaCtx)};
}
Map make(const parser::OmpClause::Map &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpMapClause
+
+ CLAUSET_ENUM_CONVERT( //
+ convert1, parser::OmpMapType::Type, Map::MapType,
+ // clang-format off
+ MS(To, To)
+ MS(From, From)
+ MS(Tofrom, Tofrom)
+ MS(Alloc, Alloc)
+ MS(Release, Release)
+ MS(Delete, Delete)
+ // clang-format on
+ );
+
+ // No convert2: MapTypeModifier is not an enum in parser.
+
auto &t0 = std::get<std::optional<parser::OmpMapType>>(inp.v.t);
auto &t1 = std::get<parser::OmpObjectList>(inp.v.t);
- auto convert = [](const parser::OmpMapType &s) {
- auto &s0 = std::get<std::optional<parser::OmpMapType::Always>>(s.t);
- auto &s1 = std::get<parser::OmpMapType::Type>(s.t);
- auto convertT = [](parser::OmpMapType::Always) {
- return Map::MapType::Always{};
- };
- return Map::MapType{{maybeApply(convertT, s0), s1}};
- };
- return Map{{maybeApply(convert, t0), makeList(t1, semaCtx)}};
+
+ if (!t0) {
+ return Map{{/*MapType=*/std::nullopt, /*MapTypeModifiers=*/std::nullopt,
+ /*Mapper=*/std::nullopt, /*Iterator=*/std::nullopt,
+ /*LocatorList=*/makeObjects(t1, semaCtx)}};
+ }
+
+ auto &s0 = std::get<std::optional<parser::OmpMapType::Always>>(t0->t);
+ auto &s1 = std::get<parser::OmpMapType::Type>(t0->t);
+
+ std::optional<Map::MapTypeModifiers> maybeList;
+ if (s0)
+ maybeList = Map::MapTypeModifiers{Map::MapTypeModifier::Always};
+
+ return Map{{/*MapType=*/convert1(s1),
+ /*MapTypeModifiers=*/maybeList,
+ /*Mapper=*/std::nullopt, /*Iterator=*/std::nullopt,
+ /*LocatorList=*/makeObjects(t1, semaCtx)}};
+}
+
+// Match: incomplete
+// MemoryOrder: empty
+// Mergeable: empty
+
+Message make(const parser::OmpClause::Message &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: message");
}
Nocontext make(const parser::OmpClause::Nocontext &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarLogicalExpr
- return Nocontext{makeExpr(inp.v, semaCtx)};
+ return Nocontext{/*DoNotUpdateContext=*/makeExpr(inp.v, semaCtx)};
}
+// Nogroup: empty
+
Nontemporal make(const parser::OmpClause::Nontemporal &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> std::list<parser::Name>
- return Nontemporal{makeList(inp.v, makeObjectFn(semaCtx))};
+ return Nontemporal{/*List=*/makeList(inp.v, makeObjectFn(semaCtx))};
}
+// NoOpenmp: missing-in-parser
+// NoOpenmpRoutines: missing-in-parser
+// NoParallelism: missing-in-parser
+// Notinbranch: empty
+
Novariants make(const parser::OmpClause::Novariants &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarLogicalExpr
- return Novariants{makeExpr(inp.v, semaCtx)};
+ return Novariants{/*DoNotUseVariant=*/makeExpr(inp.v, semaCtx)};
}
+// Nowait: empty
+
NumTasks make(const parser::OmpClause::NumTasks &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return NumTasks{makeExpr(inp.v, semaCtx)};
+ return NumTasks{{/*Prescriptiveness=*/std::nullopt,
+ /*NumTasks=*/makeExpr(inp.v, semaCtx)}};
}
NumTeams make(const parser::OmpClause::NumTeams &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return NumTeams{makeExpr(inp.v, semaCtx)};
+ return NumTeams{{/*LowerBound=*/std::nullopt,
+ /*UpperBound=*/makeExpr(inp.v, semaCtx)}};
}
NumThreads make(const parser::OmpClause::NumThreads &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return NumThreads{makeExpr(inp.v, semaCtx)};
+ return NumThreads{/*Nthreads=*/makeExpr(inp.v, semaCtx)};
}
+// OmpxAttribute: empty
+// OmpxBare: empty
+
OmpxDynCgroupMem make(const parser::OmpClause::OmpxDynCgroupMem &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
return OmpxDynCgroupMem{makeExpr(inp.v, semaCtx)};
}
-Ordered make(const parser::OmpClause::Ordered &inp,
- semantics::SemanticsContext &semaCtx) {
- // inp.v -> std::optional<parser::ScalarIntConstantExpr>
- return Ordered{maybeApply(makeExprFn(semaCtx), inp.v)};
-}
-
Order make(const parser::OmpClause::Order &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpOrderClause
using wrapped = parser::OmpOrderClause;
+
+ CLAUSET_ENUM_CONVERT( //
+ convert1, parser::OmpOrderModifier::Kind, Order::OrderModifier,
+ // clang-format off
+ MS(Reproducible, Reproducible)
+ MS(Unconstrained, Unconstrained)
+ // clang-format on
+ );
+
+ CLAUSET_ENUM_CONVERT( //
+ convert2, wrapped::Type, Order::Ordering,
+ // clang-format off
+ MS(Concurrent, Concurrent)
+ // clang-format on
+ );
+
auto &t0 = std::get<std::optional<parser::OmpOrderModifier>>(inp.v.t);
auto &t1 = std::get<wrapped::Type>(inp.v.t);
- auto convert = [](const parser::OmpOrderModifier &s) -> Order::Kind {
- return std::get<parser::OmpOrderModifier::Kind>(s.u);
+
+ auto convert3 = [&](const parser::OmpOrderModifier &s) {
+ return std::visit(
+ [&](parser::OmpOrderModifier::Kind k) { return convert1(k); }, s.u);
};
- return Order{{maybeApply(convert, t0), t1}};
+ return Order{
+ {/*OrderModifier=*/maybeApply(convert3, t0), /*Ordering=*/convert2(t1)}};
+}
+
+Ordered make(const parser::OmpClause::Ordered &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> std::optional<parser::ScalarIntConstantExpr>
+ return Ordered{/*N=*/maybeApply(makeExprFn(semaCtx), inp.v)};
}
+// Otherwise: incomplete, missing-in-parser
+
Partial make(const parser::OmpClause::Partial &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> std::optional<parser::ScalarIntConstantExpr>
- return Partial{maybeApply(makeExprFn(semaCtx), inp.v)};
+ return Partial{/*UnrollFactor=*/maybeApply(makeExprFn(semaCtx), inp.v)};
}
Priority make(const parser::OmpClause::Priority &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return Priority{makeExpr(inp.v, semaCtx)};
+ return Priority{/*PriorityValue=*/makeExpr(inp.v, semaCtx)};
}
Private make(const parser::OmpClause::Private &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Private{makeList(inp.v, semaCtx)};
+ return Private{/*List=*/makeObjects(inp.v, semaCtx)};
}
ProcBind make(const parser::OmpClause::ProcBind &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpProcBindClause
- return ProcBind{inp.v.v};
+ using wrapped = parser::OmpProcBindClause;
+
+ CLAUSET_ENUM_CONVERT( //
+ convert, wrapped::Type, ProcBind::AffinityPolicy,
+ // clang-format off
+ MS(Close, Close)
+ MS(Master, Master)
+ MS(Spread, Spread)
+ MS(Primary, Primary)
+ // clang-format on
+ );
+ return ProcBind{/*AffinityPolicy=*/convert(inp.v.v)};
}
+// Read: empty
+
Reduction make(const parser::OmpClause::Reduction &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpReductionClause
auto &t0 = std::get<parser::OmpReductionOperator>(inp.v.t);
auto &t1 = std::get<parser::OmpObjectList>(inp.v.t);
- return Reduction{{makeReductionOperator(t0, semaCtx), makeList(t1, semaCtx)}};
+ return Reduction{
+ {/*ReductionIdentifiers=*/{makeReductionOperator(t0, semaCtx)},
+ /*ReductionModifier=*/std::nullopt,
+ /*List=*/makeObjects(t1, semaCtx)}};
}
+// Relaxed: empty
+// Release: empty
+// ReverseOffload: empty
+
Safelen make(const parser::OmpClause::Safelen &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntConstantExpr
- return Safelen{makeExpr(inp.v, semaCtx)};
+ return Safelen{/*Length=*/makeExpr(inp.v, semaCtx)};
}
Schedule make(const parser::OmpClause::Schedule &inp,
@@ -632,41 +1028,97 @@ Schedule make(const parser::OmpClause::Schedule &inp,
// inp.v -> parser::OmpScheduleClause
using wrapped = parser::OmpScheduleClause;
+ CLAUSET_ENUM_CONVERT( //
+ convert1, wrapped::ScheduleType, Schedule::Kind,
+ // clang-format off
+ MS(Static, Static)
+ MS(Dynamic, Dynamic)
+ MS(Guided, Guided)
+ MS(Auto, Auto)
+ MS(Runtime, Runtime)
+ // clang-format on
+ );
+
+ CLAUSET_ENUM_CONVERT( //
+ convert2, parser::OmpScheduleModifierType::ModType,
+ Schedule::OrderingModifier,
+ // clang-format off
+ MS(Monotonic, Monotonic)
+ MS(Nonmonotonic, Nonmonotonic)
+ // clang-format on
+ );
+
+ CLAUSET_ENUM_CONVERT( //
+ convert3, parser::OmpScheduleModifierType::ModType,
+ Schedule::ChunkModifier,
+ // clang-format off
+ MS(Simd, Simd)
+ // clang-format on
+ );
+
auto &t0 = std::get<std::optional<parser::OmpScheduleModifier>>(inp.v.t);
auto &t1 = std::get<wrapped::ScheduleType>(inp.v.t);
auto &t2 = std::get<std::optional<parser::ScalarIntExpr>>(inp.v.t);
- auto convert = [](auto &&s) -> Schedule::ScheduleModifier {
- auto &s0 = std::get<parser::OmpScheduleModifier::Modifier1>(s.t);
- auto &s1 =
- std::get<std::optional<parser::OmpScheduleModifier::Modifier2>>(s.t);
+ if (!t0) {
+ return Schedule{{/*Kind=*/convert1(t1), /*OrderingModifier=*/std::nullopt,
+ /*ChunkModifier=*/std::nullopt,
+ /*ChunkSize=*/maybeApply(makeExprFn(semaCtx), t2)}};
+ }
- auto convert1 = [](auto &&v) { // Modifier1 or Modifier2
- return v.v.v;
- };
- return Schedule::ScheduleModifier{{s0.v.v, maybeApply(convert1, s1)}};
- };
+ // The members of parser::OmpScheduleModifier correspond to OrderingModifier,
+ // and ChunkModifier, but they can appear in any order.
+ auto &m1 = std::get<parser::OmpScheduleModifier::Modifier1>(t0->t);
+ auto &m2 =
+ std::get<std::optional<parser::OmpScheduleModifier::Modifier2>>(t0->t);
+
+ std::optional<Schedule::OrderingModifier> omod;
+ std::optional<Schedule::ChunkModifier> cmod;
+
+ if (m1.v.v == parser::OmpScheduleModifierType::ModType::Simd) {
+ // m1 is chunk-modifier
+ cmod = convert3(m1.v.v);
+ if (m2)
+ omod = convert2(m2->v.v);
+ } else {
+ // m1 is ordering-modifier
+ omod = convert2(m1.v.v);
+ if (m2)
+ cmod = convert3(m2->v.v);
+ }
- return Schedule{
- {maybeApply(convert, t0), t1, maybeApply(makeExprFn(semaCtx), t2)}};
+ return Schedule{{/*Kind=*/convert1(t1),
+ /*OrderingModifier=*/omod,
+ /*ChunkModifier=*/cmod,
+ /*ChunkSize=*/maybeApply(makeExprFn(semaCtx), t2)}};
+}
+
+// SeqCst: empty
+
+Severity make(const parser::OmpClause::Severity &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: severity");
}
Shared make(const parser::OmpClause::Shared &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return Shared{makeList(inp.v, semaCtx)};
+ return Shared{/*List=*/makeObjects(inp.v, semaCtx)};
}
+// Simd: empty
+
Simdlen make(const parser::OmpClause::Simdlen &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntConstantExpr
- return Simdlen{makeExpr(inp.v, semaCtx)};
+ return Simdlen{/*Length=*/makeExpr(inp.v, semaCtx)};
}
Sizes make(const parser::OmpClause::Sizes &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> std::list<parser::ScalarIntExpr>
- return Sizes{makeList(inp.v, makeExprFn(semaCtx))};
+ return Sizes{/*SizeList=*/makeList(inp.v, makeExprFn(semaCtx))};
}
TaskReduction make(const parser::OmpClause::TaskReduction &inp,
@@ -675,38 +1127,72 @@ TaskReduction make(const parser::OmpClause::TaskReduction &inp,
auto &t0 = std::get<parser::OmpReductionOperator>(inp.v.t);
auto &t1 = std::get<parser::OmpObjectList>(inp.v.t);
return TaskReduction{
- {makeReductionOperator(t0, semaCtx), makeList(t1, semaCtx)}};
+ {/*ReductionIdentifiers=*/{makeReductionOperator(t0, semaCtx)},
+ /*List=*/makeObjects(t1, semaCtx)}};
}
ThreadLimit make(const parser::OmpClause::ThreadLimit &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::ScalarIntExpr
- return ThreadLimit{makeExpr(inp.v, semaCtx)};
+ return ThreadLimit{/*Threadlim=*/makeExpr(inp.v, semaCtx)};
}
+// Threadprivate: empty
+// Threads: empty
+
To make(const parser::OmpClause::To &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return To{makeList(inp.v, semaCtx)};
+ return To{{/*Expectation=*/std::nullopt, /*Mapper=*/std::nullopt,
+ /*Iterator=*/std::nullopt,
+ /*LocatorList=*/makeObjects(inp.v, semaCtx)}};
}
+// UnifiedAddress: empty
+// UnifiedSharedMemory: empty
+
Uniform make(const parser::OmpClause::Uniform &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> std::list<parser::Name>
- return Uniform{makeList(inp.v, makeObjectFn(semaCtx))};
+ return Uniform{/*ParameterList=*/makeList(inp.v, makeObjectFn(semaCtx))};
+}
+
+// Unknown: empty
+// Untied: empty
+
+Update make(const parser::OmpClause::Update &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ return Update{/*TaskDependenceType=*/std::nullopt};
+}
+
+Use make(const parser::OmpClause::Use &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: use");
}
UseDeviceAddr make(const parser::OmpClause::UseDeviceAddr &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return UseDeviceAddr{makeList(inp.v, semaCtx)};
+ return UseDeviceAddr{/*List=*/makeObjects(inp.v, semaCtx)};
}
UseDevicePtr make(const parser::OmpClause::UseDevicePtr &inp,
semantics::SemanticsContext &semaCtx) {
// inp.v -> parser::OmpObjectList
- return UseDevicePtr{makeList(inp.v, semaCtx)};
+ return UseDevicePtr{/*List=*/makeObjects(inp.v, semaCtx)};
}
+
+UsesAllocators make(const parser::OmpClause::UsesAllocators &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp -> empty
+ llvm_unreachable("Empty: uses_allocators");
+}
+
+// Weak: empty
+// When: incomplete
+// Write: empty
} // namespace clause
Clause makeClause(const Fortran::parser::OmpClause &cls,
@@ -719,8 +1205,8 @@ Clause makeClause(const Fortran::parser::OmpClause &cls,
cls.u);
}
-List<Clause> makeList(const parser::OmpClauseList &clauses,
- semantics::SemanticsContext &semaCtx) {
+List<Clause> makeClauses(const parser::OmpClauseList &clauses,
+ semantics::SemanticsContext &semaCtx) {
return makeList(clauses.v, [&](const parser::OmpClause &s) {
return makeClause(s, semaCtx);
});
diff --git a/flang/lib/Lower/OpenMP/Clauses.h b/flang/lib/Lower/OpenMP/Clauses.h
index 3fba593b5349..3e776425c733 100644
--- a/flang/lib/Lower/OpenMP/Clauses.h
+++ b/flang/lib/Lower/OpenMP/Clauses.h
@@ -8,8 +8,6 @@
#ifndef FORTRAN_LOWER_OPENMP_CLAUSES_H
#define FORTRAN_LOWER_OPENMP_CLAUSES_H
-#include "ClauseT.h"
-
#include "flang/Evaluate/expression.h"
#include "flang/Parser/parse-tree.h"
#include "flang/Semantics/expression.h"
@@ -17,6 +15,7 @@
#include "flang/Semantics/symbol.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Frontend/OpenMP/ClauseT.h"
#include <optional>
#include <type_traits>
@@ -28,32 +27,32 @@ using SomeType = evaluate::SomeType;
using SomeExpr = semantics::SomeExpr;
using MaybeExpr = semantics::MaybeExpr;
-using SymIdent = semantics::Symbol *;
-using SymReference = SomeExpr;
+using TypeTy = SomeType;
+using IdTy = semantics::Symbol *;
+using ExprTy = SomeExpr;
template <typename T>
using List = tomp::ListT<T>;
} // namespace Fortran::lower::omp
-namespace tomp {
+namespace tomp::type {
template <>
-struct ObjectT<Fortran::lower::omp::SymIdent,
- Fortran::lower::omp::SymReference> {
- using IdType = Fortran::lower::omp::SymIdent;
- using ExprType = Fortran::lower::omp::SymReference;
+struct ObjectT<Fortran::lower::omp::IdTy, Fortran::lower::omp::ExprTy> {
+ using IdTy = Fortran::lower::omp::IdTy;
+ using ExprTy = Fortran::lower::omp::ExprTy;
- const IdType &id() const { return symbol; }
- const std::optional<ExprType> &ref() const { return designator; }
+ const IdTy &id() const { return symbol; }
+ const std::optional<ExprTy> &ref() const { return designator; }
- IdType symbol;
- std::optional<ExprType> designator;
+ IdTy symbol;
+ std::optional<ExprTy> designator;
};
-} // namespace tomp
+} // namespace tomp::type
namespace Fortran::lower::omp {
-using Object = tomp::ObjectT<SymIdent, SymReference>;
-using ObjectList = tomp::ObjectListT<SymIdent, SymReference>;
+using Object = tomp::ObjectT<IdTy, ExprTy>;
+using ObjectList = tomp::ObjectListT<IdTy, ExprTy>;
Object makeObject(const parser::OmpObject &object,
semantics::SemanticsContext &semaCtx);
@@ -89,18 +88,19 @@ List<ResultTy> makeList(ContainerTy &&container, FunctionTy &&func) {
return v;
}
-inline ObjectList makeList(const parser::OmpObjectList &objects,
- semantics::SemanticsContext &semaCtx) {
+inline ObjectList makeObjects(const parser::OmpObjectList &objects,
+ semantics::SemanticsContext &semaCtx) {
return makeList(objects.v, makeObjectFn(semaCtx));
}
-template <typename FuncTy, typename ElemTy,
- typename ResultTy = std::invoke_result_t<FuncTy, ElemTy>>
+template <typename FuncTy, //
+ typename ArgTy, //
+ typename ResultTy = std::invoke_result_t<FuncTy, ArgTy>>
std::optional<ResultTy> maybeApply(FuncTy &&func,
- const std::optional<ElemTy> &inp) {
- if (!inp)
+ const std::optional<ArgTy> &arg) {
+ if (!arg)
return std::nullopt;
- return std::move(func(*inp));
+ return std::move(func(*arg));
}
std::optional<Object>
@@ -108,91 +108,142 @@ getBaseObject(const Object &object,
Fortran::semantics::SemanticsContext &semaCtx);
namespace clause {
-using DefinedOperator = tomp::clause::DefinedOperatorT<SymIdent, SymReference>;
-using ProcedureDesignator =
- tomp::clause::ProcedureDesignatorT<SymIdent, SymReference>;
-using ReductionOperator =
- tomp::clause::ReductionOperatorT<SymIdent, SymReference>;
-
-#ifdef EMPTY_CLASS
-#undef EMPTY_CLASS
-#endif
-#define EMPTY_CLASS(cls) \
- using cls = tomp::clause::cls##T<SymIdent, SymReference>
-
-#ifdef WRAPPER_CLASS
-#undef WRAPPER_CLASS
-#endif
-#define WRAPPER_CLASS(cls, content) \
- [[maybe_unused]] extern int xyzzy_semicolon_absorber
-#define GEN_FLANG_CLAUSE_PARSER_CLASSES
-#include "llvm/Frontend/OpenMP/OMP.inc"
-#undef EMPTY_CLASS
-#undef WRAPPER_CLASS
+using DefinedOperator = tomp::type::DefinedOperatorT<IdTy, ExprTy>;
+using ProcedureDesignator = tomp::type::ProcedureDesignatorT<IdTy, ExprTy>;
+using ReductionOperator = tomp::type::ReductionIdentifierT<IdTy, ExprTy>;
// "Requires" clauses are handled early on, and the aggregated information
// is stored in the Symbol details of modules, programs, and subprograms.
// These clauses are still handled here to cover all alternatives in the
// main clause variant.
-using Aligned = tomp::clause::AlignedT<SymIdent, SymReference>;
-using Allocate = tomp::clause::AllocateT<SymIdent, SymReference>;
-using Allocator = tomp::clause::AllocatorT<SymIdent, SymReference>;
+using AcqRel = tomp::clause::AcqRelT<TypeTy, IdTy, ExprTy>;
+using Acquire = tomp::clause::AcquireT<TypeTy, IdTy, ExprTy>;
+using AdjustArgs = tomp::clause::AdjustArgsT<TypeTy, IdTy, ExprTy>;
+using Affinity = tomp::clause::AffinityT<TypeTy, IdTy, ExprTy>;
+using Aligned = tomp::clause::AlignedT<TypeTy, IdTy, ExprTy>;
+using Align = tomp::clause::AlignT<TypeTy, IdTy, ExprTy>;
+using Allocate = tomp::clause::AllocateT<TypeTy, IdTy, ExprTy>;
+using Allocator = tomp::clause::AllocatorT<TypeTy, IdTy, ExprTy>;
+using AppendArgs = tomp::clause::AppendArgsT<TypeTy, IdTy, ExprTy>;
using AtomicDefaultMemOrder =
- tomp::clause::AtomicDefaultMemOrderT<SymIdent, SymReference>;
-using Collapse = tomp::clause::CollapseT<SymIdent, SymReference>;
-using Copyin = tomp::clause::CopyinT<SymIdent, SymReference>;
-using Copyprivate = tomp::clause::CopyprivateT<SymIdent, SymReference>;
-using Defaultmap = tomp::clause::DefaultmapT<SymIdent, SymReference>;
-using Default = tomp::clause::DefaultT<SymIdent, SymReference>;
-using Depend = tomp::clause::DependT<SymIdent, SymReference>;
-using Device = tomp::clause::DeviceT<SymIdent, SymReference>;
-using DeviceType = tomp::clause::DeviceTypeT<SymIdent, SymReference>;
-using DistSchedule = tomp::clause::DistScheduleT<SymIdent, SymReference>;
-using Enter = tomp::clause::EnterT<SymIdent, SymReference>;
-using Filter = tomp::clause::FilterT<SymIdent, SymReference>;
-using Final = tomp::clause::FinalT<SymIdent, SymReference>;
-using Firstprivate = tomp::clause::FirstprivateT<SymIdent, SymReference>;
-using From = tomp::clause::FromT<SymIdent, SymReference>;
-using Grainsize = tomp::clause::GrainsizeT<SymIdent, SymReference>;
-using HasDeviceAddr = tomp::clause::HasDeviceAddrT<SymIdent, SymReference>;
-using Hint = tomp::clause::HintT<SymIdent, SymReference>;
-using If = tomp::clause::IfT<SymIdent, SymReference>;
-using InReduction = tomp::clause::InReductionT<SymIdent, SymReference>;
-using IsDevicePtr = tomp::clause::IsDevicePtrT<SymIdent, SymReference>;
-using Lastprivate = tomp::clause::LastprivateT<SymIdent, SymReference>;
-using Linear = tomp::clause::LinearT<SymIdent, SymReference>;
-using Link = tomp::clause::LinkT<SymIdent, SymReference>;
-using Map = tomp::clause::MapT<SymIdent, SymReference>;
-using Nocontext = tomp::clause::NocontextT<SymIdent, SymReference>;
-using Nontemporal = tomp::clause::NontemporalT<SymIdent, SymReference>;
-using Novariants = tomp::clause::NovariantsT<SymIdent, SymReference>;
-using NumTasks = tomp::clause::NumTasksT<SymIdent, SymReference>;
-using NumTeams = tomp::clause::NumTeamsT<SymIdent, SymReference>;
-using NumThreads = tomp::clause::NumThreadsT<SymIdent, SymReference>;
-using OmpxDynCgroupMem =
- tomp::clause::OmpxDynCgroupMemT<SymIdent, SymReference>;
-using Ordered = tomp::clause::OrderedT<SymIdent, SymReference>;
-using Order = tomp::clause::OrderT<SymIdent, SymReference>;
-using Partial = tomp::clause::PartialT<SymIdent, SymReference>;
-using Priority = tomp::clause::PriorityT<SymIdent, SymReference>;
-using Private = tomp::clause::PrivateT<SymIdent, SymReference>;
-using ProcBind = tomp::clause::ProcBindT<SymIdent, SymReference>;
-using Reduction = tomp::clause::ReductionT<SymIdent, SymReference>;
-using Safelen = tomp::clause::SafelenT<SymIdent, SymReference>;
-using Schedule = tomp::clause::ScheduleT<SymIdent, SymReference>;
-using Shared = tomp::clause::SharedT<SymIdent, SymReference>;
-using Simdlen = tomp::clause::SimdlenT<SymIdent, SymReference>;
-using Sizes = tomp::clause::SizesT<SymIdent, SymReference>;
-using TaskReduction = tomp::clause::TaskReductionT<SymIdent, SymReference>;
-using ThreadLimit = tomp::clause::ThreadLimitT<SymIdent, SymReference>;
-using To = tomp::clause::ToT<SymIdent, SymReference>;
-using Uniform = tomp::clause::UniformT<SymIdent, SymReference>;
-using UseDeviceAddr = tomp::clause::UseDeviceAddrT<SymIdent, SymReference>;
-using UseDevicePtr = tomp::clause::UseDevicePtrT<SymIdent, SymReference>;
+ tomp::clause::AtomicDefaultMemOrderT<TypeTy, IdTy, ExprTy>;
+using At = tomp::clause::AtT<TypeTy, IdTy, ExprTy>;
+using Bind = tomp::clause::BindT<TypeTy, IdTy, ExprTy>;
+using Capture = tomp::clause::CaptureT<TypeTy, IdTy, ExprTy>;
+using Collapse = tomp::clause::CollapseT<TypeTy, IdTy, ExprTy>;
+using Compare = tomp::clause::CompareT<TypeTy, IdTy, ExprTy>;
+using Copyin = tomp::clause::CopyinT<TypeTy, IdTy, ExprTy>;
+using Copyprivate = tomp::clause::CopyprivateT<TypeTy, IdTy, ExprTy>;
+using Defaultmap = tomp::clause::DefaultmapT<TypeTy, IdTy, ExprTy>;
+using Default = tomp::clause::DefaultT<TypeTy, IdTy, ExprTy>;
+using Depend = tomp::clause::DependT<TypeTy, IdTy, ExprTy>;
+using Destroy = tomp::clause::DestroyT<TypeTy, IdTy, ExprTy>;
+using Detach = tomp::clause::DetachT<TypeTy, IdTy, ExprTy>;
+using Device = tomp::clause::DeviceT<TypeTy, IdTy, ExprTy>;
+using DeviceType = tomp::clause::DeviceTypeT<TypeTy, IdTy, ExprTy>;
+using DistSchedule = tomp::clause::DistScheduleT<TypeTy, IdTy, ExprTy>;
+using Doacross = tomp::clause::DoacrossT<TypeTy, IdTy, ExprTy>;
+using DynamicAllocators =
+ tomp::clause::DynamicAllocatorsT<TypeTy, IdTy, ExprTy>;
+using Enter = tomp::clause::EnterT<TypeTy, IdTy, ExprTy>;
+using Exclusive = tomp::clause::ExclusiveT<TypeTy, IdTy, ExprTy>;
+using Fail = tomp::clause::FailT<TypeTy, IdTy, ExprTy>;
+using Filter = tomp::clause::FilterT<TypeTy, IdTy, ExprTy>;
+using Final = tomp::clause::FinalT<TypeTy, IdTy, ExprTy>;
+using Firstprivate = tomp::clause::FirstprivateT<TypeTy, IdTy, ExprTy>;
+using From = tomp::clause::FromT<TypeTy, IdTy, ExprTy>;
+using Full = tomp::clause::FullT<TypeTy, IdTy, ExprTy>;
+using Grainsize = tomp::clause::GrainsizeT<TypeTy, IdTy, ExprTy>;
+using HasDeviceAddr = tomp::clause::HasDeviceAddrT<TypeTy, IdTy, ExprTy>;
+using Hint = tomp::clause::HintT<TypeTy, IdTy, ExprTy>;
+using If = tomp::clause::IfT<TypeTy, IdTy, ExprTy>;
+using Inbranch = tomp::clause::InbranchT<TypeTy, IdTy, ExprTy>;
+using Inclusive = tomp::clause::InclusiveT<TypeTy, IdTy, ExprTy>;
+using Indirect = tomp::clause::IndirectT<TypeTy, IdTy, ExprTy>;
+using Init = tomp::clause::InitT<TypeTy, IdTy, ExprTy>;
+using InReduction = tomp::clause::InReductionT<TypeTy, IdTy, ExprTy>;
+using IsDevicePtr = tomp::clause::IsDevicePtrT<TypeTy, IdTy, ExprTy>;
+using Lastprivate = tomp::clause::LastprivateT<TypeTy, IdTy, ExprTy>;
+using Linear = tomp::clause::LinearT<TypeTy, IdTy, ExprTy>;
+using Link = tomp::clause::LinkT<TypeTy, IdTy, ExprTy>;
+using Map = tomp::clause::MapT<TypeTy, IdTy, ExprTy>;
+using Match = tomp::clause::MatchT<TypeTy, IdTy, ExprTy>;
+using Mergeable = tomp::clause::MergeableT<TypeTy, IdTy, ExprTy>;
+using Message = tomp::clause::MessageT<TypeTy, IdTy, ExprTy>;
+using Nocontext = tomp::clause::NocontextT<TypeTy, IdTy, ExprTy>;
+using Nogroup = tomp::clause::NogroupT<TypeTy, IdTy, ExprTy>;
+using Nontemporal = tomp::clause::NontemporalT<TypeTy, IdTy, ExprTy>;
+using Notinbranch = tomp::clause::NotinbranchT<TypeTy, IdTy, ExprTy>;
+using Novariants = tomp::clause::NovariantsT<TypeTy, IdTy, ExprTy>;
+using Nowait = tomp::clause::NowaitT<TypeTy, IdTy, ExprTy>;
+using NumTasks = tomp::clause::NumTasksT<TypeTy, IdTy, ExprTy>;
+using NumTeams = tomp::clause::NumTeamsT<TypeTy, IdTy, ExprTy>;
+using NumThreads = tomp::clause::NumThreadsT<TypeTy, IdTy, ExprTy>;
+using OmpxAttribute = tomp::clause::OmpxAttributeT<TypeTy, IdTy, ExprTy>;
+using OmpxBare = tomp::clause::OmpxBareT<TypeTy, IdTy, ExprTy>;
+using OmpxDynCgroupMem = tomp::clause::OmpxDynCgroupMemT<TypeTy, IdTy, ExprTy>;
+using Ordered = tomp::clause::OrderedT<TypeTy, IdTy, ExprTy>;
+using Order = tomp::clause::OrderT<TypeTy, IdTy, ExprTy>;
+using Partial = tomp::clause::PartialT<TypeTy, IdTy, ExprTy>;
+using Priority = tomp::clause::PriorityT<TypeTy, IdTy, ExprTy>;
+using Private = tomp::clause::PrivateT<TypeTy, IdTy, ExprTy>;
+using ProcBind = tomp::clause::ProcBindT<TypeTy, IdTy, ExprTy>;
+using Read = tomp::clause::ReadT<TypeTy, IdTy, ExprTy>;
+using Reduction = tomp::clause::ReductionT<TypeTy, IdTy, ExprTy>;
+using Relaxed = tomp::clause::RelaxedT<TypeTy, IdTy, ExprTy>;
+using Release = tomp::clause::ReleaseT<TypeTy, IdTy, ExprTy>;
+using ReverseOffload = tomp::clause::ReverseOffloadT<TypeTy, IdTy, ExprTy>;
+using Safelen = tomp::clause::SafelenT<TypeTy, IdTy, ExprTy>;
+using Schedule = tomp::clause::ScheduleT<TypeTy, IdTy, ExprTy>;
+using SeqCst = tomp::clause::SeqCstT<TypeTy, IdTy, ExprTy>;
+using Severity = tomp::clause::SeverityT<TypeTy, IdTy, ExprTy>;
+using Shared = tomp::clause::SharedT<TypeTy, IdTy, ExprTy>;
+using Simdlen = tomp::clause::SimdlenT<TypeTy, IdTy, ExprTy>;
+using Simd = tomp::clause::SimdT<TypeTy, IdTy, ExprTy>;
+using Sizes = tomp::clause::SizesT<TypeTy, IdTy, ExprTy>;
+using TaskReduction = tomp::clause::TaskReductionT<TypeTy, IdTy, ExprTy>;
+using ThreadLimit = tomp::clause::ThreadLimitT<TypeTy, IdTy, ExprTy>;
+using Threads = tomp::clause::ThreadsT<TypeTy, IdTy, ExprTy>;
+using To = tomp::clause::ToT<TypeTy, IdTy, ExprTy>;
+using UnifiedAddress = tomp::clause::UnifiedAddressT<TypeTy, IdTy, ExprTy>;
+using UnifiedSharedMemory =
+ tomp::clause::UnifiedSharedMemoryT<TypeTy, IdTy, ExprTy>;
+using Uniform = tomp::clause::UniformT<TypeTy, IdTy, ExprTy>;
+using Unknown = tomp::clause::UnknownT<TypeTy, IdTy, ExprTy>;
+using Untied = tomp::clause::UntiedT<TypeTy, IdTy, ExprTy>;
+using Update = tomp::clause::UpdateT<TypeTy, IdTy, ExprTy>;
+using UseDeviceAddr = tomp::clause::UseDeviceAddrT<TypeTy, IdTy, ExprTy>;
+using UseDevicePtr = tomp::clause::UseDevicePtrT<TypeTy, IdTy, ExprTy>;
+using UsesAllocators = tomp::clause::UsesAllocatorsT<TypeTy, IdTy, ExprTy>;
+using Use = tomp::clause::UseT<TypeTy, IdTy, ExprTy>;
+using Weak = tomp::clause::WeakT<TypeTy, IdTy, ExprTy>;
+using When = tomp::clause::WhenT<TypeTy, IdTy, ExprTy>;
+using Write = tomp::clause::WriteT<TypeTy, IdTy, ExprTy>;
} // namespace clause
-struct Clause : public tomp::ClauseT<SymIdent, SymReference> {
+struct CancellationConstructType {
+ using EmptyTrait = std::true_type;
+};
+struct Depobj {
+ using EmptyTrait = std::true_type;
+};
+struct Flush {
+ using EmptyTrait = std::true_type;
+};
+struct MemoryOrder {
+ using EmptyTrait = std::true_type;
+};
+struct Threadprivate {
+ using EmptyTrait = std::true_type;
+};
+
+using ClauseBase = tomp::ClauseT<TypeTy, IdTy, ExprTy,
+ // Extras...
+ CancellationConstructType, Depobj, Flush,
+ MemoryOrder, Threadprivate>;
+
+struct Clause : public ClauseBase {
parser::CharBlock source;
};
@@ -205,8 +256,8 @@ Clause makeClause(llvm::omp::Clause id, Specific &&specific,
Clause makeClause(const Fortran::parser::OmpClause &cls,
semantics::SemanticsContext &semaCtx);
-List<Clause> makeList(const parser::OmpClauseList &clauses,
- semantics::SemanticsContext &semaCtx);
+List<Clause> makeClauses(const parser::OmpClauseList &clauses,
+ semantics::SemanticsContext &semaCtx);
} // namespace Fortran::lower::omp
#endif // FORTRAN_LOWER_OPENMP_CLAUSES_H
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
index e1aa7524e930..e114ab9f4548 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
@@ -99,7 +99,8 @@ void DataSharingProcessor::collectSymbolsForPrivatization() {
collectOmpObjectListSymbol(firstPrivateClause->v, privatizedSymbols);
} else if (const auto &lastPrivateClause =
std::get_if<omp::clause::Lastprivate>(&clause.u)) {
- collectOmpObjectListSymbol(lastPrivateClause->v, privatizedSymbols);
+ const ObjectList &objects = std::get<ObjectList>(lastPrivateClause->t);
+ collectOmpObjectListSymbol(objects, privatizedSymbols);
hasLastPrivateOp = true;
} else if (std::get_if<omp::clause::Collapse>(&clause.u)) {
hasCollapse = true;
@@ -286,12 +287,13 @@ void DataSharingProcessor::collectSymbols(
}
void DataSharingProcessor::collectDefaultSymbols() {
+ using DataSharingAttribute = omp::clause::Default::DataSharingAttribute;
for (const omp::Clause &clause : clauses) {
if (const auto *defaultClause =
std::get_if<omp::clause::Default>(&clause.u)) {
- if (defaultClause->v == omp::clause::Default::Type::Private)
+ if (defaultClause->v == DataSharingAttribute::Private)
collectSymbols(Fortran::semantics::Symbol::Flag::OmpPrivate);
- else if (defaultClause->v == omp::clause::Default::Type::Firstprivate)
+ else if (defaultClause->v == DataSharingAttribute::Firstprivate)
collectSymbols(Fortran::semantics::Symbol::Flag::OmpFirstPrivate);
}
}
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.h b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
index 226abe96705e..1cbc825fd5e1 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.h
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
@@ -89,7 +89,7 @@ public:
Fortran::lower::SymMap *symTable = nullptr)
: hasLastPrivateOp(false), converter(converter),
firOpBuilder(converter.getFirOpBuilder()),
- clauses(omp::makeList(opClauseList, semaCtx)), eval(eval),
+ clauses(omp::makeClauses(opClauseList, semaCtx)), eval(eval),
useDelayedPrivatization(useDelayedPrivatization), symTable(symTable) {}
// Privatisation is split into two steps.
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index d335129565b4..340921c86724 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -13,6 +13,7 @@
#include "flang/Lower/OpenMP.h"
#include "ClauseProcessor.h"
+#include "Clauses.h"
#include "DataSharingProcessor.h"
#include "DirectivesCommon.h"
#include "ReductionProcessor.h"
@@ -214,24 +215,6 @@ static void threadPrivatizeVars(Fortran::lower::AbstractConverter &converter,
firOpBuilder.restoreInsertionPoint(insPt);
}
-static mlir::Type getLoopVarType(Fortran::lower::AbstractConverter &converter,
- std::size_t loopVarTypeSize) {
- // OpenMP runtime requires 32-bit or 64-bit loop variables.
- loopVarTypeSize = loopVarTypeSize * 8;
- if (loopVarTypeSize < 32) {
- loopVarTypeSize = 32;
- } else if (loopVarTypeSize > 64) {
- loopVarTypeSize = 64;
- mlir::emitWarning(converter.getCurrentLocation(),
- "OpenMP loop iteration variable cannot have more than 64 "
- "bits size and will be narrowed into 64 bits.");
- }
- assert((loopVarTypeSize == 32 || loopVarTypeSize == 64) &&
- "OpenMP loop iteration variable size must be transformed into 32-bit "
- "or 64-bit");
- return converter.getFirOpBuilder().getIntegerType(loopVarTypeSize);
-}
-
static mlir::Operation *
createAndSetPrivatizedLoopVar(Fortran::lower::AbstractConverter &converter,
mlir::Location loc, mlir::Value indexVal,
@@ -254,6 +237,213 @@ createAndSetPrivatizedLoopVar(Fortran::lower::AbstractConverter &converter,
return storeOp;
}
+static mlir::Operation *
+findReductionChain(mlir::Value loadVal, mlir::Value *reductionVal = nullptr) {
+ for (mlir::OpOperand &loadOperand : loadVal.getUses()) {
+ if (mlir::Operation *reductionOp = loadOperand.getOwner()) {
+ if (auto convertOp = mlir::dyn_cast<fir::ConvertOp>(reductionOp)) {
+ for (mlir::OpOperand &convertOperand : convertOp.getRes().getUses()) {
+ if (mlir::Operation *reductionOp = convertOperand.getOwner())
+ return reductionOp;
+ }
+ }
+ for (mlir::OpOperand &reductionOperand : reductionOp->getUses()) {
+ if (auto store =
+ mlir::dyn_cast<fir::StoreOp>(reductionOperand.getOwner())) {
+ if (store.getMemref() == *reductionVal) {
+ store.erase();
+ return reductionOp;
+ }
+ }
+ if (auto assign =
+ mlir::dyn_cast<hlfir::AssignOp>(reductionOperand.getOwner())) {
+ if (assign.getLhs() == *reductionVal) {
+ assign.erase();
+ return reductionOp;
+ }
+ }
+ }
+ }
+ }
+ return nullptr;
+}
+
+// For a logical operator 'op', in a reduction X = X op Y, this function
+// returns the operation responsible for converting Y from fir.logical<4>
+// to i1.
+static fir::ConvertOp getConvertFromReductionOp(mlir::Operation *reductionOp,
+ mlir::Value loadVal) {
+ for (mlir::Value reductionOperand : reductionOp->getOperands()) {
+ if (auto convertOp =
+ mlir::dyn_cast<fir::ConvertOp>(reductionOperand.getDefiningOp())) {
+ if (convertOp.getOperand() == loadVal)
+ continue;
+ return convertOp;
+ }
+ }
+ return nullptr;
+}
+
+static void updateReduction(mlir::Operation *op,
+ fir::FirOpBuilder &firOpBuilder,
+ mlir::Value loadVal, mlir::Value reductionVal,
+ fir::ConvertOp *convertOp = nullptr) {
+ mlir::OpBuilder::InsertPoint insertPtDel = firOpBuilder.saveInsertionPoint();
+ firOpBuilder.setInsertionPoint(op);
+
+ mlir::Value reductionOp;
+ if (convertOp)
+ reductionOp = convertOp->getOperand();
+ else if (op->getOperand(0) == loadVal)
+ reductionOp = op->getOperand(1);
+ else
+ reductionOp = op->getOperand(0);
+
+ firOpBuilder.create<mlir::omp::ReductionOp>(op->getLoc(), reductionOp,
+ reductionVal);
+ firOpBuilder.restoreInsertionPoint(insertPtDel);
+}
+
+static void removeStoreOp(mlir::Operation *reductionOp, mlir::Value symVal) {
+ for (mlir::Operation *reductionOpUse : reductionOp->getUsers()) {
+ if (auto convertReduction =
+ mlir::dyn_cast<fir::ConvertOp>(reductionOpUse)) {
+ for (mlir::Operation *convertReductionUse :
+ convertReduction.getRes().getUsers()) {
+ if (auto storeOp = mlir::dyn_cast<fir::StoreOp>(convertReductionUse)) {
+ if (storeOp.getMemref() == symVal)
+ storeOp.erase();
+ }
+ if (auto assignOp =
+ mlir::dyn_cast<hlfir::AssignOp>(convertReductionUse)) {
+ if (assignOp.getLhs() == symVal)
+ assignOp.erase();
+ }
+ }
+ }
+ }
+}
+
+// Generate an OpenMP reduction operation.
+// TODO: Currently assumes it is either an integer addition/multiplication
+// reduction, or a logical and reduction. Generalize this for various reduction
+// operation types.
+// TODO: Generate the reduction operation during lowering instead of creating
+// and removing operations since this is not a robust approach. Also, removing
+// ops in the builder (instead of a rewriter) is probably not the best approach.
+static void
+genOpenMPReduction(Fortran::lower::AbstractConverter &converter,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ const Fortran::parser::OmpClauseList &clauseList) {
+ fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
+
+ List<Clause> clauses{makeClauses(clauseList, semaCtx)};
+
+ for (const Clause &clause : clauses) {
+ if (const auto &reductionClause =
+ std::get_if<clause::Reduction>(&clause.u)) {
+ const auto &redOperatorList{
+ std::get<clause::Reduction::ReductionIdentifiers>(
+ reductionClause->t)};
+ assert(redOperatorList.size() == 1 && "Expecting single operator");
+ const auto &redOperator = redOperatorList.front();
+ const auto &objects{std::get<ObjectList>(reductionClause->t)};
+ if (const auto *reductionOp =
+ std::get_if<clause::DefinedOperator>(&redOperator.u)) {
+ const auto &intrinsicOp{
+ std::get<clause::DefinedOperator::IntrinsicOperator>(
+ reductionOp->u)};
+
+ switch (intrinsicOp) {
+ case clause::DefinedOperator::IntrinsicOperator::Add:
+ case clause::DefinedOperator::IntrinsicOperator::Multiply:
+ case clause::DefinedOperator::IntrinsicOperator::AND:
+ case clause::DefinedOperator::IntrinsicOperator::EQV:
+ case clause::DefinedOperator::IntrinsicOperator::OR:
+ case clause::DefinedOperator::IntrinsicOperator::NEQV:
+ break;
+ default:
+ continue;
+ }
+ for (const Object &object : objects) {
+ if (const Fortran::semantics::Symbol *symbol = object.id()) {
+ mlir::Value reductionVal = converter.getSymbolAddress(*symbol);
+ if (auto declOp = reductionVal.getDefiningOp<hlfir::DeclareOp>())
+ reductionVal = declOp.getBase();
+ mlir::Type reductionType =
+ reductionVal.getType().cast<fir::ReferenceType>().getEleTy();
+ if (!reductionType.isa<fir::LogicalType>()) {
+ if (!reductionType.isIntOrIndexOrFloat())
+ continue;
+ }
+ for (mlir::OpOperand &reductionValUse : reductionVal.getUses()) {
+ if (auto loadOp =
+ mlir::dyn_cast<fir::LoadOp>(reductionValUse.getOwner())) {
+ mlir::Value loadVal = loadOp.getRes();
+ if (reductionType.isa<fir::LogicalType>()) {
+ mlir::Operation *reductionOp = findReductionChain(loadVal);
+ fir::ConvertOp convertOp =
+ getConvertFromReductionOp(reductionOp, loadVal);
+ updateReduction(reductionOp, firOpBuilder, loadVal,
+ reductionVal, &convertOp);
+ removeStoreOp(reductionOp, reductionVal);
+ } else if (mlir::Operation *reductionOp =
+ findReductionChain(loadVal, &reductionVal)) {
+ updateReduction(reductionOp, firOpBuilder, loadVal,
+ reductionVal);
+ }
+ }
+ }
+ }
+ }
+ } else if (const auto *reductionIntrinsic =
+ std::get_if<clause::ProcedureDesignator>(&redOperator.u)) {
+ if (!ReductionProcessor::supportedIntrinsicProcReduction(
+ *reductionIntrinsic))
+ continue;
+ ReductionProcessor::ReductionIdentifier redId =
+ ReductionProcessor::getReductionType(*reductionIntrinsic);
+ for (const Object &object : objects) {
+ if (const Fortran::semantics::Symbol *symbol = object.id()) {
+ mlir::Value reductionVal = converter.getSymbolAddress(*symbol);
+ if (auto declOp = reductionVal.getDefiningOp<hlfir::DeclareOp>())
+ reductionVal = declOp.getBase();
+ for (const mlir::OpOperand &reductionValUse :
+ reductionVal.getUses()) {
+ if (auto loadOp =
+ mlir::dyn_cast<fir::LoadOp>(reductionValUse.getOwner())) {
+ mlir::Value loadVal = loadOp.getRes();
+ // Max is lowered as a compare -> select.
+ // Match the pattern here.
+ mlir::Operation *reductionOp =
+ findReductionChain(loadVal, &reductionVal);
+ if (reductionOp == nullptr)
+ continue;
+
+ if (redId == ReductionProcessor::ReductionIdentifier::MAX ||
+ redId == ReductionProcessor::ReductionIdentifier::MIN) {
+ assert(mlir::isa<mlir::arith::SelectOp>(reductionOp) &&
+ "Selection Op not found in reduction intrinsic");
+ mlir::Operation *compareOp =
+ getCompareFromReductionOp(reductionOp, loadVal);
+ updateReduction(compareOp, firOpBuilder, loadVal,
+ reductionVal);
+ }
+ if (redId == ReductionProcessor::ReductionIdentifier::IOR ||
+ redId == ReductionProcessor::ReductionIdentifier::IEOR ||
+ redId == ReductionProcessor::ReductionIdentifier::IAND) {
+ updateReduction(reductionOp, firOpBuilder, loadVal,
+ reductionVal);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
struct OpWithBodyGenInfo {
/// A type for a code-gen callback function. This takes as argument the op for
/// which the code is being generated and returns the arguments of the op's
@@ -568,23 +758,19 @@ genParallelOp(Fortran::lower::AbstractConverter &converter,
mlir::omp::ClauseProcBindKindAttr procBindKindAttr;
llvm::SmallVector<mlir::Value> allocateOperands, allocatorOperands,
reductionVars;
+ llvm::SmallVector<mlir::Type> reductionTypes;
llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
llvm::SmallVector<const Fortran::semantics::Symbol *> reductionSymbols;
ClauseProcessor cp(converter, semaCtx, clauseList);
- cp.processIf(clause::If::DirectiveNameModifier::Parallel, ifClauseOperand);
+ cp.processIf(llvm::omp::Directive::OMPD_parallel, ifClauseOperand);
cp.processNumThreads(stmtCtx, numThreadsClauseOperand);
cp.processProcBind(procBindKindAttr);
cp.processDefault();
cp.processAllocate(allocatorOperands, allocateOperands);
if (!outerCombined)
- cp.processReduction(currentLocation, reductionVars, reductionDeclSymbols,
- &reductionSymbols);
-
- llvm::SmallVector<mlir::Type> reductionTypes;
- reductionTypes.reserve(reductionVars.size());
- llvm::transform(reductionVars, std::back_inserter(reductionTypes),
- [](mlir::Value v) { return v.getType(); });
+ cp.processReduction(currentLocation, reductionVars, reductionTypes,
+ reductionDeclSymbols, &reductionSymbols);
auto reductionCallback = [&](mlir::Operation *op) {
llvm::SmallVector<mlir::Location> locs(reductionVars.size(),
@@ -748,7 +934,7 @@ genTaskOp(Fortran::lower::AbstractConverter &converter,
dependOperands;
ClauseProcessor cp(converter, semaCtx, clauseList);
- cp.processIf(clause::If::DirectiveNameModifier::Task, ifClauseOperand);
+ cp.processIf(llvm::omp::Directive::OMPD_task, ifClauseOperand);
cp.processAllocate(allocatorOperands, allocateOperands);
cp.processDefault();
cp.processFinal(stmtCtx, finalClauseOperand);
@@ -756,9 +942,7 @@ genTaskOp(Fortran::lower::AbstractConverter &converter,
cp.processMergeable(mergeableAttr);
cp.processPriority(stmtCtx, priorityClauseOperand);
cp.processDepend(dependTypeOperands, dependOperands);
- cp.processTODO<Fortran::parser::OmpClause::InReduction,
- Fortran::parser::OmpClause::Detach,
- Fortran::parser::OmpClause::Affinity>(
+ cp.processTODO<clause::InReduction, clause::Detach, clause::Affinity>(
currentLocation, llvm::omp::Directive::OMPD_task);
return genOpWithBody<mlir::omp::TaskOp>(
@@ -784,8 +968,8 @@ genTaskgroupOp(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<mlir::Value> allocateOperands, allocatorOperands;
ClauseProcessor cp(converter, semaCtx, clauseList);
cp.processAllocate(allocatorOperands, allocateOperands);
- cp.processTODO<Fortran::parser::OmpClause::TaskReduction>(
- currentLocation, llvm::omp::Directive::OMPD_taskgroup);
+ cp.processTODO<clause::TaskReduction>(currentLocation,
+ llvm::omp::Directive::OMPD_taskgroup);
return genOpWithBody<mlir::omp::TaskgroupOp>(
OpWithBodyGenInfo(converter, semaCtx, currentLocation, eval)
.setGenNested(genNested)
@@ -862,7 +1046,7 @@ genTargetDataOp(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<const Fortran::semantics::Symbol *> useDeviceSymbols;
ClauseProcessor cp(converter, semaCtx, clauseList);
- cp.processIf(clause::If::DirectiveNameModifier::TargetData, ifClauseOperand);
+ cp.processIf(llvm::omp::Directive::OMPD_target_data, ifClauseOperand);
cp.processDevice(stmtCtx, deviceOperand);
cp.processUseDevicePtr(devicePtrOperands, useDeviceTypes, useDeviceLocs,
useDeviceSymbols);
@@ -907,24 +1091,20 @@ static OpTy genTargetEnterExitDataUpdateOp(
llvm::SmallVector<mlir::Value> mapOperands, dependOperands;
llvm::SmallVector<mlir::Attribute> dependTypeOperands;
- clause::If::DirectiveNameModifier directiveName;
// GCC 9.3.0 emits a (probably) bogus warning about an unused variable.
[[maybe_unused]] llvm::omp::Directive directive;
if constexpr (std::is_same_v<OpTy, mlir::omp::TargetEnterDataOp>) {
- directiveName = clause::If::DirectiveNameModifier::TargetEnterData;
directive = llvm::omp::Directive::OMPD_target_enter_data;
} else if constexpr (std::is_same_v<OpTy, mlir::omp::TargetExitDataOp>) {
- directiveName = clause::If::DirectiveNameModifier::TargetExitData;
directive = llvm::omp::Directive::OMPD_target_exit_data;
} else if constexpr (std::is_same_v<OpTy, mlir::omp::TargetUpdateOp>) {
- directiveName = clause::If::DirectiveNameModifier::TargetUpdate;
directive = llvm::omp::Directive::OMPD_target_update;
} else {
return nullptr;
}
ClauseProcessor cp(converter, semaCtx, clauseList);
- cp.processIf(directiveName, ifClauseOperand);
+ cp.processIf(directive, ifClauseOperand);
cp.processDevice(stmtCtx, deviceOperand);
cp.processDepend(dependTypeOperands, dependOperands);
cp.processNowait(nowaitAttr);
@@ -1116,7 +1296,7 @@ genTargetOp(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<const Fortran::semantics::Symbol *> mapSymbols;
ClauseProcessor cp(converter, semaCtx, clauseList);
- cp.processIf(clause::If::DirectiveNameModifier::Target, ifClauseOperand);
+ cp.processIf(llvm::omp::Directive::OMPD_target, ifClauseOperand);
cp.processDevice(stmtCtx, deviceOperand);
cp.processThreadLimit(stmtCtx, threadLimitOperand);
cp.processDepend(dependTypeOperands, dependOperands);
@@ -1124,16 +1304,11 @@ genTargetOp(Fortran::lower::AbstractConverter &converter,
cp.processMap(currentLocation, directive, stmtCtx, mapOperands, &mapSymTypes,
&mapSymLocs, &mapSymbols);
- cp.processTODO<Fortran::parser::OmpClause::Private,
- Fortran::parser::OmpClause::Firstprivate,
- Fortran::parser::OmpClause::IsDevicePtr,
- Fortran::parser::OmpClause::HasDeviceAddr,
- Fortran::parser::OmpClause::Reduction,
- Fortran::parser::OmpClause::InReduction,
- Fortran::parser::OmpClause::Allocate,
- Fortran::parser::OmpClause::UsesAllocators,
- Fortran::parser::OmpClause::Defaultmap>(
+ cp.processTODO<clause::Private, clause::Firstprivate, clause::IsDevicePtr,
+ clause::HasDeviceAddr, clause::Reduction, clause::InReduction,
+ clause::Allocate, clause::UsesAllocators, clause::Defaultmap>(
currentLocation, llvm::omp::Directive::OMPD_target);
+
// 5.8.1 Implicit Data-Mapping Attribute Rules
// The following code follows the implicit data-mapping rules to map all the
// symbols used inside the region that have not been explicitly mapped using
@@ -1247,13 +1422,13 @@ genTeamsOp(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
ClauseProcessor cp(converter, semaCtx, clauseList);
- cp.processIf(clause::If::DirectiveNameModifier::Teams, ifClauseOperand);
+ cp.processIf(llvm::omp::Directive::OMPD_teams, ifClauseOperand);
cp.processAllocate(allocatorOperands, allocateOperands);
cp.processDefault();
cp.processNumTeams(stmtCtx, numTeamsClauseOperand);
cp.processThreadLimit(stmtCtx, threadLimitClauseOperand);
- cp.processTODO<Fortran::parser::OmpClause::Reduction>(
- currentLocation, llvm::omp::Directive::OMPD_teams);
+ cp.processTODO<clause::Reduction>(currentLocation,
+ llvm::omp::Directive::OMPD_teams);
return genOpWithBody<mlir::omp::TeamsOp>(
OpWithBodyGenInfo(converter, semaCtx, currentLocation, eval)
@@ -1286,7 +1461,7 @@ static mlir::omp::DeclareTargetDeviceType getDeclareTargetInfo(
if (const auto *objectList{
Fortran::parser::Unwrap<Fortran::parser::OmpObjectList>(spec.u)}) {
- ObjectList objects{makeList(*objectList, semaCtx)};
+ ObjectList objects{makeObjects(*objectList, semaCtx)};
// Case: declare target(func, var1, var2)
gatherFuncAndVarSyms(objects, mlir::omp::DeclareTargetCaptureClause::to,
symbolAndClause);
@@ -1305,9 +1480,8 @@ static mlir::omp::DeclareTargetDeviceType getDeclareTargetInfo(
cp.processEnter(symbolAndClause);
cp.processLink(symbolAndClause);
cp.processDeviceType(deviceType);
- cp.processTODO<Fortran::parser::OmpClause::Indirect>(
- converter.getCurrentLocation(),
- llvm::omp::Directive::OMPD_declare_target);
+ cp.processTODO<clause::Indirect>(converter.getCurrentLocation(),
+ llvm::omp::Directive::OMPD_declare_target);
}
return deviceType;
@@ -1389,8 +1563,7 @@ genOmpSimpleStandalone(Fortran::lower::AbstractConverter &converter,
break;
case llvm::omp::Directive::OMPD_taskwait:
ClauseProcessor(converter, semaCtx, opClauseList)
- .processTODO<Fortran::parser::OmpClause::Depend,
- Fortran::parser::OmpClause::Nowait>(
+ .processTODO<clause::Depend, clause::Nowait>(
currentLocation, llvm::omp::Directive::OMPD_taskwait);
firOpBuilder.create<mlir::omp::TaskwaitOp>(currentLocation);
break;
@@ -1437,53 +1610,6 @@ genOmpFlush(Fortran::lower::AbstractConverter &converter,
converter.getCurrentLocation(), operandRange);
}
-static void
-genOMP(Fortran::lower::AbstractConverter &converter,
- Fortran::lower::SymMap &symTable,
- Fortran::semantics::SemanticsContext &semaCtx,
- Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenMPStandaloneConstruct &standaloneConstruct) {
- std::visit(
- Fortran::common::visitors{
- [&](const Fortran::parser::OpenMPSimpleStandaloneConstruct
- &simpleStandaloneConstruct) {
- genOmpSimpleStandalone(converter, semaCtx, eval,
- /*genNested=*/true,
- simpleStandaloneConstruct);
- },
- [&](const Fortran::parser::OpenMPFlushConstruct &flushConstruct) {
- genOmpFlush(converter, semaCtx, eval, flushConstruct);
- },
- [&](const Fortran::parser::OpenMPCancelConstruct &cancelConstruct) {
- TODO(converter.getCurrentLocation(), "OpenMPCancelConstruct");
- },
- [&](const Fortran::parser::OpenMPCancellationPointConstruct
- &cancellationPointConstruct) {
- TODO(converter.getCurrentLocation(), "OpenMPCancelConstruct");
- },
- },
- standaloneConstruct.u);
-}
-
-static void convertLoopBounds(Fortran::lower::AbstractConverter &converter,
- mlir::Location loc,
- llvm::SmallVectorImpl<mlir::Value> &lowerBound,
- llvm::SmallVectorImpl<mlir::Value> &upperBound,
- llvm::SmallVectorImpl<mlir::Value> &step,
- std::size_t loopVarTypeSize) {
- fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
- // The types of lower bound, upper bound, and step are converted into the
- // type of the loop variable if necessary.
- mlir::Type loopVarType = getLoopVarType(converter, loopVarTypeSize);
- for (unsigned it = 0; it < (unsigned)lowerBound.size(); it++) {
- lowerBound[it] =
- firOpBuilder.createConvert(loc, loopVarType, lowerBound[it]);
- upperBound[it] =
- firOpBuilder.createConvert(loc, loopVarType, upperBound[it]);
- step[it] = firOpBuilder.createConvert(loc, loopVarType, step[it]);
- }
-}
-
static llvm::SmallVector<const Fortran::semantics::Symbol *>
genLoopVars(mlir::Operation *op, Fortran::lower::AbstractConverter &converter,
mlir::Location &loc,
@@ -1517,7 +1643,7 @@ genLoopAndReductionVars(
mlir::Location &loc,
llvm::ArrayRef<const Fortran::semantics::Symbol *> loopArgs,
llvm::ArrayRef<const Fortran::semantics::Symbol *> reductionArgs,
- llvm::SmallVectorImpl<mlir::Type> &reductionTypes) {
+ llvm::ArrayRef<mlir::Type> reductionTypes) {
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
llvm::SmallVector<mlir::Type> blockArgTypes;
@@ -1579,27 +1705,20 @@ createSimdLoop(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<mlir::Value> lowerBound, upperBound, step, reductionVars;
llvm::SmallVector<mlir::Value> alignedVars, nontemporalVars;
llvm::SmallVector<const Fortran::semantics::Symbol *> iv;
+ llvm::SmallVector<mlir::Type> reductionTypes;
llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
mlir::omp::ClauseOrderKindAttr orderClauseOperand;
mlir::IntegerAttr simdlenClauseOperand, safelenClauseOperand;
- std::size_t loopVarTypeSize;
ClauseProcessor cp(converter, semaCtx, loopOpClauseList);
- cp.processCollapse(loc, eval, lowerBound, upperBound, step, iv,
- loopVarTypeSize);
+ cp.processCollapse(loc, eval, lowerBound, upperBound, step, iv);
cp.processScheduleChunk(stmtCtx, scheduleChunkClauseOperand);
- cp.processReduction(loc, reductionVars, reductionDeclSymbols);
- cp.processIf(clause::If::DirectiveNameModifier::Simd, ifClauseOperand);
+ cp.processReduction(loc, reductionVars, reductionTypes, reductionDeclSymbols);
+ cp.processIf(llvm::omp::Directive::OMPD_simd, ifClauseOperand);
cp.processSimdlen(simdlenClauseOperand);
cp.processSafelen(safelenClauseOperand);
- cp.processTODO<Fortran::parser::OmpClause::Aligned,
- Fortran::parser::OmpClause::Allocate,
- Fortran::parser::OmpClause::Linear,
- Fortran::parser::OmpClause::Nontemporal,
- Fortran::parser::OmpClause::Order>(loc, ompDirective);
-
- convertLoopBounds(converter, loc, lowerBound, upperBound, step,
- loopVarTypeSize);
+ cp.processTODO<clause::Aligned, clause::Allocate, clause::Linear,
+ clause::Nontemporal, clause::Order>(loc, ompDirective);
mlir::TypeRange resultType;
auto simdLoopOp = firOpBuilder.create<mlir::omp::SimdLoopOp>(
@@ -1638,6 +1757,7 @@ static void createWsloop(Fortran::lower::AbstractConverter &converter,
llvm::SmallVector<mlir::Value> lowerBound, upperBound, step, reductionVars;
llvm::SmallVector<mlir::Value> linearVars, linearStepVars;
llvm::SmallVector<const Fortran::semantics::Symbol *> iv;
+ llvm::SmallVector<mlir::Type> reductionTypes;
llvm::SmallVector<mlir::Attribute> reductionDeclSymbols;
llvm::SmallVector<const Fortran::semantics::Symbol *> reductionSymbols;
mlir::omp::ClauseOrderKindAttr orderClauseOperand;
@@ -1645,19 +1765,13 @@ static void createWsloop(Fortran::lower::AbstractConverter &converter,
mlir::UnitAttr nowaitClauseOperand, byrefOperand, scheduleSimdClauseOperand;
mlir::IntegerAttr orderedClauseOperand;
mlir::omp::ScheduleModifierAttr scheduleModClauseOperand;
- std::size_t loopVarTypeSize;
ClauseProcessor cp(converter, semaCtx, beginClauseList);
- cp.processCollapse(loc, eval, lowerBound, upperBound, step, iv,
- loopVarTypeSize);
+ cp.processCollapse(loc, eval, lowerBound, upperBound, step, iv);
cp.processScheduleChunk(stmtCtx, scheduleChunkClauseOperand);
- cp.processReduction(loc, reductionVars, reductionDeclSymbols,
+ cp.processReduction(loc, reductionVars, reductionTypes, reductionDeclSymbols,
&reductionSymbols);
- cp.processTODO<Fortran::parser::OmpClause::Linear,
- Fortran::parser::OmpClause::Order>(loc, ompDirective);
-
- convertLoopBounds(converter, loc, lowerBound, upperBound, step,
- loopVarTypeSize);
+ cp.processTODO<clause::Linear, clause::Order>(loc, ompDirective);
if (ReductionProcessor::doReductionByRef(reductionVars))
byrefOperand = firOpBuilder.getUnitAttr();
@@ -1699,11 +1813,6 @@ static void createWsloop(Fortran::lower::AbstractConverter &converter,
auto *nestedEval = getCollapsedLoopEval(
eval, Fortran::lower::getCollapseValue(beginClauseList));
- llvm::SmallVector<mlir::Type> reductionTypes;
- reductionTypes.reserve(reductionVars.size());
- llvm::transform(reductionVars, std::back_inserter(reductionTypes),
- [](mlir::Value v) { return v.getType(); });
-
auto ivCallback = [&](mlir::Operation *op) {
return genLoopAndReductionVars(op, converter, loc, iv, reductionSymbols,
reductionTypes);
@@ -1724,11 +1833,9 @@ static void createSimdWsloop(
const Fortran::parser::OmpClauseList &beginClauseList,
const Fortran::parser::OmpClauseList *endClauseList, mlir::Location loc) {
ClauseProcessor cp(converter, semaCtx, beginClauseList);
- cp.processTODO<
- Fortran::parser::OmpClause::Aligned, Fortran::parser::OmpClause::Allocate,
- Fortran::parser::OmpClause::Linear, Fortran::parser::OmpClause::Safelen,
- Fortran::parser::OmpClause::Simdlen, Fortran::parser::OmpClause::Order>(
- loc, ompDirective);
+ cp.processTODO<clause::Aligned, clause::Allocate, clause::Linear,
+ clause::Safelen, clause::Simdlen, clause::Order>(loc,
+ ompDirective);
// TODO: Add support for vectorization - add vectorization hints inside loop
// body.
// OpenMP standard does not specify the length of vector instructions.
@@ -1741,85 +1848,87 @@ static void createSimdWsloop(
endClauseList, loc);
}
+static void
+markDeclareTarget(mlir::Operation *op,
+ Fortran::lower::AbstractConverter &converter,
+ mlir::omp::DeclareTargetCaptureClause captureClause,
+ mlir::omp::DeclareTargetDeviceType deviceType) {
+ // TODO: Add support for program local variables with declare target applied
+ auto declareTargetOp = llvm::dyn_cast<mlir::omp::DeclareTargetInterface>(op);
+ if (!declareTargetOp)
+ fir::emitFatalError(
+ converter.getCurrentLocation(),
+ "Attempt to apply declare target on unsupported operation");
+
+ // The function or global already has a declare target applied to it, very
+ // likely through implicit capture (usage in another declare target
+ // function/subroutine). It should be marked as any if it has been assigned
+ // both host and nohost, else we skip, as there is no change
+ if (declareTargetOp.isDeclareTarget()) {
+ if (declareTargetOp.getDeclareTargetDeviceType() != deviceType)
+ declareTargetOp.setDeclareTarget(mlir::omp::DeclareTargetDeviceType::any,
+ captureClause);
+ return;
+ }
+
+ declareTargetOp.setDeclareTarget(deviceType, captureClause);
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMPDeclarativeConstruct visitors
+//===----------------------------------------------------------------------===//
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPDeclarativeAllocate &declarativeAllocate) {
+ TODO(converter.getCurrentLocation(), "OpenMPDeclarativeAllocate");
+}
+
static void genOMP(Fortran::lower::AbstractConverter &converter,
Fortran::lower::SymMap &symTable,
Fortran::semantics::SemanticsContext &semaCtx,
Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenMPLoopConstruct &loopConstruct) {
- const auto &beginLoopDirective =
- std::get<Fortran::parser::OmpBeginLoopDirective>(loopConstruct.t);
- const auto &loopOpClauseList =
- std::get<Fortran::parser::OmpClauseList>(beginLoopDirective.t);
- mlir::Location currentLocation =
- converter.genLocation(beginLoopDirective.source);
- const auto ompDirective =
- std::get<Fortran::parser::OmpLoopDirective>(beginLoopDirective.t).v;
+ const Fortran::parser::OpenMPDeclareReductionConstruct
+ &declareReductionConstruct) {
+ TODO(converter.getCurrentLocation(), "OpenMPDeclareReductionConstruct");
+}
- const auto *endClauseList = [&]() {
- using RetTy = const Fortran::parser::OmpClauseList *;
- if (auto &endLoopDirective =
- std::get<std::optional<Fortran::parser::OmpEndLoopDirective>>(
- loopConstruct.t)) {
- return RetTy(
- &std::get<Fortran::parser::OmpClauseList>((*endLoopDirective).t));
- }
- return RetTy();
- }();
+static void genOMP(
+ Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPDeclareSimdConstruct &declareSimdConstruct) {
+ TODO(converter.getCurrentLocation(), "OpenMPDeclareSimdConstruct");
+}
- bool validDirective = false;
- if (llvm::omp::topTaskloopSet.test(ompDirective)) {
- validDirective = true;
- TODO(currentLocation, "Taskloop construct");
- } else {
- // Create omp.{target, teams, distribute, parallel} nested operations
- if ((llvm::omp::allTargetSet & llvm::omp::loopConstructSet)
- .test(ompDirective)) {
- validDirective = true;
- genTargetOp(converter, semaCtx, eval, /*genNested=*/false,
- currentLocation, loopOpClauseList, ompDirective,
- /*outerCombined=*/true);
- }
- if ((llvm::omp::allTeamsSet & llvm::omp::loopConstructSet)
- .test(ompDirective)) {
- validDirective = true;
- genTeamsOp(converter, semaCtx, eval, /*genNested=*/false, currentLocation,
- loopOpClauseList,
- /*outerCombined=*/true);
- }
- if (llvm::omp::allDistributeSet.test(ompDirective)) {
- validDirective = true;
- TODO(currentLocation, "Distribute construct");
- }
- if ((llvm::omp::allParallelSet & llvm::omp::loopConstructSet)
- .test(ompDirective)) {
- validDirective = true;
- genParallelOp(converter, symTable, semaCtx, eval, /*genNested=*/false,
- currentLocation, loopOpClauseList,
- /*outerCombined=*/true);
- }
- }
- if ((llvm::omp::allDoSet | llvm::omp::allSimdSet).test(ompDirective))
- validDirective = true;
+static void genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPDeclareTargetConstruct
+ &declareTargetConstruct) {
+ llvm::SmallVector<DeclareTargetCapturePair> symbolAndClause;
+ mlir::ModuleOp mod = converter.getFirOpBuilder().getModule();
+ mlir::omp::DeclareTargetDeviceType deviceType = getDeclareTargetInfo(
+ converter, semaCtx, eval, declareTargetConstruct, symbolAndClause);
- if (!validDirective) {
- TODO(currentLocation, "Unhandled loop directive (" +
- llvm::omp::getOpenMPDirectiveName(ompDirective) +
- ")");
- }
+ for (const DeclareTargetCapturePair &symClause : symbolAndClause) {
+ mlir::Operation *op = mod.lookupSymbol(converter.mangleName(
+ std::get<const Fortran::semantics::Symbol &>(symClause)));
- if (llvm::omp::allDoSimdSet.test(ompDirective)) {
- // 2.9.3.2 Workshare SIMD construct
- createSimdWsloop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
- endClauseList, currentLocation);
+ // Some symbols are deferred until later in the module, these are handled
+ // upon finalization of the module for OpenMP inside of Bridge, so we simply
+ // skip for now.
+ if (!op)
+ continue;
- } else if (llvm::omp::allSimdSet.test(ompDirective)) {
- // 2.9.3.1 SIMD construct
- createSimdLoop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
- currentLocation);
- genOpenMPReduction(converter, semaCtx, loopOpClauseList);
- } else {
- createWsloop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
- endClauseList, currentLocation);
+ markDeclareTarget(
+ op, converter,
+ std::get<mlir::omp::DeclareTargetCaptureClause>(symClause), deviceType);
}
}
@@ -1828,6 +1937,97 @@ genOMP(Fortran::lower::AbstractConverter &converter,
Fortran::lower::SymMap &symTable,
Fortran::semantics::SemanticsContext &semaCtx,
Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPRequiresConstruct &requiresConstruct) {
+ // Requires directives are gathered and processed in semantics and
+ // then combined in the lowering bridge before triggering codegen
+ // just once. Hence, there is no need to lower each individual
+ // occurrence here.
+}
+
+static void genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPThreadprivate &threadprivate) {
+ // The directive is lowered when instantiating the variable to
+ // support the case of threadprivate variable declared in module.
+}
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPDeclarativeConstruct &ompDeclConstruct) {
+ std::visit(
+ [&](auto &&s) { return genOMP(converter, symTable, semaCtx, eval, s); },
+ ompDeclConstruct.u);
+}
+
+//===----------------------------------------------------------------------===//
+// OpenMPConstruct visitors
+//===----------------------------------------------------------------------===//
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPAllocatorsConstruct &allocsConstruct) {
+ TODO(converter.getCurrentLocation(), "OpenMPAllocatorsConstruct");
+}
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPAtomicConstruct &atomicConstruct) {
+ std::visit(
+ Fortran::common::visitors{
+ [&](const Fortran::parser::OmpAtomicRead &atomicRead) {
+ mlir::Location loc = converter.genLocation(atomicRead.source);
+ Fortran::lower::genOmpAccAtomicRead<
+ Fortran::parser::OmpAtomicRead,
+ Fortran::parser::OmpAtomicClauseList>(converter, atomicRead,
+ loc);
+ },
+ [&](const Fortran::parser::OmpAtomicWrite &atomicWrite) {
+ mlir::Location loc = converter.genLocation(atomicWrite.source);
+ Fortran::lower::genOmpAccAtomicWrite<
+ Fortran::parser::OmpAtomicWrite,
+ Fortran::parser::OmpAtomicClauseList>(converter, atomicWrite,
+ loc);
+ },
+ [&](const Fortran::parser::OmpAtomic &atomicConstruct) {
+ mlir::Location loc = converter.genLocation(atomicConstruct.source);
+ Fortran::lower::genOmpAtomic<Fortran::parser::OmpAtomic,
+ Fortran::parser::OmpAtomicClauseList>(
+ converter, atomicConstruct, loc);
+ },
+ [&](const Fortran::parser::OmpAtomicUpdate &atomicUpdate) {
+ mlir::Location loc = converter.genLocation(atomicUpdate.source);
+ Fortran::lower::genOmpAccAtomicUpdate<
+ Fortran::parser::OmpAtomicUpdate,
+ Fortran::parser::OmpAtomicClauseList>(converter, atomicUpdate,
+ loc);
+ },
+ [&](const Fortran::parser::OmpAtomicCapture &atomicCapture) {
+ mlir::Location loc = converter.genLocation(atomicCapture.source);
+ Fortran::lower::genOmpAccAtomicCapture<
+ Fortran::parser::OmpAtomicCapture,
+ Fortran::parser::OmpAtomicClauseList>(converter, atomicCapture,
+ loc);
+ },
+ },
+ atomicConstruct.u);
+}
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
const Fortran::parser::OpenMPBlockConstruct &blockConstruct) {
const auto &beginBlockDirective =
std::get<Fortran::parser::OmpBeginBlockDirective>(blockConstruct.t);
@@ -2008,6 +2208,107 @@ genOMP(Fortran::lower::AbstractConverter &converter,
Fortran::lower::SymMap &symTable,
Fortran::semantics::SemanticsContext &semaCtx,
Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPExecutableAllocate &execAllocConstruct) {
+ TODO(converter.getCurrentLocation(), "OpenMPExecutableAllocate");
+}
+
+static void genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPLoopConstruct &loopConstruct) {
+ const auto &beginLoopDirective =
+ std::get<Fortran::parser::OmpBeginLoopDirective>(loopConstruct.t);
+ const auto &loopOpClauseList =
+ std::get<Fortran::parser::OmpClauseList>(beginLoopDirective.t);
+ mlir::Location currentLocation =
+ converter.genLocation(beginLoopDirective.source);
+ const auto ompDirective =
+ std::get<Fortran::parser::OmpLoopDirective>(beginLoopDirective.t).v;
+
+ const auto *endClauseList = [&]() {
+ using RetTy = const Fortran::parser::OmpClauseList *;
+ if (auto &endLoopDirective =
+ std::get<std::optional<Fortran::parser::OmpEndLoopDirective>>(
+ loopConstruct.t)) {
+ return RetTy(
+ &std::get<Fortran::parser::OmpClauseList>((*endLoopDirective).t));
+ }
+ return RetTy();
+ }();
+
+ bool validDirective = false;
+ if (llvm::omp::topTaskloopSet.test(ompDirective)) {
+ validDirective = true;
+ TODO(currentLocation, "Taskloop construct");
+ } else {
+ // Create omp.{target, teams, distribute, parallel} nested operations
+ if ((llvm::omp::allTargetSet & llvm::omp::loopConstructSet)
+ .test(ompDirective)) {
+ validDirective = true;
+ genTargetOp(converter, semaCtx, eval, /*genNested=*/false,
+ currentLocation, loopOpClauseList, ompDirective,
+ /*outerCombined=*/true);
+ }
+ if ((llvm::omp::allTeamsSet & llvm::omp::loopConstructSet)
+ .test(ompDirective)) {
+ validDirective = true;
+ genTeamsOp(converter, semaCtx, eval, /*genNested=*/false, currentLocation,
+ loopOpClauseList,
+ /*outerCombined=*/true);
+ }
+ if (llvm::omp::allDistributeSet.test(ompDirective)) {
+ validDirective = true;
+ TODO(currentLocation, "Distribute construct");
+ }
+ if ((llvm::omp::allParallelSet & llvm::omp::loopConstructSet)
+ .test(ompDirective)) {
+ validDirective = true;
+ genParallelOp(converter, symTable, semaCtx, eval, /*genNested=*/false,
+ currentLocation, loopOpClauseList,
+ /*outerCombined=*/true);
+ }
+ }
+ if ((llvm::omp::allDoSet | llvm::omp::allSimdSet).test(ompDirective))
+ validDirective = true;
+
+ if (!validDirective) {
+ TODO(currentLocation, "Unhandled loop directive (" +
+ llvm::omp::getOpenMPDirectiveName(ompDirective) +
+ ")");
+ }
+
+ if (llvm::omp::allDoSimdSet.test(ompDirective)) {
+ // 2.9.3.2 Workshare SIMD construct
+ createSimdWsloop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
+ endClauseList, currentLocation);
+
+ } else if (llvm::omp::allSimdSet.test(ompDirective)) {
+ // 2.9.3.1 SIMD construct
+ createSimdLoop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
+ currentLocation);
+ genOpenMPReduction(converter, semaCtx, loopOpClauseList);
+ } else {
+ createWsloop(converter, semaCtx, eval, ompDirective, loopOpClauseList,
+ endClauseList, currentLocation);
+ }
+}
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
+ const Fortran::parser::OpenMPSectionConstruct &sectionConstruct) {
+ // SECTION constructs are handled as a part of SECTIONS.
+ llvm_unreachable("Unexpected standalone OMP SECTION");
+}
+
+static void
+genOMP(Fortran::lower::AbstractConverter &converter,
+ Fortran::lower::SymMap &symTable,
+ Fortran::semantics::SemanticsContext &semaCtx,
+ Fortran::lower::pft::Evaluation &eval,
const Fortran::parser::OpenMPSectionsConstruct &sectionsConstruct) {
mlir::Location currentLocation = converter.getCurrentLocation();
llvm::SmallVector<mlir::Value> allocateOperands, allocatorOperands;
@@ -2068,98 +2369,27 @@ genOMP(Fortran::lower::AbstractConverter &converter,
Fortran::lower::SymMap &symTable,
Fortran::semantics::SemanticsContext &semaCtx,
Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenMPAtomicConstruct &atomicConstruct) {
+ const Fortran::parser::OpenMPStandaloneConstruct &standaloneConstruct) {
std::visit(
Fortran::common::visitors{
- [&](const Fortran::parser::OmpAtomicRead &atomicRead) {
- mlir::Location loc = converter.genLocation(atomicRead.source);
- Fortran::lower::genOmpAccAtomicRead<
- Fortran::parser::OmpAtomicRead,
- Fortran::parser::OmpAtomicClauseList>(converter, atomicRead,
- loc);
- },
- [&](const Fortran::parser::OmpAtomicWrite &atomicWrite) {
- mlir::Location loc = converter.genLocation(atomicWrite.source);
- Fortran::lower::genOmpAccAtomicWrite<
- Fortran::parser::OmpAtomicWrite,
- Fortran::parser::OmpAtomicClauseList>(converter, atomicWrite,
- loc);
+ [&](const Fortran::parser::OpenMPSimpleStandaloneConstruct
+ &simpleStandaloneConstruct) {
+ genOmpSimpleStandalone(converter, semaCtx, eval,
+ /*genNested=*/true,
+ simpleStandaloneConstruct);
},
- [&](const Fortran::parser::OmpAtomic &atomicConstruct) {
- mlir::Location loc = converter.genLocation(atomicConstruct.source);
- Fortran::lower::genOmpAtomic<Fortran::parser::OmpAtomic,
- Fortran::parser::OmpAtomicClauseList>(
- converter, atomicConstruct, loc);
+ [&](const Fortran::parser::OpenMPFlushConstruct &flushConstruct) {
+ genOmpFlush(converter, semaCtx, eval, flushConstruct);
},
- [&](const Fortran::parser::OmpAtomicUpdate &atomicUpdate) {
- mlir::Location loc = converter.genLocation(atomicUpdate.source);
- Fortran::lower::genOmpAccAtomicUpdate<
- Fortran::parser::OmpAtomicUpdate,
- Fortran::parser::OmpAtomicClauseList>(converter, atomicUpdate,
- loc);
+ [&](const Fortran::parser::OpenMPCancelConstruct &cancelConstruct) {
+ TODO(converter.getCurrentLocation(), "OpenMPCancelConstruct");
},
- [&](const Fortran::parser::OmpAtomicCapture &atomicCapture) {
- mlir::Location loc = converter.genLocation(atomicCapture.source);
- Fortran::lower::genOmpAccAtomicCapture<
- Fortran::parser::OmpAtomicCapture,
- Fortran::parser::OmpAtomicClauseList>(converter, atomicCapture,
- loc);
+ [&](const Fortran::parser::OpenMPCancellationPointConstruct
+ &cancellationPointConstruct) {
+ TODO(converter.getCurrentLocation(), "OpenMPCancelConstruct");
},
},
- atomicConstruct.u);
-}
-
-static void
-markDeclareTarget(mlir::Operation *op,
- Fortran::lower::AbstractConverter &converter,
- mlir::omp::DeclareTargetCaptureClause captureClause,
- mlir::omp::DeclareTargetDeviceType deviceType) {
- // TODO: Add support for program local variables with declare target applied
- auto declareTargetOp = llvm::dyn_cast<mlir::omp::DeclareTargetInterface>(op);
- if (!declareTargetOp)
- fir::emitFatalError(
- converter.getCurrentLocation(),
- "Attempt to apply declare target on unsupported operation");
-
- // The function or global already has a declare target applied to it, very
- // likely through implicit capture (usage in another declare target
- // function/subroutine). It should be marked as any if it has been assigned
- // both host and nohost, else we skip, as there is no change
- if (declareTargetOp.isDeclareTarget()) {
- if (declareTargetOp.getDeclareTargetDeviceType() != deviceType)
- declareTargetOp.setDeclareTarget(mlir::omp::DeclareTargetDeviceType::any,
- captureClause);
- return;
- }
-
- declareTargetOp.setDeclareTarget(deviceType, captureClause);
-}
-
-static void genOMP(Fortran::lower::AbstractConverter &converter,
- Fortran::lower::SymMap &symTable,
- Fortran::semantics::SemanticsContext &semaCtx,
- Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenMPDeclareTargetConstruct
- &declareTargetConstruct) {
- llvm::SmallVector<DeclareTargetCapturePair> symbolAndClause;
- mlir::ModuleOp mod = converter.getFirOpBuilder().getModule();
- mlir::omp::DeclareTargetDeviceType deviceType = getDeclareTargetInfo(
- converter, semaCtx, eval, declareTargetConstruct, symbolAndClause);
-
- for (const DeclareTargetCapturePair &symClause : symbolAndClause) {
- mlir::Operation *op = mod.lookupSymbol(converter.mangleName(
- std::get<const Fortran::semantics::Symbol &>(symClause)));
-
- // Some symbols are deferred until later in the module, these are handled
- // upon finalization of the module for OpenMP inside of Bridge, so we simply
- // skip for now.
- if (!op)
- continue;
-
- markDeclareTarget(
- op, converter,
- std::get<mlir::omp::DeclareTargetCaptureClause>(symClause), deviceType);
- }
+ standaloneConstruct.u);
}
static void genOMP(Fortran::lower::AbstractConverter &converter,
@@ -2168,88 +2398,10 @@ static void genOMP(Fortran::lower::AbstractConverter &converter,
Fortran::lower::pft::Evaluation &eval,
const Fortran::parser::OpenMPConstruct &ompConstruct) {
std::visit(
- Fortran::common::visitors{
- [&](const Fortran::parser::OpenMPStandaloneConstruct
- &standaloneConstruct) {
- genOMP(converter, symTable, semaCtx, eval, standaloneConstruct);
- },
- [&](const Fortran::parser::OpenMPSectionsConstruct
- &sectionsConstruct) {
- genOMP(converter, symTable, semaCtx, eval, sectionsConstruct);
- },
- [&](const Fortran::parser::OpenMPSectionConstruct &sectionConstruct) {
- // SECTION constructs are handled as a part of SECTIONS.
- llvm_unreachable("Unexpected standalone OMP SECTION");
- },
- [&](const Fortran::parser::OpenMPLoopConstruct &loopConstruct) {
- genOMP(converter, symTable, semaCtx, eval, loopConstruct);
- },
- [&](const Fortran::parser::OpenMPDeclarativeAllocate
- &execAllocConstruct) {
- TODO(converter.getCurrentLocation(), "OpenMPDeclarativeAllocate");
- },
- [&](const Fortran::parser::OpenMPExecutableAllocate
- &execAllocConstruct) {
- TODO(converter.getCurrentLocation(), "OpenMPExecutableAllocate");
- },
- [&](const Fortran::parser::OpenMPAllocatorsConstruct
- &allocsConstruct) {
- TODO(converter.getCurrentLocation(), "OpenMPAllocatorsConstruct");
- },
- [&](const Fortran::parser::OpenMPBlockConstruct &blockConstruct) {
- genOMP(converter, symTable, semaCtx, eval, blockConstruct);
- },
- [&](const Fortran::parser::OpenMPAtomicConstruct &atomicConstruct) {
- genOMP(converter, symTable, semaCtx, eval, atomicConstruct);
- },
- [&](const Fortran::parser::OpenMPCriticalConstruct
- &criticalConstruct) {
- genOMP(converter, symTable, semaCtx, eval, criticalConstruct);
- },
- },
+ [&](auto &&s) { return genOMP(converter, symTable, semaCtx, eval, s); },
ompConstruct.u);
}
-static void
-genOMP(Fortran::lower::AbstractConverter &converter,
- Fortran::lower::SymMap &symTable,
- Fortran::semantics::SemanticsContext &semaCtx,
- Fortran::lower::pft::Evaluation &eval,
- const Fortran::parser::OpenMPDeclarativeConstruct &ompDeclConstruct) {
- std::visit(
- Fortran::common::visitors{
- [&](const Fortran::parser::OpenMPDeclarativeAllocate
- &declarativeAllocate) {
- TODO(converter.getCurrentLocation(), "OpenMPDeclarativeAllocate");
- },
- [&](const Fortran::parser::OpenMPDeclareReductionConstruct
- &declareReductionConstruct) {
- TODO(converter.getCurrentLocation(),
- "OpenMPDeclareReductionConstruct");
- },
- [&](const Fortran::parser::OpenMPDeclareSimdConstruct
- &declareSimdConstruct) {
- TODO(converter.getCurrentLocation(), "OpenMPDeclareSimdConstruct");
- },
- [&](const Fortran::parser::OpenMPDeclareTargetConstruct
- &declareTargetConstruct) {
- genOMP(converter, symTable, semaCtx, eval, declareTargetConstruct);
- },
- [&](const Fortran::parser::OpenMPRequiresConstruct
- &requiresConstruct) {
- // Requires directives are gathered and processed in semantics and
- // then combined in the lowering bridge before triggering codegen
- // just once. Hence, there is no need to lower each individual
- // occurrence here.
- },
- [&](const Fortran::parser::OpenMPThreadprivate &threadprivate) {
- // The directive is lowered when instantiating the variable to
- // support the case of threadprivate variable declared in module.
- },
- },
- ompDeclConstruct.u);
-}
-
//===----------------------------------------------------------------------===//
// Public functions
//===----------------------------------------------------------------------===//
@@ -2394,213 +2546,6 @@ void Fortran::lower::genDeclareTargetIntGlobal(
}
}
-// Generate an OpenMP reduction operation.
-// TODO: Currently assumes it is either an integer addition/multiplication
-// reduction, or a logical and reduction. Generalize this for various reduction
-// operation types.
-// TODO: Generate the reduction operation during lowering instead of creating
-// and removing operations since this is not a robust approach. Also, removing
-// ops in the builder (instead of a rewriter) is probably not the best approach.
-void Fortran::lower::genOpenMPReduction(
- Fortran::lower::AbstractConverter &converter,
- Fortran::semantics::SemanticsContext &semaCtx,
- const Fortran::parser::OmpClauseList &clauseList) {
- fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
-
- List<Clause> clauses{makeList(clauseList, semaCtx)};
-
- for (const Clause &clause : clauses) {
- if (const auto &reductionClause =
- std::get_if<clause::Reduction>(&clause.u)) {
- const auto &redOperator{
- std::get<clause::ReductionOperator>(reductionClause->t)};
- const auto &objects{std::get<ObjectList>(reductionClause->t)};
- if (const auto *reductionOp =
- std::get_if<clause::DefinedOperator>(&redOperator.u)) {
- const auto &intrinsicOp{
- std::get<clause::DefinedOperator::IntrinsicOperator>(
- reductionOp->u)};
-
- switch (intrinsicOp) {
- case clause::DefinedOperator::IntrinsicOperator::Add:
- case clause::DefinedOperator::IntrinsicOperator::Multiply:
- case clause::DefinedOperator::IntrinsicOperator::AND:
- case clause::DefinedOperator::IntrinsicOperator::EQV:
- case clause::DefinedOperator::IntrinsicOperator::OR:
- case clause::DefinedOperator::IntrinsicOperator::NEQV:
- break;
- default:
- continue;
- }
- for (const Object &object : objects) {
- if (const Fortran::semantics::Symbol *symbol = object.id()) {
- mlir::Value reductionVal = converter.getSymbolAddress(*symbol);
- if (auto declOp = reductionVal.getDefiningOp<hlfir::DeclareOp>())
- reductionVal = declOp.getBase();
- mlir::Type reductionType =
- reductionVal.getType().cast<fir::ReferenceType>().getEleTy();
- if (!reductionType.isa<fir::LogicalType>()) {
- if (!reductionType.isIntOrIndexOrFloat())
- continue;
- }
- for (mlir::OpOperand &reductionValUse : reductionVal.getUses()) {
- if (auto loadOp =
- mlir::dyn_cast<fir::LoadOp>(reductionValUse.getOwner())) {
- mlir::Value loadVal = loadOp.getRes();
- if (reductionType.isa<fir::LogicalType>()) {
- mlir::Operation *reductionOp = findReductionChain(loadVal);
- fir::ConvertOp convertOp =
- getConvertFromReductionOp(reductionOp, loadVal);
- updateReduction(reductionOp, firOpBuilder, loadVal,
- reductionVal, &convertOp);
- removeStoreOp(reductionOp, reductionVal);
- } else if (mlir::Operation *reductionOp =
- findReductionChain(loadVal, &reductionVal)) {
- updateReduction(reductionOp, firOpBuilder, loadVal,
- reductionVal);
- }
- }
- }
- }
- }
- } else if (const auto *reductionIntrinsic =
- std::get_if<clause::ProcedureDesignator>(&redOperator.u)) {
- if (!ReductionProcessor::supportedIntrinsicProcReduction(
- *reductionIntrinsic))
- continue;
- ReductionProcessor::ReductionIdentifier redId =
- ReductionProcessor::getReductionType(*reductionIntrinsic);
- for (const Object &object : objects) {
- if (const Fortran::semantics::Symbol *symbol = object.id()) {
- mlir::Value reductionVal = converter.getSymbolAddress(*symbol);
- if (auto declOp = reductionVal.getDefiningOp<hlfir::DeclareOp>())
- reductionVal = declOp.getBase();
- for (const mlir::OpOperand &reductionValUse :
- reductionVal.getUses()) {
- if (auto loadOp =
- mlir::dyn_cast<fir::LoadOp>(reductionValUse.getOwner())) {
- mlir::Value loadVal = loadOp.getRes();
- // Max is lowered as a compare -> select.
- // Match the pattern here.
- mlir::Operation *reductionOp =
- findReductionChain(loadVal, &reductionVal);
- if (reductionOp == nullptr)
- continue;
-
- if (redId == ReductionProcessor::ReductionIdentifier::MAX ||
- redId == ReductionProcessor::ReductionIdentifier::MIN) {
- assert(mlir::isa<mlir::arith::SelectOp>(reductionOp) &&
- "Selection Op not found in reduction intrinsic");
- mlir::Operation *compareOp =
- getCompareFromReductionOp(reductionOp, loadVal);
- updateReduction(compareOp, firOpBuilder, loadVal,
- reductionVal);
- }
- if (redId == ReductionProcessor::ReductionIdentifier::IOR ||
- redId == ReductionProcessor::ReductionIdentifier::IEOR ||
- redId == ReductionProcessor::ReductionIdentifier::IAND) {
- updateReduction(reductionOp, firOpBuilder, loadVal,
- reductionVal);
- }
- }
- }
- }
- }
- }
- }
- }
-}
-
-mlir::Operation *Fortran::lower::findReductionChain(mlir::Value loadVal,
- mlir::Value *reductionVal) {
- for (mlir::OpOperand &loadOperand : loadVal.getUses()) {
- if (mlir::Operation *reductionOp = loadOperand.getOwner()) {
- if (auto convertOp = mlir::dyn_cast<fir::ConvertOp>(reductionOp)) {
- for (mlir::OpOperand &convertOperand : convertOp.getRes().getUses()) {
- if (mlir::Operation *reductionOp = convertOperand.getOwner())
- return reductionOp;
- }
- }
- for (mlir::OpOperand &reductionOperand : reductionOp->getUses()) {
- if (auto store =
- mlir::dyn_cast<fir::StoreOp>(reductionOperand.getOwner())) {
- if (store.getMemref() == *reductionVal) {
- store.erase();
- return reductionOp;
- }
- }
- if (auto assign =
- mlir::dyn_cast<hlfir::AssignOp>(reductionOperand.getOwner())) {
- if (assign.getLhs() == *reductionVal) {
- assign.erase();
- return reductionOp;
- }
- }
- }
- }
- }
- return nullptr;
-}
-
-// for a logical operator 'op' reduction X = X op Y
-// This function returns the operation responsible for converting Y from
-// fir.logical<4> to i1
-fir::ConvertOp
-Fortran::lower::getConvertFromReductionOp(mlir::Operation *reductionOp,
- mlir::Value loadVal) {
- for (mlir::Value reductionOperand : reductionOp->getOperands()) {
- if (auto convertOp =
- mlir::dyn_cast<fir::ConvertOp>(reductionOperand.getDefiningOp())) {
- if (convertOp.getOperand() == loadVal)
- continue;
- return convertOp;
- }
- }
- return nullptr;
-}
-
-void Fortran::lower::updateReduction(mlir::Operation *op,
- fir::FirOpBuilder &firOpBuilder,
- mlir::Value loadVal,
- mlir::Value reductionVal,
- fir::ConvertOp *convertOp) {
- mlir::OpBuilder::InsertPoint insertPtDel = firOpBuilder.saveInsertionPoint();
- firOpBuilder.setInsertionPoint(op);
-
- mlir::Value reductionOp;
- if (convertOp)
- reductionOp = convertOp->getOperand();
- else if (op->getOperand(0) == loadVal)
- reductionOp = op->getOperand(1);
- else
- reductionOp = op->getOperand(0);
-
- firOpBuilder.create<mlir::omp::ReductionOp>(op->getLoc(), reductionOp,
- reductionVal);
- firOpBuilder.restoreInsertionPoint(insertPtDel);
-}
-
-void Fortran::lower::removeStoreOp(mlir::Operation *reductionOp,
- mlir::Value symVal) {
- for (mlir::Operation *reductionOpUse : reductionOp->getUsers()) {
- if (auto convertReduction =
- mlir::dyn_cast<fir::ConvertOp>(reductionOpUse)) {
- for (mlir::Operation *convertReductionUse :
- convertReduction.getRes().getUsers()) {
- if (auto storeOp = mlir::dyn_cast<fir::StoreOp>(convertReductionUse)) {
- if (storeOp.getMemref() == symVal)
- storeOp.erase();
- }
- if (auto assignOp =
- mlir::dyn_cast<hlfir::AssignOp>(convertReductionUse)) {
- if (assignOp.getLhs() == symVal)
- assignOp.erase();
- }
- }
- }
- }
-}
-
bool Fortran::lower::isOpenMPTargetConstruct(
const Fortran::parser::OpenMPConstruct &omp) {
llvm::omp::Directive dir = llvm::omp::Directive::OMPD_unknown;
diff --git a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
index 2477f635792a..0d05ca5aee65 100644
--- a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp
@@ -496,8 +496,10 @@ void ReductionProcessor::addDeclareReduction(
*reductionSymbols) {
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
mlir::omp::DeclareReductionOp decl;
- const auto &redOperator{
- std::get<omp::clause::ReductionOperator>(reduction.t)};
+ const auto &redOperatorList{
+ std::get<omp::clause::Reduction::ReductionIdentifiers>(reduction.t)};
+ assert(redOperatorList.size() == 1 && "Expecting single operator");
+ const auto &redOperator = redOperatorList.front();
const auto &objectList{std::get<omp::ObjectList>(reduction.t)};
if (!std::holds_alternative<omp::clause::DefinedOperator>(redOperator.u)) {
diff --git a/flang/lib/Lower/OpenMP/Utils.cpp b/flang/lib/Lower/OpenMP/Utils.cpp
index fa4a51e33848..b9c0660aa4da 100644
--- a/flang/lib/Lower/OpenMP/Utils.cpp
+++ b/flang/lib/Lower/OpenMP/Utils.cpp
@@ -15,6 +15,7 @@
#include <flang/Lower/AbstractConverter.h>
#include <flang/Lower/ConvertType.h>
+#include <flang/Optimizer/Builder/FIRBuilder.h>
#include <flang/Parser/parse-tree.h>
#include <flang/Parser/tools.h>
#include <flang/Semantics/tools.h>
@@ -70,6 +71,24 @@ void genObjectList2(const Fortran::parser::OmpObjectList &objectList,
}
}
+mlir::Type getLoopVarType(Fortran::lower::AbstractConverter &converter,
+ std::size_t loopVarTypeSize) {
+ // OpenMP runtime requires 32-bit or 64-bit loop variables.
+ loopVarTypeSize = loopVarTypeSize * 8;
+ if (loopVarTypeSize < 32) {
+ loopVarTypeSize = 32;
+ } else if (loopVarTypeSize > 64) {
+ loopVarTypeSize = 64;
+ mlir::emitWarning(converter.getCurrentLocation(),
+ "OpenMP loop iteration variable cannot have more than 64 "
+ "bits size and will be narrowed into 64 bits.");
+ }
+ assert((loopVarTypeSize == 32 || loopVarTypeSize == 64) &&
+ "OpenMP loop iteration variable size must be transformed into 32-bit "
+ "or 64-bit");
+ return converter.getFirOpBuilder().getIntegerType(loopVarTypeSize);
+}
+
void gatherFuncAndVarSyms(
const ObjectList &objects, mlir::omp::DeclareTargetCaptureClause clause,
llvm::SmallVectorImpl<DeclareTargetCapturePair> &symbolAndClause) {
diff --git a/flang/lib/Lower/OpenMP/Utils.h b/flang/lib/Lower/OpenMP/Utils.h
index 3ab0823a4621..4074bf73987d 100644
--- a/flang/lib/Lower/OpenMP/Utils.h
+++ b/flang/lib/Lower/OpenMP/Utils.h
@@ -51,6 +51,9 @@ createMapInfoOp(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::omp::VariableCaptureKind mapCaptureType, mlir::Type retTy,
bool isVal = false);
+mlir::Type getLoopVarType(Fortran::lower::AbstractConverter &converter,
+ std::size_t loopVarTypeSize);
+
void gatherFuncAndVarSyms(
const ObjectList &objects, mlir::omp::DeclareTargetCaptureClause clause,
llvm::SmallVectorImpl<DeclareTargetCapturePair> &symbolAndClause);
diff --git a/flang/lib/Lower/PFTBuilder.cpp b/flang/lib/Lower/PFTBuilder.cpp
index 1dacd5cf64cd..f196b9c5a0cb 100644
--- a/flang/lib/Lower/PFTBuilder.cpp
+++ b/flang/lib/Lower/PFTBuilder.cpp
@@ -1594,6 +1594,11 @@ private:
if (!s->has<semantics::DerivedTypeDetails>())
depth = std::max(analyze(s) + 1, depth);
}
+
+ // Make sure cray pointer is instantiated even if it is not visible.
+ if (ultimate.test(Fortran::semantics::Symbol::Flag::CrayPointee))
+ depth = std::max(
+ analyze(Fortran::semantics::GetCrayPointer(ultimate)) + 1, depth);
adjustSize(depth + 1);
bool global = lower::symbolIsGlobal(sym);
layeredVarList[depth].emplace_back(sym, global, depth);
@@ -2002,6 +2007,10 @@ struct SymbolVisitor {
}
}
}
+ // - CrayPointer needs to be available whenever a CrayPointee is used.
+ if (symbol.GetUltimate().test(
+ Fortran::semantics::Symbol::Flag::CrayPointee))
+ visitSymbol(Fortran::semantics::GetCrayPointer(symbol));
}
template <typename A>
diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
index eb8f5135ff12..ea1ef1f08aba 100644
--- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp
@@ -3883,7 +3883,7 @@ mlir::Value IntrinsicLibrary::genIeeeClass(mlir::Type resultType,
int pos = 3 + highSignificandSize;
mlir::Value index = builder.create<mlir::arith::AndIOp>(
loc, builder.create<mlir::arith::ShRUIOp>(loc, intVal, signShift),
- createIntegerConstant(1 << pos));
+ createIntegerConstant(1ULL << pos));
// [e] exponent != 0
mlir::Value exponent =
@@ -3895,7 +3895,7 @@ mlir::Value IntrinsicLibrary::genIeeeClass(mlir::Type resultType,
loc,
builder.create<mlir::arith::CmpIOp>(
loc, mlir::arith::CmpIPredicate::ne, exponent, zero),
- createIntegerConstant(1 << --pos), zero));
+ createIntegerConstant(1ULL << --pos), zero));
// [m] exponent == 1..1 (max exponent)
index = builder.create<mlir::arith::OrIOp>(
@@ -3904,7 +3904,7 @@ mlir::Value IntrinsicLibrary::genIeeeClass(mlir::Type resultType,
loc,
builder.create<mlir::arith::CmpIOp>(
loc, mlir::arith::CmpIPredicate::eq, exponent, exponentMask),
- createIntegerConstant(1 << --pos), zero));
+ createIntegerConstant(1ULL << --pos), zero));
// [l] low-order significand != 0
index = builder.create<mlir::arith::OrIOp>(
@@ -3916,7 +3916,7 @@ mlir::Value IntrinsicLibrary::genIeeeClass(mlir::Type resultType,
builder.create<mlir::arith::AndIOp>(loc, intVal,
lowSignificandMask),
zero),
- createIntegerConstant(1 << --pos), zero));
+ createIntegerConstant(1ULL << --pos), zero));
// [h] high-order significand (1 or 2 bits)
index = builder.create<mlir::arith::OrIOp>(
diff --git a/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp b/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
index 746c275f37ea..48173033ecbe 100644
--- a/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
+++ b/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
@@ -69,17 +69,32 @@ public:
return false;
}
if (auto recTy = ty.dyn_cast<RecordType>()) {
- if (llvm::is_contained(visitedTypes, recTy))
- return false;
+ auto visited = visitedTypes.find(ty);
+ if (visited != visitedTypes.end())
+ return visited->second;
+ [[maybe_unused]] auto newIt = visitedTypes.try_emplace(ty, false);
+ assert(newIt.second && "expected ty to not be in the map");
+ bool wasAlreadyVisitingRecordType = needConversionIsVisitingRecordType;
+ needConversionIsVisitingRecordType = true;
bool result = false;
- visitedTypes.push_back(recTy);
for (auto t : recTy.getTypeList()) {
if (needsConversion(t.second)) {
result = true;
break;
}
}
- visitedTypes.pop_back();
+ // Only keep the result cached if the fir.type visited was a "top-level
+ // type". Nested types with a recursive reference to the "top-level type"
+ // may incorrectly have been resolved as not needed conversions because it
+ // had not been determined yet if the "top-level type" needed conversion.
+ // This is not an issue to determine the "top-level type" need of
+ // conversion, but the result should not be kept and later used in other
+ // contexts.
+ needConversionIsVisitingRecordType = wasAlreadyVisitingRecordType;
+ if (needConversionIsVisitingRecordType)
+ visitedTypes.erase(ty);
+ else
+ visitedTypes.find(ty)->second = result;
return result;
}
if (auto boxTy = ty.dyn_cast<BaseBoxType>())
@@ -139,10 +154,8 @@ public:
ty.getName().str() + boxprocSuffix.str());
if (rec.isFinalized())
return rec;
- auto it = convertedTypes.try_emplace(ty, rec);
- if (!it.second) {
- llvm::errs() << "failed\n" << ty << "\n";
- }
+ [[maybe_unused]] auto it = convertedTypes.try_emplace(ty, rec);
+ assert(it.second && "expected ty to not be in the map");
std::vector<RecordType::TypePair> ps = ty.getLenParamList();
std::vector<RecordType::TypePair> cs;
for (auto t : ty.getTypeList()) {
@@ -171,11 +184,12 @@ public:
void setLocation(mlir::Location location) { loc = location; }
private:
- llvm::SmallVector<mlir::Type> visitedTypes;
- // Map to deal with recursive derived types (avoid infinite loops).
+ // Maps to deal with recursive derived types (avoid infinite loops).
// Caching is also beneficial for apps with big types (dozens of
// components and or parent types), so the lifetime of the cache
// is the whole pass.
+ llvm::DenseMap<mlir::Type, bool> visitedTypes;
+ bool needConversionIsVisitingRecordType = false;
llvm::DenseMap<mlir::Type, mlir::Type> convertedTypes;
mlir::Location loc;
};
diff --git a/flang/lib/Optimizer/CodeGen/CMakeLists.txt b/flang/lib/Optimizer/CodeGen/CMakeLists.txt
index 175ab9fefda2..879bc28d017a 100644
--- a/flang/lib/Optimizer/CodeGen/CMakeLists.txt
+++ b/flang/lib/Optimizer/CodeGen/CMakeLists.txt
@@ -3,6 +3,7 @@ add_flang_library(FIRCodeGen
CGOps.cpp
CodeGen.cpp
CodeGenOpenMP.cpp
+ FIROpPatterns.cpp
PreCGRewrite.cpp
TBAABuilder.cpp
Target.cpp
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index faf90ef6b50a..06ce84f1543a 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -14,6 +14,8 @@
#include "CGOps.h"
#include "flang/Optimizer/CodeGen/CodeGenOpenMP.h"
+#include "flang/Optimizer/CodeGen/FIROpPatterns.h"
+#include "flang/Optimizer/CodeGen/TypeConverter.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Dialect/FIRType.h"
@@ -58,42 +60,14 @@ namespace fir {
#define DEBUG_TYPE "flang-codegen"
-// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
-#include "flang/Optimizer/CodeGen/TypeConverter.h"
-
// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;
-static constexpr unsigned defaultAddressSpace = 0u;
/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;
-static inline unsigned
-getAllocaAddressSpace(mlir::ConversionPatternRewriter &rewriter) {
- mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
- assert(parentOp != nullptr &&
- "expected insertion block to have parent operation");
- if (auto module = parentOp->getParentOfType<mlir::ModuleOp>())
- if (mlir::Attribute addrSpace =
- mlir::DataLayout(module).getAllocaMemorySpace())
- return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt();
- return defaultAddressSpace;
-}
-
-static inline unsigned
-getProgramAddressSpace(mlir::ConversionPatternRewriter &rewriter) {
- mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
- assert(parentOp != nullptr &&
- "expected insertion block to have parent operation");
- if (auto module = parentOp->getParentOfType<mlir::ModuleOp>())
- if (mlir::Attribute addrSpace =
- mlir::DataLayout(module).getProgramMemorySpace())
- return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt();
- return defaultAddressSpace;
-}
-
static inline mlir::Type getLlvmPtrType(mlir::MLIRContext *context,
unsigned addressSpace = 0) {
return mlir::LLVM::LLVMPointerType::get(context, addressSpace);
@@ -152,332 +126,8 @@ static unsigned getLenParamFieldId(mlir::Type ty) {
}
namespace {
-/// FIR conversion pattern template
-template <typename FromOp>
-class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
-public:
- explicit FIROpConversion(const fir::LLVMTypeConverter &lowering,
- const fir::FIRToLLVMPassOptions &options)
- : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}
-
-protected:
- mlir::Type convertType(mlir::Type ty) const {
- return lowerTy().convertType(ty);
- }
-
- // Convert FIR type to LLVM without turning fir.box<T> into memory
- // reference.
- mlir::Type convertObjectType(mlir::Type firType) const {
- if (auto boxTy = firType.dyn_cast<fir::BaseBoxType>())
- return lowerTy().convertBoxTypeAsStruct(boxTy);
- return lowerTy().convertType(firType);
- }
-
- mlir::LLVM::ConstantOp
- genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
- int value) const {
- mlir::Type i32Ty = rewriter.getI32Type();
- mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
- return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
- }
-
- mlir::LLVM::ConstantOp
- genConstantOffset(mlir::Location loc,
- mlir::ConversionPatternRewriter &rewriter,
- int offset) const {
- mlir::Type ity = lowerTy().offsetType();
- mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
- return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
- }
-
- /// Perform an extension or truncation as needed on an integer value. Lowering
- /// to the specific target may involve some sign-extending or truncation of
- /// values, particularly to fit them from abstract box types to the
- /// appropriate reified structures.
- mlir::Value integerCast(mlir::Location loc,
- mlir::ConversionPatternRewriter &rewriter,
- mlir::Type ty, mlir::Value val) const {
- auto valTy = val.getType();
- // If the value was not yet lowered, lower its type so that it can
- // be used in getPrimitiveTypeSizeInBits.
- if (!valTy.isa<mlir::IntegerType>())
- valTy = convertType(valTy);
- auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
- auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
- if (toSize < fromSize)
- return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
- if (toSize > fromSize)
- return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
- return val;
- }
-
- struct TypePair {
- mlir::Type fir;
- mlir::Type llvm;
- };
-
- TypePair getBoxTypePair(mlir::Type firBoxTy) const {
- mlir::Type llvmBoxTy = lowerTy().convertBoxTypeAsStruct(
- mlir::cast<fir::BaseBoxType>(firBoxTy));
- return TypePair{firBoxTy, llvmBoxTy};
- }
-
- /// Construct code sequence to extract the specific value from a `fir.box`.
- mlir::Value getValueFromBox(mlir::Location loc, TypePair boxTy,
- mlir::Value box, mlir::Type resultTy,
- mlir::ConversionPatternRewriter &rewriter,
- int boxValue) const {
- if (box.getType().isa<mlir::LLVM::LLVMPointerType>()) {
- auto pty = ::getLlvmPtrType(resultTy.getContext());
- auto p = rewriter.create<mlir::LLVM::GEPOp>(
- loc, pty, boxTy.llvm, box,
- llvm::ArrayRef<mlir::LLVM::GEPArg>{0, boxValue});
- auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
- attachTBAATag(loadOp, boxTy.fir, nullptr, p);
- return loadOp;
- }
- return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, box, boxValue);
- }
-
- /// Method to construct code sequence to get the triple for dimension `dim`
- /// from a box.
- llvm::SmallVector<mlir::Value, 3>
- getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
- TypePair boxTy, mlir::Value box, mlir::Value dim,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Value l0 =
- loadDimFieldFromBox(loc, boxTy, box, dim, 0, retTys[0], rewriter);
- mlir::Value l1 =
- loadDimFieldFromBox(loc, boxTy, box, dim, 1, retTys[1], rewriter);
- mlir::Value l2 =
- loadDimFieldFromBox(loc, boxTy, box, dim, 2, retTys[2], rewriter);
- return {l0, l1, l2};
- }
-
- llvm::SmallVector<mlir::Value, 3>
- getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
- TypePair boxTy, mlir::Value box, int dim,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Value l0 =
- getDimFieldFromBox(loc, boxTy, box, dim, 0, retTys[0], rewriter);
- mlir::Value l1 =
- getDimFieldFromBox(loc, boxTy, box, dim, 1, retTys[1], rewriter);
- mlir::Value l2 =
- getDimFieldFromBox(loc, boxTy, box, dim, 2, retTys[2], rewriter);
- return {l0, l1, l2};
- }
-
- mlir::Value
- loadDimFieldFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
- mlir::Value dim, int off, mlir::Type ty,
- mlir::ConversionPatternRewriter &rewriter) const {
- assert(box.getType().isa<mlir::LLVM::LLVMPointerType>() &&
- "descriptor inquiry with runtime dim can only be done on descriptor "
- "in memory");
- mlir::LLVM::GEPOp p = genGEP(loc, boxTy.llvm, rewriter, box, 0,
- static_cast<int>(kDimsPosInBox), dim, off);
- auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
- attachTBAATag(loadOp, boxTy.fir, nullptr, p);
- return loadOp;
- }
-
- mlir::Value
- getDimFieldFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
- int dim, int off, mlir::Type ty,
- mlir::ConversionPatternRewriter &rewriter) const {
- if (box.getType().isa<mlir::LLVM::LLVMPointerType>()) {
- mlir::LLVM::GEPOp p = genGEP(loc, boxTy.llvm, rewriter, box, 0,
- static_cast<int>(kDimsPosInBox), dim, off);
- auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
- attachTBAATag(loadOp, boxTy.fir, nullptr, p);
- return loadOp;
- }
- return rewriter.create<mlir::LLVM::ExtractValueOp>(
- loc, box, llvm::ArrayRef<std::int64_t>{kDimsPosInBox, dim, off});
- }
-
- mlir::Value
- getStrideFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
- unsigned dim,
- mlir::ConversionPatternRewriter &rewriter) const {
- auto idxTy = lowerTy().indexType();
- return getDimFieldFromBox(loc, boxTy, box, dim, kDimStridePos, idxTy,
- rewriter);
- }
-
- /// Read base address from a fir.box. Returned address has type ty.
- mlir::Value
- getBaseAddrFromBox(mlir::Location loc, TypePair boxTy, mlir::Value box,
- mlir::ConversionPatternRewriter &rewriter) const {
- mlir::Type resultTy = ::getLlvmPtrType(boxTy.llvm.getContext());
- return getValueFromBox(loc, boxTy, box, resultTy, rewriter, kAddrPosInBox);
- }
-
- mlir::Value
- getElementSizeFromBox(mlir::Location loc, mlir::Type resultTy, TypePair boxTy,
- mlir::Value box,
- mlir::ConversionPatternRewriter &rewriter) const {
- return getValueFromBox(loc, boxTy, box, resultTy, rewriter,
- kElemLenPosInBox);
- }
-
- // Get the element type given an LLVM type that is of the form
- // (array|struct|vector)+ and the provided indexes.
- static mlir::Type getBoxEleTy(mlir::Type type,
- llvm::ArrayRef<std::int64_t> indexes) {
- for (unsigned i : indexes) {
- if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
- assert(!t.isOpaque() && i < t.getBody().size());
- type = t.getBody()[i];
- } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
- type = t.getElementType();
- } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
- type = t.getElementType();
- } else {
- fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
- "request for invalid box element type");
- }
- }
- return type;
- }
-
- // Return LLVM type of the object described by a fir.box of \p boxType.
- mlir::Type getLlvmObjectTypeFromBoxType(mlir::Type boxType) const {
- mlir::Type objectType = fir::dyn_cast_ptrOrBoxEleTy(boxType);
- assert(objectType && "boxType must be a box type");
- return this->convertType(objectType);
- }
-
- /// Read the address of the type descriptor from a box.
- mlir::Value
- loadTypeDescAddress(mlir::Location loc, TypePair boxTy, mlir::Value box,
- mlir::ConversionPatternRewriter &rewriter) const {
- unsigned typeDescFieldId = getTypeDescFieldId(boxTy.fir);
- mlir::Type tdescType = lowerTy().convertTypeDescType(rewriter.getContext());
- return getValueFromBox(loc, boxTy, box, tdescType, rewriter,
- typeDescFieldId);
- }
-
- // Load the attribute from the \p box and perform a check against \p maskValue
- // The final comparison is implemented as `(attribute & maskValue) != 0`.
- mlir::Value genBoxAttributeCheck(mlir::Location loc, TypePair boxTy,
- mlir::Value box,
- mlir::ConversionPatternRewriter &rewriter,
- unsigned maskValue) const {
- mlir::Type attrTy = rewriter.getI32Type();
- mlir::Value attribute =
- getValueFromBox(loc, boxTy, box, attrTy, rewriter, kAttributePosInBox);
- mlir::LLVM::ConstantOp attrMask =
- genConstantOffset(loc, rewriter, maskValue);
- auto maskRes =
- rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
- mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
- return rewriter.create<mlir::LLVM::ICmpOp>(
- loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
- }
-
- template <typename... ARGS>
- mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
- mlir::ConversionPatternRewriter &rewriter,
- mlir::Value base, ARGS... args) const {
- llvm::SmallVector<mlir::LLVM::GEPArg> cv = {args...};
- auto llvmPtrTy = ::getLlvmPtrType(ty.getContext());
- return rewriter.create<mlir::LLVM::GEPOp>(loc, llvmPtrTy, ty, base, cv);
- }
-
- // Find the Block in which the alloca should be inserted.
- // The order to recursively find the proper block:
- // 1. An OpenMP Op that will be outlined.
- // 2. A LLVMFuncOp
- // 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
- static mlir::Block *getBlockForAllocaInsert(mlir::Operation *op) {
- if (auto iface =
- mlir::dyn_cast<mlir::omp::OutlineableOpenMPOpInterface>(op))
- return iface.getAllocaBlock();
- if (auto llvmFuncOp = mlir::dyn_cast<mlir::LLVM::LLVMFuncOp>(op))
- return &llvmFuncOp.front();
- return getBlockForAllocaInsert(op->getParentOp());
- }
-
- // Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
- // allocation address space provided for the architecture in the DataLayout
- // specification. If the address space is different from the devices
- // program address space we perform a cast. In the case of most architectures
- // the program and allocation address space will be the default of 0 and no
- // cast will be emitted.
- mlir::Value genAllocaAndAddrCastWithType(
- mlir::Location loc, mlir::Type llvmObjectTy, unsigned alignment,
- mlir::ConversionPatternRewriter &rewriter) const {
- auto thisPt = rewriter.saveInsertionPoint();
- mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
- if (mlir::isa<mlir::omp::DeclareReductionOp>(parentOp)) {
- // DeclareReductionOp has multiple child regions. We want to get the first
- // block of whichever of those regions we are currently in
- mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
- rewriter.setInsertionPointToStart(&parentRegion->front());
- } else {
- mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp);
- rewriter.setInsertionPointToStart(insertBlock);
- }
- auto size = genI32Constant(loc, rewriter, 1);
- unsigned allocaAs = getAllocaAddressSpace(rewriter);
- unsigned programAs = getProgramAddressSpace(rewriter);
-
- mlir::Value al = rewriter.create<mlir::LLVM::AllocaOp>(
- loc, ::getLlvmPtrType(llvmObjectTy.getContext(), allocaAs),
- llvmObjectTy, size, alignment);
-
- // if our allocation address space, is not the same as the program address
- // space, then we must emit a cast to the program address space before use.
- // An example case would be on AMDGPU, where the allocation address space is
- // the numeric value 5 (private), and the program address space is 0
- // (generic).
- if (allocaAs != programAs) {
- al = rewriter.create<mlir::LLVM::AddrSpaceCastOp>(
- loc, ::getLlvmPtrType(llvmObjectTy.getContext(), programAs), al);
- }
-
- rewriter.restoreInsertionPoint(thisPt);
- return al;
- }
-
- const fir::LLVMTypeConverter &lowerTy() const {
- return *static_cast<const fir::LLVMTypeConverter *>(
- this->getTypeConverter());
- }
-
- void attachTBAATag(mlir::LLVM::AliasAnalysisOpInterface op,
- mlir::Type baseFIRType, mlir::Type accessFIRType,
- mlir::LLVM::GEPOp gep) const {
- lowerTy().attachTBAATag(op, baseFIRType, accessFIRType, gep);
- }
-
- const fir::FIRToLLVMPassOptions &options;
-};
-
-/// FIR conversion pattern template
-template <typename FromOp>
-class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
-public:
- using FIROpConversion<FromOp>::FIROpConversion;
- using OpAdaptor = typename FromOp::Adaptor;
-
- mlir::LogicalResult
- matchAndRewrite(FromOp op, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const final {
- mlir::Type ty = this->convertType(op.getType());
- return doRewrite(op, ty, adaptor, rewriter);
- }
-
- virtual mlir::LogicalResult
- doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
- mlir::ConversionPatternRewriter &rewriter) const = 0;
-};
-} // namespace
-
-namespace {
/// Lower `fir.address_of` operation to `llvm.address_of` operation.
-struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
+struct AddrOfOpConversion : public fir::FIROpConversion<fir::AddrOfOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -539,7 +189,7 @@ genAllocationScaleSize(OP op, mlir::Type ity,
namespace {
/// convert to LLVM IR dialect `alloca`
-struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
+struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -624,7 +274,7 @@ struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
-struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
+struct BoxAddrOpConversion : public fir::FIROpConversion<fir::BoxAddrOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -645,7 +295,7 @@ struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
-struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
+struct BoxCharLenOpConversion : public fir::FIROpConversion<fir::BoxCharLenOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -668,7 +318,7 @@ struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Result in a triple set of GEPs and loads.
-struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
+struct BoxDimsOpConversion : public fir::FIROpConversion<fir::BoxDimsOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -690,7 +340,7 @@ struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
/// Lower `fir.box_elesize` to a sequence of operations ro extract the size of
/// an element in the boxed value.
-struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
+struct BoxEleSizeOpConversion : public fir::FIROpConversion<fir::BoxEleSizeOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -708,7 +358,7 @@ struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
-struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
+struct BoxIsAllocOpConversion : public fir::FIROpConversion<fir::BoxIsAllocOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -726,7 +376,7 @@ struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed is an array.
-struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
+struct BoxIsArrayOpConversion : public fir::FIROpConversion<fir::BoxIsArrayOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -746,7 +396,7 @@ struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
/// Lower `fir.box_isptr` to a sequence of operations to determined if the
/// boxed value was from a POINTER entity.
-struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
+struct BoxIsPtrOpConversion : public fir::FIROpConversion<fir::BoxIsPtrOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -764,7 +414,7 @@ struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
/// Lower `fir.box_rank` to the sequence of operation to extract the rank from
/// the box.
-struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
+struct BoxRankOpConversion : public fir::FIROpConversion<fir::BoxRankOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -784,7 +434,8 @@ struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
-struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
+struct BoxProcHostOpConversion
+ : public fir::FIROpConversion<fir::BoxProcHostOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -797,7 +448,8 @@ struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
-struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
+struct BoxTypeDescOpConversion
+ : public fir::FIROpConversion<fir::BoxTypeDescOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -814,7 +466,8 @@ struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
/// Lower `fir.box_typecode` to a sequence of operations to extract the type
/// code in the boxed value.
-struct BoxTypeCodeOpConversion : public FIROpConversion<fir::BoxTypeCodeOp> {
+struct BoxTypeCodeOpConversion
+ : public fir::FIROpConversion<fir::BoxTypeCodeOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -832,7 +485,7 @@ struct BoxTypeCodeOpConversion : public FIROpConversion<fir::BoxTypeCodeOp> {
};
/// Lower `fir.string_lit` to LLVM IR dialect operation.
-struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
+struct StringLitOpConversion : public fir::FIROpConversion<fir::StringLitOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -872,7 +525,7 @@ struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
};
/// `fir.call` -> `llvm.call`
-struct CallOpConversion : public FIROpConversion<fir::CallOp> {
+struct CallOpConversion : public fir::FIROpConversion<fir::CallOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -903,7 +556,7 @@ namespace {
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparison are done on the real component only.
-struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
+struct CmpcOpConversion : public fir::FIROpConversion<fir::CmpcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -941,7 +594,7 @@ struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
};
/// Lower complex constants
-struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
+struct ConstcOpConversion : public fir::FIROpConversion<fir::ConstcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -968,7 +621,7 @@ struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
};
/// convert value of from-type to value of to-type
-struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
+struct ConvertOpConversion : public fir::FIROpConversion<fir::ConvertOp> {
using FIROpConversion::FIROpConversion;
static bool isFloatingPointTy(mlir::Type ty) {
@@ -1137,7 +790,7 @@ struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
/// only used to carry information during FIR to FIR passes. It may be used
/// in the future to generate the runtime type info data structures instead
/// of generating them in lowering.
-struct TypeInfoOpConversion : public FIROpConversion<fir::TypeInfoOp> {
+struct TypeInfoOpConversion : public fir::FIROpConversion<fir::TypeInfoOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -1150,7 +803,7 @@ struct TypeInfoOpConversion : public FIROpConversion<fir::TypeInfoOp> {
/// `fir.dt_entry` operation has no specific CodeGen. The operation is only used
/// to carry information during FIR to FIR passes.
-struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
+struct DTEntryOpConversion : public fir::FIROpConversion<fir::DTEntryOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -1162,7 +815,7 @@ struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
};
/// Lower `fir.global_len` operation.
-struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
+struct GlobalLenOpConversion : public fir::FIROpConversion<fir::GlobalLenOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -1175,7 +828,7 @@ struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
/// Lower fir.len_param_index
struct LenParamIndexOpConversion
- : public FIROpConversion<fir::LenParamIndexOp> {
+ : public fir::FIROpConversion<fir::LenParamIndexOp> {
using FIROpConversion::FIROpConversion;
// FIXME: this should be specialized by the runtime target
@@ -1190,7 +843,7 @@ struct LenParamIndexOpConversion
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
-struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
+struct EmboxCharOpConversion : public fir::FIROpConversion<fir::EmboxCharOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -1283,7 +936,7 @@ genTypeStrideInBytes(mlir::Location loc, mlir::Type idxTy,
namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
-struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
+struct AllocMemOpConversion : public fir::FIROpConversion<fir::AllocMemOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -1352,7 +1005,7 @@ static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
-struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
+struct FreeMemOpConversion : public fir::FIROpConversion<fir::FreeMemOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -1410,9 +1063,9 @@ convertSubcomponentIndices(mlir::Location loc, mlir::Type eleTy,
/// Common base class for embox to descriptor conversion.
template <typename OP>
-struct EmboxCommonConversion : public FIROpConversion<OP> {
- using FIROpConversion<OP>::FIROpConversion;
- using TypePair = typename FIROpConversion<OP>::TypePair;
+struct EmboxCommonConversion : public fir::FIROpConversion<OP> {
+ using fir::FIROpConversion<OP>::FIROpConversion;
+ using TypePair = typename fir::FIROpConversion<OP>::TypePair;
static int getCFIAttr(fir::BaseBoxType boxTy) {
auto eleTy = boxTy.getEleTy();
@@ -1434,8 +1087,8 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
return size; // Length accounted for in the genTypeStrideInBytes GEP.
// Otherwise, multiply the single character size by the length.
assert(!lenParams.empty());
- auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty,
- lenParams.back());
+ auto len64 = fir::FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty,
+ lenParams.back());
return rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, size, len64);
}
@@ -2280,7 +1933,7 @@ private:
/// Lower `fir.emboxproc` operation. Creates a procedure box.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
-struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> {
+struct EmboxProcOpConversion : public fir::FIROpConversion<fir::EmboxProcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -2346,7 +1999,7 @@ private:
namespace {
/// Extract a subobject value from an ssa-value of aggregate type
struct ExtractValueOpConversion
- : public FIROpAndTypeConversion<fir::ExtractValueOp>,
+ : public fir::FIROpAndTypeConversion<fir::ExtractValueOp>,
public ValueOpCommon {
using FIROpAndTypeConversion::FIROpAndTypeConversion;
@@ -2365,7 +2018,7 @@ struct ExtractValueOpConversion
/// InsertValue is the generalized instruction for the composition of new
/// aggregate type values.
struct InsertValueOpConversion
- : public FIROpAndTypeConversion<fir::InsertValueOp>,
+ : public fir::FIROpAndTypeConversion<fir::InsertValueOp>,
public ValueOpCommon {
using FIROpAndTypeConversion::FIROpAndTypeConversion;
@@ -2383,7 +2036,7 @@ struct InsertValueOpConversion
/// InsertOnRange inserts a value into a sequence over a range of offsets.
struct InsertOnRangeOpConversion
- : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
+ : public fir::FIROpAndTypeConversion<fir::InsertOnRangeOp> {
using FIROpAndTypeConversion::FIROpAndTypeConversion;
// Increments an array of subscripts in a row major fasion.
@@ -2447,7 +2100,7 @@ namespace {
/// (See the static restriction on coordinate_of.) array_coor determines the
/// coordinate (location) of a specific element.
struct XArrayCoorOpConversion
- : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
+ : public fir::FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
using FIROpAndTypeConversion::FIROpAndTypeConversion;
mlir::LogicalResult
@@ -2615,7 +2268,7 @@ struct XArrayCoorOpConversion
/// With unboxed arrays, there is the restriction that the array have a static
/// shape in all but the last column.
struct CoordinateOpConversion
- : public FIROpAndTypeConversion<fir::CoordinateOp> {
+ : public fir::FIROpAndTypeConversion<fir::CoordinateOp> {
using FIROpAndTypeConversion::FIROpAndTypeConversion;
mlir::LogicalResult
@@ -2910,7 +2563,7 @@ private:
/// Convert `fir.field_index`. The conversion depends on whether the size of
/// the record is static or dynamic.
-struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
+struct FieldIndexOpConversion : public fir::FIROpConversion<fir::FieldIndexOp> {
using FIROpConversion::FIROpConversion;
// NB: most field references should be resolved by this point
@@ -2951,7 +2604,7 @@ struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
};
/// Convert `fir.end`
-struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
+struct FirEndOpConversion : public fir::FIROpConversion<fir::FirEndOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -2963,7 +2616,7 @@ struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
};
/// Lower `fir.type_desc` to a global addr.
-struct TypeDescOpConversion : public FIROpConversion<fir::TypeDescOp> {
+struct TypeDescOpConversion : public fir::FIROpConversion<fir::TypeDescOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -2990,7 +2643,7 @@ struct TypeDescOpConversion : public FIROpConversion<fir::TypeDescOp> {
};
/// Lower `fir.has_value` operation to `llvm.return` operation.
-struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
+struct HasValueOpConversion : public fir::FIROpConversion<fir::HasValueOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3043,7 +2696,7 @@ static inline bool attributeTypeIsCompatible(mlir::MLIRContext *ctx,
/// Lower `fir.global` operation to `llvm.global` operation.
/// `fir.insert_on_range` operations are replaced with constant dense attribute
/// if they are applied on the full range.
-struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
+struct GlobalOpConversion : public fir::FIROpConversion<fir::GlobalOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3167,7 +2820,7 @@ private:
};
/// `fir.load` --> `llvm.load`
-struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
+struct LoadOpConversion : public fir::FIROpConversion<fir::LoadOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3216,7 +2869,7 @@ struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
-struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
+struct NoReassocOpConversion : public fir::FIROpConversion<fir::NoReassocOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3277,7 +2930,7 @@ static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
/// upper bound in the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
-struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
+struct SelectCaseOpConversion : public fir::FIROpConversion<fir::SelectCaseOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3391,7 +3044,7 @@ static void selectMatchAndRewrite(const fir::LLVMTypeConverter &lowering,
}
/// conversion of fir::SelectOp to an if-then-else ladder
-struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
+struct SelectOpConversion : public fir::FIROpConversion<fir::SelectOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3403,7 +3056,7 @@ struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
};
/// conversion of fir::SelectRankOp to an if-then-else ladder
-struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
+struct SelectRankOpConversion : public fir::FIROpConversion<fir::SelectRankOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3415,7 +3068,7 @@ struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
};
/// Lower `fir.select_type` to LLVM IR dialect.
-struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
+struct SelectTypeOpConversion : public fir::FIROpConversion<fir::SelectTypeOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3428,7 +3081,7 @@ struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
};
/// `fir.store` --> `llvm.store`
-struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
+struct StoreOpConversion : public fir::FIROpConversion<fir::StoreOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3462,7 +3115,7 @@ namespace {
/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
-struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
+struct UnboxCharOpConversion : public fir::FIROpConversion<fir::UnboxCharOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3487,7 +3140,7 @@ struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
/// components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
-struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
+struct UnboxProcOpConversion : public fir::FIROpConversion<fir::UnboxProcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3499,7 +3152,7 @@ struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
};
/// convert to LLVM IR dialect `undef`
-struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
+struct UndefOpConversion : public fir::FIROpConversion<fir::UndefOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3511,7 +3164,7 @@ struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
}
};
-struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
+struct ZeroOpConversion : public fir::FIROpConversion<fir::ZeroOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3524,7 +3177,8 @@ struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
};
/// `fir.unreachable` --> `llvm.unreachable`
-struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
+struct UnreachableOpConversion
+ : public fir::FIROpConversion<fir::UnreachableOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3541,7 +3195,7 @@ struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
/// %1 = llvm.ptrtoint %0
/// %2 = llvm.icmp "ne" %1, %0 : i64
/// ```
-struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
+struct IsPresentOpConversion : public fir::FIROpConversion<fir::IsPresentOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3570,7 +3224,7 @@ struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.zero : !llvm.ptr<i64>`
-struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
+struct AbsentOpConversion : public fir::FIROpConversion<fir::AbsentOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3630,7 +3284,7 @@ complexSum(OPTY sumop, mlir::ValueRange opnds,
} // namespace
namespace {
-struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
+struct AddcOpConversion : public fir::FIROpConversion<fir::AddcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3645,7 +3299,7 @@ struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
}
};
-struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
+struct SubcOpConversion : public fir::FIROpConversion<fir::SubcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3661,7 +3315,7 @@ struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
};
/// Inlined complex multiply
-struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
+struct MulcOpConversion : public fir::FIROpConversion<fir::MulcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3695,7 +3349,7 @@ struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
};
/// Inlined complex division
-struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
+struct DivcOpConversion : public fir::FIROpConversion<fir::DivcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3735,7 +3389,7 @@ struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
};
/// Inlined complex negation
-struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
+struct NegcOpConversion : public fir::FIROpConversion<fir::NegcOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3756,7 +3410,7 @@ struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
}
};
-struct BoxOffsetOpConversion : public FIROpConversion<fir::BoxOffsetOp> {
+struct BoxOffsetOpConversion : public fir::FIROpConversion<fir::BoxOffsetOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
@@ -3782,10 +3436,10 @@ struct BoxOffsetOpConversion : public FIROpConversion<fir::BoxOffsetOp> {
/// anymore uses.
/// These operations are normally dead after the pre-codegen pass.
template <typename FromOp>
-struct MustBeDeadConversion : public FIROpConversion<FromOp> {
+struct MustBeDeadConversion : public fir::FIROpConversion<FromOp> {
explicit MustBeDeadConversion(const fir::LLVMTypeConverter &lowering,
const fir::FIRToLLVMPassOptions &options)
- : FIROpConversion<FromOp>(lowering, options) {}
+ : fir::FIROpConversion<FromOp>(lowering, options) {}
using OpAdaptor = typename FromOp::Adaptor;
mlir::LogicalResult
@@ -3799,7 +3453,7 @@ struct MustBeDeadConversion : public FIROpConversion<FromOp> {
};
struct UnrealizedConversionCastOpConversion
- : public FIROpConversion<mlir::UnrealizedConversionCastOp> {
+ : public fir::FIROpConversion<mlir::UnrealizedConversionCastOp> {
using FIROpConversion::FIROpConversion;
mlir::LogicalResult
diff --git a/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
new file mode 100644
index 000000000000..26871d888815
--- /dev/null
+++ b/flang/lib/Optimizer/CodeGen/FIROpPatterns.cpp
@@ -0,0 +1,315 @@
+//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/CodeGen/FIROpPatterns.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+#include "llvm/Support/Debug.h"
+
+static inline mlir::Type getLlvmPtrType(mlir::MLIRContext *context,
+ unsigned addressSpace = 0) {
+ return mlir::LLVM::LLVMPointerType::get(context, addressSpace);
+}
+
+static unsigned getTypeDescFieldId(mlir::Type ty) {
+ auto isArray = fir::dyn_cast_ptrOrBoxEleTy(ty).isa<fir::SequenceType>();
+ return isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
+}
+
+namespace fir {
+
+ConvertFIRToLLVMPattern::ConvertFIRToLLVMPattern(
+ llvm::StringRef rootOpName, mlir::MLIRContext *context,
+ const fir::LLVMTypeConverter &typeConverter,
+ const fir::FIRToLLVMPassOptions &options, mlir::PatternBenefit benefit)
+ : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
+ options(options) {}
+
+// Convert FIR type to LLVM without turning fir.box<T> into memory
+// reference.
+mlir::Type
+ConvertFIRToLLVMPattern::convertObjectType(mlir::Type firType) const {
+ if (auto boxTy = firType.dyn_cast<fir::BaseBoxType>())
+ return lowerTy().convertBoxTypeAsStruct(boxTy);
+ return lowerTy().convertType(firType);
+}
+
+mlir::LLVM::ConstantOp ConvertFIRToLLVMPattern::genI32Constant(
+ mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
+ int value) const {
+ mlir::Type i32Ty = rewriter.getI32Type();
+ mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
+ return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
+}
+
+mlir::LLVM::ConstantOp ConvertFIRToLLVMPattern::genConstantOffset(
+ mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
+ int offset) const {
+ mlir::Type ity = lowerTy().offsetType();
+ mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
+ return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
+}
+
+/// Perform an extension or truncation as needed on an integer value. Lowering
+/// to the specific target may involve some sign-extending or truncation of
+/// values, particularly to fit them from abstract box types to the
+/// appropriate reified structures.
+mlir::Value
+ConvertFIRToLLVMPattern::integerCast(mlir::Location loc,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Type ty, mlir::Value val) const {
+ auto valTy = val.getType();
+ // If the value was not yet lowered, lower its type so that it can
+ // be used in getPrimitiveTypeSizeInBits.
+ if (!valTy.isa<mlir::IntegerType>())
+ valTy = convertType(valTy);
+ auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
+ auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
+ if (toSize < fromSize)
+ return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
+ if (toSize > fromSize)
+ return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
+ return val;
+}
+
+fir::ConvertFIRToLLVMPattern::TypePair
+ConvertFIRToLLVMPattern::getBoxTypePair(mlir::Type firBoxTy) const {
+ mlir::Type llvmBoxTy =
+ lowerTy().convertBoxTypeAsStruct(mlir::cast<fir::BaseBoxType>(firBoxTy));
+ return TypePair{firBoxTy, llvmBoxTy};
+}
+
+/// Construct code sequence to extract the specific value from a `fir.box`.
+mlir::Value ConvertFIRToLLVMPattern::getValueFromBox(
+ mlir::Location loc, TypePair boxTy, mlir::Value box, mlir::Type resultTy,
+ mlir::ConversionPatternRewriter &rewriter, int boxValue) const {
+ if (box.getType().isa<mlir::LLVM::LLVMPointerType>()) {
+ auto pty = getLlvmPtrType(resultTy.getContext());
+ auto p = rewriter.create<mlir::LLVM::GEPOp>(
+ loc, pty, boxTy.llvm, box,
+ llvm::ArrayRef<mlir::LLVM::GEPArg>{0, boxValue});
+ auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
+ attachTBAATag(loadOp, boxTy.fir, nullptr, p);
+ return loadOp;
+ }
+ return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, box, boxValue);
+}
+
+/// Method to construct code sequence to get the triple for dimension `dim`
+/// from a box.
+llvm::SmallVector<mlir::Value, 3> ConvertFIRToLLVMPattern::getDimsFromBox(
+ mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, TypePair boxTy,
+ mlir::Value box, mlir::Value dim,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Value l0 =
+ loadDimFieldFromBox(loc, boxTy, box, dim, 0, retTys[0], rewriter);
+ mlir::Value l1 =
+ loadDimFieldFromBox(loc, boxTy, box, dim, 1, retTys[1], rewriter);
+ mlir::Value l2 =
+ loadDimFieldFromBox(loc, boxTy, box, dim, 2, retTys[2], rewriter);
+ return {l0, l1, l2};
+}
+
+llvm::SmallVector<mlir::Value, 3> ConvertFIRToLLVMPattern::getDimsFromBox(
+ mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, TypePair boxTy,
+ mlir::Value box, int dim, mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Value l0 =
+ getDimFieldFromBox(loc, boxTy, box, dim, 0, retTys[0], rewriter);
+ mlir::Value l1 =
+ getDimFieldFromBox(loc, boxTy, box, dim, 1, retTys[1], rewriter);
+ mlir::Value l2 =
+ getDimFieldFromBox(loc, boxTy, box, dim, 2, retTys[2], rewriter);
+ return {l0, l1, l2};
+}
+
+mlir::Value ConvertFIRToLLVMPattern::loadDimFieldFromBox(
+ mlir::Location loc, TypePair boxTy, mlir::Value box, mlir::Value dim,
+ int off, mlir::Type ty, mlir::ConversionPatternRewriter &rewriter) const {
+ assert(box.getType().isa<mlir::LLVM::LLVMPointerType>() &&
+ "descriptor inquiry with runtime dim can only be done on descriptor "
+ "in memory");
+ mlir::LLVM::GEPOp p = genGEP(loc, boxTy.llvm, rewriter, box, 0,
+ static_cast<int>(kDimsPosInBox), dim, off);
+ auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
+ attachTBAATag(loadOp, boxTy.fir, nullptr, p);
+ return loadOp;
+}
+
+mlir::Value ConvertFIRToLLVMPattern::getDimFieldFromBox(
+ mlir::Location loc, TypePair boxTy, mlir::Value box, int dim, int off,
+ mlir::Type ty, mlir::ConversionPatternRewriter &rewriter) const {
+ if (box.getType().isa<mlir::LLVM::LLVMPointerType>()) {
+ mlir::LLVM::GEPOp p = genGEP(loc, boxTy.llvm, rewriter, box, 0,
+ static_cast<int>(kDimsPosInBox), dim, off);
+ auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
+ attachTBAATag(loadOp, boxTy.fir, nullptr, p);
+ return loadOp;
+ }
+ return rewriter.create<mlir::LLVM::ExtractValueOp>(
+ loc, box, llvm::ArrayRef<std::int64_t>{kDimsPosInBox, dim, off});
+}
+
+mlir::Value ConvertFIRToLLVMPattern::getStrideFromBox(
+ mlir::Location loc, TypePair boxTy, mlir::Value box, unsigned dim,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto idxTy = lowerTy().indexType();
+ return getDimFieldFromBox(loc, boxTy, box, dim, kDimStridePos, idxTy,
+ rewriter);
+}
+
+/// Read base address from a fir.box. Returned address has type ty.
+mlir::Value ConvertFIRToLLVMPattern::getBaseAddrFromBox(
+ mlir::Location loc, TypePair boxTy, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Type resultTy = ::getLlvmPtrType(boxTy.llvm.getContext());
+ return getValueFromBox(loc, boxTy, box, resultTy, rewriter, kAddrPosInBox);
+}
+
+mlir::Value ConvertFIRToLLVMPattern::getElementSizeFromBox(
+ mlir::Location loc, mlir::Type resultTy, TypePair boxTy, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ return getValueFromBox(loc, boxTy, box, resultTy, rewriter, kElemLenPosInBox);
+}
+
+// Get the element type given an LLVM type that is of the form
+// (array|struct|vector)+ and the provided indexes.
+mlir::Type ConvertFIRToLLVMPattern::getBoxEleTy(
+ mlir::Type type, llvm::ArrayRef<std::int64_t> indexes) const {
+ for (unsigned i : indexes) {
+ if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
+ assert(!t.isOpaque() && i < t.getBody().size());
+ type = t.getBody()[i];
+ } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
+ type = t.getElementType();
+ } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
+ type = t.getElementType();
+ } else {
+ fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
+ "request for invalid box element type");
+ }
+ }
+ return type;
+}
+
+// Return LLVM type of the object described by a fir.box of \p boxType.
+mlir::Type ConvertFIRToLLVMPattern::getLlvmObjectTypeFromBoxType(
+ mlir::Type boxType) const {
+ mlir::Type objectType = fir::dyn_cast_ptrOrBoxEleTy(boxType);
+ assert(objectType && "boxType must be a box type");
+ return this->convertType(objectType);
+}
+
+/// Read the address of the type descriptor from a box.
+mlir::Value ConvertFIRToLLVMPattern::loadTypeDescAddress(
+ mlir::Location loc, TypePair boxTy, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ unsigned typeDescFieldId = getTypeDescFieldId(boxTy.fir);
+ mlir::Type tdescType = lowerTy().convertTypeDescType(rewriter.getContext());
+ return getValueFromBox(loc, boxTy, box, tdescType, rewriter, typeDescFieldId);
+}
+
+// Load the attribute from the \p box and perform a check against \p maskValue.
+// The final comparison is implemented as `(attribute & maskValue) != 0`.
+mlir::Value ConvertFIRToLLVMPattern::genBoxAttributeCheck(
+ mlir::Location loc, TypePair boxTy, mlir::Value box,
+ mlir::ConversionPatternRewriter &rewriter, unsigned maskValue) const {
+ mlir::Type attrTy = rewriter.getI32Type();
+ mlir::Value attribute =
+ getValueFromBox(loc, boxTy, box, attrTy, rewriter, kAttributePosInBox);
+ mlir::LLVM::ConstantOp attrMask = genConstantOffset(loc, rewriter, maskValue);
+ auto maskRes =
+ rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
+ mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
+ return rewriter.create<mlir::LLVM::ICmpOp>(loc, mlir::LLVM::ICmpPredicate::ne,
+ maskRes, c0);
+}
+
+// Find the Block in which the alloca should be inserted.
+// The order to recursively find the proper block:
+// 1. An OpenMP Op that will be outlined.
+// 2. A LLVMFuncOp
+// 3. The first ancestor that is an OpenMP Op or a LLVMFuncOp
+mlir::Block *
+ConvertFIRToLLVMPattern::getBlockForAllocaInsert(mlir::Operation *op) const {
+ if (auto iface = mlir::dyn_cast<mlir::omp::OutlineableOpenMPOpInterface>(op))
+ return iface.getAllocaBlock();
+ if (auto llvmFuncOp = mlir::dyn_cast<mlir::LLVM::LLVMFuncOp>(op))
+ return &llvmFuncOp.front();
+ return getBlockForAllocaInsert(op->getParentOp());
+}
+
+// Generate an alloca of size 1 for an object of type \p llvmObjectTy in the
+// allocation address space provided for the architecture in the DataLayout
+// specification. If the address space is different from the devices
+// program address space we perform a cast. In the case of most architectures
+// the program and allocation address space will be the default of 0 and no
+// cast will be emitted.
+mlir::Value ConvertFIRToLLVMPattern::genAllocaAndAddrCastWithType(
+ mlir::Location loc, mlir::Type llvmObjectTy, unsigned alignment,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto thisPt = rewriter.saveInsertionPoint();
+ mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
+ if (mlir::isa<mlir::omp::DeclareReductionOp>(parentOp)) {
+ // DeclareReductionOp has multiple child regions. We want to get the first
+ // block of whichever of those regions we are currently in
+ mlir::Region *parentRegion = rewriter.getInsertionBlock()->getParent();
+ rewriter.setInsertionPointToStart(&parentRegion->front());
+ } else {
+ mlir::Block *insertBlock = getBlockForAllocaInsert(parentOp);
+ rewriter.setInsertionPointToStart(insertBlock);
+ }
+ auto size = genI32Constant(loc, rewriter, 1);
+ unsigned allocaAs = getAllocaAddressSpace(rewriter);
+ unsigned programAs = getProgramAddressSpace(rewriter);
+
+ mlir::Value al = rewriter.create<mlir::LLVM::AllocaOp>(
+ loc, ::getLlvmPtrType(llvmObjectTy.getContext(), allocaAs), llvmObjectTy,
+ size, alignment);
+
+  // If the allocation address space is not the same as the program address
+  // space, then we must emit a cast to the program address space before use.
+  // An example case would be on AMDGPU, where the allocation address space is
+  // the numeric value 5 (private), and the program address space is 0
+  // (generic).
+ if (allocaAs != programAs) {
+ al = rewriter.create<mlir::LLVM::AddrSpaceCastOp>(
+ loc, ::getLlvmPtrType(llvmObjectTy.getContext(), programAs), al);
+ }
+
+ rewriter.restoreInsertionPoint(thisPt);
+ return al;
+}
+
+unsigned ConvertFIRToLLVMPattern::getAllocaAddressSpace(
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
+ assert(parentOp != nullptr &&
+ "expected insertion block to have parent operation");
+ if (auto module = parentOp->getParentOfType<mlir::ModuleOp>())
+ if (mlir::Attribute addrSpace =
+ mlir::DataLayout(module).getAllocaMemorySpace())
+ return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt();
+ return defaultAddressSpace;
+}
+
+unsigned ConvertFIRToLLVMPattern::getProgramAddressSpace(
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
+ assert(parentOp != nullptr &&
+ "expected insertion block to have parent operation");
+ if (auto module = parentOp->getParentOfType<mlir::ModuleOp>())
+ if (mlir::Attribute addrSpace =
+ mlir::DataLayout(module).getProgramMemorySpace())
+ return llvm::cast<mlir::IntegerAttr>(addrSpace).getUInt();
+ return defaultAddressSpace;
+}
+
+} // namespace fir
diff --git a/flang/lib/Optimizer/Dialect/FIRAttr.cpp b/flang/lib/Optimizer/Dialect/FIRAttr.cpp
index 0cf8dfb9f784..e43710f5627e 100644
--- a/flang/lib/Optimizer/Dialect/FIRAttr.cpp
+++ b/flang/lib/Optimizer/Dialect/FIRAttr.cpp
@@ -299,5 +299,6 @@ void FIROpsDialect::registerAttributes() {
addAttributes<ClosedIntervalAttr, ExactTypeAttr, FortranVariableFlagsAttr,
LowerBoundAttr, PointIntervalAttr, RealAttr, SubclassAttr,
UpperBoundAttr, CUDADataAttributeAttr, CUDAProcAttributeAttr,
- CUDALaunchBoundsAttr, CUDAClusterDimsAttr>();
+ CUDALaunchBoundsAttr, CUDAClusterDimsAttr,
+ CUDADataTransferKindAttr>();
}
diff --git a/flang/lib/Parser/Fortran-parsers.cpp b/flang/lib/Parser/Fortran-parsers.cpp
index fc81a477897a..21185694227d 100644
--- a/flang/lib/Parser/Fortran-parsers.cpp
+++ b/flang/lib/Parser/Fortran-parsers.cpp
@@ -1261,24 +1261,27 @@ TYPE_PARSER(construct<StatOrErrmsg>("STAT =" >> statVariable) ||
// Directives, extensions, and deprecated statements
// !DIR$ IGNORE_TKR [ [(tkrdmac...)] name ]...
// !DIR$ LOOP COUNT (n1[, n2]...)
-// !DIR$ name...
+// !DIR$ name[=value] [, name[=value]]...
+// !DIR$ <anything else>
constexpr auto ignore_tkr{
- "DIR$ IGNORE_TKR" >> optionalList(construct<CompilerDirective::IgnoreTKR>(
- maybe(parenthesized(many(letter))), name))};
+ "IGNORE_TKR" >> optionalList(construct<CompilerDirective::IgnoreTKR>(
+ maybe(parenthesized(many(letter))), name))};
constexpr auto loopCount{
- "DIR$ LOOP COUNT" >> construct<CompilerDirective::LoopCount>(
- parenthesized(nonemptyList(digitString64)))};
-constexpr auto assumeAligned{"DIR$ ASSUME_ALIGNED" >>
+ "LOOP COUNT" >> construct<CompilerDirective::LoopCount>(
+ parenthesized(nonemptyList(digitString64)))};
+constexpr auto assumeAligned{"ASSUME_ALIGNED" >>
optionalList(construct<CompilerDirective::AssumeAligned>(
indirect(designator), ":"_tok >> digitString64))};
-TYPE_PARSER(beginDirective >>
- sourced(construct<CompilerDirective>(ignore_tkr) ||
- construct<CompilerDirective>(loopCount) ||
- construct<CompilerDirective>(assumeAligned) ||
- construct<CompilerDirective>(
- "DIR$" >> many(construct<CompilerDirective::NameValue>(name,
- maybe(("="_tok || ":"_tok) >> digitString64))))) /
- endOfStmt)
+TYPE_PARSER(beginDirective >> "DIR$ "_tok >>
+ sourced((construct<CompilerDirective>(ignore_tkr) ||
+ construct<CompilerDirective>(loopCount) ||
+ construct<CompilerDirective>(assumeAligned) ||
+ construct<CompilerDirective>(
+ many(construct<CompilerDirective::NameValue>(
+ name, maybe(("="_tok || ":"_tok) >> digitString64))))) /
+ endOfStmt ||
+ construct<CompilerDirective>(pure<CompilerDirective::Unrecognized>()) /
+ SkipTo<'\n'>{}))
TYPE_PARSER(extension<LanguageFeature::CrayPointer>(
"nonstandard usage: based POINTER"_port_en_US,
diff --git a/flang/lib/Parser/tools.cpp b/flang/lib/Parser/tools.cpp
index 899fb0f069a9..6e5f1ed2fc66 100644
--- a/flang/lib/Parser/tools.cpp
+++ b/flang/lib/Parser/tools.cpp
@@ -123,6 +123,10 @@ const Name &GetFirstName(const Variable &x) {
x.u);
}
+const Name &GetFirstName(const EntityDecl &x) {
+ return std::get<ObjectName>(x.t);
+}
+
const CoindexedNamedObject *GetCoindexedNamedObject(const DataRef &base) {
return common::visit(
common::visitors{
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index baba4863f577..c06458833f07 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -1827,6 +1827,10 @@ public:
[&](const std::list<CompilerDirective::NameValue> &names) {
Walk("!DIR$ ", names, " ");
},
+ [&](const CompilerDirective::Unrecognized &) {
+ Word("!DIR$ ");
+ Word(x.source.ToString());
+ },
},
x.u);
Put('\n');
diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp
index d625f8c2f7fc..51a16ee155fa 100644
--- a/flang/lib/Semantics/check-call.cpp
+++ b/flang/lib/Semantics/check-call.cpp
@@ -1588,6 +1588,9 @@ static void CheckReduce(
procChars->dummyArguments.size() != 2 || !procChars->functionResult) {
messages.Say(
"OPERATION= argument of REDUCE() must be a pure function of two data arguments"_err_en_US);
+ } else if (procChars->attrs.test(characteristics::Procedure::Attr::BindC)) {
+ messages.Say(
+ "A BIND(C) OPERATION= argument of REDUCE() is not supported"_err_en_US);
} else if (!result || result->Rank() != 0) {
messages.Say(
"OPERATION= argument of REDUCE() must be a scalar function"_err_en_US);
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index 581371ff7a00..dec8fee774c5 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -2346,7 +2346,14 @@ void CheckHelper::CheckProcBinding(
"Intrinsic procedure '%s' is not a specific intrinsic permitted for use in the definition of binding '%s'"_err_en_US,
binding.symbol().name(), symbol.name());
}
- if (const Symbol *overridden{FindOverriddenBinding(symbol)}) {
+ bool isInaccessibleDeferred{false};
+ if (const Symbol *
+ overridden{FindOverriddenBinding(symbol, isInaccessibleDeferred)}) {
+ if (isInaccessibleDeferred) {
+ SayWithDeclaration(*overridden,
+ "Override of PRIVATE DEFERRED '%s' must appear in its module"_err_en_US,
+ symbol.name());
+ }
if (overridden->attrs().test(Attr::NON_OVERRIDABLE)) {
SayWithDeclaration(*overridden,
"Override of NON_OVERRIDABLE '%s' is not permitted"_err_en_US,
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index 6d58013b87d2..27af192f606b 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -1649,6 +1649,15 @@ void OmpAttributeVisitor::ResolveSeqLoopIndexInParallelOrTaskConstruct(
break;
}
}
+ // If this symbol already has a data-sharing attribute then there is nothing
+ // to do here.
+ if (const Symbol * symbol{iv.symbol}) {
+ for (auto symMap : targetIt->objectWithDSA) {
+ if (symMap.first->name() == symbol->name()) {
+ return;
+ }
+ }
+ }
// If this symbol is already Private or Firstprivate in the enclosing
// OpenMP parallel or task then there is nothing to do here.
if (auto *symbol{targetIt->scope.FindSymbol(iv.source)}) {
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index f89323f3e54a..2e88a2daff2c 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -955,7 +955,7 @@ public:
void Post(const parser::TargetStmt &) { objectDeclAttr_ = std::nullopt; }
void Post(const parser::DimensionStmt::Declaration &);
void Post(const parser::CodimensionDecl &);
- bool Pre(const parser::TypeDeclarationStmt &) { return BeginDecl(); }
+ bool Pre(const parser::TypeDeclarationStmt &);
void Post(const parser::TypeDeclarationStmt &);
void Post(const parser::IntegerTypeSpec &);
void Post(const parser::IntrinsicTypeSpec::Real &);
@@ -1202,6 +1202,7 @@ private:
bool MustBeScalar(const Symbol &symbol) const {
return mustBeScalar_.find(symbol) != mustBeScalar_.end();
}
+ void DeclareIntrinsic(const parser::Name &);
};
// Resolve construct entities and statement entities.
@@ -4550,6 +4551,20 @@ void DeclarationVisitor::CheckAccessibility(
}
}
+bool DeclarationVisitor::Pre(const parser::TypeDeclarationStmt &x) {
+ BeginDecl();
+ // If INTRINSIC appears as an attr-spec, handle it now as if the
+ // names had appeared on an INTRINSIC attribute statement beforehand.
+ for (const auto &attr : std::get<std::list<parser::AttrSpec>>(x.t)) {
+ if (std::holds_alternative<parser::Intrinsic>(attr.u)) {
+ for (const auto &decl : std::get<std::list<parser::EntityDecl>>(x.t)) {
+ DeclareIntrinsic(parser::GetFirstName(decl));
+ }
+ break;
+ }
+ }
+ return true;
+}
void DeclarationVisitor::Post(const parser::TypeDeclarationStmt &) {
EndDecl();
}
@@ -4571,6 +4586,7 @@ bool DeclarationVisitor::Pre(const parser::Initialization &) {
void DeclarationVisitor::Post(const parser::EntityDecl &x) {
const auto &name{std::get<parser::ObjectName>(x.t)};
Attrs attrs{attrs_ ? HandleSaveName(name.source, *attrs_) : Attrs{}};
+ attrs.set(Attr::INTRINSIC, false); // dealt with in Pre(TypeDeclarationStmt)
Symbol &symbol{DeclareUnknownEntity(name, attrs)};
symbol.ReplaceName(name.source);
SetCUDADataAttr(name.source, symbol, cudaDataAttr());
@@ -4646,23 +4662,23 @@ bool DeclarationVisitor::Pre(const parser::OldParameterStmt &x) {
bool DeclarationVisitor::Pre(const parser::NamedConstantDef &x) {
auto &name{std::get<parser::NamedConstant>(x.t).v};
auto &symbol{HandleAttributeStmt(Attr::PARAMETER, name)};
- if (!ConvertToObjectEntity(symbol) ||
- symbol.test(Symbol::Flag::CrayPointer) ||
+ ConvertToObjectEntity(symbol);
+ auto *details{symbol.detailsIf<ObjectEntityDetails>()};
+ if (!details || symbol.test(Symbol::Flag::CrayPointer) ||
symbol.test(Symbol::Flag::CrayPointee)) {
SayWithDecl(
name, symbol, "PARAMETER attribute not allowed on '%s'"_err_en_US);
return false;
}
const auto &expr{std::get<parser::ConstantExpr>(x.t)};
- auto &details{symbol.get<ObjectEntityDetails>()};
- if (details.init() || symbol.test(Symbol::Flag::InDataStmt)) {
+ if (details->init() || symbol.test(Symbol::Flag::InDataStmt)) {
Say(name, "Named constant '%s' already has a value"_err_en_US);
}
if (inOldStyleParameterStmt_) {
// non-standard extension PARAMETER statement (no parentheses)
Walk(expr);
auto folded{EvaluateExpr(expr)};
- if (details.type()) {
+ if (details->type()) {
SayWithDecl(name, symbol,
"Alternative style PARAMETER '%s' must not already have an explicit type"_err_en_US);
} else if (folded) {
@@ -4674,9 +4690,9 @@ bool DeclarationVisitor::Pre(const parser::NamedConstantDef &x) {
} else if (auto shape{ToArraySpec(
GetFoldingContext(), evaluate::GetShape(*folded))}) {
// The type of the named constant is assumed from the expression.
- details.set_type(*type);
- details.set_init(std::move(*folded));
- details.set_shape(std::move(*shape));
+ details->set_type(*type);
+ details->set_init(std::move(*folded));
+ details->set_shape(std::move(*shape));
} else {
Say(at, "The expression must have constant shape"_err_en_US);
}
@@ -4693,7 +4709,7 @@ bool DeclarationVisitor::Pre(const parser::NamedConstantDef &x) {
Walk(expr);
if (auto converted{EvaluateNonPointerInitializer(
symbol, expr, expr.thing.value().source)}) {
- details.set_init(std::move(*converted));
+ details->set_init(std::move(*converted));
}
}
return false;
@@ -4811,45 +4827,47 @@ bool DeclarationVisitor::Pre(const parser::IntentStmt &x) {
HandleAttributeStmt(IntentSpecToAttr(intentSpec), names);
}
bool DeclarationVisitor::Pre(const parser::IntrinsicStmt &x) {
- HandleAttributeStmt(Attr::INTRINSIC, x.v);
for (const auto &name : x.v) {
- if (!IsIntrinsic(name.source, std::nullopt)) {
- Say(name.source, "'%s' is not a known intrinsic procedure"_err_en_US);
- }
- auto &symbol{DEREF(FindSymbol(name))};
- if (symbol.has<GenericDetails>()) {
- // Generic interface is extending intrinsic; ok
- } else if (!ConvertToProcEntity(symbol)) {
- SayWithDecl(
- name, symbol, "INTRINSIC attribute not allowed on '%s'"_err_en_US);
- } else if (symbol.attrs().test(Attr::EXTERNAL)) { // C840
+ DeclareIntrinsic(name);
+ }
+ return false;
+}
+void DeclarationVisitor::DeclareIntrinsic(const parser::Name &name) {
+ HandleAttributeStmt(Attr::INTRINSIC, name);
+ if (!IsIntrinsic(name.source, std::nullopt)) {
+ Say(name.source, "'%s' is not a known intrinsic procedure"_err_en_US);
+ }
+ auto &symbol{DEREF(FindSymbol(name))};
+ if (symbol.has<GenericDetails>()) {
+ // Generic interface is extending intrinsic; ok
+ } else if (!ConvertToProcEntity(symbol)) {
+ SayWithDecl(
+ name, symbol, "INTRINSIC attribute not allowed on '%s'"_err_en_US);
+ } else if (symbol.attrs().test(Attr::EXTERNAL)) { // C840
+ Say(symbol.name(),
+ "Symbol '%s' cannot have both EXTERNAL and INTRINSIC attributes"_err_en_US,
+ symbol.name());
+ } else {
+ if (symbol.GetType()) {
+ // These warnings are worded so that they should make sense in either
+ // order.
Say(symbol.name(),
- "Symbol '%s' cannot have both EXTERNAL and INTRINSIC attributes"_err_en_US,
- symbol.name());
- } else {
- if (symbol.GetType()) {
- // These warnings are worded so that they should make sense in either
- // order.
- Say(symbol.name(),
- "Explicit type declaration ignored for intrinsic function '%s'"_warn_en_US,
- symbol.name())
- .Attach(name.source,
- "INTRINSIC statement for explicitly-typed '%s'"_en_US,
- name.source);
- }
- if (!symbol.test(Symbol::Flag::Function) &&
- !symbol.test(Symbol::Flag::Subroutine)) {
- if (context().intrinsics().IsIntrinsicFunction(
- name.source.ToString())) {
- symbol.set(Symbol::Flag::Function);
- } else if (context().intrinsics().IsIntrinsicSubroutine(
- name.source.ToString())) {
- symbol.set(Symbol::Flag::Subroutine);
- }
+ "Explicit type declaration ignored for intrinsic function '%s'"_warn_en_US,
+ symbol.name())
+ .Attach(name.source,
+ "INTRINSIC statement for explicitly-typed '%s'"_en_US,
+ name.source);
+ }
+ if (!symbol.test(Symbol::Flag::Function) &&
+ !symbol.test(Symbol::Flag::Subroutine)) {
+ if (context().intrinsics().IsIntrinsicFunction(name.source.ToString())) {
+ symbol.set(Symbol::Flag::Function);
+ } else if (context().intrinsics().IsIntrinsicSubroutine(
+ name.source.ToString())) {
+ symbol.set(Symbol::Flag::Subroutine);
}
}
}
- return false;
}
bool DeclarationVisitor::Pre(const parser::OptionalStmt &x) {
return CheckNotInBlock("OPTIONAL") && // C1107
diff --git a/flang/lib/Semantics/tools.cpp b/flang/lib/Semantics/tools.cpp
index 0484baae93cd..df435906af68 100644
--- a/flang/lib/Semantics/tools.cpp
+++ b/flang/lib/Semantics/tools.cpp
@@ -403,6 +403,18 @@ const Symbol &BypassGeneric(const Symbol &symbol) {
return symbol;
}
+const Symbol &GetCrayPointer(const Symbol &crayPointee) {
+ const Symbol *found{nullptr};
+ for (const auto &[pointee, pointer] :
+ crayPointee.GetUltimate().owner().crayPointers()) {
+ if (pointee == crayPointee.name()) {
+ found = &pointer.get();
+ break;
+ }
+ }
+ return DEREF(found);
+}
+
bool ExprHasTypeCategory(
const SomeExpr &expr, const common::TypeCategory &type) {
auto dynamicType{expr.GetType()};
@@ -516,7 +528,9 @@ const Symbol *FindSubprogram(const Symbol &symbol) {
symbol.details());
}
-const Symbol *FindOverriddenBinding(const Symbol &symbol) {
+const Symbol *FindOverriddenBinding(
+ const Symbol &symbol, bool &isInaccessibleDeferred) {
+ isInaccessibleDeferred = false;
if (symbol.has<ProcBindingDetails>()) {
if (const DeclTypeSpec * parentType{FindParentTypeSpec(symbol.owner())}) {
if (const DerivedTypeSpec * parentDerived{parentType->AsDerived()}) {
@@ -525,8 +539,11 @@ const Symbol *FindOverriddenBinding(const Symbol &symbol) {
overridden{parentScope->FindComponent(symbol.name())}) {
// 7.5.7.3 p1: only accessible bindings are overridden
if (!overridden->attrs().test(Attr::PRIVATE) ||
- (FindModuleContaining(overridden->owner()) ==
- FindModuleContaining(symbol.owner()))) {
+ FindModuleContaining(overridden->owner()) ==
+ FindModuleContaining(symbol.owner())) {
+ return overridden;
+ } else if (overridden->attrs().test(Attr::DEFERRED)) {
+ isInaccessibleDeferred = true;
return overridden;
}
}
@@ -1275,6 +1292,8 @@ static bool StopAtComponentPre(const Symbol &component) {
return !IsPointer(component);
} else if constexpr (componentKind == ComponentKind::PotentialAndPointer) {
return true;
+ } else {
+ DIE("unexpected ComponentKind");
}
}
diff --git a/flang/runtime/CMakeLists.txt b/flang/runtime/CMakeLists.txt
index 7dd60b5edcd5..c0e4cff698e3 100644
--- a/flang/runtime/CMakeLists.txt
+++ b/flang/runtime/CMakeLists.txt
@@ -129,6 +129,7 @@ set(sources
exceptions.cpp
execute.cpp
extensions.cpp
+ external-unit.cpp
extrema.cpp
file.cpp
findloc.cpp
@@ -149,8 +150,10 @@ set(sources
numeric.cpp
pointer.cpp
product.cpp
+ pseudo-unit.cpp
ragged.cpp
random.cpp
+ reduce.cpp
reduction.cpp
stat.cpp
stop.cpp
@@ -171,6 +174,7 @@ set(sources
option(FLANG_EXPERIMENTAL_CUDA_RUNTIME
"Compile Fortran runtime as CUDA sources (experimental)" OFF
)
+set(FLANG_LIBCUDACXX_PATH "" CACHE PATH "Path to libcu++ package installation")
# List of files that are buildable for all devices.
set(supported_files
@@ -178,22 +182,38 @@ set(supported_files
allocatable.cpp
array-constructor.cpp
assign.cpp
+ buffer.cpp
character.cpp
+ connection.cpp
copy.cpp
derived-api.cpp
derived.cpp
descriptor.cpp
+ descriptor-io.cpp
dot-product.cpp
+ edit-input.cpp
+ edit-output.cpp
+ environment.cpp
extrema.cpp
+ external-unit.cpp
findloc.cpp
+ format.cpp
inquiry.cpp
+ internal-unit.cpp
+ io-api.cpp
+ io-error.cpp
+ io-stmt.cpp
+ iostat.cpp
matmul-transpose.cpp
matmul.cpp
memory.cpp
misc-intrinsic.cpp
+ namelist.cpp
+ non-tbp-dio.cpp
numeric.cpp
pointer.cpp
product.cpp
+ pseudo-unit.cpp
ragged.cpp
stat.cpp
sum.cpp
@@ -203,6 +223,8 @@ set(supported_files
transformational.cpp
type-code.cpp
type-info.cpp
+ unit.cpp
+ utf.cpp
)
if (FLANG_EXPERIMENTAL_CUDA_RUNTIME)
@@ -239,6 +261,13 @@ if (FLANG_EXPERIMENTAL_CUDA_RUNTIME)
set_source_files_properties(${supported_files} PROPERTIES COMPILE_OPTIONS
"${CUDA_COMPILE_OPTIONS}"
)
+
+ if (EXISTS "${FLANG_LIBCUDACXX_PATH}/include")
+ # When using libcudacxx headers files, we have to use them
+ # for all files of F18 runtime.
+ include_directories(AFTER ${FLANG_LIBCUDACXX_PATH}/include)
+ add_compile_definitions(RT_USE_LIBCUDACXX=1)
+ endif()
endif()
set(FLANG_EXPERIMENTAL_OMP_OFFLOAD_BUILD "off" CACHE STRING
diff --git a/flang/runtime/buffer.cpp b/flang/runtime/buffer.cpp
index 15c83bfd2492..7b4869d69c2e 100644
--- a/flang/runtime/buffer.cpp
+++ b/flang/runtime/buffer.cpp
@@ -10,14 +10,20 @@
#include <algorithm>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
// Here's a very old trick for shifting circular buffer data cheaply
// without a need for a temporary array.
void LeftShiftBufferCircularly(
char *buffer, std::size_t bytes, std::size_t shift) {
// Assume that we start with "efgabcd" and the left shift is 3.
+ RT_DIAG_PUSH
+ RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
std::reverse(buffer, buffer + shift); // "gfeabcd"
std::reverse(buffer, buffer + bytes); // "dcbaefg"
std::reverse(buffer, buffer + bytes - shift); // "abcdefg"
+ RT_DIAG_POP
}
+
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/buffer.h b/flang/runtime/buffer.h
index 93fda36f500d..ca1baea12efa 100644
--- a/flang/runtime/buffer.h
+++ b/flang/runtime/buffer.h
@@ -11,6 +11,7 @@
#ifndef FORTRAN_RUNTIME_BUFFER_H_
#define FORTRAN_RUNTIME_BUFFER_H_
+#include "freestanding-tools.h"
#include "io-error.h"
#include "flang/Runtime/memory.h"
#include <algorithm>
@@ -19,7 +20,8 @@
namespace Fortran::runtime::io {
-void LeftShiftBufferCircularly(char *, std::size_t bytes, std::size_t shift);
+RT_API_ATTRS void LeftShiftBufferCircularly(
+ char *, std::size_t bytes, std::size_t shift);
// Maintains a view of a contiguous region of a file in a memory buffer.
// The valid data in the buffer may be circular, but any active frame
@@ -48,22 +50,24 @@ template <typename STORE, std::size_t minBuffer = 65536> class FileFrame {
public:
using FileOffset = std::int64_t;
- ~FileFrame() { FreeMemoryAndNullify(buffer_); }
+ RT_API_ATTRS ~FileFrame() { FreeMemoryAndNullify(buffer_); }
// The valid data in the buffer begins at buffer_[start_] and proceeds
// with possible wrap-around for length_ bytes. The current frame
// is offset by frame_ bytes into that region and is guaranteed to
// be contiguous for at least as many bytes as were requested.
- FileOffset FrameAt() const { return fileOffset_ + frame_; }
- char *Frame() const { return buffer_ + start_ + frame_; }
- std::size_t FrameLength() const {
+ RT_API_ATTRS FileOffset FrameAt() const { return fileOffset_ + frame_; }
+ RT_API_ATTRS char *Frame() const { return buffer_ + start_ + frame_; }
+ RT_API_ATTRS std::size_t FrameLength() const {
return std::min<std::size_t>(length_ - frame_, size_ - (start_ + frame_));
}
- std::size_t BytesBufferedBeforeFrame() const { return frame_ - start_; }
+ RT_API_ATTRS std::size_t BytesBufferedBeforeFrame() const {
+ return frame_ - start_;
+ }
// Returns a short frame at a non-fatal EOF. Can return a long frame as well.
- std::size_t ReadFrame(
+ RT_API_ATTRS std::size_t ReadFrame(
FileOffset at, std::size_t bytes, IoErrorHandler &handler) {
Flush(handler);
Reallocate(bytes, handler);
@@ -92,7 +96,8 @@ public:
return FrameLength();
}
- void WriteFrame(FileOffset at, std::size_t bytes, IoErrorHandler &handler) {
+ RT_API_ATTRS void WriteFrame(
+ FileOffset at, std::size_t bytes, IoErrorHandler &handler) {
Reallocate(bytes, handler);
std::int64_t newFrame{at - fileOffset_};
if (!dirty_ || newFrame < 0 || newFrame > length_) {
@@ -110,7 +115,7 @@ public:
length_ = std::max<std::int64_t>(length_, frame_ + bytes);
}
- void Flush(IoErrorHandler &handler, std::int64_t keep = 0) {
+ RT_API_ATTRS void Flush(IoErrorHandler &handler, std::int64_t keep = 0) {
if (dirty_) {
while (length_ > keep) {
std::size_t chunk{
@@ -128,7 +133,7 @@ public:
}
}
- void TruncateFrame(std::int64_t at, IoErrorHandler &handler) {
+ RT_API_ATTRS void TruncateFrame(std::int64_t at, IoErrorHandler &handler) {
RUNTIME_CHECK(handler, !dirty_);
if (at <= fileOffset_) {
Reset(at);
@@ -138,9 +143,10 @@ public:
}
private:
- STORE &Store() { return static_cast<STORE &>(*this); }
+ RT_API_ATTRS STORE &Store() { return static_cast<STORE &>(*this); }
- void Reallocate(std::int64_t bytes, const Terminator &terminator) {
+ RT_API_ATTRS void Reallocate(
+ std::int64_t bytes, const Terminator &terminator) {
if (bytes > size_) {
char *old{buffer_};
auto oldSize{size_};
@@ -160,13 +166,14 @@ private:
}
}
- void Reset(FileOffset at) {
+ RT_API_ATTRS void Reset(FileOffset at) {
start_ = length_ = frame_ = 0;
fileOffset_ = at;
dirty_ = false;
}
- void DiscardLeadingBytes(std::int64_t n, const Terminator &terminator) {
+ RT_API_ATTRS void DiscardLeadingBytes(
+ std::int64_t n, const Terminator &terminator) {
RUNTIME_CHECK(terminator, length_ >= n);
length_ -= n;
if (length_ == 0) {
@@ -185,19 +192,20 @@ private:
fileOffset_ += n;
}
- void MakeDataContiguous(IoErrorHandler &handler, std::size_t bytes) {
+ RT_API_ATTRS void MakeDataContiguous(
+ IoErrorHandler &handler, std::size_t bytes) {
if (static_cast<std::int64_t>(start_ + bytes) > size_) {
// Frame would wrap around; shift current data (if any) to force
// contiguity.
RUNTIME_CHECK(handler, length_ < size_);
if (start_ + length_ <= size_) {
// [......abcde..] -> [abcde........]
- std::memmove(buffer_, buffer_ + start_, length_);
+ runtime::memmove(buffer_, buffer_ + start_, length_);
} else {
// [cde........ab] -> [abcde........]
auto n{start_ + length_ - size_}; // 3 for cde
RUNTIME_CHECK(handler, length_ >= n);
- std::memmove(buffer_ + n, buffer_ + start_, length_ - n); // cdeab
+ runtime::memmove(buffer_ + n, buffer_ + start_, length_ - n); // cdeab
LeftShiftBufferCircularly(buffer_, length_, n); // abcde
}
start_ = 0;
diff --git a/flang/runtime/complex-reduction.c b/flang/runtime/complex-reduction.c
index c91d12539911..7654de8080a1 100644
--- a/flang/runtime/complex-reduction.c
+++ b/flang/runtime/complex-reduction.c
@@ -155,3 +155,25 @@ ADAPT_REDUCTION(DotProductComplex10, long_double_Complex_t,
ADAPT_REDUCTION(DotProductComplex16, CFloat128ComplexType, CppComplexFloat128,
CMPLXF128, DOT_PRODUCT_ARGS, DOT_PRODUCT_ARG_NAMES)
#endif
+
+/* REDUCE() */
+#define RARGS REDUCE_ARGS(float_Complex_t)
+ADAPT_REDUCTION(ReduceComplex4, float_Complex_t, CppComplexFloat, CMPLXF, RARGS,
+ REDUCE_ARG_NAMES)
+#undef RARGS
+#define RARGS REDUCE_ARGS(double_Complex_t)
+ADAPT_REDUCTION(ReduceComplex8, double_Complex_t, CppComplexDouble, CMPLX,
+ RARGS, REDUCE_ARG_NAMES)
+#undef RARGS
+#if LDBL_MANT_DIG == 64
+#define RARGS REDUCE_ARGS(long_double_Complex_t)
+ADAPT_REDUCTION(ReduceComplex10, long_double_Complex_t, CppComplexLongDouble,
+ CMPLXL, RARGS, REDUCE_ARG_NAMES)
+#undef RARGS
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+#define RARGS REDUCE_ARGS(CFloat128ComplexType)
+ADAPT_REDUCTION(ReduceComplex16, CFloat128ComplexType, CppComplexFloat128,
+ CMPLXF128, RARGS, REDUCE_ARG_NAMES)
+#undef RARGS
+#endif
diff --git a/flang/runtime/complex-reduction.h b/flang/runtime/complex-reduction.h
index 1d37b235d519..98b20d1e592b 100644
--- a/flang/runtime/complex-reduction.h
+++ b/flang/runtime/complex-reduction.h
@@ -69,4 +69,49 @@ long_double_Complex_t RTNAME(DotProductComplex10)(DOT_PRODUCT_ARGS);
CFloat128ComplexType RTNAME(DotProductComplex16)(DOT_PRODUCT_ARGS);
#endif
+#define REDUCE_ARGS(T) \
+ T##_op operation, const struct CppDescriptor *x, \
+ const struct CppDescriptor *y, const char *source, int line, \
+ int dim /*=0*/, const struct CppDescriptor *mask /*=NULL*/, \
+ const T *identity /*=NULL*/, _Bool ordered /*=true*/
+#define REDUCE_ARG_NAMES \
+ operation, x, y, source, line, dim, mask, identity, ordered
+
+typedef float_Complex_t (*float_Complex_t_op)(
+ const float_Complex_t *, const float_Complex_t *);
+typedef double_Complex_t (*double_Complex_t_op)(
+ const double_Complex_t *, const double_Complex_t *);
+typedef long_double_Complex_t (*long_double_Complex_t_op)(
+ const long_double_Complex_t *, const long_double_Complex_t *);
+
+float_Complex_t RTNAME(ReduceComplex2)(REDUCE_ARGS(float_Complex_t));
+float_Complex_t RTNAME(ReduceComplex3)(REDUCE_ARGS(float_Complex_t));
+float_Complex_t RTNAME(ReduceComplex4)(REDUCE_ARGS(float_Complex_t));
+double_Complex_t RTNAME(ReduceComplex8)(REDUCE_ARGS(double_Complex_t));
+long_double_Complex_t RTNAME(ReduceComplex10)(
+ REDUCE_ARGS(long_double_Complex_t));
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+typedef CFloat128ComplexType (*CFloat128ComplexType_op)(
+ const CFloat128ComplexType *, const CFloat128ComplexType *);
+CFloat128ComplexType RTNAME(ReduceComplex16)(REDUCE_ARGS(CFloat128ComplexType));
+#endif
+
+#define REDUCE_DIM_ARGS(T) \
+ struct CppDescriptor *result, T##_op operation, \
+ const struct CppDescriptor *x, const struct CppDescriptor *y, \
+ const char *source, int line, int dim, \
+ const struct CppDescriptor *mask /*=NULL*/, const T *identity /*=NULL*/, \
+ _Bool ordered /*=true*/
+#define REDUCE_DIM_ARG_NAMES \
+ result, operation, x, y, source, line, dim, mask, identity, ordered
+
+void RTNAME(ReduceComplex2Dim)(REDUCE_DIM_ARGS(float_Complex_t));
+void RTNAME(ReduceComplex3Dim)(REDUCE_DIM_ARGS(float_Complex_t));
+void RTNAME(ReduceComplex4Dim)(REDUCE_DIM_ARGS(float_Complex_t));
+void RTNAME(ReduceComplex8Dim)(REDUCE_DIM_ARGS(double_Complex_t));
+void RTNAME(ReduceComplex10Dim)(REDUCE_DIM_ARGS(long_double_Complex_t));
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+void RTNAME(ReduceComplex16Dim)(REDUCE_DIM_ARGS(CFloat128ComplexType));
+#endif
+
#endif // FORTRAN_RUNTIME_COMPLEX_REDUCTION_H_
diff --git a/flang/runtime/connection.cpp b/flang/runtime/connection.cpp
index 91ac9a0e14e4..f24f0e832eb4 100644
--- a/flang/runtime/connection.cpp
+++ b/flang/runtime/connection.cpp
@@ -12,30 +12,31 @@
#include <algorithm>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
-std::size_t ConnectionState::RemainingSpaceInRecord() const {
+RT_API_ATTRS std::size_t ConnectionState::RemainingSpaceInRecord() const {
auto recl{recordLength.value_or(openRecl.value_or(
executionEnvironment.listDirectedOutputLineLengthLimit))};
return positionInRecord >= recl ? 0 : recl - positionInRecord;
}
-bool ConnectionState::NeedAdvance(std::size_t width) const {
+RT_API_ATTRS bool ConnectionState::NeedAdvance(std::size_t width) const {
return positionInRecord > 0 && width > RemainingSpaceInRecord();
}
-bool ConnectionState::IsAtEOF() const {
+RT_API_ATTRS bool ConnectionState::IsAtEOF() const {
return endfileRecordNumber && currentRecordNumber >= *endfileRecordNumber;
}
-bool ConnectionState::IsAfterEndfile() const {
+RT_API_ATTRS bool ConnectionState::IsAfterEndfile() const {
return endfileRecordNumber && currentRecordNumber > *endfileRecordNumber;
}
-void ConnectionState::HandleAbsolutePosition(std::int64_t n) {
+RT_API_ATTRS void ConnectionState::HandleAbsolutePosition(std::int64_t n) {
positionInRecord = std::max(n, std::int64_t{0}) + leftTabLimit.value_or(0);
}
-void ConnectionState::HandleRelativePosition(std::int64_t n) {
+RT_API_ATTRS void ConnectionState::HandleRelativePosition(std::int64_t n) {
positionInRecord = std::max(leftTabLimit.value_or(0), positionInRecord + n);
}
@@ -57,4 +58,6 @@ SavedPosition::~SavedPosition() {
conn.pinnedFrame = saved_.pinnedFrame;
}
}
+
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/connection.h b/flang/runtime/connection.h
index c41970d47e7b..6f1ea90a160e 100644
--- a/flang/runtime/connection.h
+++ b/flang/runtime/connection.h
@@ -31,12 +31,12 @@ struct ConnectionAttributes {
unsigned char internalIoCharKind{0}; // 0->external, 1/2/4->internal
Fortran::common::optional<std::int64_t> openRecl; // RECL= on OPEN
- bool IsRecordFile() const {
+ RT_API_ATTRS bool IsRecordFile() const {
// Formatted stream files are viewed as having records, at least on input
return access != Access::Stream || !isUnformatted.value_or(true);
}
- template <typename CHAR = char> constexpr bool useUTF8() const {
+ template <typename CHAR = char> constexpr RT_API_ATTRS bool useUTF8() const {
// For wide CHARACTER kinds, always use UTF-8 for formatted I/O.
// For single-byte CHARACTER, encode characters >= 0x80 with
// UTF-8 iff the mode is set.
@@ -45,25 +45,28 @@ struct ConnectionAttributes {
};
struct ConnectionState : public ConnectionAttributes {
- bool IsAtEOF() const; // true when read has hit EOF or endfile record
- bool IsAfterEndfile() const; // true after ENDFILE until repositioned
+ RT_API_ATTRS bool
+ IsAtEOF() const; // true when read has hit EOF or endfile record
+ RT_API_ATTRS bool
+ IsAfterEndfile() const; // true after ENDFILE until repositioned
// All positions and measurements are always in units of bytes,
// not characters. Multi-byte character encodings are possible in
// both internal I/O (when the character kind of the variable is 2 or 4)
// and external formatted I/O (when the encoding is UTF-8).
- std::size_t RemainingSpaceInRecord() const;
- bool NeedAdvance(std::size_t) const;
- void HandleAbsolutePosition(std::int64_t);
- void HandleRelativePosition(std::int64_t);
+ RT_API_ATTRS std::size_t RemainingSpaceInRecord() const;
+ RT_API_ATTRS bool NeedAdvance(std::size_t) const;
+ RT_API_ATTRS void HandleAbsolutePosition(std::int64_t);
+ RT_API_ATTRS void HandleRelativePosition(std::int64_t);
- void BeginRecord() {
+ RT_API_ATTRS void BeginRecord() {
positionInRecord = 0;
furthestPositionInRecord = 0;
unterminatedRecord = false;
}
- Fortran::common::optional<std::int64_t> EffectiveRecordLength() const {
+ RT_API_ATTRS Fortran::common::optional<std::int64_t>
+ EffectiveRecordLength() const {
// When an input record is longer than an explicit RECL= from OPEN
// it is effectively truncated on input.
return openRecl && recordLength && *openRecl < *recordLength ? openRecl
@@ -110,9 +113,9 @@ struct ConnectionState : public ConnectionAttributes {
// Utility class for capturing and restoring a position in an input stream.
class SavedPosition {
public:
- explicit SavedPosition(IoStatementState &);
- ~SavedPosition();
- void Cancel() { cancelled_ = true; }
+ explicit RT_API_ATTRS SavedPosition(IoStatementState &);
+ RT_API_ATTRS ~SavedPosition();
+ RT_API_ATTRS void Cancel() { cancelled_ = true; }
private:
IoStatementState &io_;
diff --git a/flang/runtime/derived.h b/flang/runtime/derived.h
index e43ecc34a31d..b4863df8db41 100644
--- a/flang/runtime/derived.h
+++ b/flang/runtime/derived.h
@@ -11,7 +11,7 @@
#ifndef FORTRAN_RUNTIME_DERIVED_H_
#define FORTRAN_RUNTIME_DERIVED_H_
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
namespace Fortran::runtime::typeInfo {
class DerivedType;
diff --git a/flang/runtime/descriptor-io.cpp b/flang/runtime/descriptor-io.cpp
index 7c7323b719ad..93df51cf22d3 100644
--- a/flang/runtime/descriptor-io.cpp
+++ b/flang/runtime/descriptor-io.cpp
@@ -7,9 +7,11 @@
//===----------------------------------------------------------------------===//
#include "descriptor-io.h"
+#include "freestanding-tools.h"
#include "flang/Common/restorer.h"
namespace Fortran::runtime::io::descr {
+RT_OFFLOAD_API_GROUP_BEGIN
// Defined formatted I/O (maybe)
Fortran::common::optional<bool> DefinedFormattedIo(IoStatementState &io,
@@ -32,9 +34,9 @@ Fortran::common::optional<bool> DefinedFormattedIo(IoStatementState &io,
ioType[1] = 'T';
std::memcpy(ioType + 2, edit.ioType, edit.ioTypeChars);
} else {
- std::strcpy(
+ runtime::strcpy(
ioType, io.mutableModes().inNamelist ? "NAMELIST" : "LISTDIRECTED");
- ioTypeLen = std::strlen(ioType);
+ ioTypeLen = runtime::strlen(ioType);
}
StaticDescriptor<1, true> vListStatDesc;
Descriptor &vListDesc{vListStatDesc.descriptor()};
@@ -150,4 +152,5 @@ bool DefinedUnformattedIo(IoStatementState &io, const Descriptor &descriptor,
return handler.GetIoStat() == IostatOk;
}
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io::descr
diff --git a/flang/runtime/descriptor-io.h b/flang/runtime/descriptor-io.h
index b6b0fefcff87..7063858d6191 100644
--- a/flang/runtime/descriptor-io.h
+++ b/flang/runtime/descriptor-io.h
@@ -28,8 +28,8 @@
namespace Fortran::runtime::io::descr {
template <typename A>
-inline A &ExtractElement(IoStatementState &io, const Descriptor &descriptor,
- const SubscriptValue subscripts[]) {
+inline RT_API_ATTRS A &ExtractElement(IoStatementState &io,
+ const Descriptor &descriptor, const SubscriptValue subscripts[]) {
A *p{descriptor.Element<A>(subscripts)};
if (!p) {
io.GetIoErrorHandler().Crash("Bad address for I/O item -- null base "
@@ -45,7 +45,7 @@ inline A &ExtractElement(IoStatementState &io, const Descriptor &descriptor,
// NAMELIST array output.
template <int KIND, Direction DIR>
-inline bool FormattedIntegerIO(
+inline RT_API_ATTRS bool FormattedIntegerIO(
IoStatementState &io, const Descriptor &descriptor) {
std::size_t numElements{descriptor.Elements()};
SubscriptValue subscripts[maxRank];
@@ -78,7 +78,7 @@ inline bool FormattedIntegerIO(
}
template <int KIND, Direction DIR>
-inline bool FormattedRealIO(
+inline RT_API_ATTRS bool FormattedRealIO(
IoStatementState &io, const Descriptor &descriptor) {
std::size_t numElements{descriptor.Elements()};
SubscriptValue subscripts[maxRank];
@@ -111,7 +111,7 @@ inline bool FormattedRealIO(
}
template <int KIND, Direction DIR>
-inline bool FormattedComplexIO(
+inline RT_API_ATTRS bool FormattedComplexIO(
IoStatementState &io, const Descriptor &descriptor) {
std::size_t numElements{descriptor.Elements()};
SubscriptValue subscripts[maxRank];
@@ -159,7 +159,7 @@ inline bool FormattedComplexIO(
}
template <typename A, Direction DIR>
-inline bool FormattedCharacterIO(
+inline RT_API_ATTRS bool FormattedCharacterIO(
IoStatementState &io, const Descriptor &descriptor) {
std::size_t numElements{descriptor.Elements()};
SubscriptValue subscripts[maxRank];
@@ -199,7 +199,7 @@ inline bool FormattedCharacterIO(
}
template <int KIND, Direction DIR>
-inline bool FormattedLogicalIO(
+inline RT_API_ATTRS bool FormattedLogicalIO(
IoStatementState &io, const Descriptor &descriptor) {
std::size_t numElements{descriptor.Elements()};
SubscriptValue subscripts[maxRank];
@@ -241,12 +241,12 @@ inline bool FormattedLogicalIO(
}
template <Direction DIR>
-static bool DescriptorIO(IoStatementState &, const Descriptor &,
+static RT_API_ATTRS bool DescriptorIO(IoStatementState &, const Descriptor &,
const NonTbpDefinedIoTable * = nullptr);
// For intrinsic (not defined) derived type I/O, formatted & unformatted
template <Direction DIR>
-static bool DefaultComponentIO(IoStatementState &io,
+static RT_API_ATTRS bool DefaultComponentIO(IoStatementState &io,
const typeInfo::Component &component, const Descriptor &origDescriptor,
const SubscriptValue origSubscripts[], Terminator &terminator,
const NonTbpDefinedIoTable *table) {
@@ -269,7 +269,7 @@ static bool DefaultComponentIO(IoStatementState &io,
}
template <Direction DIR>
-static bool DefaultComponentwiseFormattedIO(IoStatementState &io,
+static RT_API_ATTRS bool DefaultComponentwiseFormattedIO(IoStatementState &io,
const Descriptor &descriptor, const typeInfo::DerivedType &type,
const NonTbpDefinedIoTable *table, const SubscriptValue subscripts[]) {
IoErrorHandler &handler{io.GetIoErrorHandler()};
@@ -295,7 +295,7 @@ static bool DefaultComponentwiseFormattedIO(IoStatementState &io,
}
template <Direction DIR>
-static bool DefaultComponentwiseUnformattedIO(IoStatementState &io,
+static RT_API_ATTRS bool DefaultComponentwiseUnformattedIO(IoStatementState &io,
const Descriptor &descriptor, const typeInfo::DerivedType &type,
const NonTbpDefinedIoTable *table) {
IoErrorHandler &handler{io.GetIoErrorHandler()};
@@ -322,12 +322,12 @@ static bool DefaultComponentwiseUnformattedIO(IoStatementState &io,
return true;
}
-Fortran::common::optional<bool> DefinedFormattedIo(IoStatementState &,
- const Descriptor &, const typeInfo::DerivedType &,
+RT_API_ATTRS Fortran::common::optional<bool> DefinedFormattedIo(
+ IoStatementState &, const Descriptor &, const typeInfo::DerivedType &,
const typeInfo::SpecialBinding &, const SubscriptValue[]);
template <Direction DIR>
-static bool FormattedDerivedTypeIO(IoStatementState &io,
+static RT_API_ATTRS bool FormattedDerivedTypeIO(IoStatementState &io,
const Descriptor &descriptor, const NonTbpDefinedIoTable *table) {
IoErrorHandler &handler{io.GetIoErrorHandler()};
// Derived type information must be present for formatted I/O.
@@ -385,12 +385,12 @@ static bool FormattedDerivedTypeIO(IoStatementState &io,
return true;
}
-bool DefinedUnformattedIo(IoStatementState &, const Descriptor &,
+RT_API_ATTRS bool DefinedUnformattedIo(IoStatementState &, const Descriptor &,
const typeInfo::DerivedType &, const typeInfo::SpecialBinding &);
// Unformatted I/O
template <Direction DIR>
-static bool UnformattedDescriptorIO(IoStatementState &io,
+static RT_API_ATTRS bool UnformattedDescriptorIO(IoStatementState &io,
const Descriptor &descriptor, const NonTbpDefinedIoTable *table = nullptr) {
IoErrorHandler &handler{io.GetIoErrorHandler()};
const DescriptorAddendum *addendum{descriptor.Addendum()};
@@ -488,8 +488,8 @@ static bool UnformattedDescriptorIO(IoStatementState &io,
}
template <Direction DIR>
-static bool DescriptorIO(IoStatementState &io, const Descriptor &descriptor,
- const NonTbpDefinedIoTable *table) {
+static RT_API_ATTRS bool DescriptorIO(IoStatementState &io,
+ const Descriptor &descriptor, const NonTbpDefinedIoTable *table) {
IoErrorHandler &handler{io.GetIoErrorHandler()};
if (handler.InError()) {
return false;
diff --git a/flang/runtime/edit-input.cpp b/flang/runtime/edit-input.cpp
index fbeb1a595b32..935b7c299b25 100644
--- a/flang/runtime/edit-input.cpp
+++ b/flang/runtime/edit-input.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "edit-input.h"
+#include "freestanding-tools.h"
#include "namelist.h"
#include "utf.h"
#include "flang/Common/optional.h"
@@ -16,17 +17,19 @@
#include <cfenv>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
// Checks that a list-directed input value has been entirely consumed and
// doesn't contain unparsed characters before the next value separator.
-static inline bool IsCharValueSeparator(const DataEdit &edit, char32_t ch) {
+static inline RT_API_ATTRS bool IsCharValueSeparator(
+ const DataEdit &edit, char32_t ch) {
char32_t comma{
edit.modes.editingFlags & decimalComma ? char32_t{';'} : char32_t{','}};
return ch == ' ' || ch == '\t' || ch == comma || ch == '/' ||
(edit.IsNamelist() && (ch == '&' || ch == '$'));
}
-static bool CheckCompleteListDirectedField(
+static RT_API_ATTRS bool CheckCompleteListDirectedField(
IoStatementState &io, const DataEdit &edit) {
if (edit.IsListDirected()) {
std::size_t byteCount;
@@ -52,7 +55,7 @@ static bool CheckCompleteListDirectedField(
}
template <int LOG2_BASE>
-static bool EditBOZInput(
+static RT_API_ATTRS bool EditBOZInput(
IoStatementState &io, const DataEdit &edit, void *n, std::size_t bytes) {
// Skip leading white space & zeroes
Fortran::common::optional<int> remaining{io.CueUpInput(edit)};
@@ -151,13 +154,13 @@ static bool EditBOZInput(
return CheckCompleteListDirectedField(io, edit);
}
-static inline char32_t GetRadixPointChar(const DataEdit &edit) {
+static inline RT_API_ATTRS char32_t GetRadixPointChar(const DataEdit &edit) {
return edit.modes.editingFlags & decimalComma ? char32_t{','} : char32_t{'.'};
}
// Prepares input from a field, and returns the sign, if any, else '\0'.
-static char ScanNumericPrefix(IoStatementState &io, const DataEdit &edit,
- Fortran::common::optional<char32_t> &next,
+static RT_API_ATTRS char ScanNumericPrefix(IoStatementState &io,
+ const DataEdit &edit, Fortran::common::optional<char32_t> &next,
Fortran::common::optional<int> &remaining) {
remaining = io.CueUpInput(edit);
next = io.NextInField(remaining, edit);
@@ -174,7 +177,7 @@ static char ScanNumericPrefix(IoStatementState &io, const DataEdit &edit,
return sign;
}
-bool EditIntegerInput(
+RT_API_ATTRS bool EditIntegerInput(
IoStatementState &io, const DataEdit &edit, void *n, int kind) {
RUNTIME_CHECK(io.GetIoErrorHandler(), kind >= 1 && !(kind & (kind - 1)));
switch (edit.descriptor) {
@@ -279,18 +282,20 @@ struct ScannedRealInput {
int exponent{0}; // adjusted as necessary; binary if isHexadecimal
bool isHexadecimal{false}; // 0X...
};
-static ScannedRealInput ScanRealInput(
+static RT_API_ATTRS ScannedRealInput ScanRealInput(
char *buffer, int bufferSize, IoStatementState &io, const DataEdit &edit) {
Fortran::common::optional<int> remaining;
Fortran::common::optional<char32_t> next;
int got{0};
Fortran::common::optional<int> radixPointOffset;
- auto Put{[&](char ch) -> void {
+ // The following lambda definition violates the conding style,
+ // but cuda-11.8 nvcc hits an internal error with the brace initialization.
+ auto Put = [&](char ch) -> void {
if (got < bufferSize) {
buffer[got] = ch;
}
++got;
- }};
+ };
char sign{ScanNumericPrefix(io, edit, next, remaining)};
if (sign == '-') {
Put('-');
@@ -487,13 +492,21 @@ static ScannedRealInput ScanRealInput(
return {got, exponent, isHexadecimal};
}
-static void RaiseFPExceptions(decimal::ConversionResultFlags flags) {
+static RT_API_ATTRS void RaiseFPExceptions(
+ decimal::ConversionResultFlags flags) {
#undef RAISE
+#if defined(RT_DEVICE_COMPILATION)
+ Terminator terminator(__FILE__, __LINE__);
+#define RAISE(e) \
+ terminator.Crash( \
+ "not implemented yet: raising FP exception in device code: %s", #e);
+#else // !defined(RT_DEVICE_COMPILATION)
#ifdef feraisexcept // a macro in some environments; omit std::
#define RAISE feraiseexcept
#else
#define RAISE std::feraiseexcept
#endif
+#endif // !defined(RT_DEVICE_COMPILATION)
if (flags & decimal::ConversionResultFlags::Overflow) {
RAISE(FE_OVERFLOW);
}
@@ -514,7 +527,7 @@ static void RaiseFPExceptions(decimal::ConversionResultFlags flags) {
// converter without modification, this fast path for real input
// saves time by avoiding memory copies and reformatting of the exponent.
template <int PRECISION>
-static bool TryFastPathRealDecimalInput(
+static RT_API_ATTRS bool TryFastPathRealDecimalInput(
IoStatementState &io, const DataEdit &edit, void *n) {
if (edit.modes.editingFlags & (blankZero | decimalComma)) {
return false;
@@ -586,7 +599,8 @@ static bool TryFastPathRealDecimalInput(
}
template <int binaryPrecision>
-decimal::ConversionToBinaryResult<binaryPrecision> ConvertHexadecimal(
+RT_API_ATTRS decimal::ConversionToBinaryResult<binaryPrecision>
+ConvertHexadecimal(
const char *&p, enum decimal::FortranRounding rounding, int expo) {
using RealType = decimal::BinaryFloatingPointNumber<binaryPrecision>;
using RawType = typename RealType::RawType;
@@ -702,7 +716,8 @@ decimal::ConversionToBinaryResult<binaryPrecision> ConvertHexadecimal(
}
template <int KIND>
-bool EditCommonRealInput(IoStatementState &io, const DataEdit &edit, void *n) {
+RT_API_ATTRS bool EditCommonRealInput(
+ IoStatementState &io, const DataEdit &edit, void *n) {
constexpr int binaryPrecision{common::PrecisionOfRealKind(KIND)};
if (TryFastPathRealDecimalInput<binaryPrecision>(io, edit, n)) {
return CheckCompleteListDirectedField(io, edit);
@@ -798,7 +813,8 @@ bool EditCommonRealInput(IoStatementState &io, const DataEdit &edit, void *n) {
}
template <int KIND>
-bool EditRealInput(IoStatementState &io, const DataEdit &edit, void *n) {
+RT_API_ATTRS bool EditRealInput(
+ IoStatementState &io, const DataEdit &edit, void *n) {
switch (edit.descriptor) {
case DataEdit::ListDirected:
if (IsNamelistNameOrSlash(io)) {
@@ -832,7 +848,8 @@ bool EditRealInput(IoStatementState &io, const DataEdit &edit, void *n) {
}
// 13.7.3 in Fortran 2018
-bool EditLogicalInput(IoStatementState &io, const DataEdit &edit, bool &x) {
+RT_API_ATTRS bool EditLogicalInput(
+ IoStatementState &io, const DataEdit &edit, bool &x) {
switch (edit.descriptor) {
case DataEdit::ListDirected:
if (IsNamelistNameOrSlash(io)) {
@@ -882,7 +899,7 @@ bool EditLogicalInput(IoStatementState &io, const DataEdit &edit, bool &x) {
// See 13.10.3.1 paragraphs 7-9 in Fortran 2018
template <typename CHAR>
-static bool EditDelimitedCharacterInput(
+static RT_API_ATTRS bool EditDelimitedCharacterInput(
IoStatementState &io, CHAR *x, std::size_t length, char32_t delimiter) {
bool result{true};
while (true) {
@@ -911,12 +928,12 @@ static bool EditDelimitedCharacterInput(
--length;
}
}
- std::fill_n(x, length, ' ');
+ Fortran::runtime::fill_n(x, length, ' ');
return result;
}
template <typename CHAR>
-static bool EditListDirectedCharacterInput(
+static RT_API_ATTRS bool EditListDirectedCharacterInput(
IoStatementState &io, CHAR *x, std::size_t length, const DataEdit &edit) {
std::size_t byteCount{0};
auto ch{io.GetCurrentChar(byteCount)};
@@ -961,13 +978,13 @@ static bool EditListDirectedCharacterInput(
remaining = --length > 0 ? maxUTF8Bytes : 0;
}
}
- std::fill_n(x, length, ' ');
+ Fortran::runtime::fill_n(x, length, ' ');
return true;
}
template <typename CHAR>
-bool EditCharacterInput(IoStatementState &io, const DataEdit &edit, CHAR *x,
- std::size_t lengthChars) {
+RT_API_ATTRS bool EditCharacterInput(IoStatementState &io, const DataEdit &edit,
+ CHAR *x, std::size_t lengthChars) {
switch (edit.descriptor) {
case DataEdit::ListDirected:
return EditListDirectedCharacterInput(io, x, lengthChars, edit);
@@ -1011,7 +1028,7 @@ bool EditCharacterInput(IoStatementState &io, const DataEdit &edit, CHAR *x,
if (io.CheckForEndOfRecord(readyBytes)) {
if (readyBytes == 0) {
// PAD='YES' and no more data
- std::fill_n(x, lengthChars, ' ');
+ Fortran::runtime::fill_n(x, lengthChars, ' ');
return !io.GetIoErrorHandler().InError();
} else {
// Do partial read(s) then pad on last iteration
@@ -1088,23 +1105,30 @@ bool EditCharacterInput(IoStatementState &io, const DataEdit &edit, CHAR *x,
readyBytes -= chunkBytes;
}
// Pad the remainder of the input variable, if any.
- std::fill_n(x, lengthChars, ' ');
+ Fortran::runtime::fill_n(x, lengthChars, ' ');
return CheckCompleteListDirectedField(io, edit);
}
-template bool EditRealInput<2>(IoStatementState &, const DataEdit &, void *);
-template bool EditRealInput<3>(IoStatementState &, const DataEdit &, void *);
-template bool EditRealInput<4>(IoStatementState &, const DataEdit &, void *);
-template bool EditRealInput<8>(IoStatementState &, const DataEdit &, void *);
-template bool EditRealInput<10>(IoStatementState &, const DataEdit &, void *);
+template RT_API_ATTRS bool EditRealInput<2>(
+ IoStatementState &, const DataEdit &, void *);
+template RT_API_ATTRS bool EditRealInput<3>(
+ IoStatementState &, const DataEdit &, void *);
+template RT_API_ATTRS bool EditRealInput<4>(
+ IoStatementState &, const DataEdit &, void *);
+template RT_API_ATTRS bool EditRealInput<8>(
+ IoStatementState &, const DataEdit &, void *);
+template RT_API_ATTRS bool EditRealInput<10>(
+ IoStatementState &, const DataEdit &, void *);
// TODO: double/double
-template bool EditRealInput<16>(IoStatementState &, const DataEdit &, void *);
+template RT_API_ATTRS bool EditRealInput<16>(
+ IoStatementState &, const DataEdit &, void *);
-template bool EditCharacterInput(
+template RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, char *, std::size_t);
-template bool EditCharacterInput(
+template RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, char16_t *, std::size_t);
-template bool EditCharacterInput(
+template RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, char32_t *, std::size_t);
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/edit-input.h b/flang/runtime/edit-input.h
index 61844a1199a7..a90180b8ee2e 100644
--- a/flang/runtime/edit-input.h
+++ b/flang/runtime/edit-input.h
@@ -15,36 +15,38 @@
namespace Fortran::runtime::io {
-bool EditIntegerInput(IoStatementState &, const DataEdit &, void *, int kind);
+RT_API_ATTRS bool EditIntegerInput(
+ IoStatementState &, const DataEdit &, void *, int kind);
template <int KIND>
-bool EditRealInput(IoStatementState &, const DataEdit &, void *);
+RT_API_ATTRS bool EditRealInput(IoStatementState &, const DataEdit &, void *);
-bool EditLogicalInput(IoStatementState &, const DataEdit &, bool &);
+RT_API_ATTRS bool EditLogicalInput(
+ IoStatementState &, const DataEdit &, bool &);
template <typename CHAR>
-bool EditCharacterInput(
+RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, CHAR *, std::size_t);
-extern template bool EditRealInput<2>(
+extern template RT_API_ATTRS bool EditRealInput<2>(
IoStatementState &, const DataEdit &, void *);
-extern template bool EditRealInput<3>(
+extern template RT_API_ATTRS bool EditRealInput<3>(
IoStatementState &, const DataEdit &, void *);
-extern template bool EditRealInput<4>(
+extern template RT_API_ATTRS bool EditRealInput<4>(
IoStatementState &, const DataEdit &, void *);
-extern template bool EditRealInput<8>(
+extern template RT_API_ATTRS bool EditRealInput<8>(
IoStatementState &, const DataEdit &, void *);
-extern template bool EditRealInput<10>(
+extern template RT_API_ATTRS bool EditRealInput<10>(
IoStatementState &, const DataEdit &, void *);
// TODO: double/double
-extern template bool EditRealInput<16>(
+extern template RT_API_ATTRS bool EditRealInput<16>(
IoStatementState &, const DataEdit &, void *);
-extern template bool EditCharacterInput(
+extern template RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, char *, std::size_t);
-extern template bool EditCharacterInput(
+extern template RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, char16_t *, std::size_t);
-extern template bool EditCharacterInput(
+extern template RT_API_ATTRS bool EditCharacterInput(
IoStatementState &, const DataEdit &, char32_t *, std::size_t);
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/edit-output.cpp b/flang/runtime/edit-output.cpp
index 7267540370fc..b710c298babe 100644
--- a/flang/runtime/edit-output.cpp
+++ b/flang/runtime/edit-output.cpp
@@ -14,9 +14,10 @@
#include <algorithm>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
// In output statement, add a space between numbers and characters.
-static void addSpaceBeforeCharacter(IoStatementState &io) {
+static RT_API_ATTRS void addSpaceBeforeCharacter(IoStatementState &io) {
if (auto *list{io.get_if<ListDirectedStatementState<Direction::Output>>()}) {
list->set_lastWasUndelimitedCharacter(false);
}
@@ -26,8 +27,8 @@ static void addSpaceBeforeCharacter(IoStatementState &io) {
// representation of what is interpreted to be a single unsigned integer value.
// When used with character data, endianness is exposed.
template <int LOG2_BASE>
-static bool EditBOZOutput(IoStatementState &io, const DataEdit &edit,
- const unsigned char *data0, std::size_t bytes) {
+static RT_API_ATTRS bool EditBOZOutput(IoStatementState &io,
+ const DataEdit &edit, const unsigned char *data0, std::size_t bytes) {
addSpaceBeforeCharacter(io);
int digits{static_cast<int>((bytes * 8) / LOG2_BASE)};
int get{static_cast<int>(bytes * 8) - digits * LOG2_BASE};
@@ -107,7 +108,7 @@ static bool EditBOZOutput(IoStatementState &io, const DataEdit &edit,
}
template <int KIND>
-bool EditIntegerOutput(IoStatementState &io, const DataEdit &edit,
+bool RT_API_ATTRS EditIntegerOutput(IoStatementState &io, const DataEdit &edit,
common::HostSignedIntType<8 * KIND> n) {
addSpaceBeforeCharacter(io);
char buffer[130], *end{&buffer[sizeof buffer]}, *p{end};
@@ -187,7 +188,7 @@ bool EditIntegerOutput(IoStatementState &io, const DataEdit &edit,
}
// Formats the exponent (see table 13.1 for all the cases)
-const char *RealOutputEditingBase::FormatExponent(
+RT_API_ATTRS const char *RealOutputEditingBase::FormatExponent(
int expo, const DataEdit &edit, int &length) {
char *eEnd{&exponent_[sizeof exponent_]};
char *exponent{eEnd};
@@ -226,7 +227,7 @@ const char *RealOutputEditingBase::FormatExponent(
return overflow ? nullptr : exponent;
}
-bool RealOutputEditingBase::EmitPrefix(
+RT_API_ATTRS bool RealOutputEditingBase::EmitPrefix(
const DataEdit &edit, std::size_t length, std::size_t width) {
if (edit.IsListDirected()) {
int prefixLength{edit.descriptor == DataEdit::ListDirectedRealPart ? 2
@@ -247,7 +248,7 @@ bool RealOutputEditingBase::EmitPrefix(
}
}
-bool RealOutputEditingBase::EmitSuffix(const DataEdit &edit) {
+RT_API_ATTRS bool RealOutputEditingBase::EmitSuffix(const DataEdit &edit) {
if (edit.descriptor == DataEdit::ListDirectedRealPart) {
return EmitAscii(
io_, edit.modes.editingFlags & decimalComma ? ";" : ",", 1);
@@ -259,8 +260,10 @@ bool RealOutputEditingBase::EmitSuffix(const DataEdit &edit) {
}
template <int KIND>
-decimal::ConversionToDecimalResult RealOutputEditing<KIND>::ConvertToDecimal(
+RT_API_ATTRS decimal::ConversionToDecimalResult
+RealOutputEditing<KIND>::ConvertToDecimal(
int significantDigits, enum decimal::FortranRounding rounding, int flags) {
+#if !defined(RT_DEVICE_COMPILATION)
auto converted{decimal::ConvertToDecimal<binaryPrecision>(buffer_,
sizeof buffer_, static_cast<enum decimal::DecimalConversionFlags>(flags),
significantDigits, rounding, x_)};
@@ -270,9 +273,13 @@ decimal::ConversionToDecimalResult RealOutputEditing<KIND>::ConvertToDecimal(
sizeof buffer_);
}
return converted;
+#else // defined(RT_DEVICE_COMPILATION)
+ // TODO: enable Decimal library build for the device.
+ io_.GetIoErrorHandler().Crash("not implemented yet: decimal conversion");
+#endif // defined(RT_DEVICE_COMPILATION)
}
-static bool IsInfOrNaN(const char *p, int length) {
+static RT_API_ATTRS bool IsInfOrNaN(const char *p, int length) {
if (!p || length < 1) {
return false;
}
@@ -287,7 +294,8 @@ static bool IsInfOrNaN(const char *p, int length) {
// 13.7.2.3.3 in F'2018
template <int KIND>
-bool RealOutputEditing<KIND>::EditEorDOutput(const DataEdit &edit) {
+RT_API_ATTRS bool RealOutputEditing<KIND>::EditEorDOutput(
+ const DataEdit &edit) {
addSpaceBeforeCharacter(io_);
int editDigits{edit.digits.value_or(0)}; // 'd' field
int editWidth{edit.width.value_or(0)}; // 'w' field
@@ -423,7 +431,7 @@ bool RealOutputEditing<KIND>::EditEorDOutput(const DataEdit &edit) {
// 13.7.2.3.2 in F'2018
template <int KIND>
-bool RealOutputEditing<KIND>::EditFOutput(const DataEdit &edit) {
+RT_API_ATTRS bool RealOutputEditing<KIND>::EditFOutput(const DataEdit &edit) {
addSpaceBeforeCharacter(io_);
int fracDigits{edit.digits.value_or(0)}; // 'd' field
const int editWidth{edit.width.value_or(0)}; // 'w' field
@@ -553,12 +561,12 @@ bool RealOutputEditing<KIND>::EditFOutput(const DataEdit &edit) {
// 13.7.5.2.3 in F'2018
template <int KIND>
-DataEdit RealOutputEditing<KIND>::EditForGOutput(DataEdit edit) {
+RT_API_ATTRS DataEdit RealOutputEditing<KIND>::EditForGOutput(DataEdit edit) {
edit.descriptor = 'E';
edit.variation = 'G'; // to suppress error for Ew.0
int editWidth{edit.width.value_or(0)};
- int significantDigits{
- edit.digits.value_or(BinaryFloatingPoint::decimalPrecision)}; // 'd'
+ int significantDigits{edit.digits.value_or(
+ static_cast<int>(BinaryFloatingPoint::decimalPrecision))}; // 'd'
if (editWidth > 0 && significantDigits == 0) {
return edit; // Gw.0Ee -> Ew.0Ee for w > 0
}
@@ -597,7 +605,8 @@ DataEdit RealOutputEditing<KIND>::EditForGOutput(DataEdit edit) {
// 13.10.4 in F'2018
template <int KIND>
-bool RealOutputEditing<KIND>::EditListDirectedOutput(const DataEdit &edit) {
+RT_API_ATTRS bool RealOutputEditing<KIND>::EditListDirectedOutput(
+ const DataEdit &edit) {
decimal::ConversionToDecimalResult converted{
ConvertToDecimal(1, edit.modes.round)};
if (IsInfOrNaN(converted.str, static_cast<int>(converted.length))) {
@@ -631,9 +640,9 @@ bool RealOutputEditing<KIND>::EditListDirectedOutput(const DataEdit &edit) {
// E.g., 2. is edited into 0X8.0P-2 rather than 0X2.0P0. This implementation
// follows that precedent so as to avoid a gratuitous incompatibility.
template <int KIND>
-auto RealOutputEditing<KIND>::ConvertToHexadecimal(
- int significantDigits, enum decimal::FortranRounding rounding, int flags)
- -> ConvertToHexadecimalResult {
+RT_API_ATTRS auto RealOutputEditing<KIND>::ConvertToHexadecimal(
+ int significantDigits, enum decimal::FortranRounding rounding,
+ int flags) -> ConvertToHexadecimalResult {
if (x_.IsNaN() || x_.IsInfinite()) {
auto converted{ConvertToDecimal(significantDigits, rounding, flags)};
return {converted.str, static_cast<int>(converted.length), 0};
@@ -689,7 +698,7 @@ auto RealOutputEditing<KIND>::ConvertToHexadecimal(
}
template <int KIND>
-bool RealOutputEditing<KIND>::EditEXOutput(const DataEdit &edit) {
+RT_API_ATTRS bool RealOutputEditing<KIND>::EditEXOutput(const DataEdit &edit) {
addSpaceBeforeCharacter(io_);
int editDigits{edit.digits.value_or(0)}; // 'd' field
int significantDigits{editDigits + 1};
@@ -740,7 +749,8 @@ bool RealOutputEditing<KIND>::EditEXOutput(const DataEdit &edit) {
EmitAscii(io_, exponent, expoLength);
}
-template <int KIND> bool RealOutputEditing<KIND>::Edit(const DataEdit &edit) {
+template <int KIND>
+RT_API_ATTRS bool RealOutputEditing<KIND>::Edit(const DataEdit &edit) {
switch (edit.descriptor) {
case 'D':
return EditEorDOutput(edit);
@@ -783,13 +793,14 @@ template <int KIND> bool RealOutputEditing<KIND>::Edit(const DataEdit &edit) {
return false;
}
-bool ListDirectedLogicalOutput(IoStatementState &io,
+RT_API_ATTRS bool ListDirectedLogicalOutput(IoStatementState &io,
ListDirectedStatementState<Direction::Output> &list, bool truth) {
return list.EmitLeadingSpaceOrAdvance(io) &&
EmitAscii(io, truth ? "T" : "F", 1);
}
-bool EditLogicalOutput(IoStatementState &io, const DataEdit &edit, bool truth) {
+RT_API_ATTRS bool EditLogicalOutput(
+ IoStatementState &io, const DataEdit &edit, bool truth) {
switch (edit.descriptor) {
case 'L':
case 'G':
@@ -813,7 +824,7 @@ bool EditLogicalOutput(IoStatementState &io, const DataEdit &edit, bool truth) {
}
template <typename CHAR>
-bool ListDirectedCharacterOutput(IoStatementState &io,
+RT_API_ATTRS bool ListDirectedCharacterOutput(IoStatementState &io,
ListDirectedStatementState<Direction::Output> &list, const CHAR *x,
std::size_t length) {
bool ok{true};
@@ -870,8 +881,8 @@ bool ListDirectedCharacterOutput(IoStatementState &io,
}
template <typename CHAR>
-bool EditCharacterOutput(IoStatementState &io, const DataEdit &edit,
- const CHAR *x, std::size_t length) {
+RT_API_ATTRS bool EditCharacterOutput(IoStatementState &io,
+ const DataEdit &edit, const CHAR *x, std::size_t length) {
int len{static_cast<int>(length)};
int width{edit.width.value_or(len)};
switch (edit.descriptor) {
@@ -903,15 +914,15 @@ bool EditCharacterOutput(IoStatementState &io, const DataEdit &edit,
EmitEncoded(io, x, std::min(width, len));
}
-template bool EditIntegerOutput<1>(
+template RT_API_ATTRS bool EditIntegerOutput<1>(
IoStatementState &, const DataEdit &, std::int8_t);
-template bool EditIntegerOutput<2>(
+template RT_API_ATTRS bool EditIntegerOutput<2>(
IoStatementState &, const DataEdit &, std::int16_t);
-template bool EditIntegerOutput<4>(
+template RT_API_ATTRS bool EditIntegerOutput<4>(
IoStatementState &, const DataEdit &, std::int32_t);
-template bool EditIntegerOutput<8>(
+template RT_API_ATTRS bool EditIntegerOutput<8>(
IoStatementState &, const DataEdit &, std::int64_t);
-template bool EditIntegerOutput<16>(
+template RT_API_ATTRS bool EditIntegerOutput<16>(
IoStatementState &, const DataEdit &, common::int128_t);
template class RealOutputEditing<2>;
@@ -922,21 +933,22 @@ template class RealOutputEditing<10>;
// TODO: double/double
template class RealOutputEditing<16>;
-template bool ListDirectedCharacterOutput(IoStatementState &,
+template RT_API_ATTRS bool ListDirectedCharacterOutput(IoStatementState &,
ListDirectedStatementState<Direction::Output> &, const char *,
std::size_t chars);
-template bool ListDirectedCharacterOutput(IoStatementState &,
+template RT_API_ATTRS bool ListDirectedCharacterOutput(IoStatementState &,
ListDirectedStatementState<Direction::Output> &, const char16_t *,
std::size_t chars);
-template bool ListDirectedCharacterOutput(IoStatementState &,
+template RT_API_ATTRS bool ListDirectedCharacterOutput(IoStatementState &,
ListDirectedStatementState<Direction::Output> &, const char32_t *,
std::size_t chars);
-template bool EditCharacterOutput(
+template RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const char *, std::size_t chars);
-template bool EditCharacterOutput(
+template RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const char16_t *, std::size_t chars);
-template bool EditCharacterOutput(
+template RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const char32_t *, std::size_t chars);
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/edit-output.h b/flang/runtime/edit-output.h
index 4e6d6b25b4dd..365bc2e2a4d1 100644
--- a/flang/runtime/edit-output.h
+++ b/flang/runtime/edit-output.h
@@ -30,18 +30,20 @@ namespace Fortran::runtime::io {
// one edit descriptor with a repeat factor may safely serve to edit
// multiple elements of an array.
template <int KIND>
-bool EditIntegerOutput(
+RT_API_ATTRS bool EditIntegerOutput(
IoStatementState &, const DataEdit &, common::HostSignedIntType<8 * KIND>);
// Encapsulates the state of a REAL output conversion.
class RealOutputEditingBase {
protected:
- explicit RealOutputEditingBase(IoStatementState &io) : io_{io} {}
+ explicit RT_API_ATTRS RealOutputEditingBase(IoStatementState &io) : io_{io} {}
// Returns null when the exponent overflows a fixed-size output field.
- const char *FormatExponent(int, const DataEdit &edit, int &length);
- bool EmitPrefix(const DataEdit &, std::size_t length, std::size_t width);
- bool EmitSuffix(const DataEdit &);
+ RT_API_ATTRS const char *FormatExponent(
+ int, const DataEdit &edit, int &length);
+ RT_API_ATTRS bool EmitPrefix(
+ const DataEdit &, std::size_t length, std::size_t width);
+ RT_API_ATTRS bool EmitSuffix(const DataEdit &);
IoStatementState &io_;
int trailingBlanks_{0}; // created when Gw editing maps to Fw
@@ -50,27 +52,29 @@ protected:
template <int KIND> class RealOutputEditing : public RealOutputEditingBase {
public:
+ RT_VAR_GROUP_BEGIN
static constexpr int binaryPrecision{common::PrecisionOfRealKind(KIND)};
+ RT_VAR_GROUP_END
using BinaryFloatingPoint =
decimal::BinaryFloatingPointNumber<binaryPrecision>;
template <typename A>
- RealOutputEditing(IoStatementState &io, A x)
+ RT_API_ATTRS RealOutputEditing(IoStatementState &io, A x)
: RealOutputEditingBase{io}, x_{x} {}
- bool Edit(const DataEdit &);
+ RT_API_ATTRS bool Edit(const DataEdit &);
private:
// The DataEdit arguments here are const references or copies so that
// the original DataEdit can safely serve multiple array elements when
// it has a repeat count.
- bool EditEorDOutput(const DataEdit &);
- bool EditFOutput(const DataEdit &);
- DataEdit EditForGOutput(DataEdit); // returns an E or F edit
- bool EditEXOutput(const DataEdit &);
- bool EditListDirectedOutput(const DataEdit &);
+ RT_API_ATTRS bool EditEorDOutput(const DataEdit &);
+ RT_API_ATTRS bool EditFOutput(const DataEdit &);
+ RT_API_ATTRS DataEdit EditForGOutput(DataEdit); // returns an E or F edit
+ RT_API_ATTRS bool EditEXOutput(const DataEdit &);
+ RT_API_ATTRS bool EditListDirectedOutput(const DataEdit &);
- bool IsZero() const { return x_.IsZero(); }
+ RT_API_ATTRS bool IsZero() const { return x_.IsZero(); }
- decimal::ConversionToDecimalResult ConvertToDecimal(
+ RT_API_ATTRS decimal::ConversionToDecimalResult ConvertToDecimal(
int significantDigits, enum decimal::FortranRounding, int flags = 0);
struct ConvertToHexadecimalResult {
@@ -78,7 +82,7 @@ private:
int length;
int exponent;
};
- ConvertToHexadecimalResult ConvertToHexadecimal(
+ RT_API_ATTRS ConvertToHexadecimalResult ConvertToHexadecimal(
int significantDigits, enum decimal::FortranRounding, int flags = 0);
BinaryFloatingPoint x_;
@@ -86,43 +90,43 @@ private:
EXTRA_DECIMAL_CONVERSION_SPACE];
};
-bool ListDirectedLogicalOutput(
+RT_API_ATTRS bool ListDirectedLogicalOutput(
IoStatementState &, ListDirectedStatementState<Direction::Output> &, bool);
-bool EditLogicalOutput(IoStatementState &, const DataEdit &, bool);
+RT_API_ATTRS bool EditLogicalOutput(IoStatementState &, const DataEdit &, bool);
template <typename CHAR>
-bool ListDirectedCharacterOutput(IoStatementState &,
+RT_API_ATTRS bool ListDirectedCharacterOutput(IoStatementState &,
ListDirectedStatementState<Direction::Output> &, const CHAR *,
std::size_t chars);
-extern template bool ListDirectedCharacterOutput(IoStatementState &,
- ListDirectedStatementState<Direction::Output> &, const char *,
- std::size_t chars);
-extern template bool ListDirectedCharacterOutput(IoStatementState &,
- ListDirectedStatementState<Direction::Output> &, const char16_t *,
- std::size_t chars);
-extern template bool ListDirectedCharacterOutput(IoStatementState &,
- ListDirectedStatementState<Direction::Output> &, const char32_t *,
- std::size_t chars);
+extern template RT_API_ATTRS bool ListDirectedCharacterOutput(
+ IoStatementState &, ListDirectedStatementState<Direction::Output> &,
+ const char *, std::size_t chars);
+extern template RT_API_ATTRS bool ListDirectedCharacterOutput(
+ IoStatementState &, ListDirectedStatementState<Direction::Output> &,
+ const char16_t *, std::size_t chars);
+extern template RT_API_ATTRS bool ListDirectedCharacterOutput(
+ IoStatementState &, ListDirectedStatementState<Direction::Output> &,
+ const char32_t *, std::size_t chars);
template <typename CHAR>
-bool EditCharacterOutput(
+RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const CHAR *, std::size_t chars);
-extern template bool EditCharacterOutput(
+extern template RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const char *, std::size_t chars);
-extern template bool EditCharacterOutput(
+extern template RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const char16_t *, std::size_t chars);
-extern template bool EditCharacterOutput(
+extern template RT_API_ATTRS bool EditCharacterOutput(
IoStatementState &, const DataEdit &, const char32_t *, std::size_t chars);
-extern template bool EditIntegerOutput<1>(
+extern template RT_API_ATTRS bool EditIntegerOutput<1>(
IoStatementState &, const DataEdit &, std::int8_t);
-extern template bool EditIntegerOutput<2>(
+extern template RT_API_ATTRS bool EditIntegerOutput<2>(
IoStatementState &, const DataEdit &, std::int16_t);
-extern template bool EditIntegerOutput<4>(
+extern template RT_API_ATTRS bool EditIntegerOutput<4>(
IoStatementState &, const DataEdit &, std::int32_t);
-extern template bool EditIntegerOutput<8>(
+extern template RT_API_ATTRS bool EditIntegerOutput<8>(
IoStatementState &, const DataEdit &, std::int64_t);
-extern template bool EditIntegerOutput<16>(
+extern template RT_API_ATTRS bool EditIntegerOutput<16>(
IoStatementState &, const DataEdit &, common::int128_t);
extern template class RealOutputEditing<2>;
diff --git a/flang/runtime/emit-encoded.h b/flang/runtime/emit-encoded.h
index 864848c3b19c..ac8c7d758a0d 100644
--- a/flang/runtime/emit-encoded.h
+++ b/flang/runtime/emit-encoded.h
@@ -19,7 +19,8 @@
namespace Fortran::runtime::io {
template <typename CONTEXT, typename CHAR>
-bool EmitEncoded(CONTEXT &to, const CHAR *data, std::size_t chars) {
+RT_API_ATTRS bool EmitEncoded(
+ CONTEXT &to, const CHAR *data, std::size_t chars) {
ConnectionState &connection{to.GetConnectionState()};
if (connection.access == Access::Stream &&
connection.internalIoCharKind == 0) {
@@ -74,7 +75,7 @@ bool EmitEncoded(CONTEXT &to, const CHAR *data, std::size_t chars) {
}
template <typename CONTEXT>
-bool EmitAscii(CONTEXT &to, const char *data, std::size_t chars) {
+RT_API_ATTRS bool EmitAscii(CONTEXT &to, const char *data, std::size_t chars) {
ConnectionState &connection{to.GetConnectionState()};
if (connection.internalIoCharKind <= 1 &&
connection.access != Access::Stream) {
@@ -85,7 +86,7 @@ bool EmitAscii(CONTEXT &to, const char *data, std::size_t chars) {
}
template <typename CONTEXT>
-bool EmitRepeated(CONTEXT &to, char ch, std::size_t n) {
+RT_API_ATTRS bool EmitRepeated(CONTEXT &to, char ch, std::size_t n) {
if (n <= 0) {
return true;
}
diff --git a/flang/runtime/environment.h b/flang/runtime/environment.h
index 9bc115850961..6c56993fb1d6 100644
--- a/flang/runtime/environment.h
+++ b/flang/runtime/environment.h
@@ -18,6 +18,7 @@ namespace Fortran::runtime {
class Terminator;
+RT_OFFLOAD_VAR_GROUP_BEGIN
#if FLANG_BIG_ENDIAN
constexpr bool isHostLittleEndian{false};
#elif FLANG_LITTLE_ENDIAN
@@ -25,6 +26,7 @@ constexpr bool isHostLittleEndian{true};
#else
#error host endianness is not known
#endif
+RT_OFFLOAD_VAR_GROUP_END
// External unformatted I/O data conversions
enum class Convert { Unknown, Native, LittleEndian, BigEndian, Swap };
diff --git a/flang/runtime/external-unit.cpp b/flang/runtime/external-unit.cpp
new file mode 100644
index 000000000000..b48549d54587
--- /dev/null
+++ b/flang/runtime/external-unit.cpp
@@ -0,0 +1,336 @@
+//===-- runtime/external-unit.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implemenation of ExternalFileUnit for RT_USE_PSEUDO_FILE_UNIT=0.
+//
+//===----------------------------------------------------------------------===//
+
+#include "io-error.h"
+#include "lock.h"
+#include "tools.h"
+#include "unit-map.h"
+#include "unit.h"
+
+// NOTE: the header files above may define OpenMP declare target
+// variables, so they have to be included unconditionally
+// so that the offload entries are consistent between host and device.
+#if !defined(RT_USE_PSEUDO_FILE_UNIT)
+
+#include <cstdio>
+#include <limits>
+
+namespace Fortran::runtime::io {
+
+// The per-unit data structures are created on demand so that Fortran I/O
+// should work without a Fortran main program.
+static Lock unitMapLock;
+static Lock createOpenLock;
+static UnitMap *unitMap{nullptr};
+
+void FlushOutputOnCrash(const Terminator &terminator) {
+ if (!defaultOutput && !errorOutput) {
+ return;
+ }
+ IoErrorHandler handler{terminator};
+ handler.HasIoStat(); // prevent nested crash if flush has error
+ CriticalSection critical{unitMapLock};
+ if (defaultOutput) {
+ defaultOutput->FlushOutput(handler);
+ }
+ if (errorOutput) {
+ errorOutput->FlushOutput(handler);
+ }
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUp(int unit) {
+ return GetUnitMap().LookUp(unit);
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUpOrCreate(
+ int unit, const Terminator &terminator, bool &wasExtant) {
+ return GetUnitMap().LookUpOrCreate(unit, terminator, wasExtant);
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUpOrCreateAnonymous(int unit,
+ Direction dir, Fortran::common::optional<bool> isUnformatted,
+ const Terminator &terminator) {
+ // Make sure that the returned anonymous unit has been opened
+ // not just created in the unitMap.
+ CriticalSection critical{createOpenLock};
+ bool exists{false};
+ ExternalFileUnit *result{
+ GetUnitMap().LookUpOrCreate(unit, terminator, exists)};
+ if (result && !exists) {
+ IoErrorHandler handler{terminator};
+ result->OpenAnonymousUnit(
+ dir == Direction::Input ? OpenStatus::Unknown : OpenStatus::Replace,
+ Action::ReadWrite, Position::Rewind, Convert::Unknown, handler);
+ result->isUnformatted = isUnformatted;
+ }
+ return result;
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUp(
+ const char *path, std::size_t pathLen) {
+ return GetUnitMap().LookUp(path, pathLen);
+}
+
+ExternalFileUnit &ExternalFileUnit::CreateNew(
+ int unit, const Terminator &terminator) {
+ bool wasExtant{false};
+ ExternalFileUnit *result{
+ GetUnitMap().LookUpOrCreate(unit, terminator, wasExtant)};
+ RUNTIME_CHECK(terminator, result && !wasExtant);
+ return *result;
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUpForClose(int unit) {
+ return GetUnitMap().LookUpForClose(unit);
+}
+
+ExternalFileUnit &ExternalFileUnit::NewUnit(
+ const Terminator &terminator, bool forChildIo) {
+ ExternalFileUnit &unit{GetUnitMap().NewUnit(terminator)};
+ unit.createdForInternalChildIo_ = forChildIo;
+ return unit;
+}
+
+bool ExternalFileUnit::OpenUnit(Fortran::common::optional<OpenStatus> status,
+ Fortran::common::optional<Action> action, Position position,
+ OwningPtr<char> &&newPath, std::size_t newPathLength, Convert convert,
+ IoErrorHandler &handler) {
+ if (convert == Convert::Unknown) {
+ convert = executionEnvironment.conversion;
+ }
+ swapEndianness_ = convert == Convert::Swap ||
+ (convert == Convert::LittleEndian && !isHostLittleEndian) ||
+ (convert == Convert::BigEndian && isHostLittleEndian);
+ bool impliedClose{false};
+ if (IsConnected()) {
+ bool isSamePath{newPath.get() && path() && pathLength() == newPathLength &&
+ std::memcmp(path(), newPath.get(), newPathLength) == 0};
+ if (status && *status != OpenStatus::Old && isSamePath) {
+ handler.SignalError("OPEN statement for connected unit may not have "
+ "explicit STATUS= other than 'OLD'");
+ return impliedClose;
+ }
+ if (!newPath.get() || isSamePath) {
+ // OPEN of existing unit, STATUS='OLD' or unspecified, not new FILE=
+ newPath.reset();
+ return impliedClose;
+ }
+ // Otherwise, OPEN on open unit with new FILE= implies CLOSE
+ DoImpliedEndfile(handler);
+ FlushOutput(handler);
+ TruncateFrame(0, handler);
+ Close(CloseStatus::Keep, handler);
+ impliedClose = true;
+ }
+ if (newPath.get() && newPathLength > 0) {
+ if (const auto *already{
+ GetUnitMap().LookUp(newPath.get(), newPathLength)}) {
+ handler.SignalError(IostatOpenAlreadyConnected,
+ "OPEN(UNIT=%d,FILE='%.*s'): file is already connected to unit %d",
+ unitNumber_, static_cast<int>(newPathLength), newPath.get(),
+ already->unitNumber_);
+ return impliedClose;
+ }
+ }
+ set_path(std::move(newPath), newPathLength);
+ Open(status.value_or(OpenStatus::Unknown), action, position, handler);
+ auto totalBytes{knownSize()};
+ if (access == Access::Direct) {
+ if (!openRecl) {
+ handler.SignalError(IostatOpenBadRecl,
+ "OPEN(UNIT=%d,ACCESS='DIRECT'): record length is not known",
+ unitNumber());
+ } else if (*openRecl <= 0) {
+ handler.SignalError(IostatOpenBadRecl,
+ "OPEN(UNIT=%d,ACCESS='DIRECT',RECL=%jd): record length is invalid",
+ unitNumber(), static_cast<std::intmax_t>(*openRecl));
+ } else if (totalBytes && (*totalBytes % *openRecl != 0)) {
+ handler.SignalError(IostatOpenBadRecl,
+ "OPEN(UNIT=%d,ACCESS='DIRECT',RECL=%jd): record length is not an "
+ "even divisor of the file size %jd",
+ unitNumber(), static_cast<std::intmax_t>(*openRecl),
+ static_cast<std::intmax_t>(*totalBytes));
+ }
+ recordLength = openRecl;
+ }
+ endfileRecordNumber.reset();
+ currentRecordNumber = 1;
+ if (totalBytes && access == Access::Direct && openRecl.value_or(0) > 0) {
+ endfileRecordNumber = 1 + (*totalBytes / *openRecl);
+ }
+ if (position == Position::Append) {
+ if (totalBytes) {
+ frameOffsetInFile_ = *totalBytes;
+ }
+ if (access != Access::Stream) {
+ if (!endfileRecordNumber) {
+ // Fake it so that we can backspace relative from the end
+ endfileRecordNumber = std::numeric_limits<std::int64_t>::max() - 2;
+ }
+ currentRecordNumber = *endfileRecordNumber;
+ }
+ }
+ return impliedClose;
+}
+
+void ExternalFileUnit::OpenAnonymousUnit(
+ Fortran::common::optional<OpenStatus> status,
+ Fortran::common::optional<Action> action, Position position,
+ Convert convert, IoErrorHandler &handler) {
+ // I/O to an unconnected unit reads/creates a local file, e.g. fort.7
+ std::size_t pathMaxLen{32};
+ auto path{SizedNew<char>{handler}(pathMaxLen)};
+ std::snprintf(path.get(), pathMaxLen, "fort.%d", unitNumber_);
+ OpenUnit(status, action, position, std::move(path), std::strlen(path.get()),
+ convert, handler);
+}
+
+void ExternalFileUnit::CloseUnit(CloseStatus status, IoErrorHandler &handler) {
+ DoImpliedEndfile(handler);
+ FlushOutput(handler);
+ Close(status, handler);
+}
+
+void ExternalFileUnit::DestroyClosed() {
+ GetUnitMap().DestroyClosed(*this); // destroys *this
+}
+
+Iostat ExternalFileUnit::SetDirection(Direction direction) {
+ if (direction == Direction::Input) {
+ if (mayRead()) {
+ direction_ = Direction::Input;
+ return IostatOk;
+ } else {
+ return IostatReadFromWriteOnly;
+ }
+ } else {
+ if (mayWrite()) {
+ direction_ = Direction::Output;
+ return IostatOk;
+ } else {
+ return IostatWriteToReadOnly;
+ }
+ }
+}
+
+UnitMap &ExternalFileUnit::CreateUnitMap() {
+ Terminator terminator{__FILE__, __LINE__};
+ IoErrorHandler handler{terminator};
+ UnitMap &newUnitMap{*New<UnitMap>{terminator}().release()};
+
+ bool wasExtant{false};
+ ExternalFileUnit &out{*newUnitMap.LookUpOrCreate(
+ FORTRAN_DEFAULT_OUTPUT_UNIT, terminator, wasExtant)};
+ RUNTIME_CHECK(terminator, !wasExtant);
+ out.Predefine(1);
+ handler.SignalError(out.SetDirection(Direction::Output));
+ out.isUnformatted = false;
+ defaultOutput = &out;
+
+ ExternalFileUnit &in{*newUnitMap.LookUpOrCreate(
+ FORTRAN_DEFAULT_INPUT_UNIT, terminator, wasExtant)};
+ RUNTIME_CHECK(terminator, !wasExtant);
+ in.Predefine(0);
+ handler.SignalError(in.SetDirection(Direction::Input));
+ in.isUnformatted = false;
+ defaultInput = &in;
+
+ ExternalFileUnit &error{
+ *newUnitMap.LookUpOrCreate(FORTRAN_ERROR_UNIT, terminator, wasExtant)};
+ RUNTIME_CHECK(terminator, !wasExtant);
+ error.Predefine(2);
+ handler.SignalError(error.SetDirection(Direction::Output));
+ error.isUnformatted = false;
+ errorOutput = &error;
+
+ return newUnitMap;
+}
+
+// A back-up atexit() handler for programs that don't terminate with a main
+// program END or a STOP statement or other Fortran-initiated program shutdown,
+// such as programs with a C main() that terminate normally. It flushes all
+// external I/O units. It is registered once the first time that any external
+// I/O is attempted.
+static void CloseAllExternalUnits() {
+ IoErrorHandler handler{"Fortran program termination"};
+ ExternalFileUnit::CloseAll(handler);
+}
+
+UnitMap &ExternalFileUnit::GetUnitMap() {
+ if (unitMap) {
+ return *unitMap;
+ }
+ {
+ CriticalSection critical{unitMapLock};
+ if (unitMap) {
+ return *unitMap;
+ }
+ unitMap = &CreateUnitMap();
+ }
+ std::atexit(CloseAllExternalUnits);
+ return *unitMap;
+}
+
+void ExternalFileUnit::CloseAll(IoErrorHandler &handler) {
+ CriticalSection critical{unitMapLock};
+ if (unitMap) {
+ unitMap->CloseAll(handler);
+ FreeMemoryAndNullify(unitMap);
+ }
+ defaultOutput = nullptr;
+ defaultInput = nullptr;
+ errorOutput = nullptr;
+}
+
+void ExternalFileUnit::FlushAll(IoErrorHandler &handler) {
+ CriticalSection critical{unitMapLock};
+ if (unitMap) {
+ unitMap->FlushAll(handler);
+ }
+}
+
+int ExternalFileUnit::GetAsynchronousId(IoErrorHandler &handler) {
+ if (!mayAsynchronous()) {
+ handler.SignalError(IostatBadAsynchronous);
+ return -1;
+ } else {
+ for (int j{0}; 64 * j < maxAsyncIds; ++j) {
+ if (auto least{asyncIdAvailable_[j].LeastElement()}) {
+ asyncIdAvailable_[j].reset(*least);
+ return 64 * j + static_cast<int>(*least);
+ }
+ }
+ handler.SignalError(IostatTooManyAsyncOps);
+ return -1;
+ }
+}
+
+bool ExternalFileUnit::Wait(int id) {
+ if (static_cast<std::size_t>(id) >= maxAsyncIds ||
+ asyncIdAvailable_[id / 64].test(id % 64)) {
+ return false;
+ } else {
+ if (id == 0) { // means "all IDs"
+ for (int j{0}; 64 * j < maxAsyncIds; ++j) {
+ asyncIdAvailable_[j].set();
+ }
+ asyncIdAvailable_[0].reset(0);
+ } else {
+ asyncIdAvailable_[id / 64].set(id % 64);
+ }
+ return true;
+ }
+}
+
+} // namespace Fortran::runtime::io
+
+#endif // !defined(RT_USE_PSEUDO_FILE_UNIT)
diff --git a/flang/runtime/file.cpp b/flang/runtime/file.cpp
index 6ca5776f812a..67764f1f5626 100644
--- a/flang/runtime/file.cpp
+++ b/flang/runtime/file.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "file.h"
+#include "tools.h"
#include "flang/Runtime/magic-numbers.h"
#include "flang/Runtime/memory.h"
#include <algorithm>
@@ -424,6 +425,7 @@ void OpenFile::CloseFd(IoErrorHandler &handler) {
}
}
+#if !defined(RT_DEVICE_COMPILATION)
bool IsATerminal(int fd) { return ::isatty(fd); }
#if defined(_WIN32) && !defined(F_OK)
@@ -455,5 +457,25 @@ std::int64_t SizeInBytes(const char *path) {
// No Fortran compiler signals an error
return -1;
}
+#else // defined(RT_DEVICE_COMPILATION)
+bool IsATerminal(int fd) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+bool IsExtant(const char *path) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+bool MayRead(const char *path) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+bool MayWrite(const char *path) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+bool MayReadAndWrite(const char *path) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+std::int64_t SizeInBytes(const char *path) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+#endif // defined(RT_DEVICE_COMPILATION)
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/file.h b/flang/runtime/file.h
index 17deeb0e2f82..c06acbb9904c 100644
--- a/flang/runtime/file.h
+++ b/flang/runtime/file.h
@@ -106,11 +106,11 @@ private:
OwningPtr<Pending> pending_;
};
-bool IsATerminal(int fd);
-bool IsExtant(const char *path);
-bool MayRead(const char *path);
-bool MayWrite(const char *path);
-bool MayReadAndWrite(const char *path);
-std::int64_t SizeInBytes(const char *path);
+RT_API_ATTRS bool IsATerminal(int fd);
+RT_API_ATTRS bool IsExtant(const char *path);
+RT_API_ATTRS bool MayRead(const char *path);
+RT_API_ATTRS bool MayWrite(const char *path);
+RT_API_ATTRS bool MayReadAndWrite(const char *path);
+RT_API_ATTRS std::int64_t SizeInBytes(const char *path);
} // namespace Fortran::runtime::io
#endif // FORTRAN_RUNTIME_FILE_H_
diff --git a/flang/runtime/format-implementation.h b/flang/runtime/format-implementation.h
index b84e3208271b..45d4bd641f6f 100644
--- a/flang/runtime/format-implementation.h
+++ b/flang/runtime/format-implementation.h
@@ -25,7 +25,7 @@
namespace Fortran::runtime::io {
template <typename CONTEXT>
-FormatControl<CONTEXT>::FormatControl(const Terminator &terminator,
+RT_API_ATTRS FormatControl<CONTEXT>::FormatControl(const Terminator &terminator,
const CharType *format, std::size_t formatLength,
const Descriptor *formatDescriptor, int maxHeight)
: maxHeight_{static_cast<std::uint8_t>(maxHeight)}, format_{format},
@@ -63,7 +63,7 @@ FormatControl<CONTEXT>::FormatControl(const Terminator &terminator,
}
template <typename CONTEXT>
-int FormatControl<CONTEXT>::GetIntField(
+RT_API_ATTRS int FormatControl<CONTEXT>::GetIntField(
IoErrorHandler &handler, CharType firstCh, bool *hadError) {
CharType ch{firstCh ? firstCh : PeekNext()};
bool negate{ch == '-'};
@@ -114,7 +114,8 @@ int FormatControl<CONTEXT>::GetIntField(
}
template <typename CONTEXT>
-static void HandleControl(CONTEXT &context, char ch, char next, int n) {
+static RT_API_ATTRS void HandleControl(
+ CONTEXT &context, char ch, char next, int n) {
MutableModes &modes{context.mutableModes()};
switch (ch) {
case 'B':
@@ -221,7 +222,8 @@ static void HandleControl(CONTEXT &context, char ch, char next, int n) {
// Generally assumes that the format string has survived the common
// format validator gauntlet.
template <typename CONTEXT>
-int FormatControl<CONTEXT>::CueUpNextDataEdit(Context &context, bool stop) {
+RT_API_ATTRS int FormatControl<CONTEXT>::CueUpNextDataEdit(
+ Context &context, bool stop) {
bool hitUnlimitedLoopEnd{false};
// Do repetitions remain on an unparenthesized data edit?
while (height_ > 1 && format_[stack_[height_ - 1].start] != '(') {
@@ -419,8 +421,8 @@ int FormatControl<CONTEXT>::CueUpNextDataEdit(Context &context, bool stop) {
// Returns the next data edit descriptor
template <typename CONTEXT>
-Fortran::common::optional<DataEdit> FormatControl<CONTEXT>::GetNextDataEdit(
- Context &context, int maxRepeat) {
+RT_API_ATTRS Fortran::common::optional<DataEdit>
+FormatControl<CONTEXT>::GetNextDataEdit(Context &context, int maxRepeat) {
int repeat{CueUpNextDataEdit(context)};
auto start{offset_};
DataEdit edit;
@@ -524,7 +526,7 @@ Fortran::common::optional<DataEdit> FormatControl<CONTEXT>::GetNextDataEdit(
}
template <typename CONTEXT>
-void FormatControl<CONTEXT>::Finish(Context &context) {
+RT_API_ATTRS void FormatControl<CONTEXT>::Finish(Context &context) {
CueUpNextDataEdit(context, true /* stop at colon or end of FORMAT */);
if (freeFormat_) {
FreeMemory(const_cast<CharType *>(format_));
diff --git a/flang/runtime/format.cpp b/flang/runtime/format.cpp
index f219c29aaed1..433acce4b737 100644
--- a/flang/runtime/format.cpp
+++ b/flang/runtime/format.cpp
@@ -9,6 +9,7 @@
#include "format-implementation.h"
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
template class FormatControl<
InternalFormattedIoStatementState<Direction::Output>>;
template class FormatControl<
@@ -19,4 +20,5 @@ template class FormatControl<
ExternalFormattedIoStatementState<Direction::Input>>;
template class FormatControl<ChildFormattedIoStatementState<Direction::Output>>;
template class FormatControl<ChildFormattedIoStatementState<Direction::Input>>;
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/format.h b/flang/runtime/format.h
index e7d945599640..f57cf9204487 100644
--- a/flang/runtime/format.h
+++ b/flang/runtime/format.h
@@ -12,6 +12,7 @@
#define FORTRAN_RUNTIME_FORMAT_H_
#include "environment.h"
+#include "freestanding-tools.h"
#include "io-error.h"
#include "flang/Common/Fortran.h"
#include "flang/Common/optional.h"
@@ -49,20 +50,21 @@ struct DataEdit {
char descriptor; // capitalized: one of A, I, B, O, Z, F, E(N/S/X), D, G
// Special internal data edit descriptors for list-directed & NAMELIST I/O
+ RT_OFFLOAD_VAR_GROUP_BEGIN
static constexpr char ListDirected{'g'}; // non-COMPLEX list-directed
static constexpr char ListDirectedRealPart{'r'}; // emit "(r," or "(r;"
static constexpr char ListDirectedImaginaryPart{'z'}; // emit "z)"
static constexpr char ListDirectedNullValue{'n'}; // see 13.10.3.2
- constexpr bool IsListDirected() const {
+ static constexpr char DefinedDerivedType{'d'}; // DT defined I/O
+ RT_OFFLOAD_VAR_GROUP_END
+ constexpr RT_API_ATTRS bool IsListDirected() const {
return descriptor == ListDirected || descriptor == ListDirectedRealPart ||
descriptor == ListDirectedImaginaryPart;
}
- constexpr bool IsNamelist() const {
+ constexpr RT_API_ATTRS bool IsNamelist() const {
return IsListDirected() && modes.inNamelist;
}
- static constexpr char DefinedDerivedType{'d'}; // DT defined I/O
-
char variation{'\0'}; // N, S, or X for EN, ES, EX; G/l for original G/list
Fortran::common::optional<int> width; // the 'w' field; optional for A
Fortran::common::optional<int> digits; // the 'm' or 'd' field
@@ -72,8 +74,10 @@ struct DataEdit {
// "iotype" &/or "v_list" values for a DT'iotype'(v_list)
// defined I/O data edit descriptor
+ RT_OFFLOAD_VAR_GROUP_BEGIN
static constexpr std::size_t maxIoTypeChars{32};
static constexpr std::size_t maxVListEntries{4};
+ RT_OFFLOAD_VAR_GROUP_END
std::uint8_t ioTypeChars{0};
std::uint8_t vListEntries{0};
char ioType[maxIoTypeChars];
@@ -88,13 +92,13 @@ public:
using Context = CONTEXT;
using CharType = char; // formats are always default kind CHARACTER
- FormatControl() {}
- FormatControl(const Terminator &, const CharType *format,
+ RT_API_ATTRS FormatControl() {}
+ RT_API_ATTRS FormatControl(const Terminator &, const CharType *format,
std::size_t formatLength, const Descriptor *formatDescriptor = nullptr,
int maxHeight = maxMaxHeight);
// For attempting to allocate in a user-supplied stack area
- static std::size_t GetNeededSize(int maxHeight) {
+ static RT_API_ATTRS std::size_t GetNeededSize(int maxHeight) {
return sizeof(FormatControl) -
sizeof(Iteration) * (maxMaxHeight - maxHeight);
}
@@ -102,14 +106,15 @@ public:
// Extracts the next data edit descriptor, handling control edit descriptors
// along the way. If maxRepeat==0, this is a peek at the next data edit
// descriptor.
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
Context &, int maxRepeat = 1);
// Emit any remaining character literals after the last data item (on output)
// and perform remaining record positioning actions.
- void Finish(Context &);
+ RT_API_ATTRS void Finish(Context &);
private:
+ RT_OFFLOAD_VAR_GROUP_BEGIN
static constexpr std::uint8_t maxMaxHeight{100};
struct Iteration {
@@ -117,19 +122,20 @@ private:
int start{0}; // offset in format_ of '(' or a repeated edit descriptor
int remaining{0}; // while >0, decrement and iterate
};
+ RT_OFFLOAD_VAR_GROUP_END
- void SkipBlanks() {
+ RT_API_ATTRS void SkipBlanks() {
while (offset_ < formatLength_ &&
(format_[offset_] == ' ' || format_[offset_] == '\t' ||
format_[offset_] == '\v')) {
++offset_;
}
}
- CharType PeekNext() {
+ RT_API_ATTRS CharType PeekNext() {
SkipBlanks();
return offset_ < formatLength_ ? format_[offset_] : '\0';
}
- CharType GetNextChar(IoErrorHandler &handler) {
+ RT_API_ATTRS CharType GetNextChar(IoErrorHandler &handler) {
SkipBlanks();
if (offset_ >= formatLength_) {
if (formatLength_ == 0) {
@@ -143,7 +149,7 @@ private:
}
return format_[offset_++];
}
- int GetIntField(
+ RT_API_ATTRS int GetIntField(
IoErrorHandler &, CharType firstCh = '\0', bool *hadError = nullptr);
// Advances through the FORMAT until the next data edit
@@ -151,13 +157,14 @@ private:
// along the way. Returns the repeat count that appeared
// before the descriptor (defaulting to 1) and leaves offset_
// pointing to the data edit.
- int CueUpNextDataEdit(Context &, bool stop = false);
+ RT_API_ATTRS int CueUpNextDataEdit(Context &, bool stop = false);
- static constexpr CharType Capitalize(CharType ch) {
+ static constexpr RT_API_ATTRS CharType Capitalize(CharType ch) {
return ch >= 'a' && ch <= 'z' ? ch + 'A' - 'a' : ch;
}
- void ReportBadFormat(Context &context, const char *msg, int offset) const {
+ RT_API_ATTRS void ReportBadFormat(
+ Context &context, const char *msg, int offset) const {
if constexpr (std::is_same_v<CharType, char>) {
// Echo the bad format in the error message, but trim any leading or
// trailing spaces.
diff --git a/flang/runtime/freestanding-tools.h b/flang/runtime/freestanding-tools.h
index 682b4c9b8929..451bf13b9fa6 100644
--- a/flang/runtime/freestanding-tools.h
+++ b/flang/runtime/freestanding-tools.h
@@ -9,7 +9,7 @@
#ifndef FORTRAN_RUNTIME_FREESTANDING_TOOLS_H_
#define FORTRAN_RUNTIME_FREESTANDING_TOOLS_H_
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
#include "flang/Runtime/c-or-cpp.h"
#include <algorithm>
#include <cstring>
@@ -47,14 +47,19 @@
#define STD_MEMCHR_UNSUPPORTED 1
#endif
+#if !defined(STD_STRCPY_UNSUPPORTED) && \
+ (defined(__CUDACC__) || defined(__CUDA__)) && defined(__CUDA_ARCH__)
+#define STD_STRCPY_UNSUPPORTED 1
+#endif
+
namespace Fortran::runtime {
#if STD_FILL_N_UNSUPPORTED
// Provides alternative implementation for std::fill_n(), if
// it is not supported.
-template <typename A>
-static inline RT_API_ATTRS void fill_n(
- A *start, std::size_t count, const A &value) {
+template <typename A, typename B>
+static inline RT_API_ATTRS std::enable_if_t<std::is_convertible_v<B, A>, void>
+fill_n(A *start, std::size_t count, const B &value) {
for (std::size_t j{0}; j < count; ++j) {
start[j] = value;
}
@@ -157,5 +162,19 @@ static inline RT_API_ATTRS const void *memchr(
using std::memchr;
#endif // !STD_MEMCMP_UNSUPPORTED
+#if STD_STRCPY_UNSUPPORTED
+// Provides alternative implementation for std::strcpy(), if
+// it is not supported.
+static inline RT_API_ATTRS char *strcpy(char *dest, const char *src) {
+ char *result{dest};
+ do {
+ *dest++ = *src;
+ } while (*src++ != '\0');
+ return result;
+}
+#else // !STD_STRCPY_UNSUPPORTED
+using std::strcpy;
+#endif // !STD_STRCPY_UNSUPPORTED
+
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_FREESTANDING_TOOLS_H_
diff --git a/flang/runtime/internal-unit.cpp b/flang/runtime/internal-unit.cpp
index 66140e005887..35766306ccef 100644
--- a/flang/runtime/internal-unit.cpp
+++ b/flang/runtime/internal-unit.cpp
@@ -7,15 +7,17 @@
//===----------------------------------------------------------------------===//
#include "internal-unit.h"
+#include "freestanding-tools.h"
#include "io-error.h"
#include "flang/Runtime/descriptor.h"
#include <algorithm>
#include <type_traits>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
template <Direction DIR>
-InternalDescriptorUnit<DIR>::InternalDescriptorUnit(
+RT_API_ATTRS InternalDescriptorUnit<DIR>::InternalDescriptorUnit(
Scalar scalar, std::size_t length, int kind) {
internalIoCharKind = kind;
recordLength = length;
@@ -26,7 +28,7 @@ InternalDescriptorUnit<DIR>::InternalDescriptorUnit(
}
template <Direction DIR>
-InternalDescriptorUnit<DIR>::InternalDescriptorUnit(
+RT_API_ATTRS InternalDescriptorUnit<DIR>::InternalDescriptorUnit(
const Descriptor &that, const Terminator &terminator) {
auto thatType{that.type().GetCategoryAndKind()};
RUNTIME_CHECK(terminator, thatType.has_value());
@@ -42,7 +44,7 @@ InternalDescriptorUnit<DIR>::InternalDescriptorUnit(
}
template <Direction DIR>
-bool InternalDescriptorUnit<DIR>::Emit(
+RT_API_ATTRS bool InternalDescriptorUnit<DIR>::Emit(
const char *data, std::size_t bytes, IoErrorHandler &handler) {
if constexpr (DIR == Direction::Input) {
handler.Crash("InternalDescriptorUnit<Direction::Input>::Emit() called");
@@ -76,7 +78,7 @@ bool InternalDescriptorUnit<DIR>::Emit(
}
template <Direction DIR>
-std::size_t InternalDescriptorUnit<DIR>::GetNextInputBytes(
+RT_API_ATTRS std::size_t InternalDescriptorUnit<DIR>::GetNextInputBytes(
const char *&p, IoErrorHandler &handler) {
if constexpr (DIR == Direction::Output) {
handler.Crash("InternalDescriptorUnit<Direction::Output>::"
@@ -97,7 +99,8 @@ std::size_t InternalDescriptorUnit<DIR>::GetNextInputBytes(
}
template <Direction DIR>
-bool InternalDescriptorUnit<DIR>::AdvanceRecord(IoErrorHandler &handler) {
+RT_API_ATTRS bool InternalDescriptorUnit<DIR>::AdvanceRecord(
+ IoErrorHandler &handler) {
if (currentRecordNumber >= endfileRecordNumber.value_or(0)) {
if constexpr (DIR == Direction::Input) {
handler.SignalEnd();
@@ -115,24 +118,25 @@ bool InternalDescriptorUnit<DIR>::AdvanceRecord(IoErrorHandler &handler) {
}
template <Direction DIR>
-void InternalDescriptorUnit<DIR>::BlankFill(char *at, std::size_t bytes) {
+RT_API_ATTRS void InternalDescriptorUnit<DIR>::BlankFill(
+ char *at, std::size_t bytes) {
switch (internalIoCharKind) {
case 2:
- std::fill_n(reinterpret_cast<char16_t *>(at), bytes / 2,
+ Fortran::runtime::fill_n(reinterpret_cast<char16_t *>(at), bytes / 2,
static_cast<char16_t>(' '));
break;
case 4:
- std::fill_n(reinterpret_cast<char32_t *>(at), bytes / 4,
+ Fortran::runtime::fill_n(reinterpret_cast<char32_t *>(at), bytes / 4,
static_cast<char32_t>(' '));
break;
default:
- std::fill_n(at, bytes, ' ');
+ Fortran::runtime::fill_n(at, bytes, ' ');
break;
}
}
template <Direction DIR>
-void InternalDescriptorUnit<DIR>::BlankFillOutputRecord() {
+RT_API_ATTRS void InternalDescriptorUnit<DIR>::BlankFillOutputRecord() {
if constexpr (DIR == Direction::Output) {
if (furthestPositionInRecord <
recordLength.value_or(furthestPositionInRecord)) {
@@ -143,18 +147,21 @@ void InternalDescriptorUnit<DIR>::BlankFillOutputRecord() {
}
template <Direction DIR>
-void InternalDescriptorUnit<DIR>::BackspaceRecord(IoErrorHandler &handler) {
+RT_API_ATTRS void InternalDescriptorUnit<DIR>::BackspaceRecord(
+ IoErrorHandler &handler) {
RUNTIME_CHECK(handler, currentRecordNumber > 1);
--currentRecordNumber;
BeginRecord();
}
template <Direction DIR>
-std::int64_t InternalDescriptorUnit<DIR>::InquirePos() {
+RT_API_ATTRS std::int64_t InternalDescriptorUnit<DIR>::InquirePos() {
return (currentRecordNumber - 1) * recordLength.value_or(0) +
positionInRecord + 1;
}
template class InternalDescriptorUnit<Direction::Output>;
template class InternalDescriptorUnit<Direction::Input>;
+
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/internal-unit.h b/flang/runtime/internal-unit.h
index b536ffb831d5..bcd38b62468a 100644
--- a/flang/runtime/internal-unit.h
+++ b/flang/runtime/internal-unit.h
@@ -26,26 +26,28 @@ template <Direction DIR> class InternalDescriptorUnit : public ConnectionState {
public:
using Scalar =
std::conditional_t<DIR == Direction::Input, const char *, char *>;
- InternalDescriptorUnit(Scalar, std::size_t chars, int kind);
- InternalDescriptorUnit(const Descriptor &, const Terminator &);
+ RT_API_ATTRS InternalDescriptorUnit(Scalar, std::size_t chars, int kind);
+ RT_API_ATTRS InternalDescriptorUnit(const Descriptor &, const Terminator &);
- bool Emit(const char *, std::size_t, IoErrorHandler &);
- std::size_t GetNextInputBytes(const char *&, IoErrorHandler &);
- bool AdvanceRecord(IoErrorHandler &);
- void BackspaceRecord(IoErrorHandler &);
- std::int64_t InquirePos();
+ RT_API_ATTRS bool Emit(const char *, std::size_t, IoErrorHandler &);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&, IoErrorHandler &);
+ RT_API_ATTRS bool AdvanceRecord(IoErrorHandler &);
+ RT_API_ATTRS void BackspaceRecord(IoErrorHandler &);
+ RT_API_ATTRS std::int64_t InquirePos();
private:
- Descriptor &descriptor() { return staticDescriptor_.descriptor(); }
- const Descriptor &descriptor() const {
+ RT_API_ATTRS Descriptor &descriptor() {
return staticDescriptor_.descriptor();
}
- Scalar CurrentRecord() const {
+ RT_API_ATTRS const Descriptor &descriptor() const {
+ return staticDescriptor_.descriptor();
+ }
+ RT_API_ATTRS Scalar CurrentRecord() const {
return descriptor().template ZeroBasedIndexedElement<char>(
currentRecordNumber - 1);
}
- void BlankFill(char *, std::size_t);
- void BlankFillOutputRecord();
+ RT_API_ATTRS void BlankFill(char *, std::size_t);
+ RT_API_ATTRS void BlankFillOutputRecord();
StaticDescriptor<maxRank, true /*addendum*/> staticDescriptor_;
};
diff --git a/flang/runtime/io-api.cpp b/flang/runtime/io-api.cpp
index 094db5572f15..3a86c9fa7375 100644
--- a/flang/runtime/io-api.cpp
+++ b/flang/runtime/io-api.cpp
@@ -99,7 +99,7 @@ Cookie IONAME(BeginInternalArrayFormattedInput)(const Descriptor &descriptor,
}
template <Direction DIR>
-Cookie BeginInternalListIO(
+RT_API_ATTRS Cookie BeginInternalListIO(
std::conditional_t<DIR == Direction::Input, const char, char> *internal,
std::size_t internalLength, void ** /*scratchArea*/,
std::size_t /*scratchBytes*/, const char *sourceFile, int sourceLine) {
@@ -156,8 +156,8 @@ Cookie IONAME(BeginInternalFormattedInput)(const char *internal,
sourceFile, sourceLine);
}
-static Cookie NoopUnit(const Terminator &terminator, int unitNumber,
- enum Iostat iostat = IostatOk) {
+static RT_API_ATTRS Cookie NoopUnit(const Terminator &terminator,
+ int unitNumber, enum Iostat iostat = IostatOk) {
Cookie cookie{&New<NoopStatementState>{terminator}(
terminator.sourceFileName(), terminator.sourceLine(), unitNumber)
.release()
@@ -168,9 +168,9 @@ static Cookie NoopUnit(const Terminator &terminator, int unitNumber,
return cookie;
}
-static ExternalFileUnit *GetOrCreateUnit(int unitNumber, Direction direction,
- Fortran::common::optional<bool> isUnformatted, const Terminator &terminator,
- Cookie &errorCookie) {
+static RT_API_ATTRS ExternalFileUnit *GetOrCreateUnit(int unitNumber,
+ Direction direction, Fortran::common::optional<bool> isUnformatted,
+ const Terminator &terminator, Cookie &errorCookie) {
if (ExternalFileUnit *
unit{ExternalFileUnit::LookUpOrCreateAnonymous(
unitNumber, direction, isUnformatted, terminator)}) {
@@ -183,7 +183,7 @@ static ExternalFileUnit *GetOrCreateUnit(int unitNumber, Direction direction,
}
template <Direction DIR, template <Direction> class STATE, typename... A>
-Cookie BeginExternalListIO(
+RT_API_ATTRS Cookie BeginExternalListIO(
int unitNumber, const char *sourceFile, int sourceLine, A &&...xs) {
Terminator terminator{sourceFile, sourceLine};
Cookie errorCookie{nullptr};
@@ -227,11 +227,13 @@ Cookie BeginExternalListIO(
}
}
-Cookie IONAME(BeginExternalListOutput)(
+RT_EXT_API_GROUP_BEGIN
+Cookie IODEF(BeginExternalListOutput)(
ExternalUnit unitNumber, const char *sourceFile, int sourceLine) {
return BeginExternalListIO<Direction::Output, ExternalListIoStatementState>(
unitNumber, sourceFile, sourceLine);
}
+RT_EXT_API_GROUP_END
Cookie IONAME(BeginExternalListInput)(
ExternalUnit unitNumber, const char *sourceFile, int sourceLine) {
@@ -1145,7 +1147,7 @@ bool IONAME(OutputInteger8)(Cookie cookie, std::int8_t n) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputInteger8")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Integer, 1, reinterpret_cast<void *>(&n), 0);
@@ -1156,29 +1158,31 @@ bool IONAME(OutputInteger16)(Cookie cookie, std::int16_t n) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputInteger16")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Integer, 2, reinterpret_cast<void *>(&n), 0);
return descr::DescriptorIO<Direction::Output>(*cookie, descriptor);
}
-bool IONAME(OutputInteger32)(Cookie cookie, std::int32_t n) {
+RT_EXT_API_GROUP_BEGIN
+bool IODEF(OutputInteger32)(Cookie cookie, std::int32_t n) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputInteger32")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Integer, 4, reinterpret_cast<void *>(&n), 0);
return descr::DescriptorIO<Direction::Output>(*cookie, descriptor);
}
+RT_EXT_API_GROUP_END
bool IONAME(OutputInteger64)(Cookie cookie, std::int64_t n) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputInteger64")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Integer, 8, reinterpret_cast<void *>(&n), 0);
@@ -1190,7 +1194,7 @@ bool IONAME(OutputInteger128)(Cookie cookie, common::int128_t n) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputInteger128")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Integer, 16, reinterpret_cast<void *>(&n), 0);
@@ -1202,7 +1206,7 @@ bool IONAME(InputInteger)(Cookie cookie, std::int64_t &n, int kind) {
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputInteger")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Integer, kind, reinterpret_cast<void *>(&n), 0);
@@ -1213,7 +1217,7 @@ bool IONAME(OutputReal32)(Cookie cookie, float x) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputReal32")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(TypeCategory::Real, 4, reinterpret_cast<void *>(&x), 0);
return descr::DescriptorIO<Direction::Output>(*cookie, descriptor);
@@ -1223,7 +1227,7 @@ bool IONAME(OutputReal64)(Cookie cookie, double x) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputReal64")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(TypeCategory::Real, 8, reinterpret_cast<void *>(&x), 0);
return descr::DescriptorIO<Direction::Output>(*cookie, descriptor);
@@ -1233,7 +1237,7 @@ bool IONAME(InputReal32)(Cookie cookie, float &x) {
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputReal32")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(TypeCategory::Real, 4, reinterpret_cast<void *>(&x), 0);
return descr::DescriptorIO<Direction::Input>(*cookie, descriptor);
@@ -1243,7 +1247,7 @@ bool IONAME(InputReal64)(Cookie cookie, double &x) {
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputReal64")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(TypeCategory::Real, 8, reinterpret_cast<void *>(&x), 0);
return descr::DescriptorIO<Direction::Input>(*cookie, descriptor);
@@ -1254,7 +1258,7 @@ bool IONAME(OutputComplex32)(Cookie cookie, float r, float i) {
return false;
}
float z[2]{r, i};
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Complex, 4, reinterpret_cast<void *>(&z), 0);
@@ -1266,7 +1270,7 @@ bool IONAME(OutputComplex64)(Cookie cookie, double r, double i) {
return false;
}
double z[2]{r, i};
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Complex, 8, reinterpret_cast<void *>(&z), 0);
@@ -1277,7 +1281,7 @@ bool IONAME(InputComplex32)(Cookie cookie, float z[2]) {
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputComplex32")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Complex, 4, reinterpret_cast<void *>(z), 0);
@@ -1288,7 +1292,7 @@ bool IONAME(InputComplex64)(Cookie cookie, double z[2]) {
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputComplex64")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Complex, 8, reinterpret_cast<void *>(z), 0);
@@ -1300,7 +1304,7 @@ bool IONAME(OutputCharacter)(
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputCharacter")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
kind, length, reinterpret_cast<void *>(const_cast<char *>(x)), 0);
@@ -1316,7 +1320,7 @@ bool IONAME(InputCharacter)(
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputCharacter")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(kind, length, reinterpret_cast<void *>(x), 0);
return descr::DescriptorIO<Direction::Input>(*cookie, descriptor);
@@ -1330,7 +1334,7 @@ bool IONAME(OutputLogical)(Cookie cookie, bool truth) {
if (!cookie->CheckFormattedStmtType<Direction::Output>("OutputLogical")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Logical, sizeof truth, reinterpret_cast<void *>(&truth), 0);
@@ -1341,7 +1345,7 @@ bool IONAME(InputLogical)(Cookie cookie, bool &truth) {
if (!cookie->CheckFormattedStmtType<Direction::Input>("InputLogical")) {
return false;
}
- StaticDescriptor staticDescriptor;
+ StaticDescriptor<0> staticDescriptor;
Descriptor &descriptor{staticDescriptor.descriptor()};
descriptor.Establish(
TypeCategory::Logical, sizeof truth, reinterpret_cast<void *>(&truth), 0);
@@ -1448,10 +1452,12 @@ bool IONAME(InquireInteger64)(
return false;
}
-enum Iostat IONAME(EndIoStatement)(Cookie cookie) {
+RT_EXT_API_GROUP_BEGIN
+enum Iostat IODEF(EndIoStatement)(Cookie cookie) {
IoStatementState &io{*cookie};
return static_cast<enum Iostat>(io.EndIoStatement());
}
+RT_EXT_API_GROUP_END
template <typename INT>
static enum Iostat CheckUnitNumberInRangeImpl(INT unit, bool handleError,
diff --git a/flang/runtime/io-error.cpp b/flang/runtime/io-error.cpp
index c8f6675c60a6..b006b82f6224 100644
--- a/flang/runtime/io-error.cpp
+++ b/flang/runtime/io-error.cpp
@@ -16,6 +16,7 @@
#include <cstring>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
void IoErrorHandler::SignalError(int iostatOrErrno, const char *msg, ...) {
// Note that IOMSG= alone without IOSTAT=/END=/EOR=/ERR= does not suffice
@@ -44,12 +45,17 @@ void IoErrorHandler::SignalError(int iostatOrErrno, const char *msg, ...) {
if (ioStat_ <= 0) {
ioStat_ = iostatOrErrno; // priority over END=/EOR=
if (msg && (flags_ & hasIoMsg)) {
+#if !defined(RT_DEVICE_COMPILATION)
char buffer[256];
va_list ap;
va_start(ap, msg);
std::vsnprintf(buffer, sizeof buffer, msg, ap);
- ioMsg_ = SaveDefaultCharacter(buffer, std::strlen(buffer) + 1, *this);
va_end(ap);
+#else
+ const char *buffer = "not implemented yet: IOSTAT with varargs";
+#endif
+ ioMsg_ = SaveDefaultCharacter(
+ buffer, Fortran::runtime::strlen(buffer) + 1, *this);
}
}
return;
@@ -58,15 +64,23 @@ void IoErrorHandler::SignalError(int iostatOrErrno, const char *msg, ...) {
}
// I/O error not caught!
if (msg) {
+#if !defined(RT_DEVICE_COMPILATION)
va_list ap;
va_start(ap, msg);
CrashArgs(msg, ap);
va_end(ap);
+#else
+ Crash("not implemented yet: IOSTAT with varargs");
+#endif
} else if (const char *errstr{IostatErrorString(iostatOrErrno)}) {
Crash(errstr);
} else {
+#if !defined(RT_DEVICE_COMPILATION)
Crash("I/O error (errno=%d): %s", iostatOrErrno,
std::strerror(iostatOrErrno));
+#else
+ Crash("I/O error (errno=%d)", iostatOrErrno);
+#endif
}
}
@@ -85,8 +99,6 @@ void IoErrorHandler::Forward(
}
}
-void IoErrorHandler::SignalErrno() { SignalError(errno); }
-
void IoErrorHandler::SignalEnd() { SignalError(IostatEnd); }
void IoErrorHandler::SignalEor() { SignalError(IostatEor); }
@@ -97,6 +109,10 @@ void IoErrorHandler::SignalPendingError() {
SignalError(error);
}
+RT_OFFLOAD_API_GROUP_END
+
+void IoErrorHandler::SignalErrno() { SignalError(errno); }
+
bool IoErrorHandler::GetIoMsg(char *buffer, std::size_t bufferLength) {
const char *msg{ioMsg_.get()};
if (!msg) {
@@ -132,7 +148,7 @@ bool IoErrorHandler::GetIoMsg(char *buffer, std::size_t bufferLength) {
ToFortranDefaultCharacter(buffer, bufferLength, msg);
return true;
} else if (ok) {
- std::size_t copied{std::strlen(buffer)};
+ std::size_t copied{Fortran::runtime::strlen(buffer)};
if (copied < bufferLength) {
std::memset(buffer + copied, ' ', bufferLength - copied);
}
diff --git a/flang/runtime/io-error.h b/flang/runtime/io-error.h
index 565e7153351e..0fe11c9185c0 100644
--- a/flang/runtime/io-error.h
+++ b/flang/runtime/io-error.h
@@ -26,14 +26,15 @@ namespace Fortran::runtime::io {
class IoErrorHandler : public Terminator {
public:
using Terminator::Terminator;
- explicit IoErrorHandler(const Terminator &that) : Terminator{that} {}
- void HasIoStat() { flags_ |= hasIoStat; }
- void HasErrLabel() { flags_ |= hasErr; }
- void HasEndLabel() { flags_ |= hasEnd; }
- void HasEorLabel() { flags_ |= hasEor; }
- void HasIoMsg() { flags_ |= hasIoMsg; }
+ explicit RT_API_ATTRS IoErrorHandler(const Terminator &that)
+ : Terminator{that} {}
+ RT_API_ATTRS void HasIoStat() { flags_ |= hasIoStat; }
+ RT_API_ATTRS void HasErrLabel() { flags_ |= hasErr; }
+ RT_API_ATTRS void HasEndLabel() { flags_ |= hasEnd; }
+ RT_API_ATTRS void HasEorLabel() { flags_ |= hasEor; }
+ RT_API_ATTRS void HasIoMsg() { flags_ |= hasIoMsg; }
- bool InError() const {
+ RT_API_ATTRS bool InError() const {
return ioStat_ != IostatOk || pendingError_ != IostatOk;
}
@@ -41,22 +42,25 @@ public:
// Begin...() API routines before it is known whether they
// have error handling control list items. Such statements
// have an ErroneousIoStatementState with a pending error.
- void SetPendingError(int iostat) { pendingError_ = iostat; }
+ RT_API_ATTRS void SetPendingError(int iostat) { pendingError_ = iostat; }
- void SignalError(int iostatOrErrno, const char *msg, ...);
- void SignalError(int iostatOrErrno);
- template <typename... X> void SignalError(const char *msg, X &&...xs) {
+ RT_API_ATTRS void SignalError(int iostatOrErrno, const char *msg, ...);
+ RT_API_ATTRS void SignalError(int iostatOrErrno);
+ template <typename... X>
+ RT_API_ATTRS void SignalError(const char *msg, X &&...xs) {
SignalError(IostatGenericError, msg, std::forward<X>(xs)...);
}
- void Forward(int iostatOrErrno, const char *, std::size_t);
+ RT_API_ATTRS void Forward(int iostatOrErrno, const char *, std::size_t);
void SignalErrno(); // SignalError(errno)
- void SignalEnd(); // input only; EOF on internal write is an error
- void SignalEor(); // non-advancing input only; EOR on write is an error
- void SignalPendingError();
+ RT_API_ATTRS void
+ SignalEnd(); // input only; EOF on internal write is an error
+ RT_API_ATTRS void
+ SignalEor(); // non-advancing input only; EOR on write is an error
+ RT_API_ATTRS void SignalPendingError();
- int GetIoStat() const { return ioStat_; }
+ RT_API_ATTRS int GetIoStat() const { return ioStat_; }
bool GetIoMsg(char *, std::size_t);
private:
diff --git a/flang/runtime/io-stmt.cpp b/flang/runtime/io-stmt.cpp
index 075d7b5ae518..022e4c806bf6 100644
--- a/flang/runtime/io-stmt.cpp
+++ b/flang/runtime/io-stmt.cpp
@@ -21,6 +21,7 @@
#include <type_traits>
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
bool IoStatementBase::Emit(const char *, std::size_t, std::size_t) {
return false;
@@ -44,10 +45,6 @@ Fortran::common::optional<DataEdit> IoStatementBase::GetNextDataEdit(
return Fortran::common::nullopt;
}
-ExternalFileUnit *IoStatementBase::GetExternalFileUnit() const {
- return nullptr;
-}
-
bool IoStatementBase::BeginReadingRecord() { return true; }
void IoStatementBase::FinishReadingRecord() {}
@@ -56,6 +53,12 @@ void IoStatementBase::HandleAbsolutePosition(std::int64_t) {}
void IoStatementBase::HandleRelativePosition(std::int64_t) {}
+std::int64_t IoStatementBase::InquirePos() { return 0; }
+
+ExternalFileUnit *IoStatementBase::GetExternalFileUnit() const {
+ return nullptr;
+}
+
bool IoStatementBase::Inquire(InquiryKeywordHash, char *, std::size_t) {
return false;
}
@@ -70,8 +73,6 @@ bool IoStatementBase::Inquire(InquiryKeywordHash, std::int64_t &) {
return false;
}
-std::int64_t IoStatementBase::InquirePos() { return 0; }
-
void IoStatementBase::BadInquiryKeywordHashCrash(InquiryKeywordHash inquiry) {
char buffer[16];
const char *decode{InquiryKeywordHashDecode(buffer, sizeof buffer, inquiry)};
@@ -142,21 +143,23 @@ std::int64_t InternalIoStatementState<DIR>::InquirePos() {
}
template <Direction DIR, typename CHAR>
+RT_API_ATTRS
InternalFormattedIoStatementState<DIR, CHAR>::InternalFormattedIoStatementState(
Buffer buffer, std::size_t length, const CharType *format,
std::size_t formatLength, const Descriptor *formatDescriptor,
const char *sourceFile, int sourceLine)
: InternalIoStatementState<DIR>{buffer, length, sourceFile, sourceLine},
- ioStatementState_{*this}, format_{*this, format, formatLength,
- formatDescriptor} {}
+ ioStatementState_{*this},
+ format_{*this, format, formatLength, formatDescriptor} {}
template <Direction DIR, typename CHAR>
+RT_API_ATTRS
InternalFormattedIoStatementState<DIR, CHAR>::InternalFormattedIoStatementState(
const Descriptor &d, const CharType *format, std::size_t formatLength,
const Descriptor *formatDescriptor, const char *sourceFile, int sourceLine)
: InternalIoStatementState<DIR>{d, sourceFile, sourceLine},
- ioStatementState_{*this}, format_{*this, format, formatLength,
- formatDescriptor} {}
+ ioStatementState_{*this},
+ format_{*this, format, formatLength, formatDescriptor} {}
template <Direction DIR, typename CHAR>
void InternalFormattedIoStatementState<DIR, CHAR>::CompleteOperation() {
@@ -227,7 +230,17 @@ ConnectionState &ExternalIoStatementBase::GetConnectionState() { return unit_; }
int ExternalIoStatementBase::EndIoStatement() {
CompleteOperation();
auto result{IoStatementBase::EndIoStatement()};
+#if !defined(RT_USE_PSEUDO_FILE_UNIT)
unit_.EndIoStatement(); // annihilates *this in unit_.u_
+#else
+ // Fetch the unit pointer before *this disappears.
+ ExternalFileUnit *unitPtr{&unit_};
+ // The pseudo file units are dynamically allocated
+ // and are not tracked in the unit map.
+ // They have to be destructed and deallocated here.
+ unitPtr->~ExternalFileUnit();
+ FreeMemory(unitPtr);
+#endif
return result;
}
@@ -994,7 +1007,9 @@ void ExternalMiscIoStatementState::CompleteOperation() {
switch (which_) {
case Flush:
ext.FlushOutput(*this);
+#if !defined(RT_DEVICE_COMPILATION)
std::fflush(nullptr); // flushes C stdio output streams (12.9(2))
+#endif
break;
case Backspace:
ext.BackspaceRecord(*this);
@@ -1498,4 +1513,5 @@ int ErroneousIoStatementState::EndIoStatement() {
return IoStatementBase::EndIoStatement();
}
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/io-stmt.h b/flang/runtime/io-stmt.h
index e00d54980aae..8b5752311de5 100644
--- a/flang/runtime/io-stmt.h
+++ b/flang/runtime/io-stmt.h
@@ -21,9 +21,9 @@
#include "flang/Common/visit.h"
#include "flang/Runtime/descriptor.h"
#include "flang/Runtime/io-api.h"
+#include <flang/Common/variant.h>
#include <functional>
#include <type_traits>
-#include <variant>
namespace Fortran::runtime::io {
@@ -61,8 +61,8 @@ using IoDirectionState = std::conditional_t<D == Direction::Input,
template <Direction D> class FormattedIoStatementState {};
template <> class FormattedIoStatementState<Direction::Input> {
public:
- std::size_t GetEditDescriptorChars() const;
- void GotChar(int);
+ RT_API_ATTRS std::size_t GetEditDescriptorChars() const;
+ RT_API_ATTRS void GotChar(int);
private:
// Account of characters read for edit descriptors (i.e., formatted I/O
@@ -73,7 +73,7 @@ private:
// The Cookie type in the I/O API is a pointer (for C) to this class.
class IoStatementState {
public:
- template <typename A> explicit IoStatementState(A &x) : u_{x} {}
+ template <typename A> explicit RT_API_ATTRS IoStatementState(A &x) : u_{x} {}
// These member functions each project themselves into the active alternative.
// They're used by per-data-item routines in the I/O API (e.g., OutputReal64)
@@ -85,34 +85,39 @@ public:
// It is called by EndIoStatement(), but it can be invoked earlier to
// catch errors for (e.g.) GetIoMsg() and GetNewUnit(). If called
// more than once, it is a no-op.
- void CompleteOperation();
+ RT_API_ATTRS void CompleteOperation();
// Completes an I/O statement and reclaims storage.
- int EndIoStatement();
-
- bool Emit(const char *, std::size_t bytes, std::size_t elementBytes = 0);
- bool Receive(char *, std::size_t, std::size_t elementBytes = 0);
- std::size_t GetNextInputBytes(const char *&);
- bool AdvanceRecord(int = 1);
- void BackspaceRecord();
- void HandleRelativePosition(std::int64_t byteOffset);
- void HandleAbsolutePosition(std::int64_t byteOffset); // for r* in list I/O
- Fortran::common::optional<DataEdit> GetNextDataEdit(int maxRepeat = 1);
- ExternalFileUnit *GetExternalFileUnit() const; // null if internal unit
- bool BeginReadingRecord();
- void FinishReadingRecord();
- bool Inquire(InquiryKeywordHash, char *, std::size_t);
- bool Inquire(InquiryKeywordHash, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t, bool &); // PENDING=
- bool Inquire(InquiryKeywordHash, std::int64_t &);
- std::int64_t InquirePos();
- void GotChar(signed int = 1); // for READ(SIZE=); can be <0
-
- MutableModes &mutableModes();
- ConnectionState &GetConnectionState();
- IoErrorHandler &GetIoErrorHandler() const;
+ RT_API_ATTRS int EndIoStatement();
+
+ RT_API_ATTRS bool Emit(
+ const char *, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS bool Receive(char *, std::size_t, std::size_t elementBytes = 0);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&);
+ RT_API_ATTRS bool AdvanceRecord(int = 1);
+ RT_API_ATTRS void BackspaceRecord();
+ RT_API_ATTRS void HandleRelativePosition(std::int64_t byteOffset);
+ RT_API_ATTRS void HandleAbsolutePosition(
+ std::int64_t byteOffset); // for r* in list I/O
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
+ int maxRepeat = 1);
+ RT_API_ATTRS ExternalFileUnit *
+ GetExternalFileUnit() const; // null if internal unit
+ RT_API_ATTRS bool BeginReadingRecord();
+ RT_API_ATTRS void FinishReadingRecord();
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, char *, std::size_t);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, bool &);
+ RT_API_ATTRS bool Inquire(
+ InquiryKeywordHash, std::int64_t, bool &); // PENDING=
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t &);
+ RT_API_ATTRS std::int64_t InquirePos();
+ RT_API_ATTRS void GotChar(signed int = 1); // for READ(SIZE=); can be <0
+
+ RT_API_ATTRS MutableModes &mutableModes();
+ RT_API_ATTRS ConnectionState &GetConnectionState();
+ RT_API_ATTRS IoErrorHandler &GetIoErrorHandler() const;
// N.B.: this also works with base classes
- template <typename A> A *get_if() const {
+ template <typename A> RT_API_ATTRS A *get_if() const {
return common::visit(
[](auto &x) -> A * {
if constexpr (std::is_convertible_v<decltype(x.get()), A &>) {
@@ -124,7 +129,8 @@ public:
}
// Vacant after the end of the current record
- Fortran::common::optional<char32_t> GetCurrentChar(std::size_t &byteCount);
+ RT_API_ATTRS Fortran::common::optional<char32_t> GetCurrentChar(
+ std::size_t &byteCount);
// The "remaining" arguments to CueUpInput(), SkipSpaces(), & NextInField()
// are always in units of bytes, not characters; the distinction matters
@@ -132,7 +138,7 @@ public:
// For fixed-width fields, return the number of remaining bytes.
// Skip over leading blanks.
- Fortran::common::optional<int> CueUpInput(const DataEdit &edit) {
+ RT_API_ATTRS Fortran::common::optional<int> CueUpInput(const DataEdit &edit) {
Fortran::common::optional<int> remaining;
if (edit.IsListDirected()) {
std::size_t byteCount{0};
@@ -150,7 +156,7 @@ public:
return remaining;
}
- Fortran::common::optional<char32_t> SkipSpaces(
+ RT_API_ATTRS Fortran::common::optional<char32_t> SkipSpaces(
Fortran::common::optional<int> &remaining) {
while (!remaining || *remaining > 0) {
std::size_t byteCount{0};
@@ -175,15 +181,16 @@ public:
// Acquires the next input character, respecting any applicable field width
// or separator character.
- Fortran::common::optional<char32_t> NextInField(
+ RT_API_ATTRS Fortran::common::optional<char32_t> NextInField(
Fortran::common::optional<int> &remaining, const DataEdit &);
// Detect and signal any end-of-record condition after input.
// Returns true if at EOR and remaining input should be padded with blanks.
- bool CheckForEndOfRecord(std::size_t afterReading);
+ RT_API_ATTRS bool CheckForEndOfRecord(std::size_t afterReading);
// Skips spaces, advances records, and ignores NAMELIST comments
- Fortran::common::optional<char32_t> GetNextNonBlank(std::size_t &byteCount) {
+ RT_API_ATTRS Fortran::common::optional<char32_t> GetNextNonBlank(
+ std::size_t &byteCount) {
auto ch{GetCurrentChar(byteCount)};
bool inNamelist{mutableModes().inNamelist};
while (!ch || *ch == ' ' || *ch == '\t' || (inNamelist && *ch == '!')) {
@@ -197,7 +204,8 @@ public:
return ch;
}
- template <Direction D> bool CheckFormattedStmtType(const char *name) {
+ template <Direction D>
+ RT_API_ATTRS bool CheckFormattedStmtType(const char *name) {
if (get_if<FormattedIoStatementState<D>>()) {
return true;
} else {
@@ -260,31 +268,33 @@ class IoStatementBase : public IoErrorHandler {
public:
using IoErrorHandler::IoErrorHandler;
- bool completedOperation() const { return completedOperation_; }
+ RT_API_ATTRS bool completedOperation() const { return completedOperation_; }
- void CompleteOperation() { completedOperation_ = true; }
- int EndIoStatement() { return GetIoStat(); }
+ RT_API_ATTRS void CompleteOperation() { completedOperation_ = true; }
+ RT_API_ATTRS int EndIoStatement() { return GetIoStat(); }
// These are default no-op backstops that can be overridden by descendants.
- bool Emit(const char *, std::size_t bytes, std::size_t elementBytes = 0);
- bool Receive(char *, std::size_t bytes, std::size_t elementBytes = 0);
- std::size_t GetNextInputBytes(const char *&);
- bool AdvanceRecord(int);
- void BackspaceRecord();
- void HandleRelativePosition(std::int64_t);
- void HandleAbsolutePosition(std::int64_t);
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS bool Emit(
+ const char *, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS bool Receive(
+ char *, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&);
+ RT_API_ATTRS bool AdvanceRecord(int);
+ RT_API_ATTRS void BackspaceRecord();
+ RT_API_ATTRS void HandleRelativePosition(std::int64_t);
+ RT_API_ATTRS void HandleAbsolutePosition(std::int64_t);
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
IoStatementState &, int maxRepeat = 1);
- ExternalFileUnit *GetExternalFileUnit() const;
- bool BeginReadingRecord();
- void FinishReadingRecord();
- bool Inquire(InquiryKeywordHash, char *, std::size_t);
- bool Inquire(InquiryKeywordHash, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t &);
- std::int64_t InquirePos();
+ RT_API_ATTRS ExternalFileUnit *GetExternalFileUnit() const;
+ RT_API_ATTRS bool BeginReadingRecord();
+ RT_API_ATTRS void FinishReadingRecord();
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, char *, std::size_t);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t &);
+ RT_API_ATTRS std::int64_t InquirePos();
- void BadInquiryKeywordHashCrash(InquiryKeywordHash);
+ RT_API_ATTRS void BadInquiryKeywordHashCrash(InquiryKeywordHash);
protected:
bool completedOperation_{false};
@@ -296,14 +306,14 @@ template <>
class ListDirectedStatementState<Direction::Output>
: public FormattedIoStatementState<Direction::Output> {
public:
- bool EmitLeadingSpaceOrAdvance(
+ RT_API_ATTRS bool EmitLeadingSpaceOrAdvance(
IoStatementState &, std::size_t = 1, bool isCharacter = false);
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
IoStatementState &, int maxRepeat = 1);
- bool lastWasUndelimitedCharacter() const {
+ RT_API_ATTRS bool lastWasUndelimitedCharacter() const {
return lastWasUndelimitedCharacter_;
}
- void set_lastWasUndelimitedCharacter(bool yes = true) {
+ RT_API_ATTRS void set_lastWasUndelimitedCharacter(bool yes = true) {
lastWasUndelimitedCharacter_ = yes;
}
@@ -314,20 +324,20 @@ template <>
class ListDirectedStatementState<Direction::Input>
: public FormattedIoStatementState<Direction::Input> {
public:
- bool inNamelistSequence() const { return inNamelistSequence_; }
- int EndIoStatement();
+ RT_API_ATTRS bool inNamelistSequence() const { return inNamelistSequence_; }
+ RT_API_ATTRS int EndIoStatement();
// Skips value separators, handles repetition and null values.
// Vacant when '/' appears; present with descriptor == ListDirectedNullValue
// when a null value appears.
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
IoStatementState &, int maxRepeat = 1);
// Each NAMELIST input item is treated like a distinct list-directed
// input statement. This member function resets some state so that
// repetition and null values work correctly for each successive
// NAMELIST input item.
- void ResetForNextNamelistItem(bool inNamelistSequence) {
+ RT_API_ATTRS void ResetForNextNamelistItem(bool inNamelistSequence) {
remaining_ = 0;
if (repeatPosition_) {
repeatPosition_->Cancel();
@@ -353,21 +363,22 @@ class InternalIoStatementState : public IoStatementBase,
public:
using Buffer =
std::conditional_t<DIR == Direction::Input, const char *, char *>;
- InternalIoStatementState(Buffer, std::size_t,
+ RT_API_ATTRS InternalIoStatementState(Buffer, std::size_t,
const char *sourceFile = nullptr, int sourceLine = 0);
- InternalIoStatementState(
+ RT_API_ATTRS InternalIoStatementState(
const Descriptor &, const char *sourceFile = nullptr, int sourceLine = 0);
- int EndIoStatement();
-
- bool Emit(const char *data, std::size_t bytes, std::size_t elementBytes = 0);
- std::size_t GetNextInputBytes(const char *&);
- bool AdvanceRecord(int = 1);
- void BackspaceRecord();
- ConnectionState &GetConnectionState() { return unit_; }
- MutableModes &mutableModes() { return unit_.modes; }
- void HandleRelativePosition(std::int64_t);
- void HandleAbsolutePosition(std::int64_t);
- std::int64_t InquirePos();
+ RT_API_ATTRS int EndIoStatement();
+
+ RT_API_ATTRS bool Emit(
+ const char *data, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&);
+ RT_API_ATTRS bool AdvanceRecord(int = 1);
+ RT_API_ATTRS void BackspaceRecord();
+ RT_API_ATTRS ConnectionState &GetConnectionState() { return unit_; }
+ RT_API_ATTRS MutableModes &mutableModes() { return unit_.modes; }
+ RT_API_ATTRS void HandleRelativePosition(std::int64_t);
+ RT_API_ATTRS void HandleAbsolutePosition(std::int64_t);
+ RT_API_ATTRS std::int64_t InquirePos();
protected:
bool free_{true};
@@ -381,17 +392,20 @@ class InternalFormattedIoStatementState
public:
using CharType = CHAR;
using typename InternalIoStatementState<DIR>::Buffer;
- InternalFormattedIoStatementState(Buffer internal, std::size_t internalLength,
+ RT_API_ATTRS InternalFormattedIoStatementState(Buffer internal,
+ std::size_t internalLength, const CharType *format,
+ std::size_t formatLength, const Descriptor *formatDescriptor = nullptr,
+ const char *sourceFile = nullptr, int sourceLine = 0);
+ RT_API_ATTRS InternalFormattedIoStatementState(const Descriptor &,
const CharType *format, std::size_t formatLength,
const Descriptor *formatDescriptor = nullptr,
const char *sourceFile = nullptr, int sourceLine = 0);
- InternalFormattedIoStatementState(const Descriptor &, const CharType *format,
- std::size_t formatLength, const Descriptor *formatDescriptor = nullptr,
- const char *sourceFile = nullptr, int sourceLine = 0);
- IoStatementState &ioStatementState() { return ioStatementState_; }
- void CompleteOperation();
- int EndIoStatement();
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS IoStatementState &ioStatementState() {
+ return ioStatementState_;
+ }
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
IoStatementState &, int maxRepeat = 1) {
return format_.GetNextDataEdit(*this, maxRepeat);
}
@@ -408,14 +422,17 @@ class InternalListIoStatementState : public InternalIoStatementState<DIR>,
public ListDirectedStatementState<DIR> {
public:
using typename InternalIoStatementState<DIR>::Buffer;
- InternalListIoStatementState(Buffer internal, std::size_t internalLength,
- const char *sourceFile = nullptr, int sourceLine = 0);
- InternalListIoStatementState(
+ RT_API_ATTRS InternalListIoStatementState(Buffer internal,
+ std::size_t internalLength, const char *sourceFile = nullptr,
+ int sourceLine = 0);
+ RT_API_ATTRS InternalListIoStatementState(
const Descriptor &, const char *sourceFile = nullptr, int sourceLine = 0);
- IoStatementState &ioStatementState() { return ioStatementState_; }
+ RT_API_ATTRS IoStatementState &ioStatementState() {
+ return ioStatementState_;
+ }
using ListDirectedStatementState<DIR>::GetNextDataEdit;
- void CompleteOperation();
- int EndIoStatement();
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
private:
IoStatementState ioStatementState_; // points to *this
@@ -424,16 +441,16 @@ private:
class ExternalIoStatementBase : public IoStatementBase {
public:
- ExternalIoStatementBase(
+ RT_API_ATTRS ExternalIoStatementBase(
ExternalFileUnit &, const char *sourceFile = nullptr, int sourceLine = 0);
- ExternalFileUnit &unit() { return unit_; }
- MutableModes &mutableModes();
- ConnectionState &GetConnectionState();
- int asynchronousID() const { return asynchronousID_; }
- int EndIoStatement();
- ExternalFileUnit *GetExternalFileUnit() const { return &unit_; }
- void SetAsynchronous();
- std::int64_t InquirePos();
+ RT_API_ATTRS ExternalFileUnit &unit() { return unit_; }
+ RT_API_ATTRS MutableModes &mutableModes();
+ RT_API_ATTRS ConnectionState &GetConnectionState();
+ RT_API_ATTRS int asynchronousID() const { return asynchronousID_; }
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS ExternalFileUnit *GetExternalFileUnit() const { return &unit_; }
+ RT_API_ATTRS void SetAsynchronous();
+ RT_API_ATTRS std::int64_t InquirePos();
private:
ExternalFileUnit &unit_;
@@ -444,19 +461,20 @@ template <Direction DIR>
class ExternalIoStatementState : public ExternalIoStatementBase,
public IoDirectionState<DIR> {
public:
- ExternalIoStatementState(
+ RT_API_ATTRS ExternalIoStatementState(
ExternalFileUnit &, const char *sourceFile = nullptr, int sourceLine = 0);
- MutableModes &mutableModes() { return mutableModes_; }
- void CompleteOperation();
- int EndIoStatement();
- bool Emit(const char *, std::size_t bytes, std::size_t elementBytes = 0);
- std::size_t GetNextInputBytes(const char *&);
- bool AdvanceRecord(int = 1);
- void BackspaceRecord();
- void HandleRelativePosition(std::int64_t);
- void HandleAbsolutePosition(std::int64_t);
- bool BeginReadingRecord();
- void FinishReadingRecord();
+ RT_API_ATTRS MutableModes &mutableModes() { return mutableModes_; }
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS bool Emit(
+ const char *, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&);
+ RT_API_ATTRS bool AdvanceRecord(int = 1);
+ RT_API_ATTRS void BackspaceRecord();
+ RT_API_ATTRS void HandleRelativePosition(std::int64_t);
+ RT_API_ATTRS void HandleAbsolutePosition(std::int64_t);
+ RT_API_ATTRS bool BeginReadingRecord();
+ RT_API_ATTRS void FinishReadingRecord();
private:
// These are forked from ConnectionState's modes at the beginning
@@ -471,12 +489,13 @@ class ExternalFormattedIoStatementState
public FormattedIoStatementState<DIR> {
public:
using CharType = CHAR;
- ExternalFormattedIoStatementState(ExternalFileUnit &, const CharType *format,
- std::size_t formatLength, const Descriptor *formatDescriptor = nullptr,
+ RT_API_ATTRS ExternalFormattedIoStatementState(ExternalFileUnit &,
+ const CharType *format, std::size_t formatLength,
+ const Descriptor *formatDescriptor = nullptr,
const char *sourceFile = nullptr, int sourceLine = 0);
- void CompleteOperation();
- int EndIoStatement();
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
IoStatementState &, int maxRepeat = 1) {
return format_.GetNextDataEdit(*this, maxRepeat);
}
@@ -491,7 +510,7 @@ class ExternalListIoStatementState : public ExternalIoStatementState<DIR>,
public:
using ExternalIoStatementState<DIR>::ExternalIoStatementState;
using ListDirectedStatementState<DIR>::GetNextDataEdit;
- int EndIoStatement();
+ RT_API_ATTRS int EndIoStatement();
};
template <Direction DIR>
@@ -499,24 +518,25 @@ class ExternalUnformattedIoStatementState
: public ExternalIoStatementState<DIR> {
public:
using ExternalIoStatementState<DIR>::ExternalIoStatementState;
- bool Receive(char *, std::size_t, std::size_t elementBytes = 0);
+ RT_API_ATTRS bool Receive(char *, std::size_t, std::size_t elementBytes = 0);
};
template <Direction DIR>
class ChildIoStatementState : public IoStatementBase,
public IoDirectionState<DIR> {
public:
- ChildIoStatementState(
+ RT_API_ATTRS ChildIoStatementState(
ChildIo &, const char *sourceFile = nullptr, int sourceLine = 0);
- ChildIo &child() { return child_; }
- MutableModes &mutableModes();
- ConnectionState &GetConnectionState();
- ExternalFileUnit *GetExternalFileUnit() const;
- int EndIoStatement();
- bool Emit(const char *, std::size_t bytes, std::size_t elementBytes = 0);
- std::size_t GetNextInputBytes(const char *&);
- void HandleRelativePosition(std::int64_t);
- void HandleAbsolutePosition(std::int64_t);
+ RT_API_ATTRS ChildIo &child() { return child_; }
+ RT_API_ATTRS MutableModes &mutableModes();
+ RT_API_ATTRS ConnectionState &GetConnectionState();
+ RT_API_ATTRS ExternalFileUnit *GetExternalFileUnit() const;
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS bool Emit(
+ const char *, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&);
+ RT_API_ATTRS void HandleRelativePosition(std::int64_t);
+ RT_API_ATTRS void HandleAbsolutePosition(std::int64_t);
private:
ChildIo &child_;
@@ -527,14 +547,14 @@ class ChildFormattedIoStatementState : public ChildIoStatementState<DIR>,
public FormattedIoStatementState<DIR> {
public:
using CharType = CHAR;
- ChildFormattedIoStatementState(ChildIo &, const CharType *format,
+ RT_API_ATTRS ChildFormattedIoStatementState(ChildIo &, const CharType *format,
std::size_t formatLength, const Descriptor *formatDescriptor = nullptr,
const char *sourceFile = nullptr, int sourceLine = 0);
- MutableModes &mutableModes() { return mutableModes_; }
- void CompleteOperation();
- int EndIoStatement();
- bool AdvanceRecord(int = 1);
- Fortran::common::optional<DataEdit> GetNextDataEdit(
+ RT_API_ATTRS MutableModes &mutableModes() { return mutableModes_; }
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS bool AdvanceRecord(int = 1);
+ RT_API_ATTRS Fortran::common::optional<DataEdit> GetNextDataEdit(
IoStatementState &, int maxRepeat = 1) {
return format_.GetNextDataEdit(*this, maxRepeat);
}
@@ -550,34 +570,42 @@ class ChildListIoStatementState : public ChildIoStatementState<DIR>,
public:
using ChildIoStatementState<DIR>::ChildIoStatementState;
using ListDirectedStatementState<DIR>::GetNextDataEdit;
- int EndIoStatement();
+ RT_API_ATTRS int EndIoStatement();
};
template <Direction DIR>
class ChildUnformattedIoStatementState : public ChildIoStatementState<DIR> {
public:
using ChildIoStatementState<DIR>::ChildIoStatementState;
- bool Receive(char *, std::size_t, std::size_t elementBytes = 0);
+ RT_API_ATTRS bool Receive(char *, std::size_t, std::size_t elementBytes = 0);
};
// OPEN
class OpenStatementState : public ExternalIoStatementBase {
public:
- OpenStatementState(ExternalFileUnit &unit, bool wasExtant, bool isNewUnit,
- const char *sourceFile = nullptr, int sourceLine = 0)
+ RT_API_ATTRS OpenStatementState(ExternalFileUnit &unit, bool wasExtant,
+ bool isNewUnit, const char *sourceFile = nullptr, int sourceLine = 0)
: ExternalIoStatementBase{unit, sourceFile, sourceLine},
wasExtant_{wasExtant}, isNewUnit_{isNewUnit} {}
- bool wasExtant() const { return wasExtant_; }
- void set_status(OpenStatus status) { status_ = status; } // STATUS=
- void set_path(const char *, std::size_t); // FILE=
- void set_position(Position position) { position_ = position; } // POSITION=
- void set_action(Action action) { action_ = action; } // ACTION=
- void set_convert(Convert convert) { convert_ = convert; } // CONVERT=
- void set_access(Access access) { access_ = access; } // ACCESS=
- void set_isUnformatted(bool yes = true) { isUnformatted_ = yes; } // FORM=
-
- void CompleteOperation();
- int EndIoStatement();
+ RT_API_ATTRS bool wasExtant() const { return wasExtant_; }
+ RT_API_ATTRS void set_status(OpenStatus status) {
+ status_ = status;
+ } // STATUS=
+ RT_API_ATTRS void set_path(const char *, std::size_t); // FILE=
+ RT_API_ATTRS void set_position(Position position) {
+ position_ = position;
+ } // POSITION=
+ RT_API_ATTRS void set_action(Action action) { action_ = action; } // ACTION=
+ RT_API_ATTRS void set_convert(Convert convert) {
+ convert_ = convert;
+ } // CONVERT=
+ RT_API_ATTRS void set_access(Access access) { access_ = access; } // ACCESS=
+ RT_API_ATTRS void set_isUnformatted(bool yes = true) {
+ isUnformatted_ = yes;
+ } // FORM=
+
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
private:
bool wasExtant_;
@@ -594,11 +622,11 @@ private:
class CloseStatementState : public ExternalIoStatementBase {
public:
- CloseStatementState(ExternalFileUnit &unit, const char *sourceFile = nullptr,
- int sourceLine = 0)
+ RT_API_ATTRS CloseStatementState(ExternalFileUnit &unit,
+ const char *sourceFile = nullptr, int sourceLine = 0)
: ExternalIoStatementBase{unit, sourceFile, sourceLine} {}
- void set_status(CloseStatus status) { status_ = status; }
- int EndIoStatement();
+ RT_API_ATTRS void set_status(CloseStatus status) { status_ = status; }
+ RT_API_ATTRS int EndIoStatement();
private:
CloseStatus status_{CloseStatus::Keep};
@@ -608,16 +636,18 @@ private:
// and recoverable BACKSPACE(bad unit)
class NoUnitIoStatementState : public IoStatementBase {
public:
- IoStatementState &ioStatementState() { return ioStatementState_; }
- MutableModes &mutableModes() { return connection_.modes; }
- ConnectionState &GetConnectionState() { return connection_; }
- int badUnitNumber() const { return badUnitNumber_; }
- void CompleteOperation();
- int EndIoStatement();
+ RT_API_ATTRS IoStatementState &ioStatementState() {
+ return ioStatementState_;
+ }
+ RT_API_ATTRS MutableModes &mutableModes() { return connection_.modes; }
+ RT_API_ATTRS ConnectionState &GetConnectionState() { return connection_; }
+ RT_API_ATTRS int badUnitNumber() const { return badUnitNumber_; }
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
protected:
template <typename A>
- NoUnitIoStatementState(A &stmt, const char *sourceFile = nullptr,
+ RT_API_ATTRS NoUnitIoStatementState(A &stmt, const char *sourceFile = nullptr,
int sourceLine = 0, int badUnitNumber = -1)
: IoStatementBase{sourceFile, sourceLine}, ioStatementState_{stmt},
badUnitNumber_{badUnitNumber} {}
@@ -630,10 +660,10 @@ private:
class NoopStatementState : public NoUnitIoStatementState {
public:
- NoopStatementState(
+ RT_API_ATTRS NoopStatementState(
const char *sourceFile = nullptr, int sourceLine = 0, int unitNumber = -1)
: NoUnitIoStatementState{*this, sourceFile, sourceLine, unitNumber} {}
- void set_status(CloseStatus) {} // discards
+ RT_API_ATTRS void set_status(CloseStatus) {} // discards
};
extern template class InternalIoStatementState<Direction::Output>;
@@ -674,32 +704,32 @@ extern template class FormatControl<
class InquireUnitState : public ExternalIoStatementBase {
public:
- InquireUnitState(ExternalFileUnit &unit, const char *sourceFile = nullptr,
- int sourceLine = 0);
- bool Inquire(InquiryKeywordHash, char *, std::size_t);
- bool Inquire(InquiryKeywordHash, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t &);
+ RT_API_ATTRS InquireUnitState(ExternalFileUnit &unit,
+ const char *sourceFile = nullptr, int sourceLine = 0);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, char *, std::size_t);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t &);
};
class InquireNoUnitState : public NoUnitIoStatementState {
public:
- InquireNoUnitState(const char *sourceFile = nullptr, int sourceLine = 0,
- int badUnitNumber = -1);
- bool Inquire(InquiryKeywordHash, char *, std::size_t);
- bool Inquire(InquiryKeywordHash, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t &);
+ RT_API_ATTRS InquireNoUnitState(const char *sourceFile = nullptr,
+ int sourceLine = 0, int badUnitNumber = -1);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, char *, std::size_t);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t &);
};
class InquireUnconnectedFileState : public NoUnitIoStatementState {
public:
- InquireUnconnectedFileState(OwningPtr<char> &&path,
+ RT_API_ATTRS InquireUnconnectedFileState(OwningPtr<char> &&path,
const char *sourceFile = nullptr, int sourceLine = 0);
- bool Inquire(InquiryKeywordHash, char *, std::size_t);
- bool Inquire(InquiryKeywordHash, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
- bool Inquire(InquiryKeywordHash, std::int64_t &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, char *, std::size_t);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t, bool &);
+ RT_API_ATTRS bool Inquire(InquiryKeywordHash, std::int64_t &);
private:
OwningPtr<char> path_; // trimmed and NUL terminated
@@ -708,9 +738,11 @@ private:
class InquireIOLengthState : public NoUnitIoStatementState,
public OutputStatementState {
public:
- InquireIOLengthState(const char *sourceFile = nullptr, int sourceLine = 0);
- std::size_t bytes() const { return bytes_; }
- bool Emit(const char *, std::size_t bytes, std::size_t elementBytes = 0);
+ RT_API_ATTRS InquireIOLengthState(
+ const char *sourceFile = nullptr, int sourceLine = 0);
+ RT_API_ATTRS std::size_t bytes() const { return bytes_; }
+ RT_API_ATTRS bool Emit(
+ const char *, std::size_t bytes, std::size_t elementBytes = 0);
private:
std::size_t bytes_{0};
@@ -719,11 +751,11 @@ private:
class ExternalMiscIoStatementState : public ExternalIoStatementBase {
public:
enum Which { Flush, Backspace, Endfile, Rewind, Wait };
- ExternalMiscIoStatementState(ExternalFileUnit &unit, Which which,
+ RT_API_ATTRS ExternalMiscIoStatementState(ExternalFileUnit &unit, Which which,
const char *sourceFile = nullptr, int sourceLine = 0)
: ExternalIoStatementBase{unit, sourceFile, sourceLine}, which_{which} {}
- void CompleteOperation();
- int EndIoStatement();
+ RT_API_ATTRS void CompleteOperation();
+ RT_API_ATTRS int EndIoStatement();
private:
Which which_;
@@ -731,15 +763,15 @@ private:
class ErroneousIoStatementState : public IoStatementBase {
public:
- explicit ErroneousIoStatementState(Iostat iostat,
+ explicit RT_API_ATTRS ErroneousIoStatementState(Iostat iostat,
ExternalFileUnit *unit = nullptr, const char *sourceFile = nullptr,
int sourceLine = 0)
: IoStatementBase{sourceFile, sourceLine}, unit_{unit} {
SetPendingError(iostat);
}
- int EndIoStatement();
- ConnectionState &GetConnectionState() { return connection_; }
- MutableModes &mutableModes() { return connection_.modes; }
+ RT_API_ATTRS int EndIoStatement();
+ RT_API_ATTRS ConnectionState &GetConnectionState() { return connection_; }
+ RT_API_ATTRS MutableModes &mutableModes() { return connection_.modes; }
private:
ConnectionState connection_;
diff --git a/flang/runtime/iostat.cpp b/flang/runtime/iostat.cpp
index c993b778e9e1..39e224cb0128 100644
--- a/flang/runtime/iostat.cpp
+++ b/flang/runtime/iostat.cpp
@@ -9,6 +9,8 @@
#include "flang/Runtime/iostat.h"
namespace Fortran::runtime::io {
+RT_OFFLOAD_API_GROUP_BEGIN
+
const char *IostatErrorString(int iostat) {
switch (iostat) {
case IostatOk:
@@ -122,4 +124,6 @@ const char *IostatErrorString(int iostat) {
}
}
+RT_OFFLOAD_API_GROUP_END
+
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/lock.h b/flang/runtime/lock.h
index 5fdcf4745c21..9f27a8295c46 100644
--- a/flang/runtime/lock.h
+++ b/flang/runtime/lock.h
@@ -12,6 +12,7 @@
#define FORTRAN_RUNTIME_LOCK_H_
#include "terminator.h"
+#include "tools.h"
// Avoid <mutex> if possible to avoid introduction of C++ runtime
// library dependence.
@@ -35,7 +36,17 @@ namespace Fortran::runtime {
class Lock {
public:
-#if USE_PTHREADS
+#if RT_USE_PSEUDO_LOCK
+ // No lock implementation, e.g. for using together
+ // with RT_USE_PSEUDO_FILE_UNIT.
+ // The users of Lock class may use it under
+ // USE_PTHREADS and otherwise, so it has to provide
+ // all the interfaces.
+ RT_API_ATTRS void Take() {}
+ RT_API_ATTRS bool Try() { return true; }
+ RT_API_ATTRS void Drop() {}
+ RT_API_ATTRS bool TakeIfNoDeadlock() { return true; }
+#elif USE_PTHREADS
Lock() { pthread_mutex_init(&mutex_, nullptr); }
~Lock() { pthread_mutex_destroy(&mutex_); }
void Take() {
@@ -79,7 +90,9 @@ public:
}
private:
-#if USE_PTHREADS
+#if RT_USE_PSEUDO_FILE_UNIT
+ // No state.
+#elif USE_PTHREADS
pthread_mutex_t mutex_{};
volatile bool isBusy_{false};
volatile pthread_t holder_;
@@ -92,8 +105,10 @@ private:
class CriticalSection {
public:
- explicit CriticalSection(Lock &lock) : lock_{lock} { lock_.Take(); }
- ~CriticalSection() { lock_.Drop(); }
+ explicit RT_API_ATTRS CriticalSection(Lock &lock) : lock_{lock} {
+ lock_.Take();
+ }
+ RT_API_ATTRS ~CriticalSection() { lock_.Drop(); }
private:
Lock &lock_;
diff --git a/flang/runtime/memory.cpp b/flang/runtime/memory.cpp
index aa6ff9723d1a..de6c4c72fdac 100644
--- a/flang/runtime/memory.cpp
+++ b/flang/runtime/memory.cpp
@@ -7,15 +7,15 @@
//===----------------------------------------------------------------------===//
#include "flang/Runtime/memory.h"
+#include "freestanding-tools.h"
#include "terminator.h"
#include "tools.h"
#include <cstdlib>
namespace Fortran::runtime {
-RT_OFFLOAD_VAR_GROUP_BEGIN
+RT_OFFLOAD_API_GROUP_BEGIN
-RT_API_ATTRS void *AllocateMemoryOrCrash(
- const Terminator &terminator, std::size_t bytes) {
+void *AllocateMemoryOrCrash(const Terminator &terminator, std::size_t bytes) {
if (void *p{std::malloc(bytes)}) {
return p;
}
@@ -27,7 +27,7 @@ RT_API_ATTRS void *AllocateMemoryOrCrash(
return nullptr;
}
-RT_API_ATTRS void *ReallocateMemoryOrCrash(
+void *ReallocateMemoryOrCrash(
const Terminator &terminator, void *ptr, std::size_t newByteSize) {
if (void *p{Fortran::runtime::realloc(ptr, newByteSize)}) {
return p;
@@ -40,7 +40,7 @@ RT_API_ATTRS void *ReallocateMemoryOrCrash(
return nullptr;
}
-RT_API_ATTRS void FreeMemory(void *p) { std::free(p); }
+void FreeMemory(void *p) { std::free(p); }
-RT_OFFLOAD_VAR_GROUP_END
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime
diff --git a/flang/runtime/namelist.cpp b/flang/runtime/namelist.cpp
index ac9234f4af83..b502d41a8d5c 100644
--- a/flang/runtime/namelist.cpp
+++ b/flang/runtime/namelist.cpp
@@ -31,9 +31,12 @@ bool IONAME(OutputNamelist)(Cookie cookie, const NamelistGroup &group) {
io.CheckFormattedStmtType<Direction::Output>("OutputNamelist");
io.mutableModes().inNamelist = true;
ConnectionState &connection{io.GetConnectionState()};
+ // The following lambda definition violates the conding style,
+ // but cuda-11.8 nvcc hits an internal error with the brace initialization.
+
// Internal function to advance records and convert case
- const auto EmitUpperCase{[&](const char *prefix, std::size_t prefixLen,
- const char *str, char suffix) -> bool {
+ const auto EmitUpperCase = [&](const char *prefix, std::size_t prefixLen,
+ const char *str, char suffix) -> bool {
if ((connection.NeedAdvance(prefixLen) &&
!(io.AdvanceRecord() && EmitAscii(io, " ", 1))) ||
!EmitAscii(io, prefix, prefixLen) ||
@@ -49,7 +52,7 @@ bool IONAME(OutputNamelist)(Cookie cookie, const NamelistGroup &group) {
}
}
return suffix == ' ' || EmitAscii(io, &suffix, 1);
- }};
+ };
// &GROUP
if (!EmitUpperCase(" &", 2, group.groupName, ' ')) {
return false;
@@ -294,7 +297,7 @@ static bool HandleSubstring(
ch = io.GetNextNonBlank(byteCount);
}
}
- if (ch && ch == ':') {
+ if (ch && *ch == ':') {
io.HandleRelativePosition(byteCount);
ch = io.GetNextNonBlank(byteCount);
if (ch) {
@@ -587,6 +590,8 @@ bool IONAME(InputNamelist)(Cookie cookie, const NamelistGroup &group) {
return true;
}
+RT_OFFLOAD_API_GROUP_BEGIN
+
bool IsNamelistNameOrSlash(IoStatementState &io) {
if (auto *listInput{
io.get_if<ListDirectedStatementState<Direction::Input>>()}) {
@@ -611,4 +616,6 @@ bool IsNamelistNameOrSlash(IoStatementState &io) {
return false;
}
+RT_OFFLOAD_API_GROUP_END
+
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/namelist.h b/flang/runtime/namelist.h
index 9a5da33a907e..25216a75e936 100644
--- a/flang/runtime/namelist.h
+++ b/flang/runtime/namelist.h
@@ -12,6 +12,7 @@
#define FORTRAN_RUNTIME_NAMELIST_H_
#include "non-tbp-dio.h"
+#include "flang/Common/api-attrs.h"
#include <cstddef>
@@ -47,7 +48,7 @@ public:
// character; for use in disambiguating a name-like value (e.g. F or T) from a
// NAMELIST group item name and for coping with short arrays. Always false
// when not reading a NAMELIST.
-bool IsNamelistNameOrSlash(IoStatementState &);
+RT_API_ATTRS bool IsNamelistNameOrSlash(IoStatementState &);
} // namespace Fortran::runtime::io
#endif // FORTRAN_RUNTIME_NAMELIST_H_
diff --git a/flang/runtime/non-tbp-dio.h b/flang/runtime/non-tbp-dio.h
index a2030dbfdfe8..05038a264ed9 100644
--- a/flang/runtime/non-tbp-dio.h
+++ b/flang/runtime/non-tbp-dio.h
@@ -39,7 +39,7 @@ struct NonTbpDefinedIo {
};
struct NonTbpDefinedIoTable {
- const NonTbpDefinedIo *Find(
+ RT_API_ATTRS const NonTbpDefinedIo *Find(
const typeInfo::DerivedType &, common::DefinedIo) const;
std::size_t items{0};
const NonTbpDefinedIo *item{nullptr};
diff --git a/flang/runtime/numeric-templates.h b/flang/runtime/numeric-templates.h
index 8ea3daaa57bc..af552f9ddfc0 100644
--- a/flang/runtime/numeric-templates.h
+++ b/flang/runtime/numeric-templates.h
@@ -20,8 +20,8 @@
#include "terminator.h"
#include "tools.h"
+#include "flang/Common/api-attrs.h"
#include "flang/Common/float128.h"
-#include "flang/Runtime/api-attrs.h"
#include <cstdint>
#include <limits>
@@ -193,11 +193,6 @@ inline RT_API_ATTRS RESULT Exponent(ARG x) {
}
}
-// Suppress the warnings about calling __host__-only std::frexp,
-// defined in C++ STD header files, from __device__ code.
-RT_DIAG_PUSH
-RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
-
// FRACTION (16.9.80)
template <typename T> inline RT_API_ATTRS T Fraction(T x) {
if (ISNANTy<T>::compute(x)) {
@@ -212,8 +207,6 @@ template <typename T> inline RT_API_ATTRS T Fraction(T x) {
}
}
-RT_DIAG_POP
-
// SET_EXPONENT (16.9.171)
template <typename T> inline RT_API_ATTRS T SetExponent(T x, std::int64_t p) {
if (ISNANTy<T>::compute(x)) {
diff --git a/flang/runtime/pointer.cpp b/flang/runtime/pointer.cpp
index b01735dc30e6..08a1223764f3 100644
--- a/flang/runtime/pointer.cpp
+++ b/flang/runtime/pointer.cpp
@@ -185,7 +185,6 @@ int RTDEF(PointerDeallocate)(Descriptor &pointer, bool hasStat,
if (!pointer.IsAllocated()) {
return ReturnError(terminator, StatBaseNull, errMsg, hasStat);
}
-#if !defined(RT_DEVICE_COMPILATION)
if (executionEnvironment.checkPointerDeallocation) {
// Validate the footer. This should fail if the pointer doesn't
// span the entire object, or the object was not allocated as a
@@ -201,7 +200,6 @@ int RTDEF(PointerDeallocate)(Descriptor &pointer, bool hasStat,
terminator, StatBadPointerDeallocation, errMsg, hasStat);
}
}
-#endif
return ReturnError(terminator,
pointer.Destroy(/*finalize=*/true, /*destroyPointers=*/true, &terminator),
errMsg, hasStat);
diff --git a/flang/runtime/pseudo-unit.cpp b/flang/runtime/pseudo-unit.cpp
new file mode 100644
index 000000000000..a57e3a59efa5
--- /dev/null
+++ b/flang/runtime/pseudo-unit.cpp
@@ -0,0 +1,169 @@
+//===-- runtime/pseudo-unit.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of ExternalFileUnit and PseudoOpenFile for
+// RT_USE_PSEUDO_FILE_UNIT=1.
+//
+//===----------------------------------------------------------------------===//
+
+#include "io-error.h"
+#include "tools.h"
+#include "unit.h"
+
+// NOTE: the header files above may define OpenMP declare target
+// variables, so they have to be included unconditionally
+// so that the offload entries are consistent between host and device.
+#if defined(RT_USE_PSEUDO_FILE_UNIT)
+#include <cstdio>
+
+namespace Fortran::runtime::io {
+
+void FlushOutputOnCrash(const Terminator &) {}
+
+ExternalFileUnit *ExternalFileUnit::LookUp(int) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUpOrCreate(
+ int, const Terminator &, bool &) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUpOrCreateAnonymous(int unit,
+ Direction direction, Fortran::common::optional<bool>,
+ const Terminator &terminator) {
+ if (direction != Direction::Output) {
+ terminator.Crash("ExternalFileUnit only supports output IO");
+ }
+ return New<ExternalFileUnit>{terminator}(unit).release();
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUp(const char *, std::size_t) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+ExternalFileUnit &ExternalFileUnit::CreateNew(int, const Terminator &) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+ExternalFileUnit *ExternalFileUnit::LookUpForClose(int) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+ExternalFileUnit &ExternalFileUnit::NewUnit(const Terminator &, bool) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+bool ExternalFileUnit::OpenUnit(Fortran::common::optional<OpenStatus> status,
+ Fortran::common::optional<Action>, Position, OwningPtr<char> &&,
+ std::size_t, Convert, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+void ExternalFileUnit::OpenAnonymousUnit(Fortran::common::optional<OpenStatus>,
+ Fortran::common::optional<Action>, Position, Convert convert,
+ IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+void ExternalFileUnit::CloseUnit(CloseStatus, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+void ExternalFileUnit::DestroyClosed() {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+Iostat ExternalFileUnit::SetDirection(Direction direction) {
+ if (direction != Direction::Output) {
+ return IostatReadFromWriteOnly;
+ }
+ direction_ = direction;
+ return IostatOk;
+}
+
+void ExternalFileUnit::CloseAll(IoErrorHandler &) {}
+
+void ExternalFileUnit::FlushAll(IoErrorHandler &) {}
+
+int ExternalFileUnit::GetAsynchronousId(IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+bool ExternalFileUnit::Wait(int) {
+ Terminator{__FILE__, __LINE__}.Crash("unsupported");
+}
+
+void PseudoOpenFile::set_mayAsynchronous(bool yes) {
+ if (yes) {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+ }
+}
+
+Fortran::common::optional<PseudoOpenFile::FileOffset>
+PseudoOpenFile::knownSize() const {
+ Terminator{__FILE__, __LINE__}.Crash("unsupported");
+}
+
+void PseudoOpenFile::Open(OpenStatus, Fortran::common::optional<Action>,
+ Position, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+void PseudoOpenFile::Close(CloseStatus, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+std::size_t PseudoOpenFile::Read(
+ FileOffset, char *, std::size_t, std::size_t, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+std::size_t PseudoOpenFile::Write(FileOffset at, const char *buffer,
+ std::size_t bytes, IoErrorHandler &handler) {
+ if (at) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+ }
+ // TODO: use persistent string buffer that can be reallocated
+ // as needed, and only freed at destruction of *this.
+ auto string{SizedNew<char>{handler}(bytes + 1)};
+ std::memcpy(string.get(), buffer, bytes);
+ string.get()[bytes] = '\0';
+ std::printf("%s", string.get());
+ return bytes;
+}
+
+void PseudoOpenFile::Truncate(FileOffset, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+int PseudoOpenFile::ReadAsynchronously(
+ FileOffset, char *, std::size_t, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+int PseudoOpenFile::WriteAsynchronously(
+ FileOffset, const char *, std::size_t, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+void PseudoOpenFile::Wait(int, IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+void PseudoOpenFile::WaitAll(IoErrorHandler &handler) {
+ handler.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+Position PseudoOpenFile::InquirePosition() const {
+ Terminator{__FILE__, __LINE__}.Crash("%s: unsupported", RT_PRETTY_FUNCTION);
+}
+
+} // namespace Fortran::runtime::io
+
+#endif // defined(RT_USE_PSEUDO_FILE_UNIT)
diff --git a/flang/runtime/reduce.cpp b/flang/runtime/reduce.cpp
new file mode 100644
index 000000000000..f8a5221a1ebf
--- /dev/null
+++ b/flang/runtime/reduce.cpp
@@ -0,0 +1,526 @@
+//===-- runtime/reduce.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REDUCE() implementation
+
+#include "flang/Runtime/reduce.h"
+#include "reduction-templates.h"
+#include "terminator.h"
+#include "tools.h"
+#include "flang/Runtime/descriptor.h"
+
+namespace Fortran::runtime {
+
+template <typename T> class ReduceAccumulator {
+public:
+ RT_API_ATTRS ReduceAccumulator(const Descriptor &array,
+ ReductionOperation<T> operation, const T *identity,
+ Terminator &terminator)
+ : array_{array}, operation_{operation}, identity_{identity},
+ terminator_{terminator} {}
+ RT_API_ATTRS void Reinitialize() { result_.reset(); }
+ template <typename A>
+ RT_API_ATTRS bool AccumulateAt(const SubscriptValue at[]) {
+ const auto *operand{array_.Element<A>(at)};
+ if (result_) {
+ result_ = operation_(&*result_, operand);
+ } else {
+ result_ = *operand;
+ }
+ return true;
+ }
+ template <typename A>
+ RT_API_ATTRS void GetResult(A *to, int /*zeroBasedDim*/ = -1) {
+ if (result_) {
+ *to = *result_;
+ } else if (identity_) {
+ *to = *identity_;
+ } else {
+ terminator_.Crash("REDUCE() without IDENTITY= has no result");
+ }
+ }
+
+private:
+ const Descriptor &array_;
+ common::optional<T> result_;
+ ReductionOperation<T> operation_;
+ const T *identity_{nullptr};
+ Terminator &terminator_;
+};
+
+template <typename T, typename OP, bool hasLength>
+class BufferedReduceAccumulator {
+public:
+ RT_API_ATTRS BufferedReduceAccumulator(const Descriptor &array, OP operation,
+ const T *identity, Terminator &terminator)
+ : array_{array}, operation_{operation}, identity_{identity},
+ terminator_{terminator} {}
+ RT_API_ATTRS void Reinitialize() { activeTemp_ = -1; }
+ template <typename A>
+ RT_API_ATTRS bool AccumulateAt(const SubscriptValue at[]) {
+ const auto *operand{array_.Element<A>(at)};
+ if (activeTemp_ >= 0) {
+ if constexpr (hasLength) {
+ operation_(&*temp_[1 - activeTemp_], length_, &*temp_[activeTemp_],
+ operand, length_, length_);
+ } else {
+ operation_(&*temp_[1 - activeTemp_], &*temp_[activeTemp_], operand);
+ }
+ activeTemp_ = 1 - activeTemp_;
+ } else {
+ activeTemp_ = 0;
+ std::memcpy(&*temp_[activeTemp_], operand, elementBytes_);
+ }
+ return true;
+ }
+ template <typename A>
+ RT_API_ATTRS void GetResult(A *to, int /*zeroBasedDim*/ = -1) {
+ if (activeTemp_ >= 0) {
+ std::memcpy(to, &*temp_[activeTemp_], elementBytes_);
+ } else if (identity_) {
+ std::memcpy(to, identity_, elementBytes_);
+ } else {
+ terminator_.Crash("REDUCE() without IDENTITY= has no result");
+ }
+ }
+
+private:
+ const Descriptor &array_;
+ OP operation_;
+ const T *identity_{nullptr};
+ Terminator &terminator_;
+ std::size_t elementBytes_{array_.ElementBytes()};
+ OwningPtr<T> temp_[2]{SizedNew<T>{terminator_}(elementBytes_),
+ SizedNew<T>{terminator_}(elementBytes_)};
+ int activeTemp_{-1};
+ std::size_t length_{elementBytes_ / sizeof(T)};
+};
+
+extern "C" {
+RT_EXT_API_GROUP_BEGIN
+
+std::int8_t RTDEF(ReduceInteger1)(const Descriptor &array,
+ ReductionOperation<std::int8_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int8_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Integer, 1>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<std::int8_t>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceInteger1Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int8_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int8_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::int8_t>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Integer, 1>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+std::int16_t RTDEF(ReduceInteger2)(const Descriptor &array,
+ ReductionOperation<std::int16_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int16_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Integer, 2>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<std::int16_t>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceInteger2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int16_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int16_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::int16_t>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Integer, 2>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+std::int32_t RTDEF(ReduceInteger4)(const Descriptor &array,
+ ReductionOperation<std::int32_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int32_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Integer, 4>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<std::int32_t>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceInteger4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int32_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int32_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::int32_t>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Integer, 4>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+std::int64_t RTDEF(ReduceInteger8)(const Descriptor &array,
+ ReductionOperation<std::int64_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int64_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Integer, 8>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<std::int64_t>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceInteger8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int64_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int64_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::int64_t>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Integer, 8>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#ifdef __SIZEOF_INT128__
+common::int128_t RTDEF(ReduceInteger16)(const Descriptor &array,
+ ReductionOperation<common::int128_t> operation, const char *source,
+ int line, int dim, const Descriptor *mask, const common::int128_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Integer, 16>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<common::int128_t>{
+ array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceInteger16Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<common::int128_t> operation, const char *source,
+ int line, int dim, const Descriptor *mask, const common::int128_t *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<common::int128_t>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Integer, 16>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#endif
+
+// TODO: real/complex(2 & 3)
+float RTDEF(ReduceReal4)(const Descriptor &array,
+ ReductionOperation<float> operation, const char *source, int line, int dim,
+ const Descriptor *mask, const float *identity, bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Real, 4>(array, source, line, dim,
+ mask, ReduceAccumulator<float>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceReal4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<float> operation, const char *source, int line, int dim,
+ const Descriptor *mask, const float *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<float>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Real, 4>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+double RTDEF(ReduceReal8)(const Descriptor &array,
+ ReductionOperation<double> operation, const char *source, int line, int dim,
+ const Descriptor *mask, const double *identity, bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Real, 8>(array, source, line, dim,
+ mask, ReduceAccumulator<double>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceReal8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<double> operation, const char *source, int line, int dim,
+ const Descriptor *mask, const double *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<double>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Real, 8>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#if LDBL_MANT_DIG == 64
+long double RTDEF(ReduceReal10)(const Descriptor &array,
+ ReductionOperation<long double> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const long double *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Real, 10>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<long double>{array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceReal10Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<long double> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const long double *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<long double>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Real, 10>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+CppFloat128Type RTDEF(ReduceReal16)(const Descriptor &array,
+ ReductionOperation<CppFloat128Type> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const CppFloat128Type *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ return GetTotalReduction<TypeCategory::Real, 16>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<CppFloat128Type>{
+ array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(ReduceReal16Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<CppFloat128Type> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const CppFloat128Type *identity,
+ bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<CppFloat128Type>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Real, 16>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#endif
+
+void RTDEF(CppReduceComplex4)(std::complex<float> &result,
+ const Descriptor &array, ReductionOperation<std::complex<float>> operation,
+ const char *source, int line, int dim, const Descriptor *mask,
+ const std::complex<float> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ result = GetTotalReduction<TypeCategory::Complex, 4>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<std::complex<float>>{
+ array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(CppReduceComplex4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<float>> operation, const char *source,
+ int line, int dim, const Descriptor *mask,
+ const std::complex<float> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::complex<float>>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Complex, 4>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+void RTDEF(CppReduceComplex8)(std::complex<double> &result,
+ const Descriptor &array, ReductionOperation<std::complex<double>> operation,
+ const char *source, int line, int dim, const Descriptor *mask,
+ const std::complex<double> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ result = GetTotalReduction<TypeCategory::Complex, 8>(array, source, line, dim,
+ mask,
+ ReduceAccumulator<std::complex<double>>{
+ array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(CppReduceComplex8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<double>> operation, const char *source,
+ int line, int dim, const Descriptor *mask,
+ const std::complex<double> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::complex<double>>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Complex, 8>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#if LDBL_MANT_DIG == 64
+void RTDEF(CppReduceComplex10)(std::complex<long double> &result,
+ const Descriptor &array,
+ ReductionOperation<std::complex<long double>> operation, const char *source,
+ int line, int dim, const Descriptor *mask,
+ const std::complex<long double> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ result = GetTotalReduction<TypeCategory::Complex, 10>(array, source, line,
+ dim, mask,
+ ReduceAccumulator<std::complex<long double>>{
+ array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(CppReduceComplex10Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<long double>> operation, const char *source,
+ int line, int dim, const Descriptor *mask,
+ const std::complex<long double> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::complex<long double>>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Complex, 10>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#endif
+#if LDBL_MANT_DIG == 113 || HAS_FLOAT128
+void RTDEF(CppReduceComplex16)(std::complex<CppFloat128Type> &result,
+ const Descriptor &array,
+ ReductionOperation<std::complex<CppFloat128Type>> operation,
+ const char *source, int line, int dim, const Descriptor *mask,
+ const std::complex<CppFloat128Type> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ result = GetTotalReduction<TypeCategory::Complex, 16>(array, source, line,
+ dim, mask,
+ ReduceAccumulator<std::complex<CppFloat128Type>>{
+ array, operation, identity, terminator},
+ "REDUCE");
+}
+void RTDEF(CppReduceComplex16Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::complex<CppFloat128Type>> operation,
+ const char *source, int line, int dim, const Descriptor *mask,
+ const std::complex<CppFloat128Type> *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = ReduceAccumulator<std::complex<CppFloat128Type>>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Complex, 16>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+#endif
+
+bool RTDEF(ReduceLogical1)(const Descriptor &array,
+ ReductionOperation<std::int8_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int8_t *identity,
+ bool ordered) {
+ return RTNAME(ReduceInteger1)(
+ array, operation, source, line, dim, mask, identity, ordered) != 0;
+}
+void RTDEF(ReduceLogical1Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int8_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int8_t *identity,
+ bool ordered) {
+ RTNAME(ReduceInteger1Dim)
+ (result, array, operation, source, line, dim, mask, identity, ordered);
+}
+bool RTDEF(ReduceLogical2)(const Descriptor &array,
+ ReductionOperation<std::int16_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int16_t *identity,
+ bool ordered) {
+ return RTNAME(ReduceInteger2)(
+ array, operation, source, line, dim, mask, identity, ordered) != 0;
+}
+void RTDEF(ReduceLogical2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int16_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int16_t *identity,
+ bool ordered) {
+ RTNAME(ReduceInteger2Dim)
+ (result, array, operation, source, line, dim, mask, identity, ordered);
+}
+bool RTDEF(ReduceLogical4)(const Descriptor &array,
+ ReductionOperation<std::int32_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int32_t *identity,
+ bool ordered) {
+ return RTNAME(ReduceInteger4)(
+ array, operation, source, line, dim, mask, identity, ordered) != 0;
+}
+void RTDEF(ReduceLogical4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int32_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int32_t *identity,
+ bool ordered) {
+ RTNAME(ReduceInteger4Dim)
+ (result, array, operation, source, line, dim, mask, identity, ordered);
+}
+bool RTDEF(ReduceLogical8)(const Descriptor &array,
+ ReductionOperation<std::int64_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int64_t *identity,
+ bool ordered) {
+ return RTNAME(ReduceInteger8)(
+ array, operation, source, line, dim, mask, identity, ordered) != 0;
+}
+void RTDEF(ReduceLogical8Dim)(Descriptor &result, const Descriptor &array,
+ ReductionOperation<std::int64_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const std::int64_t *identity,
+ bool ordered) {
+ RTNAME(ReduceInteger8Dim)
+ (result, array, operation, source, line, dim, mask, identity, ordered);
+}
+
+void RTDEF(ReduceChar1)(char *result, const Descriptor &array,
+ ReductionCharOperation<char> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char *identity, bool ordered) {
+ Terminator terminator{source, line};
+ BufferedReduceAccumulator<char, ReductionCharOperation<char>,
+ /*hasLength=*/true>
+ accumulator{array, operation, identity, terminator};
+ DoTotalReduction<char>(array, dim, mask, accumulator, "REDUCE", terminator);
+ accumulator.GetResult(result);
+}
+void RTDEF(ReduceCharacter1Dim)(Descriptor &result, const Descriptor &array,
+ ReductionCharOperation<char> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = BufferedReduceAccumulator<char,
+ ReductionCharOperation<char>, /*hasLength=*/true>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Character, 1>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+void RTDEF(ReduceChar2)(char16_t *result, const Descriptor &array,
+ ReductionCharOperation<char16_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char16_t *identity, bool ordered) {
+ Terminator terminator{source, line};
+ BufferedReduceAccumulator<char16_t, ReductionCharOperation<char16_t>,
+ /*hasLength=*/true>
+ accumulator{array, operation, identity, terminator};
+ DoTotalReduction<char16_t>(
+ array, dim, mask, accumulator, "REDUCE", terminator);
+ accumulator.GetResult(result);
+}
+void RTDEF(ReduceCharacter2Dim)(Descriptor &result, const Descriptor &array,
+ ReductionCharOperation<char16_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char16_t *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = BufferedReduceAccumulator<char16_t,
+ ReductionCharOperation<char16_t>, /*hasLength=*/true>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Character, 2>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+void RTDEF(ReduceChar4)(char32_t *result, const Descriptor &array,
+ ReductionCharOperation<char32_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char32_t *identity, bool ordered) {
+ Terminator terminator{source, line};
+ BufferedReduceAccumulator<char32_t, ReductionCharOperation<char32_t>,
+ /*hasLength=*/true>
+ accumulator{array, operation, identity, terminator};
+ DoTotalReduction<char32_t>(
+ array, dim, mask, accumulator, "REDUCE", terminator);
+ accumulator.GetResult(result);
+}
+void RTDEF(ReduceCharacter4Dim)(Descriptor &result, const Descriptor &array,
+ ReductionCharOperation<char32_t> operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char32_t *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = BufferedReduceAccumulator<char32_t,
+ ReductionCharOperation<char32_t>, /*hasLength=*/true>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Character, 4>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+
+void RTDEF(ReduceDerivedType)(char *result, const Descriptor &array,
+ ReductionDerivedTypeOperation operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char *identity, bool ordered) {
+ Terminator terminator{source, line};
+ BufferedReduceAccumulator<char, ReductionDerivedTypeOperation,
+ /*hasLength=*/false>
+ accumulator{array, operation, identity, terminator};
+ DoTotalReduction<char>(array, dim, mask, accumulator, "REDUCE", terminator);
+ accumulator.GetResult(result);
+}
+void RTDEF(ReduceDerivedTypeDim)(Descriptor &result, const Descriptor &array,
+ ReductionDerivedTypeOperation operation, const char *source, int line,
+ int dim, const Descriptor *mask, const char *identity, bool ordered) {
+ Terminator terminator{source, line};
+ using Accumulator = BufferedReduceAccumulator<char,
+ ReductionDerivedTypeOperation, /*hasLength=*/false>;
+ Accumulator accumulator{array, operation, identity, terminator};
+ PartialReduction<Accumulator, TypeCategory::Derived, 0>(result, array,
+ array.ElementBytes(), dim, mask, terminator, "REDUCE", accumulator);
+}
+
+RT_EXT_API_GROUP_END
+} // extern "C"
+} // namespace Fortran::runtime
diff --git a/flang/runtime/reduction-templates.h b/flang/runtime/reduction-templates.h
index 5b793deb2a12..f8e6f6095509 100644
--- a/flang/runtime/reduction-templates.h
+++ b/flang/runtime/reduction-templates.h
@@ -53,9 +53,9 @@ inline RT_API_ATTRS void DoTotalReduction(const Descriptor &x, int dim,
x.GetLowerBounds(xAt);
if (mask) {
CheckConformability(x, *mask, terminator, intrinsic, "ARRAY", "MASK");
- SubscriptValue maskAt[maxRank];
- mask->GetLowerBounds(maskAt);
if (mask->rank() > 0) {
+ SubscriptValue maskAt[maxRank];
+ mask->GetLowerBounds(maskAt);
for (auto elements{x.Elements()}; elements--;
x.IncrementSubscripts(xAt), mask->IncrementSubscripts(maskAt)) {
if (IsLogicalElementTrue(*mask, maskAt)) {
@@ -65,7 +65,7 @@ inline RT_API_ATTRS void DoTotalReduction(const Descriptor &x, int dim,
}
}
return;
- } else if (!IsLogicalElementTrue(*mask, maskAt)) {
+ } else if (!IsLogicalScalarTrue(*mask)) {
// scalar MASK=.FALSE.: return identity value
return;
}
@@ -86,13 +86,22 @@ inline RT_API_ATTRS CppTypeFor<CAT, KIND> GetTotalReduction(const Descriptor &x,
RUNTIME_CHECK(terminator, TypeCode(CAT, KIND) == x.type());
using CppType = CppTypeFor<CAT, KIND>;
DoTotalReduction<CppType>(x, dim, mask, accumulator, intrinsic, terminator);
- CppType result;
+ if constexpr (std::is_void_v<CppType>) {
+ // Result is returned from accumulator, as in REDUCE() for derived type
#ifdef _MSC_VER // work around MSVC spurious error
- accumulator.GetResult(&result);
+ accumulator.GetResult();
#else
- accumulator.template GetResult(&result);
+ accumulator.template GetResult();
#endif
- return result;
+ } else {
+ CppType result;
+#ifdef _MSC_VER // work around MSVC spurious error
+ accumulator.GetResult(&result);
+#else
+ accumulator.template GetResult(&result);
+#endif
+ return result;
+ }
}
// For reductions on a dimension, e.g. SUM(array,DIM=2) where the shape
@@ -164,35 +173,6 @@ inline RT_API_ATTRS void ReduceDimMaskToScalar(const Descriptor &x,
#endif
}
-// Utility: establishes & allocates the result array for a partial
-// reduction (i.e., one with DIM=).
-static RT_API_ATTRS void CreatePartialReductionResult(Descriptor &result,
- const Descriptor &x, std::size_t resultElementSize, int dim,
- Terminator &terminator, const char *intrinsic, TypeCode typeCode) {
- int xRank{x.rank()};
- if (dim < 1 || dim > xRank) {
- terminator.Crash(
- "%s: bad DIM=%d for ARRAY with rank %d", intrinsic, dim, xRank);
- }
- int zeroBasedDim{dim - 1};
- SubscriptValue resultExtent[maxRank];
- for (int j{0}; j < zeroBasedDim; ++j) {
- resultExtent[j] = x.GetDimension(j).Extent();
- }
- for (int j{zeroBasedDim + 1}; j < xRank; ++j) {
- resultExtent[j - 1] = x.GetDimension(j).Extent();
- }
- result.Establish(typeCode, resultElementSize, nullptr, xRank - 1,
- resultExtent, CFI_attribute_allocatable);
- for (int j{0}; j + 1 < xRank; ++j) {
- result.GetDimension(j).SetBounds(1, resultExtent[j]);
- }
- if (int stat{result.Allocate()}) {
- terminator.Crash(
- "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
- }
-}
-
// Partial reductions with DIM=
template <typename ACCUMULATOR, TypeCategory CAT, int KIND>
@@ -208,7 +188,6 @@ inline RT_API_ATTRS void PartialReduction(Descriptor &result,
using CppType = CppTypeFor<CAT, KIND>;
if (mask) {
CheckConformability(x, *mask, terminator, intrinsic, "ARRAY", "MASK");
- SubscriptValue maskAt[maxRank]; // contents unused
if (mask->rank() > 0) {
for (auto n{result.Elements()}; n-- > 0; result.IncrementSubscripts(at)) {
accumulator.Reinitialize();
@@ -216,7 +195,7 @@ inline RT_API_ATTRS void PartialReduction(Descriptor &result,
x, dim - 1, at, *mask, result.Element<CppType>(at), accumulator);
}
return;
- } else if (!IsLogicalElementTrue(*mask, maskAt)) {
+ } else if (!IsLogicalScalarTrue(*mask)) {
// scalar MASK=.FALSE.
accumulator.Reinitialize();
for (auto n{result.Elements()}; n-- > 0; result.IncrementSubscripts(at)) {
diff --git a/flang/runtime/stat.h b/flang/runtime/stat.h
index 55cdac46eb3a..4f46f52ecb29 100644
--- a/flang/runtime/stat.h
+++ b/flang/runtime/stat.h
@@ -11,8 +11,8 @@
#ifndef FORTRAN_RUNTIME_STAT_H_
#define FORTRAN_RUNTIME_STAT_H_
+#include "flang/Common/api-attrs.h"
#include "flang/ISO_Fortran_binding_wrapper.h"
-#include "flang/Runtime/api-attrs.h"
#include "flang/Runtime/magic-numbers.h"
namespace Fortran::runtime {
diff --git a/flang/runtime/terminator.h b/flang/runtime/terminator.h
index 444c68d109ee..59a47ce93e7c 100644
--- a/flang/runtime/terminator.h
+++ b/flang/runtime/terminator.h
@@ -11,7 +11,7 @@
#ifndef FORTRAN_RUNTIME_TERMINATOR_H_
#define FORTRAN_RUNTIME_TERMINATOR_H_
-#include "flang/Runtime/api-attrs.h"
+#include "flang/Common/api-attrs.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
@@ -67,7 +67,7 @@ public:
template <typename... Args>
RT_API_ATTRS void PrintCrashArgs(const char *message, Args... args) const {
-#if RT_DEVICE_COMPILATION
+#if defined(RT_DEVICE_COMPILATION)
std::printf(message, args...);
#else
std::fprintf(stderr, message, args...);
diff --git a/flang/runtime/tools.cpp b/flang/runtime/tools.cpp
index 71022c7a8c17..73d6c2cf7e1d 100644
--- a/flang/runtime/tools.cpp
+++ b/flang/runtime/tools.cpp
@@ -238,5 +238,34 @@ template <int KIND> struct FitsInIntegerKind {
}
};
+// Utility: establishes & allocates the result array for a partial
+// reduction (i.e., one with DIM=).
+RT_API_ATTRS void CreatePartialReductionResult(Descriptor &result,
+ const Descriptor &x, std::size_t resultElementSize, int dim,
+ Terminator &terminator, const char *intrinsic, TypeCode typeCode) {
+ int xRank{x.rank()};
+ if (dim < 1 || dim > xRank) {
+ terminator.Crash(
+ "%s: bad DIM=%d for ARRAY with rank %d", intrinsic, dim, xRank);
+ }
+ int zeroBasedDim{dim - 1};
+ SubscriptValue resultExtent[maxRank];
+ for (int j{0}; j < zeroBasedDim; ++j) {
+ resultExtent[j] = x.GetDimension(j).Extent();
+ }
+ for (int j{zeroBasedDim + 1}; j < xRank; ++j) {
+ resultExtent[j - 1] = x.GetDimension(j).Extent();
+ }
+ result.Establish(typeCode, resultElementSize, nullptr, xRank - 1,
+ resultExtent, CFI_attribute_allocatable);
+ for (int j{0}; j + 1 < xRank; ++j) {
+ result.GetDimension(j).SetBounds(1, resultExtent[j]);
+ }
+ if (int stat{result.Allocate()}) {
+ terminator.Crash(
+ "%s: could not allocate memory for result; STAT=%d", intrinsic, stat);
+ }
+}
+
RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime
diff --git a/flang/runtime/tools.h b/flang/runtime/tools.h
index df25eb888233..5d7d99c08179 100644
--- a/flang/runtime/tools.h
+++ b/flang/runtime/tools.h
@@ -21,6 +21,27 @@
#include <map>
#include <type_traits>
+/// \macro RT_PRETTY_FUNCTION
+/// Gets a user-friendly looking function signature for the current scope
+/// using the best available method on each platform. The exact format of the
+/// resulting string is implementation specific and non-portable, so this should
+/// only be used, for example, for logging or diagnostics.
+/// Copy of LLVM_PRETTY_FUNCTION
+#if defined(_MSC_VER)
+#define RT_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__) || defined(__clang__)
+#define RT_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define RT_PRETTY_FUNCTION __func__
+#endif
+
+#if defined(RT_DEVICE_COMPILATION)
+// Use the pseudo lock and pseudo file unit implementations
+// for the device.
+#define RT_USE_PSEUDO_LOCK 1
+#define RT_USE_PSEUDO_FILE_UNIT 1
+#endif
+
namespace Fortran::runtime {
class Terminator;
@@ -41,7 +62,7 @@ RT_API_ATTRS int IdentifyValue(
RT_API_ATTRS void ToFortranDefaultCharacter(
char *to, std::size_t toLength, const char *from);
-// Utility for dealing with elemental LOGICAL arguments
+// Utilities for dealing with elemental LOGICAL arguments
inline RT_API_ATTRS bool IsLogicalElementTrue(
const Descriptor &logical, const SubscriptValue at[]) {
// A LOGICAL value is false if and only if all of its bytes are zero.
@@ -53,6 +74,16 @@ inline RT_API_ATTRS bool IsLogicalElementTrue(
}
return false;
}
+inline RT_API_ATTRS bool IsLogicalScalarTrue(const Descriptor &logical) {
+ // A LOGICAL value is false if and only if all of its bytes are zero.
+ const char *p{logical.OffsetElement<char>()};
+ for (std::size_t j{logical.ElementBytes()}; j-- > 0; ++p) {
+ if (*p) {
+ return true;
+ }
+ }
+ return false;
+}
// Check array conformability; a scalar 'x' conforms. Crashes on error.
RT_API_ATTRS void CheckConformability(const Descriptor &to, const Descriptor &x,
@@ -490,5 +521,9 @@ RT_API_ATTRS void CopyAndPad(
}
}
+RT_API_ATTRS void CreatePartialReductionResult(Descriptor &result,
+ const Descriptor &x, std::size_t resultElementSize, int dim, Terminator &,
+ const char *intrinsic, TypeCode);
+
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_TOOLS_H_
diff --git a/flang/runtime/unit.cpp b/flang/runtime/unit.cpp
index 82f0e68cc20a..6c648d3bd834 100644
--- a/flang/runtime/unit.cpp
+++ b/flang/runtime/unit.cpp
@@ -5,301 +5,38 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-
+//
+// Implementation of ExternalFileUnit common for both
+// RT_USE_PSEUDO_FILE_UNIT=0 and RT_USE_PSEUDO_FILE_UNIT=1.
+//
+//===----------------------------------------------------------------------===//
#include "unit.h"
#include "io-error.h"
#include "lock.h"
#include "tools.h"
-#include "unit-map.h"
-#include "flang/Runtime/magic-numbers.h"
-#include <cstdio>
#include <limits>
#include <utility>
namespace Fortran::runtime::io {
-// The per-unit data structures are created on demand so that Fortran I/O
-// should work without a Fortran main program.
-static Lock unitMapLock;
-static Lock createOpenLock;
-static UnitMap *unitMap{nullptr};
-static ExternalFileUnit *defaultInput{nullptr}; // unit 5
-static ExternalFileUnit *defaultOutput{nullptr}; // unit 6
-static ExternalFileUnit *errorOutput{nullptr}; // unit 0 extension
-
-void FlushOutputOnCrash(const Terminator &terminator) {
- if (!defaultOutput && !errorOutput) {
- return;
- }
- IoErrorHandler handler{terminator};
- handler.HasIoStat(); // prevent nested crash if flush has error
- CriticalSection critical{unitMapLock};
- if (defaultOutput) {
- defaultOutput->FlushOutput(handler);
- }
- if (errorOutput) {
- errorOutput->FlushOutput(handler);
- }
-}
-
-ExternalFileUnit *ExternalFileUnit::LookUp(int unit) {
- return GetUnitMap().LookUp(unit);
-}
-
-ExternalFileUnit *ExternalFileUnit::LookUpOrCreate(
- int unit, const Terminator &terminator, bool &wasExtant) {
- return GetUnitMap().LookUpOrCreate(unit, terminator, wasExtant);
-}
-
-ExternalFileUnit *ExternalFileUnit::LookUpOrCreateAnonymous(int unit,
- Direction dir, Fortran::common::optional<bool> isUnformatted,
- const Terminator &terminator) {
- // Make sure that the returned anonymous unit has been opened
- // not just created in the unitMap.
- CriticalSection critical{createOpenLock};
- bool exists{false};
- ExternalFileUnit *result{
- GetUnitMap().LookUpOrCreate(unit, terminator, exists)};
- if (result && !exists) {
- IoErrorHandler handler{terminator};
- result->OpenAnonymousUnit(
- dir == Direction::Input ? OpenStatus::Unknown : OpenStatus::Replace,
- Action::ReadWrite, Position::Rewind, Convert::Unknown, handler);
- result->isUnformatted = isUnformatted;
- }
- return result;
-}
-
-ExternalFileUnit *ExternalFileUnit::LookUp(
- const char *path, std::size_t pathLen) {
- return GetUnitMap().LookUp(path, pathLen);
-}
-
-ExternalFileUnit &ExternalFileUnit::CreateNew(
- int unit, const Terminator &terminator) {
- bool wasExtant{false};
- ExternalFileUnit *result{
- GetUnitMap().LookUpOrCreate(unit, terminator, wasExtant)};
- RUNTIME_CHECK(terminator, result && !wasExtant);
- return *result;
-}
-
-ExternalFileUnit *ExternalFileUnit::LookUpForClose(int unit) {
- return GetUnitMap().LookUpForClose(unit);
-}
-
-ExternalFileUnit &ExternalFileUnit::NewUnit(
- const Terminator &terminator, bool forChildIo) {
- ExternalFileUnit &unit{GetUnitMap().NewUnit(terminator)};
- unit.createdForInternalChildIo_ = forChildIo;
- return unit;
-}
-
-bool ExternalFileUnit::OpenUnit(Fortran::common::optional<OpenStatus> status,
- Fortran::common::optional<Action> action, Position position,
- OwningPtr<char> &&newPath, std::size_t newPathLength, Convert convert,
- IoErrorHandler &handler) {
- if (convert == Convert::Unknown) {
- convert = executionEnvironment.conversion;
- }
- swapEndianness_ = convert == Convert::Swap ||
- (convert == Convert::LittleEndian && !isHostLittleEndian) ||
- (convert == Convert::BigEndian && isHostLittleEndian);
- bool impliedClose{false};
- if (IsConnected()) {
- bool isSamePath{newPath.get() && path() && pathLength() == newPathLength &&
- std::memcmp(path(), newPath.get(), newPathLength) == 0};
- if (status && *status != OpenStatus::Old && isSamePath) {
- handler.SignalError("OPEN statement for connected unit may not have "
- "explicit STATUS= other than 'OLD'");
- return impliedClose;
- }
- if (!newPath.get() || isSamePath) {
- // OPEN of existing unit, STATUS='OLD' or unspecified, not new FILE=
- newPath.reset();
- return impliedClose;
- }
- // Otherwise, OPEN on open unit with new FILE= implies CLOSE
- DoImpliedEndfile(handler);
- FlushOutput(handler);
- TruncateFrame(0, handler);
- Close(CloseStatus::Keep, handler);
- impliedClose = true;
- }
- if (newPath.get() && newPathLength > 0) {
- if (const auto *already{
- GetUnitMap().LookUp(newPath.get(), newPathLength)}) {
- handler.SignalError(IostatOpenAlreadyConnected,
- "OPEN(UNIT=%d,FILE='%.*s'): file is already connected to unit %d",
- unitNumber_, static_cast<int>(newPathLength), newPath.get(),
- already->unitNumber_);
- return impliedClose;
- }
- }
- set_path(std::move(newPath), newPathLength);
- Open(status.value_or(OpenStatus::Unknown), action, position, handler);
- auto totalBytes{knownSize()};
- if (access == Access::Direct) {
- if (!openRecl) {
- handler.SignalError(IostatOpenBadRecl,
- "OPEN(UNIT=%d,ACCESS='DIRECT'): record length is not known",
- unitNumber());
- } else if (*openRecl <= 0) {
- handler.SignalError(IostatOpenBadRecl,
- "OPEN(UNIT=%d,ACCESS='DIRECT',RECL=%jd): record length is invalid",
- unitNumber(), static_cast<std::intmax_t>(*openRecl));
- } else if (totalBytes && (*totalBytes % *openRecl != 0)) {
- handler.SignalError(IostatOpenBadRecl,
- "OPEN(UNIT=%d,ACCESS='DIRECT',RECL=%jd): record length is not an "
- "even divisor of the file size %jd",
- unitNumber(), static_cast<std::intmax_t>(*openRecl),
- static_cast<std::intmax_t>(*totalBytes));
- }
- recordLength = openRecl;
- }
- endfileRecordNumber.reset();
- currentRecordNumber = 1;
- if (totalBytes && access == Access::Direct && openRecl.value_or(0) > 0) {
- endfileRecordNumber = 1 + (*totalBytes / *openRecl);
- }
- if (position == Position::Append) {
- if (totalBytes) {
- frameOffsetInFile_ = *totalBytes;
- }
- if (access != Access::Stream) {
- if (!endfileRecordNumber) {
- // Fake it so that we can backspace relative from the end
- endfileRecordNumber = std::numeric_limits<std::int64_t>::max() - 2;
- }
- currentRecordNumber = *endfileRecordNumber;
- }
- }
- return impliedClose;
-}
-
-void ExternalFileUnit::OpenAnonymousUnit(
- Fortran::common::optional<OpenStatus> status,
- Fortran::common::optional<Action> action, Position position,
- Convert convert, IoErrorHandler &handler) {
- // I/O to an unconnected unit reads/creates a local file, e.g. fort.7
- std::size_t pathMaxLen{32};
- auto path{SizedNew<char>{handler}(pathMaxLen)};
- std::snprintf(path.get(), pathMaxLen, "fort.%d", unitNumber_);
- OpenUnit(status, action, position, std::move(path), std::strlen(path.get()),
- convert, handler);
-}
-
-void ExternalFileUnit::CloseUnit(CloseStatus status, IoErrorHandler &handler) {
- DoImpliedEndfile(handler);
- FlushOutput(handler);
- Close(status, handler);
-}
-
-void ExternalFileUnit::DestroyClosed() {
- GetUnitMap().DestroyClosed(*this); // destroys *this
-}
-
-Iostat ExternalFileUnit::SetDirection(Direction direction) {
- if (direction == Direction::Input) {
- if (mayRead()) {
- direction_ = Direction::Input;
- return IostatOk;
- } else {
- return IostatReadFromWriteOnly;
- }
- } else {
- if (mayWrite()) {
- direction_ = Direction::Output;
- return IostatOk;
- } else {
- return IostatWriteToReadOnly;
- }
- }
-}
-
-UnitMap &ExternalFileUnit::CreateUnitMap() {
- Terminator terminator{__FILE__, __LINE__};
- IoErrorHandler handler{terminator};
- UnitMap &newUnitMap{*New<UnitMap>{terminator}().release()};
-
- bool wasExtant{false};
- ExternalFileUnit &out{*newUnitMap.LookUpOrCreate(
- FORTRAN_DEFAULT_OUTPUT_UNIT, terminator, wasExtant)};
- RUNTIME_CHECK(terminator, !wasExtant);
- out.Predefine(1);
- handler.SignalError(out.SetDirection(Direction::Output));
- out.isUnformatted = false;
- defaultOutput = &out;
-
- ExternalFileUnit &in{*newUnitMap.LookUpOrCreate(
- FORTRAN_DEFAULT_INPUT_UNIT, terminator, wasExtant)};
- RUNTIME_CHECK(terminator, !wasExtant);
- in.Predefine(0);
- handler.SignalError(in.SetDirection(Direction::Input));
- in.isUnformatted = false;
- defaultInput = &in;
-
- ExternalFileUnit &error{
- *newUnitMap.LookUpOrCreate(FORTRAN_ERROR_UNIT, terminator, wasExtant)};
- RUNTIME_CHECK(terminator, !wasExtant);
- error.Predefine(2);
- handler.SignalError(error.SetDirection(Direction::Output));
- error.isUnformatted = false;
- errorOutput = &error;
+RT_OFFLOAD_VAR_GROUP_BEGIN
+RT_VAR_ATTRS ExternalFileUnit *defaultInput{nullptr}; // unit 5
+RT_VAR_ATTRS ExternalFileUnit *defaultOutput{nullptr}; // unit 6
+RT_VAR_ATTRS ExternalFileUnit *errorOutput{nullptr}; // unit 0 extension
+RT_OFFLOAD_VAR_GROUP_END
- return newUnitMap;
-}
-
-// A back-up atexit() handler for programs that don't terminate with a main
-// program END or a STOP statement or other Fortran-initiated program shutdown,
-// such as programs with a C main() that terminate normally. It flushes all
-// external I/O units. It is registered once the first time that any external
-// I/O is attempted.
-static void CloseAllExternalUnits() {
- IoErrorHandler handler{"Fortran program termination"};
- ExternalFileUnit::CloseAll(handler);
-}
+RT_OFFLOAD_API_GROUP_BEGIN
-UnitMap &ExternalFileUnit::GetUnitMap() {
- if (unitMap) {
- return *unitMap;
- }
- {
- CriticalSection critical{unitMapLock};
- if (unitMap) {
- return *unitMap;
- }
- unitMap = &CreateUnitMap();
- }
- std::atexit(CloseAllExternalUnits);
- return *unitMap;
-}
-
-void ExternalFileUnit::CloseAll(IoErrorHandler &handler) {
- CriticalSection critical{unitMapLock};
- if (unitMap) {
- unitMap->CloseAll(handler);
- FreeMemoryAndNullify(unitMap);
- }
- defaultOutput = nullptr;
- defaultInput = nullptr;
- errorOutput = nullptr;
-}
-
-void ExternalFileUnit::FlushAll(IoErrorHandler &handler) {
- CriticalSection critical{unitMapLock};
- if (unitMap) {
- unitMap->FlushAll(handler);
- }
-}
-
-static inline void SwapEndianness(
+static inline RT_API_ATTRS void SwapEndianness(
char *data, std::size_t bytes, std::size_t elementBytes) {
if (elementBytes > 1) {
auto half{elementBytes >> 1};
for (std::size_t j{0}; j + elementBytes <= bytes; j += elementBytes) {
for (std::size_t k{0}; k < half; ++k) {
+ RT_DIAG_PUSH
+ RT_DIAG_DISABLE_CALL_HOST_FROM_DEVICE_WARN
std::swap(data[j + k], data[j + elementBytes - 1 - k]);
+ RT_DIAG_POP
}
}
}
@@ -870,7 +607,8 @@ void ExternalFileUnit::BackspaceVariableUnformattedRecord(
// There's no portable memrchr(), unfortunately, and strrchr() would
// fail on a record with a NUL, so we have to do it the hard way.
-static const char *FindLastNewline(const char *str, std::size_t length) {
+static RT_API_ATTRS const char *FindLastNewline(
+ const char *str, std::size_t length) {
for (const char *p{str + length}; p >= str; p--) {
if (*p == '\n') {
return p;
@@ -999,39 +737,6 @@ void ExternalFileUnit::PopChildIo(ChildIo &child) {
child_.reset(child.AcquirePrevious().release()); // deletes top child
}
-int ExternalFileUnit::GetAsynchronousId(IoErrorHandler &handler) {
- if (!mayAsynchronous()) {
- handler.SignalError(IostatBadAsynchronous);
- return -1;
- } else {
- for (int j{0}; 64 * j < maxAsyncIds; ++j) {
- if (auto least{asyncIdAvailable_[j].LeastElement()}) {
- asyncIdAvailable_[j].reset(*least);
- return 64 * j + static_cast<int>(*least);
- }
- }
- handler.SignalError(IostatTooManyAsyncOps);
- return -1;
- }
-}
-
-bool ExternalFileUnit::Wait(int id) {
- if (static_cast<std::size_t>(id) >= maxAsyncIds ||
- asyncIdAvailable_[id / 64].test(id % 64)) {
- return false;
- } else {
- if (id == 0) { // means "all IDs"
- for (int j{0}; 64 * j < maxAsyncIds; ++j) {
- asyncIdAvailable_[j].set();
- }
- asyncIdAvailable_[0].reset(0);
- } else {
- asyncIdAvailable_[id / 64].set(id % 64);
- }
- return true;
- }
-}
-
std::int32_t ExternalFileUnit::ReadHeaderOrFooter(std::int64_t frameOffset) {
std::int32_t word;
char *wordPtr{reinterpret_cast<char *>(&word)};
@@ -1067,4 +772,5 @@ Iostat ChildIo::CheckFormattingAndDirection(
}
}
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime::io
diff --git a/flang/runtime/unit.h b/flang/runtime/unit.h
index fc5bead7e1d9..a6ee5971a165 100644
--- a/flang/runtime/unit.h
+++ b/flang/runtime/unit.h
@@ -25,57 +25,125 @@
#include "flang/Runtime/memory.h"
#include <cstdlib>
#include <cstring>
-#include <variant>
+#include <flang/Common/variant.h>
namespace Fortran::runtime::io {
class UnitMap;
class ChildIo;
+class ExternalFileUnit;
+
+RT_OFFLOAD_VAR_GROUP_BEGIN
+// Predefined file units.
+extern RT_VAR_ATTRS ExternalFileUnit *defaultInput; // unit 5
+extern RT_VAR_ATTRS ExternalFileUnit *defaultOutput; // unit 6
+extern RT_VAR_ATTRS ExternalFileUnit *errorOutput; // unit 0 extension
+RT_OFFLOAD_VAR_GROUP_END
+
+#if defined(RT_USE_PSEUDO_FILE_UNIT)
+// A flavor of OpenFile class that pretends to be a terminal,
+// and only provides basic buffering of the output
+// in an internal buffer, and Write's the output
+// using std::printf(). Since it does not rely on file system
+// APIs, it can be used to implement external output
+// for offload devices.
+class PseudoOpenFile {
+public:
+ using FileOffset = std::int64_t;
+
+ RT_API_ATTRS const char *path() const { return nullptr; }
+ RT_API_ATTRS std::size_t pathLength() const { return 0; }
+ RT_API_ATTRS void set_path(OwningPtr<char> &&, std::size_t bytes) {}
+ RT_API_ATTRS bool mayRead() const { return false; }
+ RT_API_ATTRS bool mayWrite() const { return true; }
+ RT_API_ATTRS bool mayPosition() const { return false; }
+ RT_API_ATTRS bool mayAsynchronous() const { return false; }
+ RT_API_ATTRS void set_mayAsynchronous(bool yes);
+ // Pretend to be a terminal to force the output
+ // at the end of IO statement.
+ RT_API_ATTRS bool isTerminal() const { return true; }
+ RT_API_ATTRS bool isWindowsTextFile() const { return false; }
+ RT_API_ATTRS Fortran::common::optional<FileOffset> knownSize() const;
+ RT_API_ATTRS bool IsConnected() const { return false; }
+ RT_API_ATTRS void Open(OpenStatus, Fortran::common::optional<Action>,
+ Position, IoErrorHandler &);
+ RT_API_ATTRS void Predefine(int fd) {}
+ RT_API_ATTRS void Close(CloseStatus, IoErrorHandler &);
+ RT_API_ATTRS std::size_t Read(FileOffset, char *, std::size_t minBytes,
+ std::size_t maxBytes, IoErrorHandler &);
+ RT_API_ATTRS std::size_t Write(
+ FileOffset, const char *, std::size_t, IoErrorHandler &);
+ RT_API_ATTRS void Truncate(FileOffset, IoErrorHandler &);
+ RT_API_ATTRS int ReadAsynchronously(
+ FileOffset, char *, std::size_t, IoErrorHandler &);
+ RT_API_ATTRS int WriteAsynchronously(
+ FileOffset, const char *, std::size_t, IoErrorHandler &);
+ RT_API_ATTRS void Wait(int id, IoErrorHandler &);
+ RT_API_ATTRS void WaitAll(IoErrorHandler &);
+ RT_API_ATTRS Position InquirePosition() const;
+};
+#endif // defined(RT_USE_PSEUDO_FILE_UNIT)
+
+#if !defined(RT_USE_PSEUDO_FILE_UNIT)
+using OpenFileClass = OpenFile;
+using FileFrameClass = FileFrame<ExternalFileUnit>;
+#else // defined(RT_USE_PSEUDO_FILE_UNIT)
+using OpenFileClass = PseudoOpenFile;
+// Use not so big buffer for the pseudo file unit frame.
+using FileFrameClass = FileFrame<ExternalFileUnit, 1024>;
+#endif // defined(RT_USE_PSEUDO_FILE_UNIT)
class ExternalFileUnit : public ConnectionState,
- public OpenFile,
- public FileFrame<ExternalFileUnit> {
+ public OpenFileClass,
+ public FileFrameClass {
public:
static constexpr int maxAsyncIds{64 * 16};
- explicit ExternalFileUnit(int unitNumber) : unitNumber_{unitNumber} {
+ explicit RT_API_ATTRS ExternalFileUnit(int unitNumber)
+ : unitNumber_{unitNumber} {
isUTF8 = executionEnvironment.defaultUTF8;
for (int j{0}; 64 * j < maxAsyncIds; ++j) {
asyncIdAvailable_[j].set();
}
asyncIdAvailable_[0].reset(0);
}
- ~ExternalFileUnit() {}
+ RT_API_ATTRS ~ExternalFileUnit() {}
- int unitNumber() const { return unitNumber_; }
- bool swapEndianness() const { return swapEndianness_; }
- bool createdForInternalChildIo() const { return createdForInternalChildIo_; }
+ RT_API_ATTRS int unitNumber() const { return unitNumber_; }
+ RT_API_ATTRS bool swapEndianness() const { return swapEndianness_; }
+ RT_API_ATTRS bool createdForInternalChildIo() const {
+ return createdForInternalChildIo_;
+ }
- static ExternalFileUnit *LookUp(int unit);
- static ExternalFileUnit *LookUpOrCreate(
+ static RT_API_ATTRS ExternalFileUnit *LookUp(int unit);
+ static RT_API_ATTRS ExternalFileUnit *LookUpOrCreate(
int unit, const Terminator &, bool &wasExtant);
- static ExternalFileUnit *LookUpOrCreateAnonymous(int unit, Direction,
- Fortran::common::optional<bool> isUnformatted, const Terminator &);
- static ExternalFileUnit *LookUp(const char *path, std::size_t pathLen);
- static ExternalFileUnit &CreateNew(int unit, const Terminator &);
- static ExternalFileUnit *LookUpForClose(int unit);
- static ExternalFileUnit &NewUnit(const Terminator &, bool forChildIo);
- static void CloseAll(IoErrorHandler &);
- static void FlushAll(IoErrorHandler &);
+ static RT_API_ATTRS ExternalFileUnit *LookUpOrCreateAnonymous(int unit,
+ Direction, Fortran::common::optional<bool> isUnformatted,
+ const Terminator &);
+ static RT_API_ATTRS ExternalFileUnit *LookUp(
+ const char *path, std::size_t pathLen);
+ static RT_API_ATTRS ExternalFileUnit &CreateNew(int unit, const Terminator &);
+ static RT_API_ATTRS ExternalFileUnit *LookUpForClose(int unit);
+ static RT_API_ATTRS ExternalFileUnit &NewUnit(
+ const Terminator &, bool forChildIo);
+ static RT_API_ATTRS void CloseAll(IoErrorHandler &);
+ static RT_API_ATTRS void FlushAll(IoErrorHandler &);
// Returns true if an existing unit was closed
- bool OpenUnit(Fortran::common::optional<OpenStatus>,
+ RT_API_ATTRS bool OpenUnit(Fortran::common::optional<OpenStatus>,
Fortran::common::optional<Action>, Position, OwningPtr<char> &&path,
std::size_t pathLength, Convert, IoErrorHandler &);
- void OpenAnonymousUnit(Fortran::common::optional<OpenStatus>,
+ RT_API_ATTRS void OpenAnonymousUnit(Fortran::common::optional<OpenStatus>,
Fortran::common::optional<Action>, Position, Convert, IoErrorHandler &);
- void CloseUnit(CloseStatus, IoErrorHandler &);
- void DestroyClosed();
+ RT_API_ATTRS void CloseUnit(CloseStatus, IoErrorHandler &);
+ RT_API_ATTRS void DestroyClosed();
- Iostat SetDirection(Direction);
+ RT_API_ATTRS Iostat SetDirection(Direction);
template <typename A, typename... X>
- IoStatementState &BeginIoStatement(const Terminator &terminator, X &&...xs) {
+ RT_API_ATTRS IoStatementState &BeginIoStatement(
+ const Terminator &terminator, X &&...xs) {
// Take lock_ and hold it until EndIoStatement().
#if USE_PTHREADS
if (!lock_.TakeIfNoDeadlock()) {
@@ -93,50 +161,54 @@ public:
return *io_;
}
- bool Emit(
+ RT_API_ATTRS bool Emit(
const char *, std::size_t, std::size_t elementBytes, IoErrorHandler &);
- bool Receive(char *, std::size_t, std::size_t elementBytes, IoErrorHandler &);
- std::size_t GetNextInputBytes(const char *&, IoErrorHandler &);
- bool BeginReadingRecord(IoErrorHandler &);
- void FinishReadingRecord(IoErrorHandler &);
- bool AdvanceRecord(IoErrorHandler &);
- void BackspaceRecord(IoErrorHandler &);
- void FlushOutput(IoErrorHandler &);
- void FlushIfTerminal(IoErrorHandler &);
- void Endfile(IoErrorHandler &);
- void Rewind(IoErrorHandler &);
- void EndIoStatement();
- bool SetStreamPos(std::int64_t, IoErrorHandler &); // one-based, for POS=
- bool SetDirectRec(std::int64_t, IoErrorHandler &); // one-based, for REC=
- std::int64_t InquirePos() const {
+ RT_API_ATTRS bool Receive(
+ char *, std::size_t, std::size_t elementBytes, IoErrorHandler &);
+ RT_API_ATTRS std::size_t GetNextInputBytes(const char *&, IoErrorHandler &);
+ RT_API_ATTRS bool BeginReadingRecord(IoErrorHandler &);
+ RT_API_ATTRS void FinishReadingRecord(IoErrorHandler &);
+ RT_API_ATTRS bool AdvanceRecord(IoErrorHandler &);
+ RT_API_ATTRS void BackspaceRecord(IoErrorHandler &);
+ RT_API_ATTRS void FlushOutput(IoErrorHandler &);
+ RT_API_ATTRS void FlushIfTerminal(IoErrorHandler &);
+ RT_API_ATTRS void Endfile(IoErrorHandler &);
+ RT_API_ATTRS void Rewind(IoErrorHandler &);
+ RT_API_ATTRS void EndIoStatement();
+ RT_API_ATTRS bool SetStreamPos(
+ std::int64_t, IoErrorHandler &); // one-based, for POS=
+ RT_API_ATTRS bool SetDirectRec(
+ std::int64_t, IoErrorHandler &); // one-based, for REC=
+ RT_API_ATTRS std::int64_t InquirePos() const {
// 12.6.2.11 defines POS=1 as the beginning of file
return frameOffsetInFile_ + recordOffsetInFrame_ + positionInRecord + 1;
}
- ChildIo *GetChildIo() { return child_.get(); }
- ChildIo &PushChildIo(IoStatementState &);
- void PopChildIo(ChildIo &);
+ RT_API_ATTRS ChildIo *GetChildIo() { return child_.get(); }
+ RT_API_ATTRS ChildIo &PushChildIo(IoStatementState &);
+ RT_API_ATTRS void PopChildIo(ChildIo &);
- int GetAsynchronousId(IoErrorHandler &);
- bool Wait(int);
+ RT_API_ATTRS int GetAsynchronousId(IoErrorHandler &);
+ RT_API_ATTRS bool Wait(int);
private:
- static UnitMap &CreateUnitMap();
- static UnitMap &GetUnitMap();
- const char *FrameNextInput(IoErrorHandler &, std::size_t);
- void SetPosition(std::int64_t, IoErrorHandler &); // zero-based
- void BeginSequentialVariableUnformattedInputRecord(IoErrorHandler &);
- void BeginVariableFormattedInputRecord(IoErrorHandler &);
- void BackspaceFixedRecord(IoErrorHandler &);
- void BackspaceVariableUnformattedRecord(IoErrorHandler &);
- void BackspaceVariableFormattedRecord(IoErrorHandler &);
- bool SetVariableFormattedRecordLength();
- void DoImpliedEndfile(IoErrorHandler &);
- void DoEndfile(IoErrorHandler &);
- void CommitWrites();
- bool CheckDirectAccess(IoErrorHandler &);
- void HitEndOnRead(IoErrorHandler &);
- std::int32_t ReadHeaderOrFooter(std::int64_t frameOffset);
+ static RT_API_ATTRS UnitMap &CreateUnitMap();
+ static RT_API_ATTRS UnitMap &GetUnitMap();
+ RT_API_ATTRS const char *FrameNextInput(IoErrorHandler &, std::size_t);
+ RT_API_ATTRS void SetPosition(std::int64_t, IoErrorHandler &); // zero-based
+ RT_API_ATTRS void BeginSequentialVariableUnformattedInputRecord(
+ IoErrorHandler &);
+ RT_API_ATTRS void BeginVariableFormattedInputRecord(IoErrorHandler &);
+ RT_API_ATTRS void BackspaceFixedRecord(IoErrorHandler &);
+ RT_API_ATTRS void BackspaceVariableUnformattedRecord(IoErrorHandler &);
+ RT_API_ATTRS void BackspaceVariableFormattedRecord(IoErrorHandler &);
+ RT_API_ATTRS bool SetVariableFormattedRecordLength();
+ RT_API_ATTRS void DoImpliedEndfile(IoErrorHandler &);
+ RT_API_ATTRS void DoEndfile(IoErrorHandler &);
+ RT_API_ATTRS void CommitWrites();
+ RT_API_ATTRS bool CheckDirectAccess(IoErrorHandler &);
+ RT_API_ATTRS void HitEndOnRead(IoErrorHandler &);
+ RT_API_ATTRS std::int32_t ReadHeaderOrFooter(std::int64_t frameOffset);
Lock lock_;
@@ -181,23 +253,25 @@ private:
// be a child I/O statement.
class ChildIo {
public:
- ChildIo(IoStatementState &parent, OwningPtr<ChildIo> &&previous)
+ RT_API_ATTRS ChildIo(IoStatementState &parent, OwningPtr<ChildIo> &&previous)
: parent_{parent}, previous_{std::move(previous)} {}
- IoStatementState &parent() const { return parent_; }
+ RT_API_ATTRS IoStatementState &parent() const { return parent_; }
- void EndIoStatement();
+ RT_API_ATTRS void EndIoStatement();
template <typename A, typename... X>
- IoStatementState &BeginIoStatement(X &&...xs) {
+ RT_API_ATTRS IoStatementState &BeginIoStatement(X &&...xs) {
A &state{u_.emplace<A>(std::forward<X>(xs)...)};
io_.emplace(state);
return *io_;
}
- OwningPtr<ChildIo> AcquirePrevious() { return std::move(previous_); }
+ RT_API_ATTRS OwningPtr<ChildIo> AcquirePrevious() {
+ return std::move(previous_);
+ }
- Iostat CheckFormattingAndDirection(bool unformatted, Direction);
+ RT_API_ATTRS Iostat CheckFormattingAndDirection(bool unformatted, Direction);
private:
IoStatementState &parent_;
diff --git a/flang/runtime/utf.cpp b/flang/runtime/utf.cpp
index e9ccc2c04b6b..9945dc6509ec 100644
--- a/flang/runtime/utf.cpp
+++ b/flang/runtime/utf.cpp
@@ -11,7 +11,8 @@
namespace Fortran::runtime {
// clang-format off
-const std::uint8_t UTF8FirstByteTable[256]{
+RT_OFFLOAD_VAR_GROUP_BEGIN
+const RT_CONST_VAR_ATTRS std::uint8_t UTF8FirstByteTable[256]{
/* 00 - 7F: 7 bit payload in single byte */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -37,8 +38,10 @@ const std::uint8_t UTF8FirstByteTable[256]{
/* FE: 32 bit payload */ 7,
/* FF: invalid */ 0
};
+RT_OFFLOAD_VAR_GROUP_END
// clang-format on
+RT_OFFLOAD_API_GROUP_BEGIN
// Non-minimal encodings are accepted.
Fortran::common::optional<char32_t> DecodeUTF8(const char *p0) {
const std::uint8_t *p{reinterpret_cast<const std::uint8_t *>(p0)};
@@ -107,5 +110,6 @@ std::size_t EncodeUTF8(char *p0, char32_t ucs) {
return 7;
}
}
+RT_OFFLOAD_API_GROUP_END
} // namespace Fortran::runtime
diff --git a/flang/runtime/utf.h b/flang/runtime/utf.h
index 2b4e4f9a1887..29670d54b3eb 100644
--- a/flang/runtime/utf.h
+++ b/flang/runtime/utf.h
@@ -49,20 +49,22 @@ namespace Fortran::runtime {
// Derive the length of a UTF-8 character encoding from its first byte.
// A zero result signifies an invalid encoding.
-extern const std::uint8_t UTF8FirstByteTable[256];
-static inline std::size_t MeasureUTF8Bytes(char first) {
+RT_OFFLOAD_VAR_GROUP_BEGIN
+extern const RT_CONST_VAR_ATTRS std::uint8_t UTF8FirstByteTable[256];
+static constexpr std::size_t maxUTF8Bytes{7};
+RT_OFFLOAD_VAR_GROUP_END
+
+static inline RT_API_ATTRS std::size_t MeasureUTF8Bytes(char first) {
return UTF8FirstByteTable[static_cast<std::uint8_t>(first)];
}
-static constexpr std::size_t maxUTF8Bytes{7};
-
// Ensure that all bytes are present in sequence in the input buffer
// before calling; use MeasureUTF8Bytes(first byte) to count them.
-Fortran::common::optional<char32_t> DecodeUTF8(const char *);
+RT_API_ATTRS Fortran::common::optional<char32_t> DecodeUTF8(const char *);
// Ensure that at least maxUTF8Bytes remain in the output
// buffer before calling.
-std::size_t EncodeUTF8(char *, char32_t);
+RT_API_ATTRS std::size_t EncodeUTF8(char *, char32_t);
} // namespace Fortran::runtime
#endif // FORTRAN_RUNTIME_UTF_H_
diff --git a/flang/test/Fir/boxproc-2.fir b/flang/test/Fir/boxproc-2.fir
index 3279bb9bed8a..84132f89afeb 100644
--- a/flang/test/Fir/boxproc-2.fir
+++ b/flang/test/Fir/boxproc-2.fir
@@ -108,3 +108,10 @@ func.func @very_nested_type(%arg0: !fir.type<t0{t01:!fir.type<t01{t02:!fir.type<
}
// CHECK-LABEL: func.func @very_nested_type(
// CHECK-SAME: !fir.type<t0UnboxProc{t01:!fir.type<t01UnboxProc{t02:!fir.type<t02UnboxProc{t3:!fir.type<t3UnboxProc{t4:!fir.type<t4UnboxProc{t5:!fir.type<t5UnboxProc{t6:!fir.type<t6UnboxProc{t7:!fir.type<t7UnboxProc{t8:!fir.type<t8UnboxProc{t9:!fir.type<t9UnboxProc{t10:!fir.type<t10UnboxProc{t11:!fir.type<t11UnboxProc{t12:!fir.type<t12UnboxProc{t13:!fir.type<t13UnboxProc{t14:!fir.type<t14UnboxProc{t15:!fir.type<t15UnboxProc{t16:!fir.type<t16UnboxProc{t17:!fir.type<t17UnboxProc{t18:!fir.type<t18UnboxProc{t19:!fir.type<t19UnboxProc{b:() -> ()}>}>}>}>}>}>}>}>}>}>}>}>}>}>}>}>}>}>}>}>)
+
+func.func @test_indirect_type_recursion() {
+ %0 = fir.zero_bits !fir.ptr<!fir.type<t001{x:!fir.ptr<!fir.type<t002{x:!fir.ptr<!fir.type<t001>>}>>,p:!fir.boxproc<() -> ()>}>>
+ return
+}
+// CHECK-LABEL: func.func @test_indirect_type_recursion(
+// CHECK: fir.zero_bits !fir.ptr<!fir.type<t001UnboxProc{x:!fir.ptr<!fir.type<t002UnboxProc{x:!fir.ptr<!fir.type<t001UnboxProc>>}>>,p:() -> ()}>>
diff --git a/flang/test/Lower/CUDA/cuda-data-transfer.cuf b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
new file mode 100644
index 000000000000..54226b8623e6
--- /dev/null
+++ b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
@@ -0,0 +1,57 @@
+! RUN: bbc -emit-hlfir -fcuda %s -o - | FileCheck %s
+
+! Test CUDA Fortran data transfer using assignment statements.
+
+subroutine sub1()
+ integer, device :: m
+ integer, device :: adev(10)
+ integer :: i, ahost(10), bhost(10)
+
+ m = 1 + i
+
+ m = 1
+
+ adev = ahost
+
+ adev = ahost + 1
+
+ adev(1:5) = ahost(1:5)
+
+ adev = ahost + bhost
+
+end
+
+! CHECK-LABEL: func.func @_QPsub1()
+
+! CHECK: %[[ADEV:.*]]:2 = hlfir.declare %{{.*}}(%{{.*}}) {cuda_attr = #fir.cuda<device>, uniq_name = "_QFsub1Eadev"} : (!fir.ref<!fir.array<10xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>)
+! CHECK: %[[AHOST:.*]]:2 = hlfir.declare %{{.*}}(%{{.*}}) {uniq_name = "_QFsub1Eahost"} : (!fir.ref<!fir.array<10xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>)
+! CHECK: %[[I:.*]]:2 = hlfir.declare %{{.*}} {uniq_name = "_QFsub1Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[M:.*]]:2 = hlfir.declare %{{.*}} {cuda_attr = #fir.cuda<device>, uniq_name = "_QFsub1Em"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+
+! CHECK: %[[C1:.*]] = arith.constant 1 : i32
+! CHECK: %[[LOADED_I:.*]] = fir.load %[[I]]#0 : !fir.ref<i32>
+! CHECK: %[[ADD:.*]] = arith.addi %[[C1]], %[[LOADED_I]] : i32
+! CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[ADD]] {uniq_name = ".cuf_host_tmp"} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: fir.cuda_data_transfer %[[ASSOC]]#0 to %[[M]]#0 {transfer_kind = #fir.cuda_transfer<host_device>} : !fir.ref<i32>, !fir.ref<i32>
+! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<i32>, i1
+
+! CHECK: %[[C1:.*]] = arith.constant 1 : i32
+! CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[C1]] {uniq_name = ".cuf_host_tmp"} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
+! CHECK: fir.cuda_data_transfer %[[ASSOC]]#0 to %[[M]]#0 {transfer_kind = #fir.cuda_transfer<host_device>} : !fir.ref<i32>, !fir.ref<i32>
+! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<i32>, i1
+
+! CHECK: fir.cuda_data_transfer %[[AHOST]]#0 to %[[ADEV]]#0 {transfer_kind = #fir.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
+
+! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<10xi32> {
+! CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[ELEMENTAL]](%{{.*}}) {uniq_name = ".cuf_host_tmp"} : (!hlfir.expr<10xi32>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>, i1)
+! CHECK: fir.cuda_data_transfer %[[ASSOC]]#0 to %[[ADEV]]#0 {transfer_kind = #fir.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
+! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<!fir.array<10xi32>>, i1
+
+! CHECK: %[[DES_AHOST:.*]] = hlfir.designate %[[AHOST]]#0 (%c1{{.*}}:%c5{{.*}}:%c1{{.*}}) shape %{{.*}} : (!fir.ref<!fir.array<10xi32>>, index, index, index, !fir.shape<1>) -> !fir.ref<!fir.array<5xi32>>
+! CHECK: %[[DES_ADEV:.*]] = hlfir.designate %[[ADEV]]#0 (%c1{{.*}}:%c5{{.*}}:%c1{{.*}}) shape %{{.*}} : (!fir.ref<!fir.array<10xi32>>, index, index, index, !fir.shape<1>) -> !fir.ref<!fir.array<5xi32>>
+! CHECK: fir.cuda_data_transfer %[[DES_AHOST]] to %[[DES_ADEV]] {transfer_kind = #fir.cuda_transfer<host_device>} : !fir.ref<!fir.array<5xi32>>, !fir.ref<!fir.array<5xi32>>
+
+! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<10xi32>
+! CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[ELEMENTAL]](%{{.*}}) {uniq_name = ".cuf_host_tmp"} : (!hlfir.expr<10xi32>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>, i1)
+! CHECK: fir.cuda_data_transfer %[[ASSOC]]#0 to %[[ADEV]]#0 {transfer_kind = #fir.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
+! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<!fir.array<10xi32>>, i1
diff --git a/flang/test/Lower/HLFIR/cray-pointers.f90 b/flang/test/Lower/HLFIR/cray-pointers.f90
index d1f1a5647ff1..d969aa5d747a 100644
--- a/flang/test/Lower/HLFIR/cray-pointers.f90
+++ b/flang/test/Lower/HLFIR/cray-pointers.f90
@@ -204,14 +204,14 @@ end subroutine test7
! CHECK: %[[VAL_3:.*]] = fir.alloca !fir.array<5xi32> {bindc_name = "arr", uniq_name = "_QFtest7Earr"}
! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_3]](%[[VAL_4]]) {uniq_name = "_QFtest7Earr"} : (!fir.ref<!fir.array<5xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<5xi32>>, !fir.ref<!fir.array<5xi32>>)
+! CHECK: %[[VAL_12:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest7Eptr"}
+! CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFtest7Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_6:.*]] = arith.constant 5 : index
! CHECK: %[[VAL_8:.*]] = fir.shape %[[VAL_6]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_1]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFtest7Epte"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>)
! CHECK: %[[VAL_10:.*]] = fir.zero_bits !fir.ptr<!fir.array<?xi32>>
! CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_10]](%[[VAL_8]]) : (!fir.ptr<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.ptr<!fir.array<?xi32>>>
! CHECK: fir.store %[[VAL_11]] to %[[VAL_9]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
-! CHECK: %[[VAL_12:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest7Eptr"}
-! CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFtest7Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_14]] : !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_9]]#0 : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
@@ -226,14 +226,14 @@ subroutine test8()
end subroutine test8
! CHECK-LABEL: func.func @_QPtest8(
! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?xi32>>>
+! CHECK: %[[VAL_8:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest8Eptr"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFtest8Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_2:.*]] = arith.constant 5 : index
! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_1]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFtest8Epte"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>)
! CHECK: %[[VAL_6:.*]] = fir.zero_bits !fir.ptr<!fir.array<?xi32>>
! CHECK: %[[VAL_7:.*]] = fir.embox %[[VAL_6]](%[[VAL_4]]) : (!fir.ptr<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.ptr<!fir.array<?xi32>>>
! CHECK: fir.store %[[VAL_7]] to %[[VAL_5]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
-! CHECK: %[[VAL_8:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest8Eptr"}
-! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFtest8Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_10]] : !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_5]]#0 : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
@@ -256,14 +256,14 @@ subroutine test9()
end subroutine test9
! CHECK-LABEL: func.func @_QPtest9(
! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?xi32>>>
+! CHECK: %[[VAL_8:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest9Eptr"}
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFtest9Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_2:.*]] = arith.constant 5 : index
! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_1]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFtest9Epte"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>)
! CHECK: %[[VAL_6:.*]] = fir.zero_bits !fir.ptr<!fir.array<?xi32>>
! CHECK: %[[VAL_7:.*]] = fir.embox %[[VAL_6]](%[[VAL_4]]) : (!fir.ptr<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.ptr<!fir.array<?xi32>>>
! CHECK: fir.store %[[VAL_7]] to %[[VAL_5]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
-! CHECK: %[[VAL_8:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest9Eptr"}
-! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFtest9Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_10]] : !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_5]]#0 : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<none>>
@@ -287,12 +287,12 @@ subroutine test10()
end subroutine test10
! CHECK-LABEL: func.func @_QPtest10(
! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box<!fir.ptr<i32>>
+! CHECK: %[[VAL_6:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest10Eptr"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFtest10Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_1]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFtest10Epte"} : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> (!fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.ref<!fir.box<!fir.ptr<i32>>>)
! CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ptr<i32>
! CHECK: %[[VAL_5:.*]] = fir.embox %[[VAL_4]] : (!fir.ptr<i32>) -> !fir.box<!fir.ptr<i32>>
! CHECK: fir.store %[[VAL_5]] to %[[VAL_3]]#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
-! CHECK: %[[VAL_6:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest10Eptr"}
-! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFtest10Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_8]] : !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> !fir.ref<!fir.box<none>>
@@ -315,12 +315,12 @@ subroutine test11()
end subroutine test11
! CHECK-LABEL: func.func @_QPtest11(
! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box<!fir.ptr<i32>>
+! CHECK: %[[VAL_6:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest11Eptr"}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFtest11Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_1]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFtest11Epte"} : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> (!fir.ref<!fir.box<!fir.ptr<i32>>>, !fir.ref<!fir.box<!fir.ptr<i32>>>)
! CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ptr<i32>
! CHECK: %[[VAL_5:.*]] = fir.embox %[[VAL_4]] : (!fir.ptr<i32>) -> !fir.box<!fir.ptr<i32>>
! CHECK: fir.store %[[VAL_5]] to %[[VAL_3]]#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
-! CHECK: %[[VAL_6:.*]] = fir.alloca i64 {bindc_name = "ptr", uniq_name = "_QFtest11Eptr"}
-! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFtest11Eptr"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_7]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_8]] : !fir.ref<!fir.ptr<i64>>
! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref<!fir.box<!fir.ptr<i32>>>) -> !fir.ref<!fir.box<none>>
@@ -330,3 +330,97 @@ end subroutine test11
! CHECK: %[[VAL_14:.*]] = fir.box_addr %[[VAL_13]] : (!fir.box<!fir.ptr<i32>>) -> !fir.ptr<i32>
! CHECK: %[[VAL_15:.*]] = fir.load %[[VAL_14]] : !fir.ptr<i32>
! CHECK: fir.call @_QPsub2(%[[VAL_15]]) fastmath<contract> : (i32) -> ()
+
+module test_mod
+ integer(8) :: cray_pointer
+ real :: cray_pointee
+ pointer(cray_pointer, cray_pointee)
+end module
+
+subroutine test_hidden_pointer
+ ! Only the pointee is accessed, yet the pointer is needed
+ ! for lowering.
+ use test_mod, only : cray_pointee
+ call takes_real(cray_pointee)
+end
+! CHECK-LABEL: func.func @_QPtest_hidden_pointer() {
+! CHECK: %[[VAL_0:.*]] = fir.alloca !fir.box<!fir.ptr<f32>>
+! CHECK: %[[VAL_1:.*]] = fir.address_of(@_QMtest_modEcray_pointer) : !fir.ref<i64>
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QMtest_modEcray_pointer"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_0]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QMtest_modEcray_pointee"} : (!fir.ref<!fir.box<!fir.ptr<f32>>>) -> (!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.ptr<f32>>>)
+! CHECK: %[[VAL_4:.*]] = fir.zero_bits !fir.ptr<f32>
+! CHECK: %[[VAL_5:.*]] = fir.embox %[[VAL_4]] : (!fir.ptr<f32>) -> !fir.box<!fir.ptr<f32>>
+! CHECK: fir.store %[[VAL_5]] to %[[VAL_3]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
+! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_2]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
+! CHECK: %[[VAL_7:.*]] = fir.load %[[VAL_6]] : !fir.ref<!fir.ptr<i64>>
+! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref<!fir.box<!fir.ptr<f32>>>) -> !fir.ref<!fir.box<none>>
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_7]] : (!fir.ptr<i64>) -> !fir.llvm_ptr<i8>
+! CHECK: %[[VAL_10:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_8]], %[[VAL_9]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.llvm_ptr<i8>) -> none
+! CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
+! CHECK: %[[VAL_12:.*]] = fir.box_addr %[[VAL_11]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_12]] : (!fir.ptr<f32>) -> !fir.ref<f32>
+! CHECK: fir.call @_QPtakes_real(%[[VAL_13]]) fastmath<contract> : (!fir.ref<f32>) -> ()
+! CHECK: return
+! CHECK: }
+
+
+
+subroutine test_craypointer_capture(n)
+ integer :: n
+ character(n) :: cray_pointee
+ integer(8) :: cray_pointer
+ pointer(cray_pointer, cray_pointee)
+ call internal()
+ contains
+subroutine internal()
+ call takes_character(cray_pointee)
+end subroutine
+end subroutine
+! CHECK-LABEL: func.func @_QPtest_craypointer_capture(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "n"}) {
+! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.char<1,?>>>
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFtest_craypointer_captureEn"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[VAL_3:.*]] = fir.alloca i64 {bindc_name = "cray_pointer", uniq_name = "_QFtest_craypointer_captureEcray_pointer"}
+! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFtest_craypointer_captureEcray_pointer"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_2]]#0 : !fir.ref<i32>
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_7:.*]] = arith.cmpi sgt, %[[VAL_5]], %[[VAL_6]] : i32
+! CHECK: %[[VAL_8:.*]] = arith.select %[[VAL_7]], %[[VAL_5]], %[[VAL_6]] : i32
+! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_1]] typeparams %[[VAL_8]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFtest_craypointer_captureEcray_pointee"} : (!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, i32) -> (!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>)
+! CHECK: %[[VAL_10:.*]] = fir.zero_bits !fir.ptr<!fir.char<1,?>>
+! CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_10]] typeparams %[[VAL_8]] : (!fir.ptr<!fir.char<1,?>>, i32) -> !fir.box<!fir.ptr<!fir.char<1,?>>>
+! CHECK: fir.store %[[VAL_11]] to %[[VAL_9]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>
+! CHECK: %[[VAL_12:.*]] = fir.alloca tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>
+! CHECK: %[[VAL_13:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_14:.*]] = fir.coordinate_of %[[VAL_12]], %[[VAL_13]] : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>>
+! CHECK: fir.store %[[VAL_9]]#1 to %[[VAL_14]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>>
+! CHECK: %[[VAL_15:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_16:.*]] = fir.coordinate_of %[[VAL_12]], %[[VAL_15]] : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>>, i32) -> !fir.llvm_ptr<!fir.ref<i64>>
+! CHECK: fir.store %[[VAL_4]]#1 to %[[VAL_16]] : !fir.llvm_ptr<!fir.ref<i64>>
+! CHECK: fir.call @_QFtest_craypointer_capturePinternal(%[[VAL_12]]) fastmath<contract> : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>>) -> ()
+! CHECK: return
+! CHECK: }
+
+! CHECK-LABEL: func.func private @_QFtest_craypointer_capturePinternal(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>> {fir.host_assoc})
+! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>>
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>>
+! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_3]] : !fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>
+! CHECK: %[[VAL_5:.*]] = fir.box_elesize %[[VAL_4]] : (!fir.box<!fir.ptr<!fir.char<1,?>>>) -> index
+! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_3]] typeparams %[[VAL_5]] {fortran_attrs = #fir.var_attrs<pointer, host_assoc>, uniq_name = "_QFtest_craypointer_captureEcray_pointee"} : (!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, index) -> (!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>)
+! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
+! CHECK: %[[VAL_8:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_7]] : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>, !fir.ref<i64>>>, i32) -> !fir.llvm_ptr<!fir.ref<i64>>
+! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_8]] : !fir.llvm_ptr<!fir.ref<i64>>
+! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_9]] {fortran_attrs = #fir.var_attrs<host_assoc>, uniq_name = "_QFtest_craypointer_captureEcray_pointer"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
+! CHECK: %[[VAL_11:.*]] = fir.convert %[[VAL_10]]#0 : (!fir.ref<i64>) -> !fir.ref<!fir.ptr<i64>>
+! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_11]] : !fir.ref<!fir.ptr<i64>>
+! CHECK: %[[VAL_13:.*]] = fir.convert %[[VAL_6]]#0 : (!fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
+! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_12]] : (!fir.ptr<i64>) -> !fir.llvm_ptr<i8>
+! CHECK: %[[VAL_15:.*]] = fir.call @_FortranAPointerAssociateScalar(%[[VAL_13]], %[[VAL_14]]) fastmath<contract> : (!fir.ref<!fir.box<none>>, !fir.llvm_ptr<i8>) -> none
+! CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_6]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.char<1,?>>>>
+! CHECK: %[[VAL_17:.*]] = fir.box_addr %[[VAL_16]] : (!fir.box<!fir.ptr<!fir.char<1,?>>>) -> !fir.ptr<!fir.char<1,?>>
+! CHECK: %[[VAL_18:.*]] = fir.emboxchar %[[VAL_17]], %[[VAL_5]] : (!fir.ptr<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+! CHECK: fir.call @_QPtakes_character(%[[VAL_18]]) fastmath<contract> : (!fir.boxchar<1>) -> ()
+! CHECK: return
+! CHECK: }
diff --git a/flang/test/Lower/HLFIR/procedure-pointer-component-structure-constructor.f90 b/flang/test/Lower/HLFIR/procedure-pointer-component-structure-constructor.f90
new file mode 100644
index 000000000000..7b64634d10d4
--- /dev/null
+++ b/flang/test/Lower/HLFIR/procedure-pointer-component-structure-constructor.f90
@@ -0,0 +1,71 @@
+! Test passing
+! 1. NULL(),
+! 2. procedure,
+! 3. procedure pointer,
+! 4. reference to a function that returns a procedure pointer.
+! to a derived type structure constructor.
+! RUN: bbc -emit-hlfir -o - %s | FileCheck %s
+
+ MODULE M
+ TYPE :: DT
+ PROCEDURE(Fun), POINTER, NOPASS :: pp1
+ END TYPE
+
+ CONTAINS
+
+ INTEGER FUNCTION Fun(Arg)
+ INTEGER :: Arg
+ Fun = Arg
+ END FUNCTION
+
+ END MODULE
+
+ PROGRAM MAIN
+ USE M
+ IMPLICIT NONE
+ TYPE (DT), PARAMETER :: v1 = DT(NULL())
+ TYPE (DT) :: v2
+ PROCEDURE(FUN), POINTER :: pp2
+ v2 = DT(fun)
+ v2 = DT(pp2)
+ v2 = DT(bar())
+ CONTAINS
+ FUNCTION BAR() RESULT(res)
+ PROCEDURE(FUN), POINTER :: res
+ END
+ END
+
+! CHECK-LABEL: func.func @_QQmain() attributes {fir.bindc_name = "main"} {
+! CHECK: %[[VAL_0:.*]] = fir.alloca !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: %[[VAL_2:.*]] = fir.alloca !fir.boxproc<(!fir.ref<i32>) -> i32> {bindc_name = "pp2", uniq_name = "_QFEpp2"}
+! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_2]] {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFEpp2"} : (!fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>) -> (!fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>, !fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>)
+! CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "ctor.temp"} : (!fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>) -> (!fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>, !fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>)
+! CHECK: %[[VAL_23:.*]] = hlfir.designate %[[VAL_17]]#0{"pp1"} {fortran_attrs = #fir.var_attrs<pointer>} : (!fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>) -> !fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>
+! CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_5]]#0 : !fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>
+! CHECK: fir.store %[[VAL_24]] to %[[VAL_23]] : !fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>
+! CHECK: %[[VAL_25:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "ctor.temp"} : (!fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>) -> (!fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>, !fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>)
+! CHECK: %[[VAL_31:.*]] = hlfir.designate %[[VAL_25]]#0{"pp1"} {fortran_attrs = #fir.var_attrs<pointer>} : (!fir.ref<!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>>) -> !fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>
+! CHECK: %[[VAL_32:.*]] = fir.call @_QFPbar() fastmath<contract> : () -> !fir.boxproc<(!fir.ref<i32>) -> i32>
+! CHECK: fir.store %[[VAL_32]] to %[[VAL_31]] : !fir.ref<!fir.boxproc<(!fir.ref<i32>) -> i32>>
+! CHECK: return
+! CHECK: }
+
+! CHECK-LABEL: fir.global internal @_QFECv1 constant : !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}> {
+! CHECK: %[[VAL_0:.*]] = fir.undefined !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: %[[VAL_1:.*]] = fir.field_index pp1, !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: %[[VAL_2:.*]] = fir.zero_bits (!fir.ref<i32>) -> i32
+! CHECK: %[[VAL_3:.*]] = fir.emboxproc %[[VAL_2]] : ((!fir.ref<i32>) -> i32) -> !fir.boxproc<(!fir.ref<i32>) -> i32>
+! CHECK: %[[VAL_4:.*]] = fir.insert_value %[[VAL_0]], %[[VAL_3]], ["pp1", !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>] : (!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>, !fir.boxproc<(!fir.ref<i32>) -> i32>) -> !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: fir.has_value %[[VAL_4]] : !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: }
+
+! CHECK-LABEL: fir.global internal @_QQro._QMmTdt.0 constant : !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}> {
+! CHECK: %[[VAL_0:.*]] = fir.undefined !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: %[[VAL_1:.*]] = fir.field_index pp1, !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: %[[VAL_2:.*]] = fir.address_of(@_QMmPfun) : (!fir.ref<i32>) -> i32
+! CHECK: %[[VAL_3:.*]] = fir.emboxproc %[[VAL_2]] : ((!fir.ref<i32>) -> i32) -> !fir.boxproc<() -> ()>
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (!fir.boxproc<() -> ()>) -> !fir.boxproc<(!fir.ref<i32>) -> i32>
+! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_0]], %[[VAL_4]], ["pp1", !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>] : (!fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>, !fir.boxproc<(!fir.ref<i32>) -> i32>) -> !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: fir.has_value %[[VAL_5]] : !fir.type<_QMmTdt{pp1:!fir.boxproc<(!fir.ref<i32>) -> i32>}>
+! CHECK: }
diff --git a/flang/test/Lower/OpenACC/acc-kernels-loop.f90 b/flang/test/Lower/OpenACC/acc-kernels-loop.f90
index e85065e54bc3..e5791f0e5b39 100644
--- a/flang/test/Lower/OpenACC/acc-kernels-loop.f90
+++ b/flang/test/Lower/OpenACC/acc-kernels-loop.f90
@@ -37,13 +37,27 @@ subroutine acc_kernels_loop
! CHECK: %[[IFCONDITION:.*]] = fir.address_of(@{{.*}}ifcondition) : !fir.ref<!fir.logical<4>>
! CHECK: %[[DECLIFCONDITION:.*]]:2 = hlfir.declare %[[IFCONDITION]]
- !$acc kernels loop
+ !$acc kernels
+ !$acc loop
DO i = 1, n
a(i) = b(i)
END DO
+ !$acc end kernels
! CHECK: acc.kernels {
-! CHECK: acc.loop {{.*}} {
+! CHECK: acc.loop private{{.*}} {
+! CHECK: acc.yield
+! CHECK-NEXT: }{{$}}
+! CHECK: acc.terminator
+! CHECK-NEXT: }{{$}}
+
+ !$acc kernels loop
+ DO i = 1, n
+ a(i) = b(i)
+ END DO
+
+! CHECK: acc.kernels combined(loop) {
+! CHECK: acc.loop combined(kernels) private{{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -55,7 +69,7 @@ subroutine acc_kernels_loop
END DO
!$acc end kernels loop
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -68,7 +82,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[ASYNC1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.kernels async([[ASYNC1]] : i32) {
+! CHECK: acc.kernels {{.*}} async([[ASYNC1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -81,7 +95,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.kernels async([[ASYNC2]] : i32) {
+! CHECK: acc.kernels {{.*}} async([[ASYNC2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -93,7 +107,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels wait {
+! CHECK: acc.kernels {{.*}} wait {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -106,7 +120,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[WAIT1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.kernels wait({[[WAIT1]] : i32}) {
+! CHECK: acc.kernels {{.*}} wait({[[WAIT1]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -120,7 +134,7 @@ subroutine acc_kernels_loop
! CHECK: [[WAIT2:%.*]] = arith.constant 1 : i32
! CHECK: [[WAIT3:%.*]] = arith.constant 2 : i32
-! CHECK: acc.kernels wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) {
+! CHECK: acc.kernels {{.*}} wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -134,7 +148,7 @@ subroutine acc_kernels_loop
! CHECK: [[WAIT4:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: [[WAIT5:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.kernels wait({[[WAIT4]] : i32, [[WAIT5]] : i32}) {
+! CHECK: acc.kernels {{.*}} wait({[[WAIT4]] : i32, [[WAIT5]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -147,7 +161,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[NUMGANGS1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.kernels num_gangs({[[NUMGANGS1]] : i32}) {
+! CHECK: acc.kernels {{.*}} num_gangs({[[NUMGANGS1]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -160,7 +174,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[NUMGANGS2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.kernels num_gangs({[[NUMGANGS2]] : i32}) {
+! CHECK: acc.kernels {{.*}} num_gangs({[[NUMGANGS2]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -173,7 +187,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[NUMWORKERS1:%.*]] = arith.constant 10 : i32
-! CHECK: acc.kernels num_workers([[NUMWORKERS1]] : i32) {
+! CHECK: acc.kernels {{.*}} num_workers([[NUMWORKERS1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -186,7 +200,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[NUMWORKERS2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.kernels num_workers([[NUMWORKERS2]] : i32) {
+! CHECK: acc.kernels {{.*}} num_workers([[NUMWORKERS2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -199,7 +213,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[VECTORLENGTH1:%.*]] = arith.constant 128 : i32
-! CHECK: acc.kernels vector_length([[VECTORLENGTH1]] : i32) {
+! CHECK: acc.kernels {{.*}} vector_length([[VECTORLENGTH1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -212,7 +226,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[VECTORLENGTH2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.kernels vector_length([[VECTORLENGTH2]] : i32) {
+! CHECK: acc.kernels {{.*}} vector_length([[VECTORLENGTH2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -225,7 +239,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[IF1:%.*]] = arith.constant true
-! CHECK: acc.kernels if([[IF1]]) {
+! CHECK: acc.kernels {{.*}} if([[IF1]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -239,7 +253,7 @@ subroutine acc_kernels_loop
! CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref<!fir.logical<4>>
! CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1
-! CHECK: acc.kernels if([[IF2]]) {
+! CHECK: acc.kernels {{.*}} if([[IF2]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -252,7 +266,7 @@ subroutine acc_kernels_loop
END DO
! CHECK: [[SELF1:%.*]] = arith.constant true
-! CHECK: acc.kernels self([[SELF1]]) {
+! CHECK: acc.kernels {{.*}} self([[SELF1]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -264,7 +278,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}}{
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -278,7 +292,7 @@ subroutine acc_kernels_loop
! CHECK: %[[SELF2:.*]] = fir.convert %[[DECLIFCONDITION]]#1 : (!fir.ref<!fir.logical<4>>) -> i1
-! CHECK: acc.kernels self(%[[SELF2]]) {
+! CHECK: acc.kernels {{.*}} self(%[[SELF2]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -293,7 +307,7 @@ subroutine acc_kernels_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: acc.kernels dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -309,7 +323,7 @@ subroutine acc_kernels_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: acc.kernels dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -325,7 +339,7 @@ subroutine acc_kernels_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-! CHECK: acc.kernels dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -339,7 +353,7 @@ subroutine acc_kernels_loop
! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a"}
! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "b"}
-! CHECK: acc.kernels dataOperands(%[[CREATE_A]], %[[CREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[CREATE_A]], %[[CREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -355,7 +369,7 @@ subroutine acc_kernels_loop
! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "a"}
-! CHECK: acc.kernels dataOperands(%[[CREATE_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[CREATE_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -371,7 +385,7 @@ subroutine acc_kernels_loop
! CHECK: %[[NOCREATE_A:.*]] = acc.nocreate varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[NOCREATE_B:.*]] = acc.nocreate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.kernels dataOperands(%[[NOCREATE_A]], %[[NOCREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[NOCREATE_A]], %[[NOCREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -385,7 +399,7 @@ subroutine acc_kernels_loop
! CHECK: %[[PRESENT_A:.*]] = acc.present varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[PRESENT_B:.*]] = acc.present varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.kernels dataOperands(%[[PRESENT_A]], %[[PRESENT_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[PRESENT_A]], %[[PRESENT_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -399,7 +413,7 @@ subroutine acc_kernels_loop
! CHECK: %[[DEVICEPTR_A:.*]] = acc.deviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[DEVICEPTR_B:.*]] = acc.deviceptr varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.kernels dataOperands(%[[DEVICEPTR_A]], %[[DEVICEPTR_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[DEVICEPTR_A]], %[[DEVICEPTR_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -417,7 +431,7 @@ subroutine acc_kernels_loop
! CHECK: %[[BOX_G:.*]] = fir.load %[[DECLG]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
! CHECK: %[[BOX_ADDR_G:.*]] = fir.box_addr %[[BOX_G]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
! CHECK: %[[ATTACH_G:.*]] = acc.attach varPtr(%[[BOX_ADDR_G]] : !fir.ptr<f32>) -> !fir.ptr<f32> {name = "g"}
-! CHECK: acc.kernels dataOperands(%[[ATTACH_F]], %[[ATTACH_G]] : !fir.ptr<f32>, !fir.ptr<f32>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[ATTACH_F]], %[[ATTACH_G]] : !fir.ptr<f32>, !fir.ptr<f32>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -429,7 +443,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>, seq = [#acc.device_type<none>]}
@@ -441,7 +455,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {auto_ = [#acc.device_type<none>], inclusiveUpperbound = array<i1: true>}
@@ -453,7 +467,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>, independent = [#acc.device_type<none>]}
@@ -465,8 +479,8 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
-! CHECK: acc.loop gang {{.*}} {
+! CHECK: acc.kernels {{.*}} {
+! CHECK: acc.loop {{.*}} gang {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.terminator
@@ -477,9 +491,9 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[GANGNUM1:%.*]] = arith.constant 8 : i32
-! CHECK-NEXT: acc.loop gang({num=[[GANGNUM1]] : i32}) {{.*}} {
+! CHECK-NEXT: acc.loop {{.*}} gang({num=[[GANGNUM1]] : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -490,9 +504,9 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[GANGNUM2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK-NEXT: acc.loop gang({num=[[GANGNUM2]] : i32}) {{.*}} {
+! CHECK-NEXT: acc.loop {{.*}} gang({num=[[GANGNUM2]] : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -503,8 +517,8 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
-! CHECK: acc.loop gang({num=%{{.*}} : i32, static=%{{.*}} : i32})
+! CHECK: acc.kernels {{.*}} {
+! CHECK: acc.loop {{.*}} gang({num=%{{.*}} : i32, static=%{{.*}} : i32})
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -514,8 +528,9 @@ subroutine acc_kernels_loop
DO i = 1, n
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
-! CHECK: acc.loop vector {{.*}} {
+
+! CHECK: acc.kernels {{.*}} {
+! CHECK: acc.loop {{.*}} vector {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.terminator
@@ -526,9 +541,9 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[CONSTANT128:%.*]] = arith.constant 128 : i32
-! CHECK: acc.loop vector([[CONSTANT128]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} vector([[CONSTANT128]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -539,9 +554,9 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[VECTORLENGTH:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.loop vector([[VECTORLENGTH]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} vector([[VECTORLENGTH]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -552,8 +567,8 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
-! CHECK: acc.loop worker {{.*}} {
+! CHECK: acc.kernels {{.*}} {
+! CHECK: acc.loop {{.*}} worker {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.terminator
@@ -564,9 +579,9 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[WORKER128:%.*]] = arith.constant 128 : i32
-! CHECK: acc.loop worker([[WORKER128]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} worker([[WORKER128]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.terminator
@@ -579,7 +594,7 @@ subroutine acc_kernels_loop
END DO
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {collapse = [2], collapseDeviceType = [#acc.device_type<none>], inclusiveUpperbound = array<i1: true, true>}
@@ -594,7 +609,7 @@ subroutine acc_kernels_loop
END DO
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
@@ -609,7 +624,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[TILESIZE:%.*]] = arith.constant 2 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZE]] : i32}) {{.*}} {
! CHECK: acc.yield
@@ -622,7 +637,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[TILESIZEM1:%.*]] = arith.constant -1 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZEM1]] : i32}) {{.*}} {
! CHECK: acc.yield
@@ -637,7 +652,7 @@ subroutine acc_kernels_loop
END DO
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: [[TILESIZE1:%.*]] = arith.constant 2 : i32
! CHECK: [[TILESIZE2:%.*]] = arith.constant 2 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZE1]] : i32, [[TILESIZE2]] : i32}) {{.*}} {
@@ -651,7 +666,7 @@ subroutine acc_kernels_loop
a(i) = b(i)
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} tile({%{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -665,7 +680,7 @@ subroutine acc_kernels_loop
END DO
END DO
-! CHECK: acc.kernels {
+! CHECK: acc.kernels {{.*}} {
! CHECK: acc.loop {{.*}} tile({%{{.*}} : i32, %{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -680,7 +695,7 @@ subroutine acc_kernels_loop
! CHECK: %[[COPYINREDR:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<f32>) -> !fir.ref<f32> {dataClause = #acc<data_clause acc_reduction>, implicit = true, name = "reduction_r"}
! CHECK: %[[COPYINREDI:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<i32>) -> !fir.ref<i32> {dataClause = #acc<data_clause acc_reduction>, implicit = true, name = "reduction_i"}
-! CHECK: acc.kernels dataOperands(%[[COPYINREDR]], %[[COPYINREDI]] : !fir.ref<f32>, !fir.ref<i32>) {
+! CHECK: acc.kernels {{.*}} dataOperands(%[[COPYINREDR]], %[[COPYINREDI]] : !fir.ref<f32>, !fir.ref<i32>) {
! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_f32 -> %{{.*}} : !fir.ref<f32>, @reduction_mul_ref_i32 -> %{{.*}} : !fir.ref<i32>) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
diff --git a/flang/test/Lower/OpenACC/acc-loop.f90 b/flang/test/Lower/OpenACC/acc-loop.f90
index 1c64fab3536b..fa910e79bd76 100644
--- a/flang/test/Lower/OpenACC/acc-loop.f90
+++ b/flang/test/Lower/OpenACC/acc-loop.f90
@@ -324,5 +324,5 @@ end subroutine
! CHECK: %[[P_I:.*]] = acc.private varPtr(%[[DC_I]] : !fir.ref<i32>) -> !fir.ref<i32> {implicit = true, name = ""}
! CHECK: %[[P_J:.*]] = acc.private varPtr(%[[DC_J]] : !fir.ref<i32>) -> !fir.ref<i32> {implicit = true, name = ""}
! CHECK: %[[P_K:.*]] = acc.private varPtr(%[[DC_K]] : !fir.ref<i32>) -> !fir.ref<i32> {implicit = true, name = ""}
-! CHECK: acc.loop private(@privatization_ref_i32 -> %[[P_I]] : !fir.ref<i32>, @privatization_ref_i32 -> %[[P_J]] : !fir.ref<i32>, @privatization_ref_i32 -> %[[P_K]] : !fir.ref<i32>) control(%{{.*}} : i32, %{{.*}} : i32, %{{.*}} : i32) = (%c1{{.*}}, %c1{{.*}}, %c1{{.*}} : i32, i32, i32) to (%c10{{.*}}, %c100{{.*}}, %c200{{.*}} : i32, i32, i32) step (%c1{{.*}}, %c1{{.*}}, %c1{{.*}} : i32, i32, i32)
+! CHECK: acc.loop combined(parallel) private(@privatization_ref_i32 -> %[[P_I]] : !fir.ref<i32>, @privatization_ref_i32 -> %[[P_J]] : !fir.ref<i32>, @privatization_ref_i32 -> %[[P_K]] : !fir.ref<i32>) control(%{{.*}} : i32, %{{.*}} : i32, %{{.*}} : i32) = (%c1{{.*}}, %c1{{.*}}, %c1{{.*}} : i32, i32, i32) to (%c10{{.*}}, %c100{{.*}}, %c200{{.*}} : i32, i32, i32) step (%c1{{.*}}, %c1{{.*}}, %c1{{.*}} : i32, i32, i32)
! CHECK: } attributes {inclusiveUpperbound = array<i1: true, true, true>}
diff --git a/flang/test/Lower/OpenACC/acc-parallel-loop.f90 b/flang/test/Lower/OpenACC/acc-parallel-loop.f90
index 39f18307486a..48ceda0710e8 100644
--- a/flang/test/Lower/OpenACC/acc-parallel-loop.f90
+++ b/flang/test/Lower/OpenACC/acc-parallel-loop.f90
@@ -39,13 +39,27 @@ subroutine acc_parallel_loop
! CHECK: %[[IFCONDITION:.*]] = fir.address_of(@{{.*}}ifcondition) : !fir.ref<!fir.logical<4>>
! CHECK: %[[DECLIFCONDITION:.*]]:2 = hlfir.declare %[[IFCONDITION]]
- !$acc parallel loop
+ !$acc parallel
+ !$acc loop
DO i = 1, n
a(i) = b(i)
END DO
+ !$acc end parallel
! CHECK: acc.parallel {
-! CHECK: acc.loop {{.*}} {
+! CHECK: acc.loop private{{.*}} {
+! CHECK: acc.yield
+! CHECK-NEXT: }{{$}}
+! CHECK: acc.yield
+! CHECK-NEXT: }{{$}}
+
+ !$acc parallel loop
+ DO i = 1, n
+ a(i) = b(i)
+ END DO
+
+! CHECK: acc.parallel combined(loop) {
+! CHECK: acc.loop combined(parallel) private{{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -57,7 +71,7 @@ subroutine acc_parallel_loop
END DO
!$acc end parallel loop
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -70,7 +84,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[ASYNC1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.parallel async([[ASYNC1]] : i32) {
+! CHECK: acc.parallel {{.*}} async([[ASYNC1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -83,7 +97,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.parallel async([[ASYNC2]] : i32) {
+! CHECK: acc.parallel {{.*}} async([[ASYNC2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -95,7 +109,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel wait {
+! CHECK: acc.parallel {{.*}} wait {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -108,7 +122,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[WAIT1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.parallel wait({[[WAIT1]] : i32}) {
+! CHECK: acc.parallel {{.*}} wait({[[WAIT1]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -122,7 +136,7 @@ subroutine acc_parallel_loop
! CHECK: [[WAIT2:%.*]] = arith.constant 1 : i32
! CHECK: [[WAIT3:%.*]] = arith.constant 2 : i32
-! CHECK: acc.parallel wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) {
+! CHECK: acc.parallel {{.*}} wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -136,7 +150,7 @@ subroutine acc_parallel_loop
! CHECK: [[WAIT4:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: [[WAIT5:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.parallel wait({[[WAIT4]] : i32, [[WAIT5]] : i32}) {
+! CHECK: acc.parallel {{.*}} wait({[[WAIT4]] : i32, [[WAIT5]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -149,7 +163,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[NUMGANGS1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.parallel num_gangs({[[NUMGANGS1]] : i32}) {
+! CHECK: acc.parallel {{.*}} num_gangs({[[NUMGANGS1]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -162,7 +176,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[NUMGANGS2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.parallel num_gangs({[[NUMGANGS2]] : i32}) {
+! CHECK: acc.parallel {{.*}} num_gangs({[[NUMGANGS2]] : i32}) {
! CHECK: acc.loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -175,7 +189,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[NUMWORKERS1:%.*]] = arith.constant 10 : i32
-! CHECK: acc.parallel num_workers([[NUMWORKERS1]] : i32) {
+! CHECK: acc.parallel {{.*}} num_workers([[NUMWORKERS1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -188,7 +202,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[NUMWORKERS2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.parallel num_workers([[NUMWORKERS2]] : i32) {
+! CHECK: acc.parallel {{.*}} num_workers([[NUMWORKERS2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -201,7 +215,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[VECTORLENGTH1:%.*]] = arith.constant 128 : i32
-! CHECK: acc.parallel vector_length([[VECTORLENGTH1]] : i32) {
+! CHECK: acc.parallel {{.*}} vector_length([[VECTORLENGTH1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -214,7 +228,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[VECTORLENGTH2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.parallel vector_length([[VECTORLENGTH2]] : i32) {
+! CHECK: acc.parallel {{.*}} vector_length([[VECTORLENGTH2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -227,7 +241,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[IF1:%.*]] = arith.constant true
-! CHECK: acc.parallel if([[IF1]]) {
+! CHECK: acc.parallel {{.*}} if([[IF1]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -241,7 +255,7 @@ subroutine acc_parallel_loop
! CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref<!fir.logical<4>>
! CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1
-! CHECK: acc.parallel if([[IF2]]) {
+! CHECK: acc.parallel {{.*}} if([[IF2]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -254,7 +268,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: [[SELF1:%.*]] = arith.constant true
-! CHECK: acc.parallel self([[SELF1]]) {
+! CHECK: acc.parallel {{.*}} self([[SELF1]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -266,7 +280,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -279,7 +293,7 @@ subroutine acc_parallel_loop
END DO
! CHECK: %[[SELF2:.*]] = fir.convert %[[DECLIFCONDITION]]#1 : (!fir.ref<!fir.logical<4>>) -> i1
-! CHECK: acc.parallel self(%[[SELF2]]) {
+! CHECK: acc.parallel {{.*}} self(%[[SELF2]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -293,7 +307,7 @@ subroutine acc_parallel_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: acc.parallel dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -309,7 +323,7 @@ subroutine acc_parallel_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: acc.parallel dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -325,7 +339,7 @@ subroutine acc_parallel_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-! CHECK: acc.parallel dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -339,7 +353,7 @@ subroutine acc_parallel_loop
! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a"}
! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "b"}
-! CHECK: acc.parallel dataOperands(%[[CREATE_A]], %[[CREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[CREATE_A]], %[[CREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -355,7 +369,7 @@ subroutine acc_parallel_loop
! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "a"}
-! CHECK: acc.parallel dataOperands(%[[CREATE_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[CREATE_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -371,7 +385,7 @@ subroutine acc_parallel_loop
! CHECK: %[[NOCREATE_A:.*]] = acc.nocreate varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[NOCREATE_B:.*]] = acc.nocreate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.parallel dataOperands(%[[NOCREATE_A]], %[[NOCREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[NOCREATE_A]], %[[NOCREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -385,7 +399,7 @@ subroutine acc_parallel_loop
! CHECK: %[[PRESENT_A:.*]] = acc.present varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[PRESENT_B:.*]] = acc.present varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.parallel dataOperands(%[[PRESENT_A]], %[[PRESENT_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[PRESENT_A]], %[[PRESENT_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -399,7 +413,7 @@ subroutine acc_parallel_loop
! CHECK: %[[DEVICEPTR_A:.*]] = acc.deviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[DEVICEPTR_B:.*]] = acc.deviceptr varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.parallel dataOperands(%[[DEVICEPTR_A]], %[[DEVICEPTR_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[DEVICEPTR_A]], %[[DEVICEPTR_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -417,7 +431,7 @@ subroutine acc_parallel_loop
! CHECK: %[[BOX_G:.*]] = fir.load %[[DECLG]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
! CHECK: %[[BOX_ADDR_G:.*]] = fir.box_addr %[[BOX_G]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
! CHECK: %[[ATTACH_G:.*]] = acc.attach varPtr(%[[BOX_ADDR_G]] : !fir.ptr<f32>) -> !fir.ptr<f32> {name = "g"}
-! CHECK: acc.parallel dataOperands(%[[ATTACH_F]], %[[ATTACH_G]] : !fir.ptr<f32>, !fir.ptr<f32>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[ATTACH_F]], %[[ATTACH_G]] : !fir.ptr<f32>, !fir.ptr<f32>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -430,9 +444,9 @@ subroutine acc_parallel_loop
END DO
! CHECK: %[[ACC_PRIVATE_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.parallel firstprivate(@firstprivatization_section_ext10_ref_10xf32 -> %[[ACC_PRIVATE_B]] : !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_ext10_ref_10xf32 -> %[[ACC_PRIVATE_B]] : !fir.ref<!fir.array<10xf32>>) {
! CHECK: %[[ACC_PRIVATE_A:.*]] = acc.private varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: acc.loop private({{.*}}@privatization_ref_10xf32 -> %[[ACC_PRIVATE_A]] : !fir.ref<!fir.array<10xf32>>)
+! CHECK: acc.loop {{.*}} private({{.*}}@privatization_ref_10xf32 -> %[[ACC_PRIVATE_A]] : !fir.ref<!fir.array<10xf32>>)
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -444,7 +458,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>, seq = [#acc.device_type<none>]}
@@ -456,7 +470,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {auto_ = [#acc.device_type<none>], inclusiveUpperbound = array<i1: true>}
@@ -468,7 +482,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>, independent = [#acc.device_type<none>]}
@@ -480,8 +494,8 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
-! CHECK: acc.loop gang
+! CHECK: acc.parallel {{.*}} {
+! CHECK: acc.loop {{.*}} gang
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.yield
@@ -492,9 +506,9 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[GANGNUM1:%.*]] = arith.constant 8 : i32
-! CHECK-NEXT: acc.loop gang({num=[[GANGNUM1]] : i32})
+! CHECK-NEXT: acc.loop {{.*}} gang({num=[[GANGNUM1]] : i32})
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -505,9 +519,9 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[GANGNUM2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK-NEXT: acc.loop gang({num=[[GANGNUM2]] : i32})
+! CHECK-NEXT: acc.loop {{.*}} gang({num=[[GANGNUM2]] : i32})
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -518,8 +532,8 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
-! CHECK: acc.loop gang({num=%{{.*}} : i32, static=%{{.*}} : i32})
+! CHECK: acc.parallel {{.*}} {
+! CHECK: acc.loop {{.*}} gang({num=%{{.*}} : i32, static=%{{.*}} : i32})
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -529,8 +543,9 @@ subroutine acc_parallel_loop
DO i = 1, n
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
-! CHECK: acc.loop vector
+
+! CHECK: acc.parallel {{.*}} {
+! CHECK: acc.loop {{.*}} vector
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.yield
@@ -541,9 +556,9 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[CONSTANT128:%.*]] = arith.constant 128 : i32
-! CHECK: acc.loop vector([[CONSTANT128]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} vector([[CONSTANT128]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -554,9 +569,9 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[VECTORLENGTH:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.loop vector([[VECTORLENGTH]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} vector([[VECTORLENGTH]] : i32) {{.*}} {
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -568,8 +583,8 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
-! CHECK: acc.loop worker {{.*}} {
+! CHECK: acc.parallel {{.*}} {
+! CHECK: acc.loop {{.*}} worker {{.*}} {
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
@@ -581,9 +596,9 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[WORKER128:%.*]] = arith.constant 128 : i32
-! CHECK: acc.loop worker([[WORKER128]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} worker([[WORKER128]] : i32) {{.*}} {
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -597,7 +612,7 @@ subroutine acc_parallel_loop
END DO
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {collapse = [2], collapseDeviceType = [#acc.device_type<none>], inclusiveUpperbound = array<i1: true, true>}
@@ -612,7 +627,7 @@ subroutine acc_parallel_loop
END DO
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
@@ -627,7 +642,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[TILESIZE:%.*]] = arith.constant 2 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZE]] : i32}) {{.*}} {
! CHECK: acc.yield
@@ -640,7 +655,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[TILESIZEM1:%.*]] = arith.constant -1 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZEM1]] : i32}) {{.*}} {
! CHECK: acc.yield
@@ -655,7 +670,7 @@ subroutine acc_parallel_loop
END DO
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: [[TILESIZE1:%.*]] = arith.constant 2 : i32
! CHECK: [[TILESIZE2:%.*]] = arith.constant 2 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZE1]] : i32, [[TILESIZE2]] : i32}) {{.*}} {
@@ -669,7 +684,7 @@ subroutine acc_parallel_loop
a(i) = b(i)
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} tile({%{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -683,7 +698,7 @@ subroutine acc_parallel_loop
END DO
END DO
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: acc.loop {{.*}} tile({%{{.*}} : i32, %{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -698,7 +713,7 @@ subroutine acc_parallel_loop
! CHECK: %[[COPYINREDR:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<f32>) -> !fir.ref<f32> {dataClause = #acc<data_clause acc_reduction>, implicit = true, name = "reduction_r"}
! CHECK: %[[COPYINREDI:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<i32>) -> !fir.ref<i32> {dataClause = #acc<data_clause acc_reduction>, implicit = true, name = "reduction_i"}
-! CHECK: acc.parallel dataOperands(%[[COPYINREDR]], %[[COPYINREDI]] : !fir.ref<f32>, !fir.ref<i32>) {
+! CHECK: acc.parallel {{.*}} dataOperands(%[[COPYINREDR]], %[[COPYINREDI]] : !fir.ref<f32>, !fir.ref<i32>) {
! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_f32 -> %{{.*}} : !fir.ref<f32>, @reduction_mul_ref_i32 -> %{{.*}} : !fir.ref<i32>) {{.*}}
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
diff --git a/flang/test/Lower/OpenACC/acc-private.f90 b/flang/test/Lower/OpenACC/acc-private.f90
index ba582d40f0f5..4d9f84b1fa74 100644
--- a/flang/test/Lower/OpenACC/acc-private.f90
+++ b/flang/test/Lower/OpenACC/acc-private.f90
@@ -228,7 +228,7 @@ program acc_private
END DO
! CHECK: %[[FP_C:.*]] = acc.firstprivate varPtr(%[[DECLC]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "c"}
-! CHECK: acc.parallel firstprivate(@firstprivatization_ref_i32 -> %[[FP_C]] : !fir.ref<i32>)
+! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_ref_i32 -> %[[FP_C]] : !fir.ref<i32>)
! CHECK: acc.yield
!$acc parallel loop firstprivate(b)
@@ -242,7 +242,7 @@ program acc_private
! CHECK: %[[UB:.*]] = arith.subi %{{.*}}, %[[C1]] : index
! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xf32>> {name = "b"}
-! CHECK: acc.parallel firstprivate(@firstprivatization_section_ext100_ref_100xf32 -> %[[FP_B]] : !fir.ref<!fir.array<100xf32>>)
+! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_ext100_ref_100xf32 -> %[[FP_B]] : !fir.ref<!fir.array<100xf32>>)
! CHECK: acc.yield
!$acc parallel loop firstprivate(b(51:100))
@@ -256,7 +256,7 @@ program acc_private
! CHECK: %[[UB:.*]] = arith.constant 99 : index
! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<50xf32>> {name = "b(51:100)"}
-! CHECK: acc.parallel firstprivate(@firstprivatization_section_lb50.ub99_ref_50xf32 -> %[[FP_B]] : !fir.ref<!fir.array<50xf32>>)
+! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_lb50.ub99_ref_50xf32 -> %[[FP_B]] : !fir.ref<!fir.array<50xf32>>)
end program
@@ -272,10 +272,10 @@ end subroutine
! CHECK-LABEL: func.func @_QPacc_private_assumed_shape(
! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]] {uniq_name = "_QFacc_private_assumed_shapeEa"} : (!fir.box<!fir.array<?xi32>>) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.loop private({{.*}}@privatization_box_Uxi32 -> %[[PRIVATE]] : !fir.ref<!fir.array<?xi32>>)
+! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_Uxi32 -> %[[PRIVATE]] : !fir.ref<!fir.array<?xi32>>)
subroutine acc_private_allocatable_array(a, n)
integer, allocatable :: a(:)
@@ -294,11 +294,11 @@ end subroutine
! CHECK-LABEL: func.func @_QPacc_private_allocatable_array(
! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {fir.bindc_name = "a"}
! CHECK: %[[DECLA_A:.*]]:2 = hlfir.declare %[[ARG0]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFacc_private_allocatable_arrayEa"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: %[[BOX:.*]] = fir.load %[[DECLA_A]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.heap<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.loop private({{.*}}@privatization_box_heap_Uxi32 -> %[[PRIVATE]] : !fir.heap<!fir.array<?xi32>>)
+! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_heap_Uxi32 -> %[[PRIVATE]] : !fir.heap<!fir.array<?xi32>>)
! CHECK: acc.serial private(@privatization_box_heap_Uxi32 -> %{{.*}} : !fir.heap<!fir.array<?xi32>>)
subroutine acc_private_pointer_array(a, n)
@@ -314,11 +314,11 @@ end subroutine
! CHECK-LABEL: func.func @_QPacc_private_pointer_array(
! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>> {fir.bindc_name = "a"}, %arg1: !fir.ref<i32> {fir.bindc_name = "n"}) {
! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %arg0 {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFacc_private_pointer_arrayEa"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>)
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: %[[BOX:.*]] = fir.load %[[DECLA_A]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.ptr<!fir.array<?xi32>>>) -> !fir.ptr<!fir.array<?xi32>>
! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.ptr<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.loop private({{.*}}@privatization_box_ptr_Uxi32 -> %[[PRIVATE]] : !fir.ptr<!fir.array<?xi32>>)
+! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_ptr_Uxi32 -> %[[PRIVATE]] : !fir.ptr<!fir.array<?xi32>>)
subroutine acc_private_dynamic_extent(a, n)
integer :: n, i
@@ -334,10 +334,10 @@ end subroutine
! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<?x?x2xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<i32> {fir.bindc_name = "n"}) {
! CHECK: %[[DECL_N:.*]]:2 = hlfir.declare %[[ARG1]] {uniq_name = "_QFacc_private_dynamic_extentEn"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) {uniq_name = "_QFacc_private_dynamic_extentEa"} : (!fir.ref<!fir.array<?x?x2xi32>>, !fir.shape<3>) -> (!fir.box<!fir.array<?x?x2xi32>>, !fir.ref<!fir.array<?x?x2xi32>>)
-! CHECK: acc.parallel {
+! CHECK: acc.parallel {{.*}} {
! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box<!fir.array<?x?x2xi32>>) -> !fir.ref<!fir.array<?x?x2xi32>>
! CHECK: %[[PRIV:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?x?x2xi32>>) bounds(%{{.*}}, %{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<?x?x2xi32>> {name = "a"}
-! CHECK: acc.loop private({{.*}}@privatization_box_UxUx2xi32 -> %[[PRIV]] : !fir.ref<!fir.array<?x?x2xi32>>)
+! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_UxUx2xi32 -> %[[PRIV]] : !fir.ref<!fir.array<?x?x2xi32>>)
subroutine acc_firstprivate_assumed_shape(a, n)
integer :: a(:), i, n
@@ -367,7 +367,7 @@ subroutine acc_firstprivate_dynamic_extent(a, n)
end do
end subroutine
-! CHECK: acc.parallel firstprivate(@firstprivatization_box_UxUx2xi32 -> %{{.*}} : !fir.ref<!fir.array<?x?x2xi32>>)
+! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_box_UxUx2xi32 -> %{{.*}} : !fir.ref<!fir.array<?x?x2xi32>>)
module acc_declare_equivalent
integer, parameter :: n = 10
@@ -398,6 +398,6 @@ end
! CHECK: acc.parallel
! CHECK: %[[PRIV_I:.*]] = acc.private varPtr(%[[DECL_I]]#1 : !fir.ref<i32>) -> !fir.ref<i32> {implicit = true, name = ""}
! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[PRIV_I]] {uniq_name = "_QFacc_private_useEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: acc.loop private(@privatization_ref_i32 -> %[[PRIV_I]] : !fir.ref<i32>) control(%[[IV0:.*]] : i32) = (%c1{{.*}} : i32) to (%c10{{.*}} : i32) step (%c1{{.*}} : i32)
+! CHECK: acc.loop {{.*}} private(@privatization_ref_i32 -> %[[PRIV_I]] : !fir.ref<i32>) control(%[[IV0:.*]] : i32) = (%c1{{.*}} : i32) to (%c10{{.*}} : i32) step (%c1{{.*}} : i32)
! CHECK: fir.store %[[IV0]] to %[[DECL_PRIV_I]]#0 : !fir.ref<i32>
! CHECK: %{{.*}} = fir.load %[[DECL_PRIV_I]]#0 : !fir.ref<i32>
diff --git a/flang/test/Lower/OpenACC/acc-serial-loop.f90 b/flang/test/Lower/OpenACC/acc-serial-loop.f90
index 77f5e1eaa7c8..fa3b3f758908 100644
--- a/flang/test/Lower/OpenACC/acc-serial-loop.f90
+++ b/flang/test/Lower/OpenACC/acc-serial-loop.f90
@@ -58,13 +58,27 @@ subroutine acc_serial_loop
! CHECK: %[[IFCONDITION:.*]] = fir.address_of(@{{.*}}ifcondition) : !fir.ref<!fir.logical<4>>
! CHECK: %[[DECLIFCONDITION:.*]]:2 = hlfir.declare %[[IFCONDITION]]
- !$acc serial loop
+ !$acc serial
+ !$acc loop
DO i = 1, n
a(i) = b(i)
END DO
+ !$acc end serial
! CHECK: acc.serial {
-! CHECK: acc.loop {{.*}} {
+! CHECK: acc.loop private{{.*}} {
+! CHECK: acc.yield
+! CHECK-NEXT: }{{$}}
+! CHECK: acc.yield
+! CHECK-NEXT: }{{$}}
+
+ !$acc serial loop
+ DO i = 1, n
+ a(i) = b(i)
+ END DO
+
+! CHECK: acc.serial combined(loop) {
+! CHECK: acc.loop combined(serial) private{{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -76,7 +90,7 @@ subroutine acc_serial_loop
END DO
!$acc end serial loop
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -89,7 +103,7 @@ subroutine acc_serial_loop
END DO
! CHECK: [[ASYNC1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.serial async([[ASYNC1]] : i32) {
+! CHECK: acc.serial {{.*}} async([[ASYNC1]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -102,7 +116,7 @@ subroutine acc_serial_loop
END DO
! CHECK: [[ASYNC2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.serial async([[ASYNC2]] : i32) {
+! CHECK: acc.serial {{.*}} async([[ASYNC2]] : i32) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -114,7 +128,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial wait {
+! CHECK: acc.serial {{.*}} wait {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -127,7 +141,7 @@ subroutine acc_serial_loop
END DO
! CHECK: [[WAIT1:%.*]] = arith.constant 1 : i32
-! CHECK: acc.serial wait({[[WAIT1]] : i32}) {
+! CHECK: acc.serial {{.*}} wait({[[WAIT1]] : i32}) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -141,7 +155,7 @@ subroutine acc_serial_loop
! CHECK: [[WAIT2:%.*]] = arith.constant 1 : i32
! CHECK: [[WAIT3:%.*]] = arith.constant 2 : i32
-! CHECK: acc.serial wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) {
+! CHECK: acc.serial {{.*}} wait({[[WAIT2]] : i32, [[WAIT3]] : i32}) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -155,7 +169,7 @@ subroutine acc_serial_loop
! CHECK: [[WAIT4:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
! CHECK: [[WAIT5:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.serial wait({[[WAIT4]] : i32, [[WAIT5]] : i32}) {
+! CHECK: acc.serial {{.*}} wait({[[WAIT4]] : i32, [[WAIT5]] : i32}) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -168,7 +182,7 @@ subroutine acc_serial_loop
END DO
! CHECK: [[IF1:%.*]] = arith.constant true
-! CHECK: acc.serial if([[IF1]]) {
+! CHECK: acc.serial {{.*}} if([[IF1]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -182,7 +196,7 @@ subroutine acc_serial_loop
! CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref<!fir.logical<4>>
! CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1
-! CHECK: acc.serial if([[IF2]]) {
+! CHECK: acc.serial {{.*}} if([[IF2]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -195,7 +209,7 @@ subroutine acc_serial_loop
END DO
! CHECK: [[SELF1:%.*]] = arith.constant true
-! CHECK: acc.serial self([[SELF1]]) {
+! CHECK: acc.serial {{.*}} self([[SELF1]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -207,7 +221,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -220,7 +234,7 @@ subroutine acc_serial_loop
END DO
! CHECK: %[[SELF2:.*]] = fir.convert %[[DECLIFCONDITION]]#1 : (!fir.ref<!fir.logical<4>>) -> i1
-! CHECK: acc.serial self(%[[SELF2]]) {
+! CHECK: acc.serial {{.*}} self(%[[SELF2]]) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -234,7 +248,7 @@ subroutine acc_serial_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: acc.serial dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -250,7 +264,7 @@ subroutine acc_serial_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: acc.serial dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -266,7 +280,7 @@ subroutine acc_serial_loop
! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-! CHECK: acc.serial dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -280,7 +294,7 @@ subroutine acc_serial_loop
! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a"}
! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "b"}
-! CHECK: acc.serial dataOperands(%[[CREATE_A]], %[[CREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[CREATE_A]], %[[CREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -296,7 +310,7 @@ subroutine acc_serial_loop
! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "a"}
-! CHECK: acc.serial dataOperands(%[[CREATE_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[CREATE_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -312,7 +326,7 @@ subroutine acc_serial_loop
! CHECK: %[[NOCREATE_A:.*]] = acc.nocreate varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[NOCREATE_B:.*]] = acc.nocreate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.serial dataOperands(%[[NOCREATE_A]], %[[NOCREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[NOCREATE_A]], %[[NOCREATE_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -326,7 +340,7 @@ subroutine acc_serial_loop
! CHECK: %[[PRESENT_A:.*]] = acc.present varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[PRESENT_B:.*]] = acc.present varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.serial dataOperands(%[[PRESENT_A]], %[[PRESENT_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[PRESENT_A]], %[[PRESENT_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -340,7 +354,7 @@ subroutine acc_serial_loop
! CHECK: %[[DEVICEPTR_A:.*]] = acc.deviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
! CHECK: %[[DEVICEPTR_B:.*]] = acc.deviceptr varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.serial dataOperands(%[[DEVICEPTR_A]], %[[DEVICEPTR_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[DEVICEPTR_A]], %[[DEVICEPTR_B]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -358,7 +372,7 @@ subroutine acc_serial_loop
! CHECK: %[[BOX_G:.*]] = fir.load %[[DECLG]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
! CHECK: %[[BOX_ADDR_G:.*]] = fir.box_addr %[[BOX_G]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
! CHECK: %[[ATTACH_G:.*]] = acc.attach varPtr(%[[BOX_ADDR_G]] : !fir.ptr<f32>) -> !fir.ptr<f32> {name = "g"}
-! CHECK: acc.serial dataOperands(%[[ATTACH_F]], %[[ATTACH_G]] : !fir.ptr<f32>, !fir.ptr<f32>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[ATTACH_F]], %[[ATTACH_G]] : !fir.ptr<f32>, !fir.ptr<f32>) {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -371,9 +385,9 @@ subroutine acc_serial_loop
END DO
! CHECK: %[[ACC_FPRIVATE_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "b"}
-! CHECK: acc.serial firstprivate(@firstprivatization_section_ext10_ref_10xf32 -> %[[ACC_FPRIVATE_B]] : !fir.ref<!fir.array<10xf32>>) {
+! CHECK: acc.serial {{.*}} firstprivate(@firstprivatization_section_ext10_ref_10xf32 -> %[[ACC_FPRIVATE_B]] : !fir.ref<!fir.array<10xf32>>) {
! CHECK: %[[ACC_PRIVATE_A:.*]] = acc.private varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: acc.loop private({{.*}}@privatization_ref_10xf32 -> %[[ACC_PRIVATE_A]] : !fir.ref<!fir.array<10xf32>>)
+! CHECK: acc.loop {{.*}} private({{.*}}@privatization_ref_10xf32 -> %[[ACC_PRIVATE_A]] : !fir.ref<!fir.array<10xf32>>)
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -385,7 +399,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>, seq = [#acc.device_type<none>]}
@@ -397,7 +411,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {auto_ = [#acc.device_type<none>], inclusiveUpperbound = array<i1: true>}
@@ -409,7 +423,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>, independent = [#acc.device_type<none>]}
@@ -421,8 +435,8 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
-! CHECK: acc.loop gang {{.*}} {
+! CHECK: acc.serial {{.*}} {
+! CHECK: acc.loop {{.*}} gang {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.yield
@@ -433,9 +447,9 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[GANGNUM1:%.*]] = arith.constant 8 : i32
-! CHECK-NEXT: acc.loop gang({num=[[GANGNUM1]] : i32}) {{.*}} {
+! CHECK-NEXT: acc.loop {{.*}} gang({num=[[GANGNUM1]] : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -446,9 +460,9 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[GANGNUM2:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK-NEXT: acc.loop gang({num=[[GANGNUM2]] : i32}) {{.*}} {
+! CHECK-NEXT: acc.loop {{.*}} gang({num=[[GANGNUM2]] : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -459,8 +473,8 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
-! CHECK: acc.loop gang({num=%{{.*}} : i32, static=%{{.*}} : i32}) {{.*}} {
+! CHECK: acc.serial {{.*}} {
+! CHECK: acc.loop {{.*}} gang({num=%{{.*}} : i32, static=%{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -470,8 +484,9 @@ subroutine acc_serial_loop
DO i = 1, n
a(i) = b(i)
END DO
-! CHECK: acc.serial {
-! CHECK: acc.loop vector {{.*}} {
+
+! CHECK: acc.serial {{.*}} {
+! CHECK: acc.loop {{.*}} vector {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.yield
@@ -482,9 +497,9 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[CONSTANT128:%.*]] = arith.constant 128 : i32
-! CHECK: acc.loop vector([[CONSTANT128]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} vector([[CONSTANT128]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -495,9 +510,9 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[VECTORLENGTH:%.*]] = fir.load %{{.*}} : !fir.ref<i32>
-! CHECK: acc.loop vector([[VECTORLENGTH]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} vector([[VECTORLENGTH]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -508,8 +523,8 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
-! CHECK: acc.loop worker {{.*}} {
+! CHECK: acc.serial {{.*}} {
+! CHECK: acc.loop {{.*}} worker {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: } attributes {inclusiveUpperbound = array<i1: true>}{{$}}
! CHECK: acc.yield
@@ -520,9 +535,9 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[WORKER128:%.*]] = arith.constant 128 : i32
-! CHECK: acc.loop worker([[WORKER128]] : i32) {{.*}} {
+! CHECK: acc.loop {{.*}} worker([[WORKER128]] : i32) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
! CHECK: acc.yield
@@ -535,7 +550,7 @@ subroutine acc_serial_loop
END DO
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
@@ -551,7 +566,7 @@ subroutine acc_serial_loop
END DO
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.loop {{.*}} {
! CHECK: acc.yield
@@ -566,7 +581,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[TILESIZE:%.*]] = arith.constant 2 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZE]] : i32}) {{.*}} {
! CHECK: acc.yield
@@ -579,7 +594,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[TILESIZEM1:%.*]] = arith.constant -1 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZEM1]] : i32}) {{.*}} {
! CHECK: acc.yield
@@ -594,7 +609,7 @@ subroutine acc_serial_loop
END DO
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: [[TILESIZE1:%.*]] = arith.constant 2 : i32
! CHECK: [[TILESIZE2:%.*]] = arith.constant 2 : i32
! CHECK: acc.loop {{.*}} tile({[[TILESIZE1]] : i32, [[TILESIZE2]] : i32}) {{.*}} {
@@ -608,7 +623,7 @@ subroutine acc_serial_loop
a(i) = b(i)
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} tile({%{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -622,7 +637,7 @@ subroutine acc_serial_loop
END DO
END DO
-! CHECK: acc.serial {
+! CHECK: acc.serial {{.*}} {
! CHECK: acc.loop {{.*}} tile({%{{.*}} : i32, %{{.*}} : i32}) {{.*}} {
! CHECK: acc.yield
! CHECK-NEXT: }{{$}}
@@ -637,7 +652,7 @@ subroutine acc_serial_loop
! CHECK: %[[COPYINREDR:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<f32>) -> !fir.ref<f32> {dataClause = #acc<data_clause acc_reduction>, implicit = true, name = "reduction_r"}
! CHECK: %[[COPYINREDI:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<i32>) -> !fir.ref<i32> {dataClause = #acc<data_clause acc_reduction>, implicit = true, name = "reduction_i"}
-! CHECK: acc.serial dataOperands(%[[COPYINREDR]], %[[COPYINREDI]] : !fir.ref<f32>, !fir.ref<i32>) {
+! CHECK: acc.serial {{.*}} dataOperands(%[[COPYINREDR]], %[[COPYINREDI]] : !fir.ref<f32>, !fir.ref<i32>) {
! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_f32 -> %{{.*}} : !fir.ref<f32>, @reduction_mul_ref_i32 -> %{{.*}} : !fir.ref<i32>)
! CHECK-NOT: fir.do_loop
! CHECK: acc.yield
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90
new file mode 100644
index 000000000000..9e9951c399c9
--- /dev/null
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-multi.f90
@@ -0,0 +1,81 @@
+! RUN: bbc -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
+! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
+
+!CHECK-LABEL: omp.declare_reduction
+!CHECK-SAME: @[[MIN_RED_I32_NAME:.*]] : i32 init {
+!CHECK: ^bb0(%{{.*}}: i32):
+!CHECK: %[[C0_1:.*]] = arith.constant 2147483647 : i32
+!CHECK: omp.yield(%[[C0_1]] : i32)
+!CHECK: } combiner {
+!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
+!CHECK: %[[RES:.*]] = arith.minsi %[[ARG0]], %[[ARG1]] : i32
+!CHECK: omp.yield(%[[RES]] : i32)
+!CHECK: }
+
+!CHECK-LABEL: omp.declare_reduction
+!CHECK-SAME: @[[ADD_RED_F32_NAME:.*]] : f32 init {
+!CHECK: ^bb0(%{{.*}}: f32):
+!CHECK: %[[C0_1:.*]] = arith.constant 0.000000e+00 : f32
+!CHECK: omp.yield(%[[C0_1]] : f32)
+!CHECK: } combiner {
+!CHECK: ^bb0(%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32):
+!CHECK: %[[RES:.*]] = arith.addf %[[ARG0]], %[[ARG1]] {{.*}} : f32
+!CHECK: omp.yield(%[[RES]] : f32)
+!CHECK: }
+
+!CHECK-LABEL: omp.declare_reduction
+!CHECK-SAME: @[[ADD_RED_I32_NAME:.*]] : i32 init {
+!CHECK: ^bb0(%{{.*}}: i32):
+!CHECK: %[[C0_1:.*]] = arith.constant 0 : i32
+!CHECK: omp.yield(%[[C0_1]] : i32)
+!CHECK: } combiner {
+!CHECK: ^bb0(%[[ARG0:.*]]: i32, %[[ARG1:.*]]: i32):
+!CHECK: %[[RES:.*]] = arith.addi %[[ARG0]], %[[ARG1]] : i32
+!CHECK: omp.yield(%[[RES]] : i32)
+!CHECK: }
+
+!CHECK-LABEL: func.func @_QPmultiple_reduction
+!CHECK: %[[X_REF:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_reductionEx"}
+!CHECK: %[[X_DECL:.*]]:2 = hlfir.declare %[[X_REF]] {uniq_name = "_QFmultiple_reductionEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+!CHECK: %[[Y_REF:.*]] = fir.alloca f32 {bindc_name = "y", uniq_name = "_QFmultiple_reductionEy"}
+!CHECK: %[[Y_DECL:.*]]:2 = hlfir.declare %[[Y_REF]] {uniq_name = "_QFmultiple_reductionEy"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+!CHECK: %[[Z_REF:.*]] = fir.alloca i32 {bindc_name = "z", uniq_name = "_QFmultiple_reductionEz"}
+!CHECK: %[[Z_DECL:.*]]:2 = hlfir.declare %[[Z_REF]] {uniq_name = "_QFmultiple_reductionEz"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+!CHECK: omp.wsloop reduction(
+!CHECK-SAME: @[[ADD_RED_I32_NAME]] %[[X_DECL]]#0 -> %[[PRV_X:.+]] : !fir.ref<i32>,
+!CHECK-SAME: @[[ADD_RED_F32_NAME]] %[[Y_DECL]]#0 -> %[[PRV_Y:.+]] : !fir.ref<f32>,
+!CHECK-SAME: @[[MIN_RED_I32_NAME]] %[[Z_DECL]]#0 -> %[[PRV_Z:.+]] : !fir.ref<i32>) {{.*}}{
+!CHECK: %[[PRV_X_DECL:.+]]:2 = hlfir.declare %[[PRV_X]] {{.*}} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+!CHECK: %[[PRV_Y_DECL:.+]]:2 = hlfir.declare %[[PRV_Y]] {{.*}} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
+!CHECK: %[[PRV_Z_DECL:.+]]:2 = hlfir.declare %[[PRV_Z]] {{.*}} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+!CHECK: %[[LPRV_X:.+]] = fir.load %[[PRV_X_DECL]]#0 : !fir.ref<i32>
+!CHECK: %[[RES_X:.+]] = arith.addi %[[LPRV_X]], %{{.+}} : i32
+!CHECK: hlfir.assign %[[RES_X]] to %[[PRV_X_DECL]]#0 : i32, !fir.ref<i32>
+!CHECK: %[[LPRV_Y:.+]] = fir.load %[[PRV_Y_DECL]]#0 : !fir.ref<f32>
+!CHECK: %[[RES_Y:.+]] = arith.addf %[[LPRV_Y]], %{{.+}} : f32
+!CHECK: hlfir.assign %[[RES_Y]] to %[[PRV_Y_DECL]]#0 : f32, !fir.ref<f32>
+!CHECK: %[[LPRV_Z:.+]] = fir.load %[[PRV_Z_DECL]]#0 : !fir.ref<i32>
+!CHECK: %[[RES_Z:.+]] = arith.select %{{.+}}, %[[LPRV_Z]], %{{.+}} : i32
+!CHECK: hlfir.assign %[[RES_Z]] to %[[PRV_Z_DECL]]#0 : i32, !fir.ref<i32>
+!CHECK: omp.yield
+!CHECK: }
+!CHECK: return
+subroutine multiple_reduction(v)
+ implicit none
+ integer, intent(in) :: v(:)
+ integer :: i
+ integer :: x
+ real :: y
+ integer:: z
+ x = 0
+ y = 0.0
+ z = 10
+
+ !$omp do reduction(+:x,y) reduction(min:z)
+ do i=1, 100
+ x = x + v(i)
+ y = y + 1.5 * v(i)
+ z = min(z, v(i))
+ end do
+ !$omp end do
+end subroutine
diff --git a/flang/test/Lower/cray-pointer.f90 b/flang/test/Lower/cray-pointer.f90
index 4e9f49daab4e..06910bce35a1 100644
--- a/flang/test/Lower/cray-pointer.f90
+++ b/flang/test/Lower/cray-pointer.f90
@@ -264,8 +264,8 @@ subroutine cray_array()
! CHECK: %[[data:.*]] = fir.alloca !fir.array<5xi32> {{.*}}
! CHECK: %[[c3:.*]] = arith.constant 3 : index
! CHECK: %[[k:.*]] = fir.alloca !fir.array<3xi32> {{.*}}
-! CHECK: %[[c31:.*]] = arith.constant 3 : index
! CHECK: %[[ptr:.*]] = fir.alloca i64 {{.*}}
+! CHECK: %[[c31:.*]] = arith.constant 3 : index
! CHECK: %[[c2:.*]] = arith.constant 2 : i64
! CHECK: %[[c1:.*]] = arith.constant 1 : i64
! CHECK: %[[sub:.*]] = arith.subi %[[c2]], %[[c1]] : i64
@@ -327,8 +327,8 @@ subroutine cray_arraySection()
! CHECK: %[[data:.*]] = fir.alloca !fir.array<5xi32> {{.*}}
! CHECK: %[[c2:.*]] = arith.constant 2 : index
! CHECK: %[[k:.*]] = fir.alloca !fir.array<2xi32> {{.*}}
-! CHECK: %[[c3:.*]] = arith.constant 3 : index
! CHECK: %[[ptr:.*]] = fir.alloca i64 {{.*}}
+! CHECK: %[[c3:.*]] = arith.constant 3 : index
! CHECK: %[[c1:.*]] = arith.constant 2 : i64
! CHECK: %[[c0:.*]] = arith.constant 1 : i64
! CHECK: %[[sub:.*]] = arith.subi %[[c1]], %[[c0]] : i64
diff --git a/flang/test/Parser/unrecognized-dir.f90 b/flang/test/Parser/unrecognized-dir.f90
new file mode 100644
index 000000000000..ba6fff7562e2
--- /dev/null
+++ b/flang/test/Parser/unrecognized-dir.f90
@@ -0,0 +1,4 @@
+! RUN: %flang_fc1 -fsyntax-only %s 2>&1 | FileCheck %s
+!CHECK: warning: Compiler directive was ignored
+!DIR$ Not a recognized directive
+end
diff --git a/flang/test/Semantics/OpenMP/do20.f90 b/flang/test/Semantics/OpenMP/do20.f90
new file mode 100644
index 000000000000..915d01e69edd
--- /dev/null
+++ b/flang/test/Semantics/OpenMP/do20.f90
@@ -0,0 +1,18 @@
+! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
+
+! OpenMP 5.2 5.1.1
+! Iteration variables of non-associated loops may be listed in DSA clauses.
+
+!DEF: /shared_iv (Subroutine)Subprogram
+subroutine shared_iv
+ !DEF: /shared_iv/i ObjectEntity INTEGER(4)
+ integer i
+
+ !$omp parallel shared(i)
+ !$omp single
+ !REF: /shared_iv/i
+ do i = 0, 1
+ end do
+ !$omp end single
+ !$omp end parallel
+end subroutine
diff --git a/flang/test/Semantics/deferred01.f90 b/flang/test/Semantics/deferred01.f90
new file mode 100644
index 000000000000..87818c10bd39
--- /dev/null
+++ b/flang/test/Semantics/deferred01.f90
@@ -0,0 +1,28 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1
+! Deferred TBPs must be overridden, but when they are private, those
+! overrides must appear in the same module.
+module m1
+ type, abstract :: absBase
+ contains
+ procedure(deferredInterface), deferred, private :: deferredTbp
+ end type
+ abstract interface
+ subroutine deferredInterface(x)
+ import absBase
+ class(absBase), intent(in) :: x
+ end
+ end interface
+end
+
+module m2
+ use m1
+ type, extends(absBase) :: ext
+ contains
+ !ERROR: Override of PRIVATE DEFERRED 'deferredtbp' must appear in its module
+ procedure :: deferredTbp => implTbp
+ end type
+ contains
+ subroutine implTbp(x)
+ class(ext), intent(in) :: x
+ end
+end
diff --git a/flang/test/Semantics/init01.f90 b/flang/test/Semantics/init01.f90
index f85feef097cd..65d524b16a23 100644
--- a/flang/test/Semantics/init01.f90
+++ b/flang/test/Semantics/init01.f90
@@ -158,8 +158,10 @@ subroutine notObjects
real, external :: x1 = 1.
!ERROR: 'x2' is not a pointer but is initialized like one
real, external :: x2 => sin
+!ERROR: 'x3' is not a known intrinsic procedure
!ERROR: 'x3' is not an object that can be initialized
real, intrinsic :: x3 = 1.
+!ERROR: 'x4' is not a known intrinsic procedure
!ERROR: 'x4' is not a pointer but is initialized like one
real, intrinsic :: x4 => cos
end subroutine
diff --git a/flang/test/Semantics/resolve61.f90 b/flang/test/Semantics/resolve61.f90
index 32bf9091a856..2a1f584ffaf0 100644
--- a/flang/test/Semantics/resolve61.f90
+++ b/flang/test/Semantics/resolve61.f90
@@ -126,3 +126,12 @@ subroutine p13
pointer(ip, x) ! ok, local declaration
end
end
+
+subroutine p14
+ real :: r
+ block
+ asynchronous :: r
+ !ERROR: PARAMETER attribute not allowed on 'r'
+ parameter (r = 1.0)
+ end block
+end
diff --git a/flang/test/Semantics/resolve81.f90 b/flang/test/Semantics/resolve81.f90
index 2a0b961d48e5..87901fd7d2ef 100644
--- a/flang/test/Semantics/resolve81.f90
+++ b/flang/test/Semantics/resolve81.f90
@@ -28,6 +28,7 @@ module m
!WARNING: Attribute 'EXTERNAL' cannot be used more than once
real, external, external :: externFunc
!WARNING: Attribute 'INTRINSIC' cannot be used more than once
+ !ERROR: An interface name with BIND attribute must be specified if the BIND attribute is specified in a procedure declaration statement
real, intrinsic, bind(c), intrinsic :: cos
!WARNING: Attribute 'BIND(C)' cannot be used more than once
integer, bind(c), volatile, bind(c) :: bindVar
diff --git a/flang/test/Semantics/structconst09.f90 b/flang/test/Semantics/structconst09.f90
new file mode 100644
index 000000000000..c129f1160685
--- /dev/null
+++ b/flang/test/Semantics/structconst09.f90
@@ -0,0 +1,37 @@
+! RUN: %python %S/test_errors.py %s %flang_fc1 -pedantic
+! Structure constructors with bad pointer targets
+module m
+ real, target, save :: x
+ type t
+ real, pointer :: rp => x
+ procedure(f), pointer, nopass :: pp => f
+ end type
+ contains
+ real function f()
+ f = 0.
+ end
+ subroutine test(da, dp)
+ real, target :: y, da
+ procedure(f) dp
+ procedure(f), pointer :: lpp
+ external ext
+ type(t) :: a1 = t() ! ok
+ type(t) :: a2 = t(rp=x) ! ok
+ type(t) :: a3 = t(pp=f) ! ok
+ type(t) :: a4 = t(pp=ext) ! ok
+ !ERROR: Must be a constant value
+ type(t) :: a5 = t(rp=y)
+ !ERROR: Must be a constant value
+ type(t) :: a6 = t(rp=da)
+ !ERROR: Must be a constant value
+ type(t) :: a7 = t(pp=lpp)
+ !ERROR: Must be a constant value
+ type(t) :: a8 = t(pp=internal)
+ !ERROR: Must be a constant value
+ type(t) :: a9 = t(pp=dp)
+ contains
+ real function internal()
+ internal = 666.
+ end
+ end
+end
diff --git a/flang/test/Transforms/stack-arrays.fir b/flang/test/Transforms/stack-arrays.fir
index f4fe737e88d7..a2ffe555091e 100644
--- a/flang/test/Transforms/stack-arrays.fir
+++ b/flang/test/Transforms/stack-arrays.fir
@@ -127,9 +127,7 @@ func.func @placement1() {
return
}
// CHECK: func.func @placement1() {
-// CHECK-NEXT: %[[ONE:.*]] = arith.constant 1 : index
-// CHECK-NEXT: %[[TWO:.*]] = arith.constant 2 : index
-// CHECK-NEXT: %[[ARG:.*]] = arith.addi %[[ONE]], %[[TWO]] : index
+// CHECK-NEXT: %[[ARG:.*]] = arith.constant 3 : index
// CHECK-NEXT: %[[MEM:.*]] = fir.alloca !fir.array<?xi32>, %[[ARG]]
// CHECK-NEXT: return
// CHECK-NEXT: }
@@ -204,13 +202,12 @@ func.func @placement4(%arg0 : i1) {
// CHECK: func.func @placement4(%arg0: i1) {
// CHECK-NEXT: %[[C1:.*]] = arith.constant 1 : index
// CHECK-NEXT: %[[C1_I32:.*]] = fir.convert %[[C1]] : (index) -> i32
-// CHECK-NEXT: %[[C2:.*]] = arith.constant 2 : index
// CHECK-NEXT: %[[C10:.*]] = arith.constant 10 : index
// CHECK-NEXT: cf.br ^bb1
// CHECK-NEXT: ^bb1:
-// CHECK-NEXT: %[[SUM:.*]] = arith.addi %[[C1]], %[[C2]] : index
+// CHECK-NEXT: %[[C3:.*]] = arith.constant 3 : index
// CHECK-NEXT: %[[SP:.*]] = fir.call @llvm.stacksave.p0() : () -> !fir.ref<i8>
-// CHECK-NEXT: %[[MEM:.*]] = fir.alloca !fir.array<?xi32>, %[[SUM]]
+// CHECK-NEXT: %[[MEM:.*]] = fir.alloca !fir.array<?xi32>, %[[C3]]
// CHECK-NEXT: fir.call @llvm.stackrestore.p0(%[[SP]]) : (!fir.ref<i8>) -> ()
// CHECK-NEXT: cf.cond_br %arg0, ^bb1, ^bb2
// CHECK-NEXT: ^bb2:
diff --git a/flang/unittests/Runtime/Reduction.cpp b/flang/unittests/Runtime/Reduction.cpp
index b17988bc1769..b2661e78abdf 100644
--- a/flang/unittests/Runtime/Reduction.cpp
+++ b/flang/unittests/Runtime/Reduction.cpp
@@ -13,6 +13,7 @@
#include "flang/Runtime/allocatable.h"
#include "flang/Runtime/cpp-type.h"
#include "flang/Runtime/descriptor.h"
+#include "flang/Runtime/reduce.h"
#include "flang/Runtime/type-code.h"
#include <cstdint>
#include <cstring>
@@ -634,3 +635,39 @@ TEST(Reductions, ExtremaReal16) {
EXPECT_EQ(RTNAME(MaxvalReal16)(*maxArray, __FILE__, __LINE__), -1.0);
}
#endif // LDBL_MANT_DIG == 113 || HAS_FLOAT128
+
+static std::int32_t IAdd(const std::int32_t *x, const std::int32_t *y) {
+ return *x + *y;
+}
+
+static std::int32_t IMultiply(const std::int32_t *x, const std::int32_t *y) {
+ return *x * *y;
+}
+
+TEST(Reductions, ReduceInt4) {
+ auto intVector{MakeArray<TypeCategory::Integer, 4>(
+ std::vector<int>{4}, std::vector<std::int32_t>{1, 2, 3, 4})};
+ EXPECT_EQ(RTNAME(ReduceInteger4)(*intVector, IAdd, __FILE__, __LINE__), 10);
+ EXPECT_EQ(
+ RTNAME(ReduceInteger4)(*intVector, IMultiply, __FILE__, __LINE__), 24);
+}
+TEST(Reductions, ReduceInt4Dim) {
+ auto intMatrix{MakeArray<TypeCategory::Integer, 4>(
+ std::vector<int>{2, 2}, std::vector<std::int32_t>{1, 2, 3, 4})};
+ StaticDescriptor<1, true> statDesc;
+ Descriptor &sums{statDesc.descriptor()};
+ RTNAME(ReduceInteger4Dim)(sums, *intMatrix, IAdd, __FILE__, __LINE__, 1);
+ EXPECT_EQ(sums.rank(), 1);
+ EXPECT_EQ(sums.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(sums.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(*sums.ZeroBasedIndexedElement<std::int32_t>(0), 3);
+ EXPECT_EQ(*sums.ZeroBasedIndexedElement<std::int32_t>(1), 7);
+ sums.Destroy();
+ RTNAME(ReduceInteger4Dim)(sums, *intMatrix, IAdd, __FILE__, __LINE__, 2);
+ EXPECT_EQ(sums.rank(), 1);
+ EXPECT_EQ(sums.GetDimension(0).LowerBound(), 1);
+ EXPECT_EQ(sums.GetDimension(0).Extent(), 2);
+ EXPECT_EQ(*sums.ZeroBasedIndexedElement<std::int32_t>(0), 4);
+ EXPECT_EQ(*sums.ZeroBasedIndexedElement<std::int32_t>(1), 6);
+ sums.Destroy();
+}
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index 5bc0898298ce..40a1cfda060e 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -60,6 +60,15 @@ function(_get_common_compile_options output_var flags)
if (LIBC_CC_SUPPORTS_PATTERN_INIT)
list(APPEND compile_options "-ftrivial-auto-var-init=pattern")
endif()
+ if (LIBC_CONF_KEEP_FRAME_POINTER)
+ list(APPEND compile_options "-fno-omit-frame-pointer")
+ if (LIBC_TARGET_ARCHITECTURE_IS_X86)
+ list(APPEND compile_options "-mno-omit-leaf-frame-pointer")
+ endif()
+ endif()
+  if (LIBC_CONF_ENABLE_STRONG_STACK_PROTECTOR)
+ list(APPEND compile_options "-fstack-protector-strong")
+ endif()
list(APPEND compile_options "-Wall")
list(APPEND compile_options "-Wextra")
# -DLIBC_WNO_ERROR=ON if you can't build cleanly with -Werror.
diff --git a/libc/cmake/modules/prepare_libc_gpu_build.cmake b/libc/cmake/modules/prepare_libc_gpu_build.cmake
index bea6bb016491..20aca16990fc 100644
--- a/libc/cmake/modules/prepare_libc_gpu_build.cmake
+++ b/libc/cmake/modules/prepare_libc_gpu_build.cmake
@@ -93,6 +93,11 @@ else()
endif()
set(LIBC_GPU_TARGET_ARCHITECTURE "${gpu_test_architecture}")
+# The NVPTX backend cannot currently handle objects created in debug mode.
+if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX AND CMAKE_BUILD_TYPE STREQUAL "Debug")
+ set(LIBC_GPU_TESTS_DISABLED TRUE)
+endif()
+
# Identify the GPU loader utility used to run tests.
set(LIBC_GPU_LOADER_EXECUTABLE "" CACHE STRING "Executable for the GPU loader.")
if(LIBC_GPU_LOADER_EXECUTABLE)
diff --git a/libc/config/config.json b/libc/config/config.json
index b73c47b1a14b..d6ef891b9f26 100644
--- a/libc/config/config.json
+++ b/libc/config/config.json
@@ -30,5 +30,15 @@
"value": false,
"doc": "Inserts prefetch for write instructions (PREFETCHW) for memset on x86 to recover performance when hardware prefetcher is disabled."
}
+ },
+ "codegen": {
+ "LIBC_CONF_KEEP_FRAME_POINTER": {
+ "value": true,
+ "doc": "Keep frame pointer in functions for better debugging experience."
+ },
+ "LIBC_CONF_ENABLE_STRONG_STACK_PROTECTOR": {
+ "value": true,
+ "doc": "Enable -fstack-protector-strong to defend against stack smashing attack."
+ }
}
}
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index dbf81c284e78..78da7f0b334b 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -195,6 +195,7 @@ set(TARGET_LIBC_ENTRYPOINTS
# stdio.h entrypoints
libc.src.stdio.remove
+ libc.src.stdio.rename
libc.src.stdio.sprintf
libc.src.stdio.snprintf
libc.src.stdio.vsprintf
@@ -365,6 +366,30 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fmin
libc.src.math.fminf
libc.src.math.fminl
+ libc.src.math.fmaximum
+ libc.src.math.fmaximumf
+ libc.src.math.fmaximuml
+ libc.src.math.fmaximum_num
+ libc.src.math.fmaximum_numf
+ libc.src.math.fmaximum_numl
+ libc.src.math.fmaximum_mag
+ libc.src.math.fmaximum_magf
+ libc.src.math.fmaximum_magl
+ libc.src.math.fmaximum_mag_num
+ libc.src.math.fmaximum_mag_numf
+ libc.src.math.fmaximum_mag_numl
+ libc.src.math.fminimum
+ libc.src.math.fminimumf
+ libc.src.math.fminimuml
+ libc.src.math.fminimum_num
+ libc.src.math.fminimum_numf
+ libc.src.math.fminimum_numl
+ libc.src.math.fminimum_mag
+ libc.src.math.fminimum_magf
+ libc.src.math.fminimum_magl
+ libc.src.math.fminimum_mag_num
+ libc.src.math.fminimum_mag_numf
+ libc.src.math.fminimum_mag_numl
libc.src.math.fmod
libc.src.math.fmodf
libc.src.math.fmodl
@@ -465,6 +490,14 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.floorf128
libc.src.math.fmaxf128
libc.src.math.fminf128
+ libc.src.math.fmaximumf128
+ libc.src.math.fmaximum_numf128
+ libc.src.math.fmaximum_magf128
+ libc.src.math.fmaximum_mag_numf128
+ libc.src.math.fminimumf128
+ libc.src.math.fminimum_numf128
+ libc.src.math.fminimum_magf128
+ libc.src.math.fminimum_mag_numf128
libc.src.math.fmodf128
libc.src.math.frexpf128
libc.src.math.ilogbf128
diff --git a/libc/config/linux/api.td b/libc/config/linux/api.td
index e9e82c5d4789..eb5ed8089850 100644
--- a/libc/config/linux/api.td
+++ b/libc/config/linux/api.td
@@ -262,3 +262,7 @@ def SetJmpAPI : PublicAPI<"setjmp.h"> {
def SearchAPI : PublicAPI<"search.h"> {
let Types = ["ACTION", "ENTRY", "struct hsearch_data"];
}
+
+def SysStatvfsAPI : PublicAPI<"sys/statvfs.h"> {
+ let Types = ["fsblkcnt_t", "fsfilcnt_t", "struct statvfs"];
+}
diff --git a/libc/config/linux/arm/entrypoints.txt b/libc/config/linux/arm/entrypoints.txt
index 3bc5d8efc9d2..6e63e270280e 100644
--- a/libc/config/linux/arm/entrypoints.txt
+++ b/libc/config/linux/arm/entrypoints.txt
@@ -234,6 +234,30 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fmin
libc.src.math.fminf
libc.src.math.fminl
+ libc.src.math.fmaximum
+ libc.src.math.fmaximumf
+ libc.src.math.fmaximuml
+ libc.src.math.fmaximum_num
+ libc.src.math.fmaximum_numf
+ libc.src.math.fmaximum_numl
+ libc.src.math.fmaximum_mag
+ libc.src.math.fmaximum_magf
+ libc.src.math.fmaximum_magl
+ libc.src.math.fmaximum_mag_num
+ libc.src.math.fmaximum_mag_numf
+ libc.src.math.fmaximum_mag_numl
+ libc.src.math.fminimum
+ libc.src.math.fminimumf
+ libc.src.math.fminimuml
+ libc.src.math.fminimum_num
+ libc.src.math.fminimum_numf
+ libc.src.math.fminimum_numl
+ libc.src.math.fminimum_mag
+ libc.src.math.fminimum_magf
+ libc.src.math.fminimum_magl
+ libc.src.math.fminimum_mag_num
+ libc.src.math.fminimum_mag_numf
+ libc.src.math.fminimum_mag_numl
libc.src.math.fmod
libc.src.math.fmodf
libc.src.math.frexp
diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt
index b42a55a4d712..5aae4e246cfb 100644
--- a/libc/config/linux/riscv/entrypoints.txt
+++ b/libc/config/linux/riscv/entrypoints.txt
@@ -196,6 +196,7 @@ set(TARGET_LIBC_ENTRYPOINTS
# stdio.h entrypoints
libc.src.stdio.remove
+ libc.src.stdio.rename
libc.src.stdio.sprintf
libc.src.stdio.snprintf
libc.src.stdio.fprintf
@@ -373,6 +374,30 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fmax
libc.src.math.fmaxf
libc.src.math.fmaxl
+ libc.src.math.fmaximum
+ libc.src.math.fmaximumf
+ libc.src.math.fmaximuml
+ libc.src.math.fmaximum_num
+ libc.src.math.fmaximum_numf
+ libc.src.math.fmaximum_numl
+ libc.src.math.fmaximum_mag
+ libc.src.math.fmaximum_magf
+ libc.src.math.fmaximum_magl
+ libc.src.math.fmaximum_mag_num
+ libc.src.math.fmaximum_mag_numf
+ libc.src.math.fmaximum_mag_numl
+ libc.src.math.fminimum
+ libc.src.math.fminimumf
+ libc.src.math.fminimuml
+ libc.src.math.fminimum_num
+ libc.src.math.fminimum_numf
+ libc.src.math.fminimum_numl
+ libc.src.math.fminimum_mag
+ libc.src.math.fminimum_magf
+ libc.src.math.fminimum_magl
+ libc.src.math.fminimum_mag_num
+ libc.src.math.fminimum_mag_numf
+ libc.src.math.fminimum_mag_numl
libc.src.math.fmod
libc.src.math.fmodf
libc.src.math.fmodl
@@ -473,6 +498,14 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.floorf128
libc.src.math.fmaxf128
libc.src.math.fminf128
+ libc.src.math.fmaximumf128
+ libc.src.math.fmaximum_numf128
+ libc.src.math.fmaximum_magf128
+ libc.src.math.fmaximum_mag_numf128
+ libc.src.math.fminimumf128
+ libc.src.math.fminimum_numf128
+ libc.src.math.fminimum_magf128
+ libc.src.math.fminimum_mag_numf128
libc.src.math.fmodf128
libc.src.math.frexpf128
libc.src.math.ilogbf128
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index c216f4349627..5b428e51aee6 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -180,7 +180,9 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.qsort_r
libc.src.stdlib.rand
libc.src.stdlib.srand
+ libc.src.stdlib.strfromd
libc.src.stdlib.strfromf
+ libc.src.stdlib.strfroml
libc.src.stdlib.strtod
libc.src.stdlib.strtof
libc.src.stdlib.strtol
@@ -198,6 +200,7 @@ set(TARGET_LIBC_ENTRYPOINTS
# stdio.h entrypoints
libc.src.stdio.remove
+ libc.src.stdio.rename
libc.src.stdio.sprintf
libc.src.stdio.snprintf
libc.src.stdio.fprintf
@@ -254,6 +257,10 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.sys.stat.mkdirat
libc.src.sys.stat.stat
+ # sys/statvfs.h
+ libc.src.sys.statvfs.statvfs
+ libc.src.sys.statvfs.fstatvfs
+
# sys/utsname.h entrypoints
libc.src.sys.utsname.uname
@@ -343,6 +350,9 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.asinhf
libc.src.math.atanf
libc.src.math.atanhf
+ libc.src.math.canonicalize
+ libc.src.math.canonicalizef
+ libc.src.math.canonicalizel
libc.src.math.copysign
libc.src.math.copysignf
libc.src.math.copysignl
@@ -378,12 +388,42 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fmax
libc.src.math.fmaxf
libc.src.math.fmaxl
+ libc.src.math.fmaximum
+ libc.src.math.fmaximumf
+ libc.src.math.fmaximuml
+ libc.src.math.fmaximum_num
+ libc.src.math.fmaximum_numf
+ libc.src.math.fmaximum_numl
+ libc.src.math.fmaximum_mag
+ libc.src.math.fmaximum_magf
+ libc.src.math.fmaximum_magl
+ libc.src.math.fmaximum_mag_num
+ libc.src.math.fmaximum_mag_numf
+ libc.src.math.fmaximum_mag_numl
+ libc.src.math.fminimum
+ libc.src.math.fminimumf
+ libc.src.math.fminimuml
+ libc.src.math.fminimum_num
+ libc.src.math.fminimum_numf
+ libc.src.math.fminimum_numl
+ libc.src.math.fminimum_mag
+ libc.src.math.fminimum_magf
+ libc.src.math.fminimum_magl
+ libc.src.math.fminimum_mag_num
+ libc.src.math.fminimum_mag_numf
+ libc.src.math.fminimum_mag_numl
libc.src.math.fmod
libc.src.math.fmodf
libc.src.math.fmodl
libc.src.math.frexp
libc.src.math.frexpf
libc.src.math.frexpl
+ libc.src.math.fromfp
+ libc.src.math.fromfpf
+ libc.src.math.fromfpl
+ libc.src.math.fromfpx
+ libc.src.math.fromfpxf
+ libc.src.math.fromfpxl
libc.src.math.hypot
libc.src.math.hypotf
libc.src.math.ilogb
@@ -468,11 +508,18 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.trunc
libc.src.math.truncf
libc.src.math.truncl
+ libc.src.math.ufromfp
+ libc.src.math.ufromfpf
+ libc.src.math.ufromfpl
+ libc.src.math.ufromfpx
+ libc.src.math.ufromfpxf
+ libc.src.math.ufromfpxl
)
if(LIBC_TYPES_HAS_FLOAT128)
list(APPEND TARGET_LIBM_ENTRYPOINTS
# math.h C23 _Float128 entrypoints
+ libc.src.math.canonicalizef128
libc.src.math.ceilf128
libc.src.math.copysignf128
libc.src.math.fabsf128
@@ -480,8 +527,18 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.floorf128
libc.src.math.fmaxf128
libc.src.math.fminf128
+ libc.src.math.fmaximumf128
+ libc.src.math.fmaximum_numf128
+ libc.src.math.fmaximum_magf128
+ libc.src.math.fmaximum_mag_numf128
+ libc.src.math.fminimumf128
+ libc.src.math.fminimum_numf128
+ libc.src.math.fminimum_magf128
+ libc.src.math.fminimum_mag_numf128
libc.src.math.fmodf128
libc.src.math.frexpf128
+ libc.src.math.fromfpf128
+ libc.src.math.fromfpxf128
libc.src.math.ilogbf128
libc.src.math.ldexpf128
libc.src.math.llogbf128
@@ -499,6 +556,8 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.roundf128
libc.src.math.sqrtf128
libc.src.math.truncf128
+ libc.src.math.ufromfpf128
+ libc.src.math.ufromfpxf128
)
endif()
diff --git a/libc/config/windows/entrypoints.txt b/libc/config/windows/entrypoints.txt
index d6227a427afe..f4456f561ec0 100644
--- a/libc/config/windows/entrypoints.txt
+++ b/libc/config/windows/entrypoints.txt
@@ -153,6 +153,30 @@ set(TARGET_LIBM_ENTRYPOINTS
libc.src.math.fmax
libc.src.math.fmaxf
libc.src.math.fmaxl
+ libc.src.math.fmaximum
+ libc.src.math.fmaximumf
+ libc.src.math.fmaximuml
+ libc.src.math.fmaximum_num
+ libc.src.math.fmaximum_numf
+ libc.src.math.fmaximum_numl
+ libc.src.math.fmaximum_mag
+ libc.src.math.fmaximum_magf
+ libc.src.math.fmaximum_magl
+ libc.src.math.fmaximum_mag_num
+ libc.src.math.fmaximum_mag_numf
+ libc.src.math.fmaximum_mag_numl
+ libc.src.math.fminimum
+ libc.src.math.fminimumf
+ libc.src.math.fminimuml
+ libc.src.math.fminimum_num
+ libc.src.math.fminimum_numf
+ libc.src.math.fminimum_numl
+ libc.src.math.fminimum_mag
+ libc.src.math.fminimum_magf
+ libc.src.math.fminimum_magl
+ libc.src.math.fminimum_mag_num
+ libc.src.math.fminimum_mag_numf
+ libc.src.math.fminimum_mag_numl
libc.src.math.fmod
libc.src.math.fmodf
libc.src.math.fmodl
diff --git a/libc/docs/configure.rst b/libc/docs/configure.rst
index a177550647bd..8f8c44caa115 100644
--- a/libc/docs/configure.rst
+++ b/libc/docs/configure.rst
@@ -25,6 +25,9 @@ See the main ``config/config.json``, and the platform and architecture specific
overrides in ``config/<platform>/config.json`` and ``config/<platform>/<arch>/config.json,``
to learn about the defaults for your platform and target.
+* **"codegen" options**
+ - ``LIBC_CONF_ENABLE_STRONG_STACK_PROTECTOR``: Enable -fstack-protector-strong to defend against stack smashing attack.
+ - ``LIBC_CONF_KEEP_FRAME_POINTER``: Keep frame pointer in functions for better debugging experience.
* **"printf" options**
- ``LIBC_CONF_PRINTF_DISABLE_FIXED_POINT``: Disable printing fixed point values in printf and friends.
- ``LIBC_CONF_PRINTF_DISABLE_FLOAT``: Disable printing floating point values in printf and friends.
diff --git a/libc/docs/dev/code_style.rst b/libc/docs/dev/code_style.rst
index e6fc6df5a0f6..22a18b7a4cc1 100644
--- a/libc/docs/dev/code_style.rst
+++ b/libc/docs/dev/code_style.rst
@@ -55,7 +55,7 @@ We define two kinds of macros:
* ``src/__support/macros/config.h`` - Important compiler and platform
features. Such macros can be used to produce portable code by
parameterizing compilation based on the presence or lack of a given
- feature. e.g., ``LIBC_HAS_BUILTIN``
+ feature. e.g., ``LIBC_HAS_FEATURE``
* ``src/__support/macros/attributes.h`` - Attributes for functions, types,
and variables. e.g., ``LIBC_UNUSED``
* ``src/__support/macros/optimization.h`` - Portable macros for performance
diff --git a/libc/docs/dev/printf_behavior.rst b/libc/docs/dev/printf_behavior.rst
index 9548bfda57aa..c8b8ad45e987 100644
--- a/libc/docs/dev/printf_behavior.rst
+++ b/libc/docs/dev/printf_behavior.rst
@@ -173,6 +173,10 @@ If a number passed as a min width or precision value is out of range for an int,
then it will be treated as the largest or smallest value in the int range
(e.g. "%-999999999999.999999999999s" is the same as "%-2147483648.2147483647s").
+If a number passed as a bit width is less than or equal to zero, the conversion
+is considered invalid. If the provided bit width is larger than the width of
+uintmax_t, it will be clamped to the width of uintmax_t.
+
----------
Conversion
----------
diff --git a/libc/docs/dev/undefined_behavior.rst b/libc/docs/dev/undefined_behavior.rst
index 50e8bdde89dd..c97a539ca8da 100644
--- a/libc/docs/dev/undefined_behavior.rst
+++ b/libc/docs/dev/undefined_behavior.rst
@@ -81,3 +81,11 @@ The C standard does not specify behavior for ``printf("%s", NULL)``. We will
print the string literal ``(null)`` unless using the
``LIBC_COPT_PRINTF_NO_NULLPTR_CHECKS`` option described in :ref:`printf
behavior<printf_behavior>`.
+
+Unknown Math Rounding Direction
+-------------------------------
+The C23 standard states that if the value of the ``rnd`` argument of the
+``fromfp``, ``ufromfp``, ``fromfpx`` and ``ufromfpx`` functions is not equal to
+the value of a math rounding direction macro, the direction of rounding is
+unspecified. LLVM's libc chooses to use the ``FP_INT_TONEAREST`` rounding
+direction in this case.
diff --git a/libc/docs/gpu/rpc.rst b/libc/docs/gpu/rpc.rst
index 9d6d8099db95..e13a377f305c 100644
--- a/libc/docs/gpu/rpc.rst
+++ b/libc/docs/gpu/rpc.rst
@@ -251,14 +251,10 @@ but the following example shows how it can be used by a standard user.
__global__ void hello() { puts("Hello world!"); }
int main() {
- int device = 0;
- // Initialize the RPC server to run on a single device.
- if (rpc_status_t err = rpc_init(/*num_device=*/1))
- handle_error(err);
-
// Initialize the RPC server to run on the given device.
+ rpc_device_t device;
if (rpc_status_t err =
- rpc_server_init(device, RPC_MAXIMUM_PORT_COUNT,
+ rpc_server_init(&device, RPC_MAXIMUM_PORT_COUNT,
/*warp_size=*/32, alloc_host, /*data=*/nullptr))
handle_error(err);
@@ -277,6 +273,7 @@ but the following example shows how it can be used by a standard user.
hello<<<1, 1, 0, stream>>>();
// While the kernel is executing, check the RPC server for work to do.
+ // Requires non-blocking CUDA kernels but avoids a separate thread.
while (cudaStreamQuery(stream) == cudaErrorNotReady)
if (rpc_status_t err = rpc_handle_server(device))
handle_error(err);
@@ -286,10 +283,6 @@ but the following example shows how it can be used by a standard user.
rpc_server_shutdown(device, free_host, /*data=*/nullptr))
handle_error(err);
- // Shut down the entire RPC server interface.
- if (rpc_status_t err = rpc_shutdown())
- handle_error(err);
-
return EXIT_SUCCESS;
}
@@ -300,7 +293,7 @@ associated with relocatable device code linking.
.. code-block:: sh
- $> clang++ -x cuda rpc.cpp --offload-arch=native -fgpu-rdc -lcudart -lcgpu \
+ $> clang++ -x cuda rpc.cpp --offload-arch=native -fgpu-rdc -lcudart -lcgpu-nvptx \
-I<install-path>include -L<install-path>/lib -lllvmlibc_rpc_server \
-O3 -foffload-lto -o hello
$> ./hello
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index d337d060fb5d..080b6a4427f5 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -104,241 +104,282 @@ Implementation Status
Basic Operations
----------------
-+--------------+---------------------------------------+-------------------+-------------------+-------------------+-------------------+
-| <Func> | Linux | Windows | MacOS | Embedded | GPU |
-| +---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| | x86_64 | aarch64 | aarch32 | riscv64 | x86_64 | aarch64 | x86_64 | aarch64 | aarch32 | riscv32 | AMD | nVidia |
-+==============+=========+=========+=========+=========+=========+=========+=========+=========+=========+=========+=========+=========+
-| ceil | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ceilf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ceill | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ceilf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| copysign | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| copysignf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| copysignl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| copysignf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fabs | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fabsf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fabsl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fabsf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fdim | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fdimf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fdiml | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fdimf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| floor | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| floorf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| floorl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| floorf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmax | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmaxf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmaxf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmaxl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmin | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fminf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fminf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fminl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmod | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmodf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmodl | |check| | |check| | | |check| | |check| | | | |check| | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| fmodf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| frexp | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| frexpf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| frexpl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| frexpf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ilogb | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ilogbf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ilogbl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ilogf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ldexp | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ldexpf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ldexpl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| ldexpf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llogb | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llogbf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llogbl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llogf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llrint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llrintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llrintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llrintf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llround | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llroundf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llroundl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| llroundf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| logb | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| logbf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| logbl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| logf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lrint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lrintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lrintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lrintf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lround | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lroundf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lroundl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| lroundf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| modf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| modff | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| modfl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| modff128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nan | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nanf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nanl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nanf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nearbyint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nearbyintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nearbyintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextafter | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextafterf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextafterl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextafterf128| |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextdown | |check| | |check| | |check| | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextdownf | |check| | |check| | |check| | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextdownl | |check| | |check| | |check| | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextdownf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nexttoward | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nexttowardf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nexttowardl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextup | |check| | |check| | |check| | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextupf | |check| | |check| | |check| | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextupl | |check| | |check| | |check| | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| nextupf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| remainder | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| remainderf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| remainderl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| remquo | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| remquof | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| remquol | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| rint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| rintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| rintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| rintf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| round | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| roundf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| roundl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| roundf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| scalbn | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| scalbnf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| scalbnl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| trunc | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| truncf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| truncl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
-| truncf128 | |check| | |check| | | |check| | | | | | | | | |
-+--------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+
++------------------+---------------------------------------+-------------------+-------------------+-------------------+-------------------+
+| <Func> | Linux | Windows | MacOS | Embedded | GPU |
+| +---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| | x86_64 | aarch64 | aarch32 | riscv64 | x86_64 | aarch64 | x86_64 | aarch64 | aarch32 | riscv32 | AMD | nVidia |
++==================+=========+=========+=========+=========+=========+=========+=========+=========+=========+=========+=========+=========+
+| ceil | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ceilf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ceill | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ceilf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| canonicalize     | |check| | |check| | |check| | |check| | |check| |         |         | |check| | |check| | |check| |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| canonicalizef    | |check| | |check| | |check| | |check| | |check| |         |         | |check| | |check| | |check| |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| canonicalizel    | |check| | |check| | |check| | |check| | |check| |         |         | |check| | |check| | |check| |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| canonicalizef128 | |check| | |check| |         | |check| |         |         |         |         |         |         |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| copysign | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| copysignf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| copysignl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| copysignf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fabs | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fabsf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fabsl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fabsf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fdim | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fdimf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fdiml | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fdimf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| floor | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| floorf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| floorl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| floorf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmax | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmaxf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmaxf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmaxl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmin | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fminf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fminf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fminl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmod | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmodf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmodl | |check| | |check| | | |check| | |check| | | | |check| | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fmodf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| frexp | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| frexpf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| frexpl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| frexpf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfp | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpf | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpl | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpf128 | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpx | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpxf | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpxl | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| fromfpxf128 | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ilogb | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ilogbf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ilogbl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ilogbf128        | |check| | |check| |         | |check| |         |         |         |         |         |         |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ldexp | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ldexpf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ldexpl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ldexpf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llogb | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llogbf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llogbl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llogbf128        | |check| | |check| |         | |check| |         |         |         |         |         |         |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llrint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llrintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llrintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llrintf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llround | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llroundf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llroundl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| llroundf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| logb | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| logbf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| logbl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| logbf128         | |check| | |check| |         | |check| |         |         |         |         |         |         |         |         |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lrint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lrintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lrintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lrintf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lround | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lroundf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lroundl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| lroundf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| modf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| modff | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| modfl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| modff128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nan | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nanf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nanl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nanf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nearbyint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nearbyintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nearbyintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextafter | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextafterf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextafterl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextafterf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextdown | |check| | |check| | |check| | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextdownf | |check| | |check| | |check| | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextdownl | |check| | |check| | |check| | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextdownf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nexttoward | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nexttowardf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nexttowardl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextup | |check| | |check| | |check| | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextupf | |check| | |check| | |check| | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextupl | |check| | |check| | |check| | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| nextupf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| remainder | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| remainderf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| remainderl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| remquo | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| remquof | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| remquol | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| rint | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| rintf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| rintl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| rintf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| round | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| roundf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| roundl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| roundf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| scalbn | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| scalbnf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| scalbnl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| trunc | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| truncf | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| truncl | |check| | |check| | |check| | |check| | |check| | | | |check| | |check| | |check| | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| truncf128 | |check| | |check| | | |check| | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfp | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpf | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpl | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpf128 | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpx | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpxf | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpxl | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
+| ufromfpxf128 | |check| | | | | | | | | | | | |
++------------------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+---------+
Higher Math Functions
diff --git a/libc/docs/stdio.rst b/libc/docs/stdio.rst
index 4fd6b71a0917..d17821562c25 100644
--- a/libc/docs/stdio.rst
+++ b/libc/docs/stdio.rst
@@ -68,7 +68,7 @@ These functions operate on files on the host's system, without using the
Function_Name Available
============= =========
remove |check|
-rename
+rename |check|
tmpnam
============= =========
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index b2cb10459c53..4203f0bc901b 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -499,6 +499,15 @@ add_gen_header(
)
add_gen_header(
+ sys_statvfs
+ DEF_FILE sys/statvfs.h.def
+ GEN_HDR sys/statvfs.h
+ DEPENDS
+ .llvm_libc_common_h
+ .llvm-libc-types.struct_statvfs
+)
+
+add_gen_header(
sys_syscall
DEF_FILE sys/syscall.h.def
GEN_HDR sys/syscall.h
diff --git a/libc/include/arpa/inet.h.def b/libc/include/arpa/inet.h.def
index fdd5ae3e3f85..6a62b2c7be81 100644
--- a/libc/include/arpa/inet.h.def
+++ b/libc/include/arpa/inet.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_ARPA_INET_H
#define LLVM_LIBC_ARPA_INET_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
#include <inttypes.h>
diff --git a/libc/include/assert.h.def b/libc/include/assert.h.def
index e5d7dfbffdbb..e006133a7654 100644
--- a/libc/include/assert.h.def
+++ b/libc/include/assert.h.def
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
// This file may be usefully included multiple times to change assert()'s
// definition based on NDEBUG.
diff --git a/libc/include/ctype.h.def b/libc/include/ctype.h.def
index ac52a36bf72f..a9bb786931ea 100644
--- a/libc/include/ctype.h.def
+++ b/libc/include/ctype.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_CTYPE_H
#define LLVM_LIBC_CTYPE_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/dirent.h.def b/libc/include/dirent.h.def
index 3de8b1c6713f..6786578fbd06 100644
--- a/libc/include/dirent.h.def
+++ b/libc/include/dirent.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_DIRENT_H
#define LLVM_LIBC_DIRENT_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/errno.h.def b/libc/include/errno.h.def
index 90bd8bfecf2f..d7ae90ad4524 100644
--- a/libc/include/errno.h.def
+++ b/libc/include/errno.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_ERRNO_H
#define LLVM_LIBC_ERRNO_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
#ifdef __linux__
@@ -40,7 +40,7 @@
#endif // ENOTRECOVERABLE
#else // __linux__
-#include <llvm-libc-macros/generic-error-number-macros.h>
+#include "llvm-libc-macros/generic-error-number-macros.h"
#endif
#if !defined(__AMDGPU__) && !defined(__NVPTX__)
diff --git a/libc/include/fcntl.h.def b/libc/include/fcntl.h.def
index b11645d18c5b..4f608845ce1e 100644
--- a/libc/include/fcntl.h.def
+++ b/libc/include/fcntl.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_FCNTL_H
#define LLVM_LIBC_FCNTL_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/fcntl-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/fcntl-macros.h"
%%public_api()
diff --git a/libc/include/features.h.def b/libc/include/features.h.def
index 64205f57acb5..238b88d4b90f 100644
--- a/libc/include/features.h.def
+++ b/libc/include/features.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_FEATURES_H
#define LLVM_LIBC_FEATURES_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/features-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/features-macros.h"
%%public_api()
diff --git a/libc/include/fenv.h.def b/libc/include/fenv.h.def
index f131a44914ab..c677b2a5930d 100644
--- a/libc/include/fenv.h.def
+++ b/libc/include/fenv.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_FENV_H
#define LLVM_LIBC_FENV_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/fenv-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/fenv-macros.h"
%%public_api()
diff --git a/libc/include/float.h.def b/libc/include/float.h.def
index 6d3599d78c69..3bcd7f5e3f98 100644
--- a/libc/include/float.h.def
+++ b/libc/include/float.h.def
@@ -9,6 +9,6 @@
#ifndef LLVM_LIBC_FLOAT_H
#define LLVM_LIBC_FLOAT_H
-#include <llvm-libc-macros/float-macros.h>
+#include "llvm-libc-macros/float-macros.h"
#endif // LLVM_LIBC_FLOAT_H
diff --git a/libc/include/gpu/rpc.h.def b/libc/include/gpu/rpc.h.def
index 0438cd65e7be..72acf0c81422 100644
--- a/libc/include/gpu/rpc.h.def
+++ b/libc/include/gpu/rpc.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_GPU_RPC_H
#define LLVM_LIBC_GPU_RPC_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-types/rpc_opcodes_t.h>
+#include "llvm-libc-types/rpc_opcodes_t.h"
%%public_api()
diff --git a/libc/include/inttypes.h.def b/libc/include/inttypes.h.def
index a99d4e931f51..5879d2d8e041 100644
--- a/libc/include/inttypes.h.def
+++ b/libc/include/inttypes.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_INTTYPES_H
#define LLVM_LIBC_INTTYPES_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/inttypes-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/inttypes-macros.h"
#include <stdint.h>
%%public_api()
diff --git a/libc/include/limits.h.def b/libc/include/limits.h.def
index de5f3490459e..c37c97c69a84 100644
--- a/libc/include/limits.h.def
+++ b/libc/include/limits.h.def
@@ -9,6 +9,6 @@
#ifndef LLVM_LIBC_LIMITS_H
#define LLVM_LIBC_LIMITS_H
-#include <llvm-libc-macros/limits-macros.h>
+#include "llvm-libc-macros/limits-macros.h"
#endif // LLVM_LIBC_LIMITS_H
diff --git a/libc/include/llvm-libc-macros/containerof-macro.h b/libc/include/llvm-libc-macros/containerof-macro.h
index 62724abd3b0f..592acd6e3aa9 100644
--- a/libc/include/llvm-libc-macros/containerof-macro.h
+++ b/libc/include/llvm-libc-macros/containerof-macro.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
#define LLVM_LIBC_MACROS_CONTAINEROF_MACRO_H
-#include <llvm-libc-macros/offsetof-macro.h>
+#include "llvm-libc-macros/offsetof-macro.h"
#define __containerof(ptr, type, member) \
({ \
diff --git a/libc/include/llvm-libc-macros/math-macros.h b/libc/include/llvm-libc-macros/math-macros.h
index db8a4ea65bd6..1497e32044e9 100644
--- a/libc/include/llvm-libc-macros/math-macros.h
+++ b/libc/include/llvm-libc-macros/math-macros.h
@@ -17,6 +17,12 @@
#define FP_SUBNORMAL 3
#define FP_NORMAL 4
+#define FP_INT_UPWARD 0
+#define FP_INT_DOWNWARD 1
+#define FP_INT_TOWARDZERO 2
+#define FP_INT_TONEARESTFROMZERO 3
+#define FP_INT_TONEAREST 4
+
#define MATH_ERRNO 1
#define MATH_ERREXCEPT 2
@@ -25,14 +31,15 @@
#define NAN __builtin_nanf("")
#define FP_ILOGB0 (-INT_MAX - 1)
-#define FP_ILOGBNAN INT_MAX
-
#define FP_LLOGB0 (-LONG_MAX - 1)
-#define FP_LLOGBNAN LONG_MAX
-#define isfinite(x) __builtin_isfinite(x)
-#define isinf(x) __builtin_isinf(x)
-#define isnan(x) __builtin_isnan(x)
+#ifdef __FP_LOGBNAN_MIN
+#define FP_ILOGBNAN (-INT_MAX - 1)
+#define FP_LLOGBNAN (-LONG_MAX - 1)
+#else
+#define FP_ILOGBNAN INT_MAX
+#define FP_LLOGBNAN LONG_MAX
+#endif
#ifdef __FAST_MATH__
#define math_errhandling 0
@@ -44,4 +51,32 @@
#define math_errhandling (MATH_ERRNO | MATH_ERREXCEPT)
#endif
+// These must be type-generic functions. The C standard specifies them as
+// being macros rather than functions, in fact. However, in C++ it's important
+// that there be function declarations that don't interfere with other uses of
+// the identifier, even in places with parentheses where a function-like macro
+// will be expanded (such as a function declaration in a C++ namespace).
+
+#ifdef __cplusplus
+
+template <typename T> inline constexpr bool isfinite(T x) {
+ return __builtin_isfinite(x);
+}
+
+template <typename T> inline constexpr bool isinf(T x) {
+ return __builtin_isinf(x);
+}
+
+template <typename T> inline constexpr bool isnan(T x) {
+ return __builtin_isnan(x);
+}
+
+#else
+
+#define isfinite(x) __builtin_isfinite(x)
+#define isinf(x) __builtin_isinf(x)
+#define isnan(x) __builtin_isnan(x)
+
+#endif
+
#endif // LLVM_LIBC_MACROS_MATH_MACROS_H
diff --git a/libc/include/llvm-libc-macros/sys-queue-macros.h b/libc/include/llvm-libc-macros/sys-queue-macros.h
index fcac265333fc..089b6abaa024 100644
--- a/libc/include/llvm-libc-macros/sys-queue-macros.h
+++ b/libc/include/llvm-libc-macros/sys-queue-macros.h
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
#define LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
-#include <llvm-libc-macros/containerof-macro.h>
-#include <llvm-libc-macros/null-macro.h>
+#include "llvm-libc-macros/containerof-macro.h"
+#include "llvm-libc-macros/null-macro.h"
#ifdef __cplusplus
#define QUEUE_TYPEOF(type) type
diff --git a/libc/include/llvm-libc-types/CMakeLists.txt b/libc/include/llvm-libc-types/CMakeLists.txt
index 7fef976d7b32..93a79e1477b3 100644
--- a/libc/include/llvm-libc-types/CMakeLists.txt
+++ b/libc/include/llvm-libc-types/CMakeLists.txt
@@ -106,3 +106,13 @@ add_header(
DEPENDS
libc.include.llvm-libc-macros.float_macros
)
+add_header(fsblkcnt_t HDR fsblkcnt_t.h)
+add_header(fsfilcnt_t HDR fsfilcnt_t.h)
+add_header(
+  struct_statvfs
+  HDR
+    struct_statvfs.h
+  DEPENDS
+    .fsblkcnt_t
+    .fsfilcnt_t
+)
diff --git a/libc/include/llvm-libc-types/__mutex_type.h b/libc/include/llvm-libc-types/__mutex_type.h
index d27bf5db8377..3779c78203ed 100644
--- a/libc/include/llvm-libc-types/__mutex_type.h
+++ b/libc/include/llvm-libc-types/__mutex_type.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES___MUTEX_TYPE_H
#define LLVM_LIBC_TYPES___MUTEX_TYPE_H
-#include <llvm-libc-types/__futex_word.h>
+#include "llvm-libc-types/__futex_word.h"
typedef struct {
unsigned char __timed;
diff --git a/libc/include/llvm-libc-types/cookie_io_functions_t.h b/libc/include/llvm-libc-types/cookie_io_functions_t.h
index f9fa1a2d50ed..a3e7c32a5096 100644
--- a/libc/include/llvm-libc-types/cookie_io_functions_t.h
+++ b/libc/include/llvm-libc-types/cookie_io_functions_t.h
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
#define LLVM_LIBC_TYPES_COOKIE_IO_FUNCTIONS_T_H
-#include <llvm-libc-types/off64_t.h>
-#include <llvm-libc-types/size_t.h>
-#include <llvm-libc-types/ssize_t.h>
+#include "llvm-libc-types/off64_t.h"
+#include "llvm-libc-types/size_t.h"
+#include "llvm-libc-types/ssize_t.h"
typedef ssize_t cookie_read_function_t(void *, char *, size_t);
typedef ssize_t cookie_write_function_t(void *, const char *, size_t);
diff --git a/libc/include/llvm-libc-types/fd_set.h b/libc/include/llvm-libc-types/fd_set.h
index 58fc438bbdd2..fd1bde24c90e 100644
--- a/libc/include/llvm-libc-types/fd_set.h
+++ b/libc/include/llvm-libc-types/fd_set.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_FD_SET_H
#define LLVM_LIBC_TYPES_FD_SET_H
-#include <llvm-libc-macros/sys-select-macros.h> // FD_SETSIZE
+#include "llvm-libc-macros/sys-select-macros.h" // FD_SETSIZE
typedef struct {
__FD_SET_WORD_TYPE __set[__FD_SET_ARRAYSIZE];
diff --git a/libc/include/llvm-libc-types/fsblkcnt_t.h b/libc/include/llvm-libc-types/fsblkcnt_t.h
new file mode 100644
index 000000000000..88a53d38cb1b
--- /dev/null
+++ b/libc/include/llvm-libc-types/fsblkcnt_t.h
@@ -0,0 +1,14 @@
+//===-- Definition of fsblkcnt_t type -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_FSBLKCNT_T_H
+#define LLVM_LIBC_TYPES_FSBLKCNT_T_H
+
+typedef __SIZE_TYPE__ fsblkcnt_t;
+
+#endif // LLVM_LIBC_TYPES_FSBLKCNT_T_H
diff --git a/libc/include/llvm-libc-types/fsfilcnt_t.h b/libc/include/llvm-libc-types/fsfilcnt_t.h
new file mode 100644
index 000000000000..c5693591907a
--- /dev/null
+++ b/libc/include/llvm-libc-types/fsfilcnt_t.h
@@ -0,0 +1,14 @@
+//===-- Definition of fsfilcnt_t type -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_FSFILCNT_T_H
+#define LLVM_LIBC_TYPES_FSFILCNT_T_H
+
+typedef __SIZE_TYPE__ fsfilcnt_t;
+
+#endif // LLVM_LIBC_TYPES_FSFILCNT_T_H
diff --git a/libc/include/llvm-libc-types/mtx_t.h b/libc/include/llvm-libc-types/mtx_t.h
index 0f3882c26b6b..ebf79871c935 100644
--- a/libc/include/llvm-libc-types/mtx_t.h
+++ b/libc/include/llvm-libc-types/mtx_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_MTX_T_H
#define LLVM_LIBC_TYPES_MTX_T_H
-#include <llvm-libc-types/__mutex_type.h>
+#include "llvm-libc-types/__mutex_type.h"
typedef __mutex_type mtx_t;
diff --git a/libc/include/llvm-libc-types/once_flag.h b/libc/include/llvm-libc-types/once_flag.h
index cb8011284610..f80d35e317e9 100644
--- a/libc/include/llvm-libc-types/once_flag.h
+++ b/libc/include/llvm-libc-types/once_flag.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_ONCE_FLAG_H
#define LLVM_LIBC_TYPES_ONCE_FLAG_H
-#include <llvm-libc-types/__futex_word.h>
+#include "llvm-libc-types/__futex_word.h"
#ifdef __linux__
typedef __futex_word once_flag;
diff --git a/libc/include/llvm-libc-types/pthread_attr_t.h b/libc/include/llvm-libc-types/pthread_attr_t.h
index 66c04de04a99..7512193ef97b 100644
--- a/libc/include/llvm-libc-types/pthread_attr_t.h
+++ b/libc/include/llvm-libc-types/pthread_attr_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
#define LLVM_LIBC_TYPES_PTHREAD_ATTR_T_H
-#include <llvm-libc-types/size_t.h>
+#include "llvm-libc-types/size_t.h"
typedef struct {
int __detachstate;
diff --git a/libc/include/llvm-libc-types/pthread_mutex_t.h b/libc/include/llvm-libc-types/pthread_mutex_t.h
index b1eb21f24fac..cf2194d719f3 100644
--- a/libc/include/llvm-libc-types/pthread_mutex_t.h
+++ b/libc/include/llvm-libc-types/pthread_mutex_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
#define LLVM_LIBC_TYPES_PTHREAD_MUTEX_T_H
-#include <llvm-libc-types/__mutex_type.h>
+#include "llvm-libc-types/__mutex_type.h"
typedef __mutex_type pthread_mutex_t;
diff --git a/libc/include/llvm-libc-types/pthread_once_t.h b/libc/include/llvm-libc-types/pthread_once_t.h
index 3fe78b7ddff6..8ea926f4ee7d 100644
--- a/libc/include/llvm-libc-types/pthread_once_t.h
+++ b/libc/include/llvm-libc-types/pthread_once_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
#define LLVM_LIBC_TYPES_PTHREAD_ONCE_T_H
-#include <llvm-libc-types/__futex_word.h>
+#include "llvm-libc-types/__futex_word.h"
#ifdef __linux__
typedef __futex_word pthread_once_t;
diff --git a/libc/include/llvm-libc-types/pthread_t.h b/libc/include/llvm-libc-types/pthread_t.h
index 72c14e1c2eea..63cc0d7dd74c 100644
--- a/libc/include/llvm-libc-types/pthread_t.h
+++ b/libc/include/llvm-libc-types/pthread_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_PTHREAD_T_H
#define LLVM_LIBC_TYPES_PTHREAD_T_H
-#include <llvm-libc-types/__thread_type.h>
+#include "llvm-libc-types/__thread_type.h"
typedef __thread_type pthread_t;
diff --git a/libc/include/llvm-libc-types/siginfo_t.h b/libc/include/llvm-libc-types/siginfo_t.h
index 935ef4bbcb72..dafe9c1b5f8e 100644
--- a/libc/include/llvm-libc-types/siginfo_t.h
+++ b/libc/include/llvm-libc-types/siginfo_t.h
@@ -9,10 +9,10 @@
#ifndef LLVM_LIBC_TYPES_SIGINFO_T_H
#define LLVM_LIBC_TYPES_SIGINFO_T_H
-#include <llvm-libc-types/clock_t.h>
-#include <llvm-libc-types/pid_t.h>
-#include <llvm-libc-types/uid_t.h>
-#include <llvm-libc-types/union_sigval.h>
+#include "llvm-libc-types/clock_t.h"
+#include "llvm-libc-types/pid_t.h"
+#include "llvm-libc-types/uid_t.h"
+#include "llvm-libc-types/union_sigval.h"
#define SI_MAX_SIZE 128
diff --git a/libc/include/llvm-libc-types/sigset_t.h b/libc/include/llvm-libc-types/sigset_t.h
index f159c6c6c643..311a92b823ff 100644
--- a/libc/include/llvm-libc-types/sigset_t.h
+++ b/libc/include/llvm-libc-types/sigset_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_SIGSET_T_H
#define LLVM_LIBC_TYPES_SIGSET_T_H
-#include <llvm-libc-macros/signal-macros.h>
+#include "llvm-libc-macros/signal-macros.h"
// This definition can be adjusted/specialized for different targets and
// platforms as necessary. This definition works for Linux on most targets.
diff --git a/libc/include/llvm-libc-types/stack_t.h b/libc/include/llvm-libc-types/stack_t.h
index 5fa4d3a6d3dc..9156425436e9 100644
--- a/libc/include/llvm-libc-types/stack_t.h
+++ b/libc/include/llvm-libc-types/stack_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STACK_T_H
#define LLVM_LIBC_TYPES_STACK_T_H
-#include <llvm-libc-types/size_t.h>
+#include "llvm-libc-types/size_t.h"
typedef struct {
// The order of the fields declared here should match the kernel definition
diff --git a/libc/include/llvm-libc-types/struct_dirent.h b/libc/include/llvm-libc-types/struct_dirent.h
index 3c5b361c3cbc..0bb71b9f3b84 100644
--- a/libc/include/llvm-libc-types/struct_dirent.h
+++ b/libc/include/llvm-libc-types/struct_dirent.h
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_DIRENT_H
#define LLVM_LIBC_TYPES_STRUCT_DIRENT_H
-#include <llvm-libc-types/ino_t.h>
-#include <llvm-libc-types/off_t.h>
+#include "llvm-libc-types/ino_t.h"
+#include "llvm-libc-types/off_t.h"
struct dirent {
ino_t d_ino;
diff --git a/libc/include/llvm-libc-types/struct_epoll_event.h b/libc/include/llvm-libc-types/struct_epoll_event.h
index 6fc5b410348a..66cf86c1e2a0 100644
--- a/libc/include/llvm-libc-types/struct_epoll_event.h
+++ b/libc/include/llvm-libc-types/struct_epoll_event.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
#define LLVM_LIBC_TYPES_STRUCT_EPOLL_EVENT_H
-#include <llvm-libc-types/struct_epoll_data.h>
+#include "llvm-libc-types/struct_epoll_data.h"
typedef struct epoll_event {
__UINT32_TYPE__ events;
diff --git a/libc/include/llvm-libc-types/struct_rlimit.h b/libc/include/llvm-libc-types/struct_rlimit.h
index e093d9f306c9..11e6bee15f9d 100644
--- a/libc/include/llvm-libc-types/struct_rlimit.h
+++ b/libc/include/llvm-libc-types/struct_rlimit.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
#define LLVM_LIBC_TYPES_STRUCT_RLIMIT_H
-#include <llvm-libc-types/rlim_t.h>
+#include "llvm-libc-types/rlim_t.h"
struct rlimit {
rlim_t rlim_cur;
diff --git a/libc/include/llvm-libc-types/struct_rusage.h b/libc/include/llvm-libc-types/struct_rusage.h
index 21ea8b1061c2..ed838d30ede3 100644
--- a/libc/include/llvm-libc-types/struct_rusage.h
+++ b/libc/include/llvm-libc-types/struct_rusage.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
#define LLVM_LIBC_TYPES_STRUCT_RUSAGE_H
-#include <llvm-libc-types/struct_timeval.h>
+#include "llvm-libc-types/struct_timeval.h"
struct rusage {
struct timeval ru_utime;
diff --git a/libc/include/llvm-libc-types/struct_sched_param.h b/libc/include/llvm-libc-types/struct_sched_param.h
index 0521a4df652f..86209ac3a181 100644
--- a/libc/include/llvm-libc-types/struct_sched_param.h
+++ b/libc/include/llvm-libc-types/struct_sched_param.h
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
#define LLVM_LIBC_TYPES_STRUCT_SCHED_PARAM_H
-#include <llvm-libc-types/pid_t.h>
-#include <llvm-libc-types/struct_timespec.h>
-#include <llvm-libc-types/time_t.h>
+#include "llvm-libc-types/pid_t.h"
+#include "llvm-libc-types/struct_timespec.h"
+#include "llvm-libc-types/time_t.h"
struct sched_param {
// Process or thread execution scheduling priority.
diff --git a/libc/include/llvm-libc-types/struct_sigaction.h b/libc/include/llvm-libc-types/struct_sigaction.h
index 54d2995f4ecd..ffce04d0f7e8 100644
--- a/libc/include/llvm-libc-types/struct_sigaction.h
+++ b/libc/include/llvm-libc-types/struct_sigaction.h
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
#define LLVM_LIBC_TYPES_STRUCT_SIGACTION_H
-#include <llvm-libc-types/siginfo_t.h>
-#include <llvm-libc-types/sigset_t.h>
+#include "llvm-libc-types/siginfo_t.h"
+#include "llvm-libc-types/sigset_t.h"
struct sigaction {
union {
diff --git a/libc/include/llvm-libc-types/struct_sockaddr.h b/libc/include/llvm-libc-types/struct_sockaddr.h
index 074b1ae50ef0..a98606323c52 100644
--- a/libc/include/llvm-libc-types/struct_sockaddr.h
+++ b/libc/include/llvm-libc-types/struct_sockaddr.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
#define LLVM_LIBC_TYPES_STRUCT_SOCKADDR_H
-#include <llvm-libc-types/sa_family_t.h>
+#include "llvm-libc-types/sa_family_t.h"
struct sockaddr {
sa_family_t sa_family;
diff --git a/libc/include/llvm-libc-types/struct_sockaddr_un.h b/libc/include/llvm-libc-types/struct_sockaddr_un.h
index 4332419a5b71..3c0362ce24fb 100644
--- a/libc/include/llvm-libc-types/struct_sockaddr_un.h
+++ b/libc/include/llvm-libc-types/struct_sockaddr_un.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
#define LLVM_LIBC_TYPES_STRUCT_SOCKADDR_UN_H
-#include <llvm-libc-types/sa_family_t.h>
+#include "llvm-libc-types/sa_family_t.h"
// This is the sockaddr specialization for AF_UNIX or AF_LOCAL sockets, as
// defined by posix.
diff --git a/libc/include/llvm-libc-types/struct_stat.h b/libc/include/llvm-libc-types/struct_stat.h
index 3539fb5b920e..d8ae9dd6ffdc 100644
--- a/libc/include/llvm-libc-types/struct_stat.h
+++ b/libc/include/llvm-libc-types/struct_stat.h
@@ -9,16 +9,16 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_STAT_H
#define LLVM_LIBC_TYPES_STRUCT_STAT_H
-#include <llvm-libc-types/blkcnt_t.h>
-#include <llvm-libc-types/blksize_t.h>
-#include <llvm-libc-types/dev_t.h>
-#include <llvm-libc-types/gid_t.h>
-#include <llvm-libc-types/ino_t.h>
-#include <llvm-libc-types/mode_t.h>
-#include <llvm-libc-types/nlink_t.h>
-#include <llvm-libc-types/off_t.h>
-#include <llvm-libc-types/struct_timespec.h>
-#include <llvm-libc-types/uid_t.h>
+#include "llvm-libc-types/blkcnt_t.h"
+#include "llvm-libc-types/blksize_t.h"
+#include "llvm-libc-types/dev_t.h"
+#include "llvm-libc-types/gid_t.h"
+#include "llvm-libc-types/ino_t.h"
+#include "llvm-libc-types/mode_t.h"
+#include "llvm-libc-types/nlink_t.h"
+#include "llvm-libc-types/off_t.h"
+#include "llvm-libc-types/struct_timespec.h"
+#include "llvm-libc-types/uid_t.h"
struct stat {
dev_t st_dev;
diff --git a/libc/include/llvm-libc-types/struct_statvfs.h b/libc/include/llvm-libc-types/struct_statvfs.h
new file mode 100644
index 000000000000..f467cfd936bd
--- /dev/null
+++ b/libc/include/llvm-libc-types/struct_statvfs.h
@@ -0,0 +1,29 @@
+//===-- Definition of type struct statvfs ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TYPES_STRUCT_STATVFS_H
+#define LLVM_LIBC_TYPES_STRUCT_STATVFS_H
+
+#include "llvm-libc-types/fsblkcnt_t.h"
+#include "llvm-libc-types/fsfilcnt_t.h"
+
+struct statvfs {
+ unsigned long f_bsize; /* Filesystem block size */
+ unsigned long f_frsize; /* Fragment size */
+ fsblkcnt_t f_blocks; /* Size of fs in f_frsize units */
+ fsblkcnt_t f_bfree; /* Number of free blocks */
+ fsblkcnt_t f_bavail; /* Number of free blocks for unprivileged users */
+ fsfilcnt_t f_files; /* Number of inodes */
+ fsfilcnt_t f_ffree; /* Number of free inodes */
+ fsfilcnt_t f_favail; /* Number of free inodes for unprivileged users */
+ unsigned long f_fsid; /* Filesystem ID */
+ unsigned long f_flag; /* Mount flags */
+ unsigned long f_namemax; /* Maximum filename length */
+};
+
+#endif // LLVM_LIBC_TYPES_STRUCT_STATVFS_H
diff --git a/libc/include/llvm-libc-types/struct_termios.h b/libc/include/llvm-libc-types/struct_termios.h
index 72aefe4f6926..51241192f741 100644
--- a/libc/include/llvm-libc-types/struct_termios.h
+++ b/libc/include/llvm-libc-types/struct_termios.h
@@ -9,9 +9,9 @@
#ifndef __LLVM_LIBC_TYPES_STRUCT_TERMIOS_H__
#define __LLVM_LIBC_TYPES_STRUCT_TERMIOS_H__
-#include <llvm-libc-types/cc_t.h>
-#include <llvm-libc-types/speed_t.h>
-#include <llvm-libc-types/tcflag_t.h>
+#include "llvm-libc-types/cc_t.h"
+#include "llvm-libc-types/speed_t.h"
+#include "llvm-libc-types/tcflag_t.h"
struct termios {
tcflag_t c_iflag; // Input mode flags
diff --git a/libc/include/llvm-libc-types/struct_timespec.h b/libc/include/llvm-libc-types/struct_timespec.h
index 5d56d9c9468b..4baab07c10f8 100644
--- a/libc/include/llvm-libc-types/struct_timespec.h
+++ b/libc/include/llvm-libc-types/struct_timespec.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
#define LLVM_LIBC_TYPES_STRUCT_TIMESPEC_H
-#include <llvm-libc-types/time_t.h>
+#include "llvm-libc-types/time_t.h"
struct timespec {
time_t tv_sec; /* Seconds. */
diff --git a/libc/include/llvm-libc-types/struct_timeval.h b/libc/include/llvm-libc-types/struct_timeval.h
index 6a0b7bbaf825..365b835d345d 100644
--- a/libc/include/llvm-libc-types/struct_timeval.h
+++ b/libc/include/llvm-libc-types/struct_timeval.h
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
#define LLVM_LIBC_TYPES_STRUCT_TIMEVAL_H
-#include <llvm-libc-types/suseconds_t.h>
-#include <llvm-libc-types/time_t.h>
+#include "llvm-libc-types/suseconds_t.h"
+#include "llvm-libc-types/time_t.h"
struct timeval {
time_t tv_sec; // Seconds
diff --git a/libc/include/llvm-libc-types/thrd_t.h b/libc/include/llvm-libc-types/thrd_t.h
index 2e0f9a0d75ad..751ea5b9e4c0 100644
--- a/libc/include/llvm-libc-types/thrd_t.h
+++ b/libc/include/llvm-libc-types/thrd_t.h
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_TYPES_THRD_T_H
#define LLVM_LIBC_TYPES_THRD_T_H
-#include <llvm-libc-types/__thread_type.h>
+#include "llvm-libc-types/__thread_type.h"
typedef __thread_type thrd_t;
diff --git a/libc/include/math.h.def b/libc/include/math.h.def
index 927e2d6697c6..cd2fe76f40bf 100644
--- a/libc/include/math.h.def
+++ b/libc/include/math.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_MATH_H
#define LLVM_LIBC_MATH_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/math-macros.h>
-#include <llvm-libc-types/float128.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/math-macros.h"
+#include "llvm-libc-types/float128.h"
%%public_api()
diff --git a/libc/include/pthread.h.def b/libc/include/pthread.h.def
index 391ecd3c124f..abeb839ee83d 100644
--- a/libc/include/pthread.h.def
+++ b/libc/include/pthread.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_PTHREAD_H
#define LLVM_LIBC_PTHREAD_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
#define PTHREAD_STACK_MIN (1 << 14) // 16KB
diff --git a/libc/include/sched.h.def b/libc/include/sched.h.def
index 3b2d5e330859..493028e8dcc4 100644
--- a/libc/include/sched.h.def
+++ b/libc/include/sched.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_SCHED_H
#define LLVM_LIBC_SCHED_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/sched-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/sched-macros.h"
%%public_api()
diff --git a/libc/include/search.h.def b/libc/include/search.h.def
index 3435c1f8ad04..6301ba7b656c 100644
--- a/libc/include/search.h.def
+++ b/libc/include/search.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SEARCH_H
#define LLVM_LIBC_SEARCH_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
#define __need_size_t
#include <stddef.h>
diff --git a/libc/include/setjmp.h.def b/libc/include/setjmp.h.def
index 7447be2415bd..670bc1ac0fe2 100644
--- a/libc/include/setjmp.h.def
+++ b/libc/include/setjmp.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SETJMP_H
#define LLVM_LIBC_SETJMP_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/signal.h.def b/libc/include/signal.h.def
index 0e7033452715..50a5f44c7337 100644
--- a/libc/include/signal.h.def
+++ b/libc/include/signal.h.def
@@ -9,12 +9,12 @@
#ifndef LLVM_LIBC_SIGNAL_H
#define LLVM_LIBC_SIGNAL_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
#define __need_size_t
#include <stddef.h>
-#include <llvm-libc-macros/signal-macros.h>
+#include "llvm-libc-macros/signal-macros.h"
%%public_api()
diff --git a/libc/include/spawn.h.def b/libc/include/spawn.h.def
index 368ebff17ca1..a8d701585286 100644
--- a/libc/include/spawn.h.def
+++ b/libc/include/spawn.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SPAWN_H
#define LLVM_LIBC_SPAWN_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/stdbit.h.def b/libc/include/stdbit.h.def
index c5a77329fbfe..28c147b01e22 100644
--- a/libc/include/stdbit.h.def
+++ b/libc/include/stdbit.h.def
@@ -9,10 +9,10 @@
#ifndef LLVM_LIBC_STDBIT_H
#define LLVM_LIBC_STDBIT_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
-#include <llvm-libc-macros/stdbit-macros.h>
+#include "llvm-libc-macros/stdbit-macros.h"
#endif // LLVM_LIBC_STDBIT_H
diff --git a/libc/include/stdckdint.h.def b/libc/include/stdckdint.h.def
index c82470911c33..d4a9d829a3c9 100644
--- a/libc/include/stdckdint.h.def
+++ b/libc/include/stdckdint.h.def
@@ -9,10 +9,10 @@
#ifndef LLVM_LIBC_STDCKDINT_H
#define LLVM_LIBC_STDCKDINT_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
-#include <llvm-libc-macros/stdckdint-macros.h>
+#include "llvm-libc-macros/stdckdint-macros.h"
#endif // LLVM_LIBC_STDCKDINT_H
diff --git a/libc/include/stdfix.h.def b/libc/include/stdfix.h.def
index 368eeb33f2f0..8ac49be45fba 100644
--- a/libc/include/stdfix.h.def
+++ b/libc/include/stdfix.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_STDFIX_H
#define LLVM_LIBC_STDFIX_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/stdfix-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/stdfix-macros.h"
// From ISO/IEC TR 18037:2008 standard:
// https://www.iso.org/standard/51126.html
diff --git a/libc/include/stdint.h.def b/libc/include/stdint.h.def
index 9e269101acd2..d7660860c91f 100644
--- a/libc/include/stdint.h.def
+++ b/libc/include/stdint.h.def
@@ -9,6 +9,6 @@
#ifndef LLVM_LIBC_STDINT_H
#define LLVM_LIBC_STDINT_H
-#include <llvm-libc-macros/stdint-macros.h>
+#include "llvm-libc-macros/stdint-macros.h"
#endif // LLVM_LIBC_STDINT_H
diff --git a/libc/include/stdio.h.def b/libc/include/stdio.h.def
index a28d009288d5..78d800c83b51 100644
--- a/libc/include/stdio.h.def
+++ b/libc/include/stdio.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_STDIO_H
#define LLVM_LIBC_STDIO_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/file-seek-macros.h>
-#include <llvm-libc-macros/stdio-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/file-seek-macros.h"
+#include "llvm-libc-macros/stdio-macros.h"
#include <stdarg.h>
diff --git a/libc/include/stdlib.h.def b/libc/include/stdlib.h.def
index 18df71a49a9b..d523f7a53024 100644
--- a/libc/include/stdlib.h.def
+++ b/libc/include/stdlib.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_STDLIB_H
#define LLVM_LIBC_STDLIB_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/stdlib-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/stdlib-macros.h"
%%public_api()
diff --git a/libc/include/string.h.def b/libc/include/string.h.def
index 26e6ef93d314..1bd2687db2be 100644
--- a/libc/include/string.h.def
+++ b/libc/include/string.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_STRING_H
#define LLVM_LIBC_STRING_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/null-macro.h>
+#include "llvm-libc-macros/null-macro.h"
%%public_api()
diff --git a/libc/include/strings.h.def b/libc/include/strings.h.def
index f07ca30d5dbd..9b016bf0bc50 100644
--- a/libc/include/strings.h.def
+++ b/libc/include/strings.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_STRINGS_H
#define LLVM_LIBC_STRINGS_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/sys/auxv.h.def b/libc/include/sys/auxv.h.def
index 504c2f68cb1e..11ab25bcfe2c 100644
--- a/libc/include/sys/auxv.h.def
+++ b/libc/include/sys/auxv.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_AUXV_H
#define LLVM_LIBC_SYS_AUXV_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-auxv-macros.h>
+#include "llvm-libc-macros/sys-auxv-macros.h"
%%public_api()
diff --git a/libc/include/sys/epoll.h.def b/libc/include/sys/epoll.h.def
index 490fad91db3c..85f7d9ad6091 100644
--- a/libc/include/sys/epoll.h.def
+++ b/libc/include/sys/epoll.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SYS_EPOLL_H
#define LLVM_LIBC_SYS_EPOLL_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/sys/ioctl.h.def b/libc/include/sys/ioctl.h.def
index 90d91cf38291..2f37a1190ac1 100644
--- a/libc/include/sys/ioctl.h.def
+++ b/libc/include/sys/ioctl.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_IOCTL_H
#define LLVM_LIBC_SYS_IOCTL_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-ioctl-macros.h>
+#include "llvm-libc-macros/sys-ioctl-macros.h"
%%public_api()
diff --git a/libc/include/sys/mman.h.def b/libc/include/sys/mman.h.def
index ab9fde1bb920..2e2c2f1997b8 100644
--- a/libc/include/sys/mman.h.def
+++ b/libc/include/sys/mman.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_MMAN_H
#define LLVM_LIBC_SYS_MMAN_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-mman-macros.h>
+#include "llvm-libc-macros/sys-mman-macros.h"
%%public_api()
diff --git a/libc/include/sys/prctl.h.def b/libc/include/sys/prctl.h.def
index 0a11543d0729..08648c9f4792 100644
--- a/libc/include/sys/prctl.h.def
+++ b/libc/include/sys/prctl.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SYS_PRCTL_H
#define LLVM_LIBC_SYS_PRCTL_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
// Process control is highly platform specific, so the platform usually defines
// the macros itself.
diff --git a/libc/include/sys/queue.h b/libc/include/sys/queue.h
index 1cde35e77a04..cca53c16f0f3 100644
--- a/libc/include/sys/queue.h
+++ b/libc/include/sys/queue.h
@@ -9,6 +9,6 @@
#ifndef SYS_QUEUE_H
#define SYS_QUEUE_H
-#include <llvm-libc-macros/sys-queue-macros.h>
+#include "llvm-libc-macros/sys-queue-macros.h"
#endif // SYS_QUEUE_H
diff --git a/libc/include/sys/random.h.def b/libc/include/sys/random.h.def
index b767f2479fc2..d11431b2755d 100644
--- a/libc/include/sys/random.h.def
+++ b/libc/include/sys/random.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_RANDOM_H
#define LLVM_LIBC_SYS_RANDOM_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-random-macros.h>
+#include "llvm-libc-macros/sys-random-macros.h"
%%public_api()
diff --git a/libc/include/sys/resource.h.def b/libc/include/sys/resource.h.def
index 31132d3b2608..365d803cf715 100644
--- a/libc/include/sys/resource.h.def
+++ b/libc/include/sys/resource.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_RESOURCE_H
#define LLVM_LIBC_SYS_RESOURCE_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-resource-macros.h>
+#include "llvm-libc-macros/sys-resource-macros.h"
%%public_api()
diff --git a/libc/include/sys/select.h.def b/libc/include/sys/select.h.def
index 4f3cebaecbb9..529be7158f26 100644
--- a/libc/include/sys/select.h.def
+++ b/libc/include/sys/select.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_SELECT_H
#define LLVM_LIBC_SYS_SELECT_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-select-macros.h>
+#include "llvm-libc-macros/sys-select-macros.h"
%%public_api()
diff --git a/libc/include/sys/sendfile.h.def b/libc/include/sys/sendfile.h.def
index 947edc28e1ec..d7f21f91f95e 100644
--- a/libc/include/sys/sendfile.h.def
+++ b/libc/include/sys/sendfile.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SYS_SENDFILE_H
#define LLVM_LIBC_SYS_SENDFILE_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/sys/socket.h.def b/libc/include/sys/socket.h.def
index 71654c64b988..933ef1512e45 100644
--- a/libc/include/sys/socket.h.def
+++ b/libc/include/sys/socket.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_SOCKET_H
#define LLVM_LIBC_SYS_SOCKET_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-socket-macros.h>
+#include "llvm-libc-macros/sys-socket-macros.h"
%%public_api()
diff --git a/libc/include/sys/stat.h.def b/libc/include/sys/stat.h.def
index ed37d010f497..06a98a4aa029 100644
--- a/libc/include/sys/stat.h.def
+++ b/libc/include/sys/stat.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_STAT_H
#define LLVM_LIBC_SYS_STAT_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-stat-macros.h>
+#include "llvm-libc-macros/sys-stat-macros.h"
%%public_api()
diff --git a/libc/include/sys/statvfs.h.def b/libc/include/sys/statvfs.h.def
new file mode 100644
index 000000000000..f23c9a3d5b1f
--- /dev/null
+++ b/libc/include/sys/statvfs.h.def
@@ -0,0 +1,16 @@
+//===-- POSIX header statvfs.h --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SYS_STATVFS_H
+#define LLVM_LIBC_SYS_STATVFS_H
+
+#include "__llvm-libc-common.h"
+
+%%public_api()
+
+#endif // LLVM_LIBC_SYS_STATVFS_H
diff --git a/libc/include/sys/time.h.def b/libc/include/sys/time.h.def
index 9a3bd7bb49f8..5a87139aefc9 100644
--- a/libc/include/sys/time.h.def
+++ b/libc/include/sys/time.h.def
@@ -9,11 +9,11 @@
#ifndef LLVM_LIBC_SYS_TIME_H
#define LLVM_LIBC_SYS_TIME_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-types/struct_timeval.h>
+#include "llvm-libc-types/struct_timeval.h"
-#include <llvm-libc-macros/sys-time-macros.h>
+#include "llvm-libc-macros/sys-time-macros.h"
%%public_api()
diff --git a/libc/include/sys/types.h.def b/libc/include/sys/types.h.def
index 689482973fc7..f5c3bb2c928b 100644
--- a/libc/include/sys/types.h.def
+++ b/libc/include/sys/types.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SYS_TYPES_H
#define LLVM_LIBC_SYS_TYPES_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/sys/utsname.h.def b/libc/include/sys/utsname.h.def
index 6d7daeb45f01..08dbbfc06245 100644
--- a/libc/include/sys/utsname.h.def
+++ b/libc/include/sys/utsname.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_SYS_UTSNAME_H
#define LLVM_LIBC_SYS_UTSNAME_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/sys/wait.h.def b/libc/include/sys/wait.h.def
index b4fcce4d1652..0a76da019fdc 100644
--- a/libc/include/sys/wait.h.def
+++ b/libc/include/sys/wait.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_SYS_WAIT_H
#define LLVM_LIBC_SYS_WAIT_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
-#include <llvm-libc-macros/sys-wait-macros.h>
+#include "llvm-libc-macros/sys-wait-macros.h"
%%public_api()
diff --git a/libc/include/termios.h.def b/libc/include/termios.h.def
index be1cd2bff526..7538944c0985 100644
--- a/libc/include/termios.h.def
+++ b/libc/include/termios.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_TERMIOS_H
#define LLVM_LIBC_TERMIOS_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/termios-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/termios-macros.h"
%%public_api()
diff --git a/libc/include/threads.h.def b/libc/include/threads.h.def
index 93541b8d3bac..b114bea0ace3 100644
--- a/libc/include/threads.h.def
+++ b/libc/include/threads.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_THREADS_H
#define LLVM_LIBC_THREADS_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/time.h.def b/libc/include/time.h.def
index d8988329a372..2355e8822fad 100644
--- a/libc/include/time.h.def
+++ b/libc/include/time.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_TIME_H
#define LLVM_LIBC_TIME_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/time-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/time-macros.h"
%%public_api()
diff --git a/libc/include/uchar.h.def b/libc/include/uchar.h.def
index 7e62d43e9cc4..31b7fcb73ded 100644
--- a/libc/include/uchar.h.def
+++ b/libc/include/uchar.h.def
@@ -9,7 +9,7 @@
#ifndef LLVM_LIBC_UCHAR_H
#define LLVM_LIBC_UCHAR_H
-#include <__llvm-libc-common.h>
+#include "__llvm-libc-common.h"
%%public_api()
diff --git a/libc/include/unistd.h.def b/libc/include/unistd.h.def
index fa10af653fae..6b9137e14623 100644
--- a/libc/include/unistd.h.def
+++ b/libc/include/unistd.h.def
@@ -9,9 +9,9 @@
#ifndef LLVM_LIBC_UNISTD_H
#define LLVM_LIBC_UNISTD_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/file-seek-macros.h>
-#include <llvm-libc-macros/unistd-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/file-seek-macros.h"
+#include "llvm-libc-macros/unistd-macros.h"
%%public_api()
diff --git a/libc/include/wchar.h.def b/libc/include/wchar.h.def
index ac72f80aa083..4c25de700d60 100644
--- a/libc/include/wchar.h.def
+++ b/libc/include/wchar.h.def
@@ -9,8 +9,8 @@
#ifndef LLVM_LIBC_WCHAR_H
#define LLVM_LIBC_WCHAR_H
-#include <__llvm-libc-common.h>
-#include <llvm-libc-macros/wchar-macros.h>
+#include "__llvm-libc-common.h"
+#include "llvm-libc-macros/wchar-macros.h"
%%public_api()
diff --git a/libc/spec/posix.td b/libc/spec/posix.td
index 3b793ea90ffd..8444a449ebe5 100644
--- a/libc/spec/posix.td
+++ b/libc/spec/posix.td
@@ -86,6 +86,10 @@ def ConstStructSockAddrPtr : ConstType<StructSockAddrPtr>;
def StructSockAddrUn : NamedType<"struct sockaddr_un">;
+def StructStatvfs : NamedType<"struct statvfs">;
+def StructStatvfsPtr : PtrType<StructStatvfs>;
+def RestrictedStructStatvfsPtr : RestrictedPtrType<StructStatvfs>;
+
def POSIX : StandardSpec<"POSIX"> {
PtrType CharPtr = PtrType<CharType>;
RestrictedPtrType RestrictedCharPtr = RestrictedPtrType<CharType>;
@@ -888,6 +892,31 @@ def POSIX : StandardSpec<"POSIX"> {
]
>;
+ HeaderSpec SysStatvfs = HeaderSpec<
+ "sys/statvfs.h",
+ [], // Macros
+ [StructStatvfs], // Types
+ [], // Enumerations
+ [
+ FunctionSpec<
+ "statvfs",
+ RetValSpec<IntType>,
+ [
+ ArgSpec<ConstRestrictedCharPtr>,
+ ArgSpec<RestrictedStructStatvfsPtr>
+ ]
+ >,
+ FunctionSpec<
+ "fstatvfs",
+ RetValSpec<IntType>,
+ [
+ ArgSpec<IntType>,
+ ArgSpec<StructStatvfsPtr>
+ ]
+ >,
+ ] // Functions
+ >;
+
NamedType StructUtsName = NamedType<"struct utsname">;
PtrType StructUtsNamePtr = PtrType<StructUtsName>;
HeaderSpec SysUtsName = HeaderSpec<
@@ -1505,6 +1534,7 @@ def POSIX : StandardSpec<"POSIX"> {
SysSelect,
SysSocket,
SysStat,
+ SysStatvfs,
SysTypes,
SysUtsName,
SysWait,
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 920036adfed5..ac6e1d1801ba 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -352,6 +352,12 @@ def StdC : StandardSpec<"stdc"> {
Macro<"INFINITY">,
Macro<"NAN">,
+ Macro<"FP_INT_UPWARD">,
+ Macro<"FP_INT_DOWNWARD">,
+ Macro<"FP_INT_TOWARDZERO">,
+ Macro<"FP_INT_TONEARESTFROMZERO">,
+ Macro<"FP_INT_TONEAREST">,
+
Macro<"FP_ILOGB0">,
Macro<"FP_ILOGBNAN">,
@@ -400,6 +406,46 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"fmaxf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"fmaxl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
GuardedFunctionSpec<"fmaxf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fmaximum", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fmaximumf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fmaximuml", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fmaximumf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fmaximum_num", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fmaximum_numf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fmaximum_numl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fmaximum_numf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fmaximum_mag", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fmaximum_magf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fmaximum_magl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fmaximum_magf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fmaximum_mag_num", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fmaximum_mag_numf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fmaximum_mag_numl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fmaximum_mag_numf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fminimum", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fminimumf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fminimuml", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fminimumf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fminimum_num", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fminimum_numf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fminimum_numl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fminimum_numf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fminimum_mag", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fminimum_magf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fminimum_magl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fminimum_magf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fminimum_mag_num", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"fminimum_mag_numf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"fminimum_mag_numl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fminimum_mag_numf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"fma", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"fmaf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>, ArgSpec<FloatType>]>,
@@ -414,6 +460,26 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"frexpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntPtr>]>,
GuardedFunctionSpec<"frexpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntPtr>], "LIBC_TYPES_HAS_FLOAT128">,
+ FunctionSpec<"fromfp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"fromfpf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"fromfpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ GuardedFunctionSpec<"fromfpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"fromfpx", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"fromfpxf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"fromfpxl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ GuardedFunctionSpec<"fromfpxf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"ufromfp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"ufromfpf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"ufromfpl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ GuardedFunctionSpec<"ufromfpf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"ufromfpx", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"ufromfpxf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ FunctionSpec<"ufromfpxl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>]>,
+ GuardedFunctionSpec<"ufromfpxf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<IntType>, ArgSpec<UnsignedIntType>], "LIBC_TYPES_HAS_FLOAT128">,
+
FunctionSpec<"hypot", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"hypotf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
@@ -570,6 +636,11 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"nan", RetValSpec<DoubleType>, [ArgSpec<ConstCharPtr>]>,
FunctionSpec<"nanl", RetValSpec<LongDoubleType>, [ArgSpec<ConstCharPtr>]>,
GuardedFunctionSpec<"nanf128", RetValSpec<Float128Type>, [ArgSpec<ConstCharPtr>], "LIBC_TYPES_HAS_FLOAT128">,
+
+ FunctionSpec<"canonicalize", RetValSpec<IntType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"canonicalizef", RetValSpec<IntType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
+ FunctionSpec<"canonicalizel", RetValSpec<IntType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"canonicalizef128", RetValSpec<IntType>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
]
>;
@@ -707,6 +778,11 @@ def StdC : StandardSpec<"stdc"> {
[ArgSpec<ConstCharPtr>]
>,
FunctionSpec<
+ "rename",
+ RetValSpec<IntType>,
+ [ArgSpec<ConstCharPtr>, ArgSpec<ConstCharPtr>]
+ >,
+ FunctionSpec<
"setbuf",
RetValSpec<VoidType>,
[ArgSpec<FILERestrictedPtr>, ArgSpec<CharRestrictedPtr>]
@@ -957,6 +1033,8 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"srand", RetValSpec<VoidType>, [ArgSpec<UnsignedIntType>]>,
FunctionSpec<"strfromf", RetValSpec<IntType>, [ArgSpec<CharRestrictedPtr>, ArgSpec<SizeTType>, ArgSpec<ConstCharRestrictedPtr>, ArgSpec<FloatType>]>,
+ FunctionSpec<"strfromd", RetValSpec<IntType>, [ArgSpec<CharRestrictedPtr>, ArgSpec<SizeTType>, ArgSpec<ConstCharRestrictedPtr>, ArgSpec<DoubleType>]>,
+ FunctionSpec<"strfroml", RetValSpec<IntType>, [ArgSpec<CharRestrictedPtr>, ArgSpec<SizeTType>, ArgSpec<ConstCharRestrictedPtr>, ArgSpec<LongDoubleType>]>,
FunctionSpec<"strtof", RetValSpec<FloatType>, [ArgSpec<ConstCharRestrictedPtr>, ArgSpec<CharRestrictedPtrPtr>]>,
FunctionSpec<"strtod", RetValSpec<DoubleType>, [ArgSpec<ConstCharRestrictedPtr>, ArgSpec<CharRestrictedPtrPtr>]>,
diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt
index 6216505eae23..84d01fe04516 100644
--- a/libc/src/__support/CPP/CMakeLists.txt
+++ b/libc/src/__support/CPP/CMakeLists.txt
@@ -18,7 +18,6 @@ add_header_library(
.limits
.type_traits
libc.src.__support.macros.attributes
- libc.src.__support.macros.config
libc.src.__support.macros.sanitizer
)
@@ -103,23 +102,24 @@ add_header_library(
type_traits
HDRS
type_traits.h
- type_traits/always_false.h
type_traits/add_lvalue_reference.h
type_traits/add_pointer.h
type_traits/add_rvalue_reference.h
+ type_traits/always_false.h
type_traits/bool_constant.h
type_traits/conditional.h
type_traits/decay.h
type_traits/enable_if.h
type_traits/false_type.h
type_traits/integral_constant.h
- type_traits/invoke.h
type_traits/invoke_result.h
+ type_traits/invoke.h
type_traits/is_arithmetic.h
type_traits/is_array.h
type_traits/is_base_of.h
type_traits/is_class.h
type_traits/is_const.h
+ type_traits/is_constant_evaluated.h
type_traits/is_convertible.h
type_traits/is_destructible.h
type_traits/is_enum.h
@@ -156,7 +156,6 @@ add_header_library(
DEPENDS
libc.include.llvm-libc-macros.stdfix_macros
libc.src.__support.macros.attributes
- libc.src.__support.macros.config
libc.src.__support.macros.properties.types
)
diff --git a/libc/src/__support/CPP/atomic.h b/libc/src/__support/CPP/atomic.h
index b74cb5981dba..5e428940565b 100644
--- a/libc/src/__support/CPP/atomic.h
+++ b/libc/src/__support/CPP/atomic.h
@@ -71,10 +71,11 @@ public:
T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_load_n))
- return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
- else
- return __atomic_load_n(&val, int(mem_ord));
+#if __has_builtin(__scoped_atomic_load_n)
+ return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
+#else
+ return __atomic_load_n(&val, int(mem_ord));
+#endif
}
// Atomic store.
@@ -85,10 +86,11 @@ public:
void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_store_n))
- __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
- else
- __atomic_store_n(&val, rhs, int(mem_ord));
+#if __has_builtin(__scoped_atomic_store_n)
+ __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
+#else
+ __atomic_store_n(&val, rhs, int(mem_ord));
+#endif
}
// Atomic compare exchange
@@ -101,47 +103,51 @@ public:
T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_exchange_n))
- return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
- (int)(mem_scope));
- else
- return __atomic_exchange_n(&val, desired, int(mem_ord));
+#if __has_builtin(__scoped_atomic_exchange_n)
+ return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
+ (int)(mem_scope));
+#else
+ return __atomic_exchange_n(&val, desired, int(mem_ord));
+#endif
}
T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_add))
- return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
- (int)(mem_scope));
- else
- return __atomic_fetch_add(&val, increment, int(mem_ord));
+#if __has_builtin(__scoped_atomic_fetch_add)
+ return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
+ (int)(mem_scope));
+#else
+ return __atomic_fetch_add(&val, increment, int(mem_ord));
+#endif
}
T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_or))
- return __scoped_atomic_fetch_or(&val, mask, int(mem_ord),
- (int)(mem_scope));
- else
- return __atomic_fetch_or(&val, mask, int(mem_ord));
+#if __has_builtin(__scoped_atomic_fetch_or)
+ return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
+#else
+ return __atomic_fetch_or(&val, mask, int(mem_ord));
+#endif
}
T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_and))
- return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
- (int)(mem_scope));
- else
- return __atomic_fetch_and(&val, mask, int(mem_ord));
+#if __has_builtin(__scoped_atomic_fetch_and)
+ return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
+ (int)(mem_scope));
+#else
+ return __atomic_fetch_and(&val, mask, int(mem_ord));
+#endif
}
T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
[[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
- if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_sub))
- return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
- (int)(mem_scope));
- else
- return __atomic_fetch_sub(&val, decrement, int(mem_ord));
+#if __has_builtin(__scoped_atomic_fetch_sub)
+ return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
+ (int)(mem_scope));
+#else
+ return __atomic_fetch_sub(&val, decrement, int(mem_ord));
+#endif
}
// Set the value without using an atomic operation. This is useful
@@ -166,7 +172,7 @@ LIBC_INLINE void atomic_thread_fence([[maybe_unused]] MemoryOrder mem_ord) {
// except no instructions for memory ordering are issued. Only reordering of
// the instructions by the compiler is suppressed as order instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
-#if LIBC_HAS_BUILTIN(__atomic_signal_fence)
+#if __has_builtin(__atomic_signal_fence)
__atomic_signal_fence(static_cast<int>(mem_ord));
#else
// if the builtin is not ready, use asm as a full compiler barrier.
diff --git a/libc/src/__support/CPP/bit.h b/libc/src/__support/CPP/bit.h
index 3f2fbec94405..80f50fd221ef 100644
--- a/libc/src/__support/CPP/bit.h
+++ b/libc/src/__support/CPP/bit.h
@@ -14,14 +14,13 @@
#include "src/__support/CPP/limits.h" // numeric_limits
#include "src/__support/CPP/type_traits.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h" // LIBC_HAS_BUILTIN
#include "src/__support/macros/sanitizer.h"
#include <stdint.h>
namespace LIBC_NAMESPACE::cpp {
-#if LIBC_HAS_BUILTIN(__builtin_memcpy_inline)
+#if __has_builtin(__builtin_memcpy_inline)
#define LLVM_LIBC_HAS_BUILTIN_MEMCPY_INLINE
#endif
@@ -36,20 +35,20 @@ LIBC_INLINE constexpr cpp::enable_if_t<
To>
bit_cast(const From &from) {
MSAN_UNPOISON(&from, sizeof(From));
-#if LIBC_HAS_BUILTIN(__builtin_bit_cast)
+#if __has_builtin(__builtin_bit_cast)
return __builtin_bit_cast(To, from);
#else
To to;
char *dst = reinterpret_cast<char *>(&to);
const char *src = reinterpret_cast<const char *>(&from);
-#if LIBC_HAS_BUILTIN(__builtin_memcpy_inline)
+#if __has_builtin(__builtin_memcpy_inline)
__builtin_memcpy_inline(dst, src, sizeof(To));
#else
for (unsigned i = 0; i < sizeof(To); ++i)
dst[i] = src[i];
-#endif // LIBC_HAS_BUILTIN(__builtin_memcpy_inline)
+#endif // __has_builtin(__builtin_memcpy_inline)
return to;
-#endif // LIBC_HAS_BUILTIN(__builtin_bit_cast)
+#endif // __has_builtin(__builtin_bit_cast)
}
template <typename T>
@@ -94,7 +93,7 @@ countr_zero(T value) {
}
return zero_bits;
}
-#if LIBC_HAS_BUILTIN(__builtin_ctzs)
+#if __has_builtin(__builtin_ctzs)
ADD_SPECIALIZATION(countr_zero, unsigned short, __builtin_ctzs)
#endif
ADD_SPECIALIZATION(countr_zero, unsigned int, __builtin_ctz)
@@ -124,7 +123,7 @@ countl_zero(T value) {
}
return zero_bits;
}
-#if LIBC_HAS_BUILTIN(__builtin_clzs)
+#if __has_builtin(__builtin_clzs)
ADD_SPECIALIZATION(countl_zero, unsigned short, __builtin_clzs)
#endif
ADD_SPECIALIZATION(countl_zero, unsigned int, __builtin_clz)
@@ -242,6 +241,14 @@ LIBC_INLINE constexpr To bit_or_static_cast(const From &from) {
/// Count number of 1's aka population count or Hamming weight.
///
/// Only unsigned integral types are allowed.
+// clang-19+, gcc-14+
+#if __has_builtin(__builtin_popcountg)
+template <typename T>
+[[nodiscard]] LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_unsigned_v<T>, int>
+popcount(T value) {
+ return __builtin_popcountg(value);
+}
+#else // !__has_builtin(__builtin_popcountg)
template <typename T>
[[nodiscard]] LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_unsigned_v<T>, int>
popcount(T value) {
@@ -261,7 +268,7 @@ ADD_SPECIALIZATION(unsigned short, __builtin_popcount)
ADD_SPECIALIZATION(unsigned, __builtin_popcount)
ADD_SPECIALIZATION(unsigned long, __builtin_popcountl)
ADD_SPECIALIZATION(unsigned long long, __builtin_popcountll)
-// TODO: 128b specializations?
+#endif // __builtin_popcountg
#undef ADD_SPECIALIZATION
} // namespace LIBC_NAMESPACE::cpp
diff --git a/libc/src/__support/CPP/iterator.h b/libc/src/__support/CPP/iterator.h
index c5bfb1912c7b..b0fd5c9f22ae 100644
--- a/libc/src/__support/CPP/iterator.h
+++ b/libc/src/__support/CPP/iterator.h
@@ -20,6 +20,7 @@ namespace cpp {
template <typename T> struct iterator_traits;
template <typename T> struct iterator_traits<T *> {
using reference = T &;
+ using value_type = T;
};
template <typename Iter> class reverse_iterator {
@@ -27,6 +28,8 @@ template <typename Iter> class reverse_iterator {
public:
using reference = typename iterator_traits<Iter>::reference;
+ using value_type = typename iterator_traits<Iter>::value_type;
+ using iterator_type = Iter;
LIBC_INLINE reverse_iterator() : current() {}
LIBC_INLINE constexpr explicit reverse_iterator(Iter it) : current(it) {}
@@ -38,6 +41,38 @@ public:
LIBC_INLINE constexpr explicit reverse_iterator(const Other &it)
: current(it) {}
+ LIBC_INLINE friend constexpr bool operator==(const reverse_iterator &lhs,
+ const reverse_iterator &rhs) {
+ return lhs.base() == rhs.base();
+ }
+
+ LIBC_INLINE friend constexpr bool operator!=(const reverse_iterator &lhs,
+ const reverse_iterator &rhs) {
+ return lhs.base() != rhs.base();
+ }
+
+ LIBC_INLINE friend constexpr bool operator<(const reverse_iterator &lhs,
+ const reverse_iterator &rhs) {
+ return lhs.base() > rhs.base();
+ }
+
+ LIBC_INLINE friend constexpr bool operator<=(const reverse_iterator &lhs,
+ const reverse_iterator &rhs) {
+ return lhs.base() >= rhs.base();
+ }
+
+ LIBC_INLINE friend constexpr bool operator>(const reverse_iterator &lhs,
+ const reverse_iterator &rhs) {
+ return lhs.base() < rhs.base();
+ }
+
+ LIBC_INLINE friend constexpr bool operator>=(const reverse_iterator &lhs,
+ const reverse_iterator &rhs) {
+ return lhs.base() <= rhs.base();
+ }
+
+ LIBC_INLINE constexpr iterator_type base() const { return current; }
+
LIBC_INLINE constexpr reference operator*() const {
Iter tmp = current;
return *--tmp;
diff --git a/libc/src/__support/CPP/type_traits.h b/libc/src/__support/CPP/type_traits.h
index 697cf79d6ccc..1494aeb905e0 100644
--- a/libc/src/__support/CPP/type_traits.h
+++ b/libc/src/__support/CPP/type_traits.h
@@ -25,6 +25,7 @@
#include "src/__support/CPP/type_traits/is_base_of.h"
#include "src/__support/CPP/type_traits/is_class.h"
#include "src/__support/CPP/type_traits/is_const.h"
+#include "src/__support/CPP/type_traits/is_constant_evaluated.h"
#include "src/__support/CPP/type_traits/is_convertible.h"
#include "src/__support/CPP/type_traits/is_destructible.h"
#include "src/__support/CPP/type_traits/is_enum.h"
diff --git a/libc/src/__support/CPP/type_traits/add_pointer.h b/libc/src/__support/CPP/type_traits/add_pointer.h
index 72a764bb8ba6..1257033ee80e 100644
--- a/libc/src/__support/CPP/type_traits/add_pointer.h
+++ b/libc/src/__support/CPP/type_traits/add_pointer.h
@@ -10,7 +10,6 @@
#include "src/__support/CPP/type_traits/remove_reference.h"
#include "src/__support/CPP/type_traits/type_identity.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
diff --git a/libc/src/__support/CPP/type_traits/decay.h b/libc/src/__support/CPP/type_traits/decay.h
index a018286fddd8..f1a1200ab2ba 100644
--- a/libc/src/__support/CPP/type_traits/decay.h
+++ b/libc/src/__support/CPP/type_traits/decay.h
@@ -9,7 +9,6 @@
#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_DECAY_H
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
#include "src/__support/CPP/type_traits/add_pointer.h"
#include "src/__support/CPP/type_traits/conditional.h"
diff --git a/libc/src/__support/CPP/type_traits/is_constant_evaluated.h b/libc/src/__support/CPP/type_traits/is_constant_evaluated.h
new file mode 100644
index 000000000000..93cfd07b567f
--- /dev/null
+++ b/libc/src/__support/CPP/type_traits/is_constant_evaluated.h
@@ -0,0 +1,21 @@
+//===-- is_constant_evaluated type_traits -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_CONSTANT_EVALUATED_H
+#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_CONSTANT_EVALUATED_H
+
+#include "src/__support/macros/attributes.h"
+
+namespace LIBC_NAMESPACE::cpp {
+
+LIBC_INLINE constexpr bool is_constant_evaluated() {
+ return __builtin_is_constant_evaluated();
+}
+
+} // namespace LIBC_NAMESPACE::cpp
+
+#endif // LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_CONSTANT_EVALUATED_H
diff --git a/libc/src/__support/CPP/type_traits/is_destructible.h b/libc/src/__support/CPP/type_traits/is_destructible.h
index d47de1cc797b..f94fe309ac8f 100644
--- a/libc/src/__support/CPP/type_traits/is_destructible.h
+++ b/libc/src/__support/CPP/type_traits/is_destructible.h
@@ -16,12 +16,11 @@
#include "src/__support/CPP/type_traits/true_type.h"
#include "src/__support/CPP/type_traits/type_identity.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
// is_destructible
-#if LIBC_HAS_BUILTIN(__is_destructible)
+#if __has_builtin(__is_destructible)
template <typename T>
struct is_destructible : bool_constant<__is_destructible(T)> {};
#else
diff --git a/libc/src/__support/CPP/type_traits/is_function.h b/libc/src/__support/CPP/type_traits/is_function.h
index 557b3224484b..0eba5860ad60 100644
--- a/libc/src/__support/CPP/type_traits/is_function.h
+++ b/libc/src/__support/CPP/type_traits/is_function.h
@@ -12,12 +12,11 @@
#include "src/__support/CPP/type_traits/is_const.h"
#include "src/__support/CPP/type_traits/is_reference.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
// is_function
-#if LIBC_HAS_BUILTIN(__is_function)
+#if __has_builtin(__is_function)
template <typename T>
struct is_function : integral_constant<bool, __is_function(T)> {};
#else
diff --git a/libc/src/__support/CPP/type_traits/is_lvalue_reference.h b/libc/src/__support/CPP/type_traits/is_lvalue_reference.h
index f52e303afad2..1dff57f186a3 100644
--- a/libc/src/__support/CPP/type_traits/is_lvalue_reference.h
+++ b/libc/src/__support/CPP/type_traits/is_lvalue_reference.h
@@ -12,12 +12,11 @@
#include "src/__support/CPP/type_traits/false_type.h"
#include "src/__support/CPP/type_traits/true_type.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
// is_lvalue_reference
-#if LIBC_HAS_BUILTIN(__is_lvalue_reference)
+#if __has_builtin(__is_lvalue_reference)
template <typename T>
struct is_lvalue_reference : bool_constant<__is_lvalue_reference(T)> {};
#else
diff --git a/libc/src/__support/CPP/type_traits/is_reference.h b/libc/src/__support/CPP/type_traits/is_reference.h
index c017028edf41..bbfb2b7359c3 100644
--- a/libc/src/__support/CPP/type_traits/is_reference.h
+++ b/libc/src/__support/CPP/type_traits/is_reference.h
@@ -12,12 +12,11 @@
#include "src/__support/CPP/type_traits/false_type.h"
#include "src/__support/CPP/type_traits/true_type.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
// is_reference
-#if LIBC_HAS_BUILTIN(__is_reference)
+#if __has_builtin(__is_reference)
template <typename T> struct is_reference : bool_constant<__is_reference(T)> {};
#else
template <typename T> struct is_reference : public false_type {};
diff --git a/libc/src/__support/CPP/type_traits/is_rvalue_reference.h b/libc/src/__support/CPP/type_traits/is_rvalue_reference.h
index f0487e41c998..3efbbe6b033a 100644
--- a/libc/src/__support/CPP/type_traits/is_rvalue_reference.h
+++ b/libc/src/__support/CPP/type_traits/is_rvalue_reference.h
@@ -12,12 +12,11 @@
#include "src/__support/CPP/type_traits/false_type.h"
#include "src/__support/CPP/type_traits/true_type.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
// is_rvalue_reference
-#if LIBC_HAS_BUILTIN(__is_rvalue_reference)
+#if __has_builtin(__is_rvalue_reference)
template <typename T>
struct is_rvalue_reference : bool_constant<__is_rvalue_reference(T)> {};
#else
diff --git a/libc/src/__support/CPP/type_traits/is_trivially_copyable.h b/libc/src/__support/CPP/type_traits/is_trivially_copyable.h
index 0c3fdcc711d5..b4c825d57961 100644
--- a/libc/src/__support/CPP/type_traits/is_trivially_copyable.h
+++ b/libc/src/__support/CPP/type_traits/is_trivially_copyable.h
@@ -9,7 +9,6 @@
#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_IS_TRIVIALLY_COPYABLE_H
#include "src/__support/CPP/type_traits/integral_constant.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
diff --git a/libc/src/__support/CPP/type_traits/is_trivially_destructible.h b/libc/src/__support/CPP/type_traits/is_trivially_destructible.h
index 3345149433af..37e0e869266e 100644
--- a/libc/src/__support/CPP/type_traits/is_trivially_destructible.h
+++ b/libc/src/__support/CPP/type_traits/is_trivially_destructible.h
@@ -11,12 +11,11 @@
#include "src/__support/CPP/type_traits/bool_constant.h"
#include "src/__support/CPP/type_traits/is_destructible.h"
#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
namespace LIBC_NAMESPACE::cpp {
// is_trivially_destructible
-#if LIBC_HAS_BUILTIN(__is_trivially_destructible)
+#if __has_builtin(__is_trivially_destructible)
template <typename T>
struct is_trivially_destructible
: public bool_constant<__is_trivially_destructible(T)> {};
@@ -25,7 +24,7 @@ template <typename T>
struct is_trivially_destructible
: public bool_constant<cpp::is_destructible_v<T> &&__has_trivial_destructor(
T)> {};
-#endif // LIBC_HAS_BUILTIN(__is_trivially_destructible)
+#endif // __has_builtin(__is_trivially_destructible)
template <typename T>
LIBC_INLINE_VAR constexpr bool is_trivially_destructible_v =
is_trivially_destructible<T>::value;
diff --git a/libc/src/__support/CPP/type_traits/remove_all_extents.h b/libc/src/__support/CPP/type_traits/remove_all_extents.h
index bff6341d3e45..5941b82bbc16 100644
--- a/libc/src/__support/CPP/type_traits/remove_all_extents.h
+++ b/libc/src/__support/CPP/type_traits/remove_all_extents.h
@@ -9,14 +9,13 @@
#define LLVM_LIBC_SRC___SUPPORT_CPP_TYPE_TRAITS_REMOVE_ALL_EXTENTS_H
#include "src/__support/CPP/type_traits/type_identity.h"
-#include "src/__support/macros/config.h"
#include <stddef.h> // size_t
namespace LIBC_NAMESPACE::cpp {
// remove_all_extents
-#if LIBC_HAS_BUILTIN(__remove_all_extents)
+#if __has_builtin(__remove_all_extents)
template <typename T> using remove_all_extents_t = __remove_all_extents(T);
template <typename T>
struct remove_all_extents : cpp::type_identity<remove_all_extents_t<T>> {};
diff --git a/libc/src/__support/FPUtil/BasicOperations.h b/libc/src/__support/FPUtil/BasicOperations.h
index a19d6d0bef08..6e4156497618 100644
--- a/libc/src/__support/FPUtil/BasicOperations.h
+++ b/libc/src/__support/FPUtil/BasicOperations.h
@@ -9,10 +9,13 @@
#ifndef LLVM_LIBC_SRC___SUPPORT_FPUTIL_BASICOPERATIONS_H
#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_BASICOPERATIONS_H
+#include "FEnvImpl.h"
#include "FPBits.h"
#include "src/__support/CPP/type_traits.h"
+#include "src/__support/UInt128.h"
#include "src/__support/common.h"
+#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
namespace LIBC_NAMESPACE {
namespace fputil {
@@ -26,36 +30,136 @@ template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
LIBC_INLINE T fmin(T x, T y) {
const FPBits<T> bitx(x), bity(y);
- if (bitx.is_nan()) {
+ if (bitx.is_nan())
return y;
- } else if (bity.is_nan()) {
+ if (bity.is_nan())
return x;
- } else if (bitx.sign() != bity.sign()) {
+ if (bitx.sign() != bity.sign())
// To make sure that fmin(+0, -0) == -0 == fmin(-0, +0), whenever x and
// y has different signs and both are not NaNs, we return the number
// with negative sign.
- return (bitx.is_neg()) ? x : y;
- } else {
- return (x < y ? x : y);
- }
+ return bitx.is_neg() ? x : y;
+ return x < y ? x : y;
}
template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
LIBC_INLINE T fmax(T x, T y) {
FPBits<T> bitx(x), bity(y);
- if (bitx.is_nan()) {
+ if (bitx.is_nan())
return y;
- } else if (bity.is_nan()) {
+ if (bity.is_nan())
return x;
- } else if (bitx.sign() != bity.sign()) {
+ if (bitx.sign() != bity.sign())
// To make sure that fmax(+0, -0) == +0 == fmax(-0, +0), whenever x and
// y has different signs and both are not NaNs, we return the number
// with positive sign.
+ return bitx.is_neg() ? y : x;
+ return x > y ? x : y;
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fmaximum(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+
+ if (bitx.is_nan())
+ return x;
+ if (bity.is_nan())
+ return y;
+ if (bitx.sign() != bity.sign())
+ return (bitx.is_neg() ? y : x);
+ return x > y ? x : y;
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fminimum(T x, T y) {
+ const FPBits<T> bitx(x), bity(y);
+
+ if (bitx.is_nan())
+ return x;
+ if (bity.is_nan())
+ return y;
+ if (bitx.sign() != bity.sign())
+ return (bitx.is_neg()) ? x : y;
+ return x < y ? x : y;
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fmaximum_num(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+ if (bitx.is_signaling_nan() || bity.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ if (bitx.is_nan() && bity.is_nan())
+ return FPBits<T>::quiet_nan().get_val();
+ }
+ if (bitx.is_nan())
+ return y;
+ if (bity.is_nan())
+ return x;
+ if (bitx.sign() != bity.sign())
return (bitx.is_neg() ? y : x);
- } else {
- return (x > y ? x : y);
+ return x > y ? x : y;
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fminimum_num(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+ if (bitx.is_signaling_nan() || bity.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ if (bitx.is_nan() && bity.is_nan())
+ return FPBits<T>::quiet_nan().get_val();
}
+ if (bitx.is_nan())
+ return y;
+ if (bity.is_nan())
+ return x;
+ if (bitx.sign() != bity.sign())
+ return (bitx.is_neg() ? x : y);
+ return x < y ? x : y;
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fmaximum_mag(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+
+ if (abs(x) > abs(y))
+ return x;
+ if (abs(y) > abs(x))
+ return y;
+ return fmaximum(x, y);
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fminimum_mag(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+
+ if (abs(x) < abs(y))
+ return x;
+ if (abs(y) < abs(x))
+ return y;
+ return fminimum(x, y);
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fmaximum_mag_num(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+
+ if (abs(x) > abs(y))
+ return x;
+ if (abs(y) > abs(x))
+ return y;
+ return fmaximum_num(x, y);
+}
+
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE T fminimum_mag_num(T x, T y) {
+ FPBits<T> bitx(x), bity(y);
+
+ if (abs(x) < abs(y))
+ return x;
+ if (abs(y) < abs(x))
+ return y;
+ return fminimum_num(x, y);
}
template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
@@ -73,6 +177,69 @@ LIBC_INLINE T fdim(T x, T y) {
return (x > y ? x - y : 0);
}
+template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
+LIBC_INLINE int canonicalize(T &cx, const T &x) {
+ FPBits<T> sx(x);
+ if constexpr (get_fp_type<T>() == FPType::X86_Binary80) {
+ // All the pseudo and unnormal numbers are not canonical.
+ // More precisely :
+ // Exponent | Significand | Meaning
+ // | Bits 63-62 | Bits 61-0 |
+ // All Ones | 00 | Zero | Pseudo Infinity, Value = SNaN
+ // All Ones | 00 | Non-Zero | Pseudo NaN, Value = SNaN
+ // All Ones | 01 | Anything | Pseudo NaN, Value = SNaN
+ // | Bit 63 | Bits 62-0 |
+ // All zeroes | One | Anything | Pseudo Denormal, Value =
+ // | | | (−1)**s × m × 2**−16382
+ // All Other | Zero | Anything | Unnormal, Value = SNaN
+ // Values | | |
+ bool bit63 = sx.get_implicit_bit();
+ UInt128 mantissa = sx.get_explicit_mantissa();
+ bool bit62 = static_cast<bool>((mantissa & (1ULL << 62)) >> 62);
+ int exponent = sx.get_biased_exponent();
+ if (exponent == 0x7FFF) {
+ if (!bit63 && !bit62) {
+ if (mantissa == 0) {
+ cx = FPBits<T>::quiet_nan(sx.sign(), mantissa).get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ }
+ cx = FPBits<T>::quiet_nan(sx.sign(), mantissa).get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ } else if (!bit63 && bit62) {
+ cx = FPBits<T>::quiet_nan(sx.sign(), mantissa).get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ } else if (LIBC_UNLIKELY(sx.is_signaling_nan())) {
+ cx = FPBits<T>::quiet_nan(sx.sign(), sx.get_explicit_mantissa())
+ .get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ } else
+ cx = x;
+ } else if (exponent == 0 && bit63)
+ cx = FPBits<T>::make_value(mantissa, 0).get_val();
+ else if (exponent != 0 && !bit63) {
+ cx = FPBits<T>::quiet_nan(sx.sign(), mantissa).get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ } else if (LIBC_UNLIKELY(sx.is_signaling_nan())) {
+ cx =
+ FPBits<T>::quiet_nan(sx.sign(), sx.get_explicit_mantissa()).get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ } else
+ cx = x;
+ } else if (LIBC_UNLIKELY(sx.is_signaling_nan())) {
+ cx = FPBits<T>::quiet_nan(sx.sign(), sx.get_explicit_mantissa()).get_val();
+ raise_except_if_required(FE_INVALID);
+ return 1;
+ } else
+ cx = x;
+ return 0;
+}
+
} // namespace fputil
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/__support/FPUtil/CMakeLists.txt b/libc/src/__support/FPUtil/CMakeLists.txt
index 4ded70a675ea..ff155a19758d 100644
--- a/libc/src/__support/FPUtil/CMakeLists.txt
+++ b/libc/src/__support/FPUtil/CMakeLists.txt
@@ -6,7 +6,6 @@ add_header_library(
libc.include.fenv
libc.include.math
libc.src.__support.macros.attributes
- libc.src.__support.macros.config
libc.src.errno.errno
)
@@ -82,8 +81,11 @@ add_header_library(
BasicOperations.h
DEPENDS
.fp_bits
+ .fenv_impl
libc.src.__support.CPP.type_traits
+ libc.src.__support.uint128
libc.src.__support.common
+ libc.src.__support.macros.optimization
)
add_header_library(
diff --git a/libc/src/__support/FPUtil/FEnvImpl.h b/libc/src/__support/FPUtil/FEnvImpl.h
index a6a533dcfdf4..6086d5d3de2d 100644
--- a/libc/src/__support/FPUtil/FEnvImpl.h
+++ b/libc/src/__support/FPUtil/FEnvImpl.h
@@ -11,7 +11,6 @@
#include "include/llvm-libc-macros/math-macros.h"
#include "src/__support/macros/attributes.h" // LIBC_INLINE
-#include "src/__support/macros/config.h" // LIBC_HAS_BUILTIN
#include "src/__support/macros/properties/architectures.h"
#include "src/errno/libc_errno.h"
#include <fenv.h>
diff --git a/libc/src/__support/FPUtil/NearestIntegerOperations.h b/libc/src/__support/FPUtil/NearestIntegerOperations.h
index e890e38ba4ae..6b28e7ffb387 100644
--- a/libc/src/__support/FPUtil/NearestIntegerOperations.h
+++ b/libc/src/__support/FPUtil/NearestIntegerOperations.h
@@ -140,8 +140,9 @@ LIBC_INLINE T round(T x) {
}
}
-template <typename T, cpp::enable_if_t<cpp::is_floating_point_v<T>, int> = 0>
-LIBC_INLINE T round_using_current_rounding_mode(T x) {
+template <typename T>
+LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_floating_point_v<T>, T>
+round_using_specific_rounding_mode(T x, int rnd) {
using StorageType = typename FPBits<T>::StorageType;
FPBits<T> bits(x);
@@ -151,7 +152,6 @@ LIBC_INLINE T round_using_current_rounding_mode(T x) {
bool is_neg = bits.is_neg();
int exponent = bits.get_exponent();
- int rounding_mode = quick_get_round();
// If the exponent is greater than the most negative mantissa
// exponent, then x is already an integer.
@@ -159,20 +159,23 @@ LIBC_INLINE T round_using_current_rounding_mode(T x) {
return x;
if (exponent <= -1) {
- switch (rounding_mode) {
- case FE_DOWNWARD:
+ switch (rnd) {
+ case FP_INT_DOWNWARD:
return is_neg ? T(-1.0) : T(0.0);
- case FE_UPWARD:
+ case FP_INT_UPWARD:
return is_neg ? T(-0.0) : T(1.0);
- case FE_TOWARDZERO:
+ case FP_INT_TOWARDZERO:
return is_neg ? T(-0.0) : T(0.0);
- case FE_TONEAREST:
+ case FP_INT_TONEARESTFROMZERO:
+ if (exponent < -1)
+ return is_neg ? T(-0.0) : T(0.0); // abs(x) < 0.5
+ return is_neg ? T(-1.0) : T(1.0); // abs(x) >= 0.5
+ case FP_INT_TONEAREST:
+ default:
if (exponent <= -2 || bits.get_mantissa() == 0)
return is_neg ? T(-0.0) : T(0.0); // abs(x) <= 0.5
else
return is_neg ? T(-1.0) : T(1.0); // abs(x) > 0.5
- default:
- __builtin_unreachable();
}
}
@@ -194,14 +197,19 @@ LIBC_INLINE T round_using_current_rounding_mode(T x) {
StorageType trunc_is_odd =
new_bits.get_mantissa() & (StorageType(1) << trim_size);
- switch (rounding_mode) {
- case FE_DOWNWARD:
+ switch (rnd) {
+ case FP_INT_DOWNWARD:
return is_neg ? trunc_value - T(1.0) : trunc_value;
- case FE_UPWARD:
+ case FP_INT_UPWARD:
return is_neg ? trunc_value : trunc_value + T(1.0);
- case FE_TOWARDZERO:
+ case FP_INT_TOWARDZERO:
return trunc_value;
- case FE_TONEAREST:
+ case FP_INT_TONEARESTFROMZERO:
+ if (trim_value >= half_value)
+ return is_neg ? trunc_value - T(1.0) : trunc_value + T(1.0);
+ return trunc_value;
+ case FP_INT_TONEAREST:
+ default:
if (trim_value > half_value) {
return is_neg ? trunc_value - T(1.0) : trunc_value + T(1.0);
} else if (trim_value == half_value) {
@@ -214,11 +222,109 @@ LIBC_INLINE T round_using_current_rounding_mode(T x) {
} else {
return trunc_value;
}
+ }
+}
+
+template <typename T>
+LIBC_INLINE cpp::enable_if_t<cpp::is_floating_point_v<T>, T>
+round_using_current_rounding_mode(T x) {
+ int rounding_mode = quick_get_round();
+
+ switch (rounding_mode) {
+ case FE_DOWNWARD:
+ return round_using_specific_rounding_mode(x, FP_INT_DOWNWARD);
+ case FE_UPWARD:
+ return round_using_specific_rounding_mode(x, FP_INT_UPWARD);
+ case FE_TOWARDZERO:
+ return round_using_specific_rounding_mode(x, FP_INT_TOWARDZERO);
+ case FE_TONEAREST:
+ return round_using_specific_rounding_mode(x, FP_INT_TONEAREST);
default:
__builtin_unreachable();
}
}
+template <bool IsSigned, typename T>
+LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_floating_point_v<T>, T>
+fromfp(T x, int rnd, unsigned int width) {
+ using StorageType = typename FPBits<T>::StorageType;
+
+ constexpr StorageType EXPLICIT_BIT =
+ FPBits<T>::SIG_MASK - FPBits<T>::FRACTION_MASK;
+
+ if (width == 0U) {
+ raise_except_if_required(FE_INVALID);
+ return FPBits<T>::quiet_nan().get_val();
+ }
+
+ FPBits<T> bits(x);
+
+ if (bits.is_inf_or_nan()) {
+ raise_except_if_required(FE_INVALID);
+ return FPBits<T>::quiet_nan().get_val();
+ }
+
+ T rounded_value = round_using_specific_rounding_mode(x, rnd);
+
+ if constexpr (IsSigned) {
+ // T can't hold a finite number >= 2.0 * 2^EXP_BIAS.
+ if (width - 1 > FPBits<T>::EXP_BIAS)
+ return rounded_value;
+
+ StorageType range_exp = width - 1U + FPBits<T>::EXP_BIAS;
+ // rounded_value < -2^(width - 1)
+ T range_min =
+ FPBits<T>::create_value(Sign::NEG, range_exp, EXPLICIT_BIT).get_val();
+ if (rounded_value < range_min) {
+ raise_except_if_required(FE_INVALID);
+ return FPBits<T>::quiet_nan().get_val();
+ }
+ // rounded_value > 2^(width - 1) - 1
+ T range_max =
+ FPBits<T>::create_value(Sign::POS, range_exp, EXPLICIT_BIT).get_val() -
+ T(1.0);
+ if (rounded_value > range_max) {
+ raise_except_if_required(FE_INVALID);
+ return FPBits<T>::quiet_nan().get_val();
+ }
+
+ return rounded_value;
+ }
+
+ if (rounded_value < T(0.0)) {
+ raise_except_if_required(FE_INVALID);
+ return FPBits<T>::quiet_nan().get_val();
+ }
+
+ // T can't hold a finite number >= 2.0 * 2^EXP_BIAS.
+ if (width > FPBits<T>::EXP_BIAS)
+ return rounded_value;
+
+ StorageType range_exp = width + FPBits<T>::EXP_BIAS;
+ // rounded_value > 2^width - 1
+ T range_max =
+ FPBits<T>::create_value(Sign::POS, range_exp, EXPLICIT_BIT).get_val() -
+ T(1.0);
+ if (rounded_value > range_max) {
+ raise_except_if_required(FE_INVALID);
+ return FPBits<T>::quiet_nan().get_val();
+ }
+
+ return rounded_value;
+}
+
+template <bool IsSigned, typename T>
+LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_floating_point_v<T>, T>
+fromfpx(T x, int rnd, unsigned int width) {
+ T rounded_value = fromfp<IsSigned>(x, rnd, width);
+ FPBits<T> bits(rounded_value);
+
+ if (!bits.is_nan() && rounded_value != x)
+ raise_except_if_required(FE_INEXACT);
+
+ return rounded_value;
+}
+
namespace internal {
template <typename F, typename I,
diff --git a/libc/src/__support/FPUtil/gpu/FMA.h b/libc/src/__support/FPUtil/gpu/FMA.h
index 86bc86031496..ef1cd26a72dd 100644
--- a/libc/src/__support/FPUtil/gpu/FMA.h
+++ b/libc/src/__support/FPUtil/gpu/FMA.h
@@ -10,12 +10,12 @@
#define LLVM_LIBC_SRC___SUPPORT_FPUTIL_GPU_FMA_H
#include "src/__support/CPP/type_traits.h"
-#include "src/__support/macros/config.h"
-// These intrinsics map to the FMA instrunctions in the target ISA for the GPU.
+// These intrinsics map to the FMA instructions in the target ISA for the GPU.
// The default rounding mode generated from these will be to the nearest even.
-static_assert(LIBC_HAS_BUILTIN(__builtin_fma), "FMA builtins must be defined");
-static_assert(LIBC_HAS_BUILTIN(__builtin_fmaf), "FMA builtins must be defined");
+#if !__has_builtin(__builtin_fma) || !__has_builtin(__builtin_fmaf)
+#error "FMA builtins must be defined"
+#endif
namespace LIBC_NAMESPACE {
namespace fputil {
diff --git a/libc/src/__support/File/file.h b/libc/src/__support/File/file.h
index 2ea3843749ff..eafd3ab7d910 100644
--- a/libc/src/__support/File/file.h
+++ b/libc/src/__support/File/file.h
@@ -76,7 +76,7 @@ public:
private:
enum class FileOp : uint8_t { NONE, READ, WRITE, SEEK };
- // Platfrom specific functions which create new file objects should initialize
+ // Platform specific functions which create new file objects should initialize
// these fields suitably via the constructor. Typically, they should be simple
// syscall wrappers for the corresponding functionality.
WriteFunc *platform_write;
@@ -299,7 +299,7 @@ private:
}
};
-// The implementaiton of this function is provided by the platfrom_file
+// The implementation of this function is provided by the platform_file
// library.
ErrorOr<File *> openfile(const char *path, const char *mode);
diff --git a/libc/src/__support/OSUtil/CMakeLists.txt b/libc/src/__support/OSUtil/CMakeLists.txt
index ca3b3bf1263e..94d1042ccbb4 100644
--- a/libc/src/__support/OSUtil/CMakeLists.txt
+++ b/libc/src/__support/OSUtil/CMakeLists.txt
@@ -8,23 +8,10 @@ if(NOT TARGET ${target_os_util})
return()
endif()
-# The OSUtil is an object library in GPU mode.
-if(NOT LIBC_TARGET_OS_IS_GPU)
- add_header_library(
- osutil
- HDRS
- io.h
- quick_exit.h
- syscall.h
- DEPENDS
- ${target_os_util}
- )
-else()
- add_object_library(
- osutil
- ALIAS
- ${target_os_util}
- DEPENDS
- ${target_os_util}
- )
-endif()
+add_object_library(
+ osutil
+ ALIAS
+ ${target_os_util}
+ DEPENDS
+ ${target_os_util}
+)
diff --git a/libc/src/__support/OSUtil/baremetal/CMakeLists.txt b/libc/src/__support/OSUtil/baremetal/CMakeLists.txt
index 280ff87cf147..e78301d104c1 100644
--- a/libc/src/__support/OSUtil/baremetal/CMakeLists.txt
+++ b/libc/src/__support/OSUtil/baremetal/CMakeLists.txt
@@ -1,8 +1,10 @@
-add_header_library(
+add_object_library(
baremetal_util
+ SRCS
+ io.cpp
+ quick_exit.cpp
HDRS
io.h
- quick_exit.h
DEPENDS
libc.src.__support.common
libc.src.__support.CPP.string_view
diff --git a/libc/src/__support/OSUtil/baremetal/io.cpp b/libc/src/__support/OSUtil/baremetal/io.cpp
new file mode 100644
index 000000000000..347c7d405b0a
--- /dev/null
+++ b/libc/src/__support/OSUtil/baremetal/io.cpp
@@ -0,0 +1,22 @@
+//===---------- Baremetal implementation of IO utils ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "io.h"
+
+#include "src/__support/CPP/string_view.h"
+
+// This is intended to be provided by the vendor.
+extern "C" void __llvm_libc_log_write(const char *msg, size_t len);
+
+namespace LIBC_NAMESPACE {
+
+void write_to_stderr(cpp::string_view msg) {
+ __llvm_libc_log_write(msg.data(), msg.size());
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/__support/OSUtil/baremetal/io.h b/libc/src/__support/OSUtil/baremetal/io.h
index a50c11d4aea1..87534641b1fa 100644
--- a/libc/src/__support/OSUtil/baremetal/io.h
+++ b/libc/src/__support/OSUtil/baremetal/io.h
@@ -13,12 +13,7 @@
namespace LIBC_NAMESPACE {
-// This is intended to be provided by the vendor.
-extern "C" void __llvm_libc_log_write(const char *msg, size_t len);
-
-void write_to_stderr(cpp::string_view msg) {
- __llvm_libc_log_write(msg.data(), msg.size());
-}
+void write_to_stderr(cpp::string_view msg);
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/__support/OSUtil/baremetal/quick_exit.h b/libc/src/__support/OSUtil/baremetal/quick_exit.cpp
index 74f9142e21b8..5b6fcf42341e 100644
--- a/libc/src/__support/OSUtil/baremetal/quick_exit.h
+++ b/libc/src/__support/OSUtil/baremetal/quick_exit.cpp
@@ -6,16 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_OSUTIL_BAREMETAL_QUICK_EXIT_H
-#define LLVM_LIBC_SRC___SUPPORT_OSUTIL_BAREMETAL_QUICK_EXIT_H
-
-namespace LIBC_NAMESPACE {
+#include "src/__support/OSUtil/quick_exit.h"
// This is intended to be provided by the vendor.
-extern "C" void __llvm_libc_quick_exit(int status);
+extern "C" [[noreturn]] void __llvm_libc_quick_exit(int status);
-void quick_exit(int status) { __llvm_libc_quick_exit(status); }
+namespace LIBC_NAMESPACE {
-} // namespace LIBC_NAMESPACE
+[[noreturn]] void quick_exit(int status) { __llvm_libc_quick_exit(status); }
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_BAREMETAL_QUICK_EXIT_H
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/__support/OSUtil/darwin/CMakeLists.txt b/libc/src/__support/OSUtil/darwin/CMakeLists.txt
index a3e6b93c8e99..4241bb37684f 100644
--- a/libc/src/__support/OSUtil/darwin/CMakeLists.txt
+++ b/libc/src/__support/OSUtil/darwin/CMakeLists.txt
@@ -8,7 +8,6 @@ add_header_library(
darwin_util
HDRS
io.h
- quick_exit.h
syscall.h
DEPENDS
.${LIBC_TARGET_ARCHITECTURE}.darwin_util
diff --git a/libc/src/__support/OSUtil/darwin/quick_exit.h b/libc/src/__support/OSUtil/darwin/quick_exit.h
deleted file mode 100644
index 71647f50def5..000000000000
--- a/libc/src/__support/OSUtil/darwin/quick_exit.h
+++ /dev/null
@@ -1,26 +0,0 @@
-//===--------- Darwin implementation of a quick exit function ---*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC___SUPPORT_OSUTIL_DARWIN_QUICK_EXIT_H
-#define LLVM_LIBC_SRC___SUPPORT_OSUTIL_DARWIN_QUICK_EXIT_H
-
-#include "syscall.h" // For internal syscall function.
-
-#include "src/__support/common.h"
-
-namespace LIBC_NAMESPACE {
-
-LIBC_INLINE void quick_exit(int status) {
- for (;;) {
- LIBC_NAMESPACE::syscall_impl<long>(1 /* SYS_exit */, status);
- }
-}
-
-} // namespace LIBC_NAMESPACE
-
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_DARWIN_QUICK_EXIT_H
diff --git a/libc/src/__support/OSUtil/gpu/CMakeLists.txt b/libc/src/__support/OSUtil/gpu/CMakeLists.txt
index 8e892a7000c9..0c89f9223678 100644
--- a/libc/src/__support/OSUtil/gpu/CMakeLists.txt
+++ b/libc/src/__support/OSUtil/gpu/CMakeLists.txt
@@ -4,7 +4,6 @@ add_object_library(
quick_exit.cpp
io.cpp
HDRS
- quick_exit.h
io.h
DEPENDS
libc.src.__support.common
diff --git a/libc/src/__support/OSUtil/gpu/quick_exit.cpp b/libc/src/__support/OSUtil/gpu/quick_exit.cpp
index 1a03be0ace67..af4795905e78 100644
--- a/libc/src/__support/OSUtil/gpu/quick_exit.cpp
+++ b/libc/src/__support/OSUtil/gpu/quick_exit.cpp
@@ -6,17 +6,14 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_QUICK_EXIT_H
-#define LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_QUICK_EXIT_H
-
-#include "quick_exit.h"
+#include "src/__support/OSUtil/quick_exit.h"
#include "src/__support/RPC/rpc_client.h"
#include "src/__support/macros/properties/architectures.h"
namespace LIBC_NAMESPACE {
-void quick_exit(int status) {
+[[noreturn]] void quick_exit(int status) {
// We want to first make sure the server is listening before we exit.
rpc::Client::Port port = rpc::client.open<RPC_EXIT>();
port.send_and_recv([](rpc::Buffer *) {}, [](rpc::Buffer *) {});
@@ -29,5 +26,3 @@ void quick_exit(int status) {
}
} // namespace LIBC_NAMESPACE
-
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_QUICK_EXIT_H
diff --git a/libc/src/__support/OSUtil/linux/CMakeLists.txt b/libc/src/__support/OSUtil/linux/CMakeLists.txt
index c27f9be74648..239d11570492 100644
--- a/libc/src/__support/OSUtil/linux/CMakeLists.txt
+++ b/libc/src/__support/OSUtil/linux/CMakeLists.txt
@@ -4,11 +4,12 @@ endif()
add_subdirectory(${LIBC_TARGET_ARCHITECTURE})
-add_header_library(
+add_object_library(
linux_util
+ SRCS
+ quick_exit.cpp
HDRS
io.h
- quick_exit.h
syscall.h
DEPENDS
.${LIBC_TARGET_ARCHITECTURE}.linux_${LIBC_TARGET_ARCHITECTURE}_util
diff --git a/libc/src/__support/OSUtil/linux/quick_exit.h b/libc/src/__support/OSUtil/linux/quick_exit.cpp
index 432395584d84..51b3231d389f 100644
--- a/libc/src/__support/OSUtil/linux/quick_exit.h
+++ b/libc/src/__support/OSUtil/linux/quick_exit.cpp
@@ -6,13 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_OSUTIL_LINUX_QUICK_EXIT_H
-#define LLVM_LIBC_SRC___SUPPORT_OSUTIL_LINUX_QUICK_EXIT_H
-
-#include "syscall.h" // For internal syscall function.
-
#include "src/__support/common.h"
-
+#include "syscall.h" // For internal syscall function.
#include <sys/syscall.h> // For syscall numbers.
namespace LIBC_NAMESPACE {
@@ -22,7 +17,7 @@ namespace LIBC_NAMESPACE {
#ifdef LIBC_TARGET_ARCH_IS_X86
__attribute__((no_stack_protector))
#endif
-LIBC_INLINE void
+__attribute__((noreturn)) void
quick_exit(int status) {
for (;;) {
LIBC_NAMESPACE::syscall_impl<long>(SYS_exit_group, status);
@@ -31,5 +26,3 @@ quick_exit(int status) {
}
} // namespace LIBC_NAMESPACE
-
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_LINUX_QUICK_EXIT_H
diff --git a/libc/src/__support/OSUtil/quick_exit.h b/libc/src/__support/OSUtil/quick_exit.h
index 6c59c1afcda2..e445917059c3 100644
--- a/libc/src/__support/OSUtil/quick_exit.h
+++ b/libc/src/__support/OSUtil/quick_exit.h
@@ -9,17 +9,10 @@
#ifndef LLVM_LIBC_SRC___SUPPORT_OSUTIL_QUICK_EXIT_H
#define LLVM_LIBC_SRC___SUPPORT_OSUTIL_QUICK_EXIT_H
-#include "src/__support/macros/properties/architectures.h"
+namespace LIBC_NAMESPACE {
-#if defined(LIBC_TARGET_ARCH_IS_GPU)
-#include "gpu/quick_exit.h"
-#elif defined(__APPLE__)
-#include "darwin/quick_exit.h"
-#elif defined(__linux__)
-#include "linux/quick_exit.h"
-#elif defined(__ELF__)
-// TODO: Ideally we would have LIBC_TARGET_OS_IS_BAREMETAL.
-#include "baremetal/quick_exit.h"
-#endif
+[[noreturn]] void quick_exit(int status);
+
+} // namespace LIBC_NAMESPACE
#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_QUICK_EXIT_H
diff --git a/libc/src/__support/UInt.h b/libc/src/__support/UInt.h
index df01e081e3c1..282efdba1c5f 100644
--- a/libc/src/__support/UInt.h
+++ b/libc/src/__support/UInt.h
@@ -1082,6 +1082,17 @@ bit_cast(const UInt<Bits> &from) {
return cpp::bit_cast<To>(from.val);
}
+// Specialization of cpp::popcount ('bit.h') for BigInt.
+template <typename T>
+[[nodiscard]] LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, int>
+popcount(T value) {
+ int bits = 0;
+ for (auto word : value.val)
+ if (word)
+ bits += popcount(word);
+ return bits;
+}
+
// Specialization of cpp::has_single_bit ('bit.h') for BigInt.
template <typename T>
[[nodiscard]] LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, bool>
@@ -1218,6 +1229,49 @@ LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, T> mask_leading_ones() {
return out;
}
+// Specialization of count_zeros ('math_extras.h') for BigInt.
+template <typename T>
+[[nodiscard]]
+LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, int>
+count_zeros(T value) {
+ return cpp::popcount(~value);
+}
+
+// Specialization of first_leading_zero ('math_extras.h') for BigInt.
+template <typename T>
+[[nodiscard]]
+LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, int>
+first_leading_zero(T value) {
+ return value == cpp::numeric_limits<T>::max() ? 0
+ : cpp::countl_one(value) + 1;
+}
+
+// Specialization of first_leading_one ('math_extras.h') for BigInt.
+template <typename T>
+[[nodiscard]]
+LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, int>
+first_leading_one(T value) {
+ return first_leading_zero(~value);
+}
+
+// Specialization of first_trailing_zero ('math_extras.h') for BigInt.
+template <typename T>
+[[nodiscard]]
+LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, int>
+first_trailing_zero(T value) {
+ return value == cpp::numeric_limits<T>::max() ? 0
+ : cpp::countr_zero(~value) + 1;
+}
+
+// Specialization of first_trailing_one ('math_extras.h') for BigInt.
+template <typename T>
+[[nodiscard]]
+LIBC_INLINE constexpr cpp::enable_if_t<is_big_int_v<T>, int>
+first_trailing_one(T value) {
+ return value == cpp::numeric_limits<T>::max() ? 0
+ : cpp::countr_zero(value) + 1;
+}
+
} // namespace LIBC_NAMESPACE
#endif // LLVM_LIBC_SRC___SUPPORT_UINT_H
diff --git a/libc/src/__support/char_vector.h b/libc/src/__support/char_vector.h
index 955abdc1fa5a..d39310e09dd7 100644
--- a/libc/src/__support/char_vector.h
+++ b/libc/src/__support/char_vector.h
@@ -11,8 +11,8 @@
#include "src/__support/common.h" // LIBC_INLINE
-#include <stddef.h>
-#include <stdlib.h> // For allocation.
+#include <stddef.h> // size_t
+#include <stdlib.h> // malloc, realloc, free
namespace LIBC_NAMESPACE {
@@ -46,7 +46,7 @@ public:
if (cur_str == local_buffer) {
char *new_str;
new_str = reinterpret_cast<char *>(malloc(cur_buff_size));
- if (new_str == NULL) {
+ if (new_str == nullptr) {
return false;
}
// TODO: replace with inline memcpy
@@ -55,7 +55,7 @@ public:
cur_str = new_str;
} else {
cur_str = reinterpret_cast<char *>(realloc(cur_str, cur_buff_size));
- if (cur_str == NULL) {
+ if (cur_str == nullptr) {
return false;
}
}
diff --git a/libc/src/__support/fixedvector.h b/libc/src/__support/fixedvector.h
index fff905d8c6c4..81747ee10067 100644
--- a/libc/src/__support/fixedvector.h
+++ b/libc/src/__support/fixedvector.h
@@ -11,6 +11,8 @@
#include "src/__support/CPP/array.h"
+#include "src/__support/CPP/iterator.h"
+
namespace LIBC_NAMESPACE {
// A fixed size data store backed by an underlying cpp::array data structure. It
@@ -55,6 +57,12 @@ public:
// matches the `destroy` API of those other data structures so that users
// can easily swap one data structure for the other.
static void destroy(FixedVector<T, CAPACITY> *store) { store->reset(); }
+
+ using reverse_iterator = typename cpp::array<T, CAPACITY>::reverse_iterator;
+ LIBC_INLINE constexpr reverse_iterator rbegin() {
+ return reverse_iterator{&store[item_count]};
+ }
+ LIBC_INLINE constexpr reverse_iterator rend() { return store.rend(); }
};
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/__support/macros/config.h b/libc/src/__support/macros/config.h
index fcc8f551a783..3f200f0d62ba 100644
--- a/libc/src/__support/macros/config.h
+++ b/libc/src/__support/macros/config.h
@@ -13,24 +13,6 @@
#ifndef LLVM_LIBC_SRC___SUPPORT_MACROS_CONFIG_H
#define LLVM_LIBC_SRC___SUPPORT_MACROS_CONFIG_H
-// LIBC_HAS_BUILTIN()
-//
-// Checks whether the compiler supports a Clang Feature Checking Macro, and if
-// so, checks whether it supports the provided builtin function "x" where x
-// is one of the functions noted in
-// https://clang.llvm.org/docs/LanguageExtensions.html
-//
-// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check.
-// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html
-
-// Compiler builtin-detection.
-// clang.llvm.org/docs/LanguageExtensions.html#has-builtin
-#ifdef __has_builtin
-#define LIBC_HAS_BUILTIN(x) __has_builtin(x)
-#else
-#define LIBC_HAS_BUILTIN(x) 0
-#endif
-
// Compiler feature-detection.
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
#ifdef __has_feature
@@ -39,12 +21,4 @@
#define LIBC_HAS_FEATURE(f) 0
#endif
-// Compiler attribute-detection.
-// https://clang.llvm.org/docs/LanguageExtensions.html#has-attribute
-#ifdef __has_attribute
-#define LIBC_HAS_ATTRIBUTE(f) __has_attribute(f)
-#else
-#define LIBC_HAS_ATTRIBUTE(f) 0
-#endif
-
#endif // LLVM_LIBC_SRC___SUPPORT_MACROS_CONFIG_H
diff --git a/libc/src/__support/macros/optimization.h b/libc/src/__support/macros/optimization.h
index ae97efcaa417..59886ca44be1 100644
--- a/libc/src/__support/macros/optimization.h
+++ b/libc/src/__support/macros/optimization.h
@@ -11,7 +11,6 @@
#define LLVM_LIBC_SRC___SUPPORT_MACROS_OPTIMIZATION_H
#include "src/__support/macros/attributes.h" // LIBC_INLINE
-#include "src/__support/macros/config.h" // LIBC_HAS_BUILTIN
#include "src/__support/macros/properties/compiler.h" // LIBC_COMPILER_IS_CLANG
// We use a template to implement likely/unlikely to make sure that we don't
diff --git a/libc/src/__support/macros/sanitizer.h b/libc/src/__support/macros/sanitizer.h
index fc66c2005c42..bd9b62b7121a 100644
--- a/libc/src/__support/macros/sanitizer.h
+++ b/libc/src/__support/macros/sanitizer.h
@@ -47,8 +47,7 @@
// Functions to unpoison memory
//-----------------------------------------------------------------------------
-#if defined(LIBC_HAVE_MEMORY_SANITIZER) && \
- LIBC_HAS_BUILTIN(__builtin_constant_p)
+#if defined(LIBC_HAVE_MEMORY_SANITIZER) && __has_builtin(__builtin_constant_p)
// Only perform MSAN unpoison in non-constexpr context.
#include <sanitizer/msan_interface.h>
#define MSAN_UNPOISON(addr, size) \
diff --git a/libc/src/__support/math_extras.h b/libc/src/__support/math_extras.h
index 28ee1be8b999..70a8800b285d 100644
--- a/libc/src/__support/math_extras.h
+++ b/libc/src/__support/math_extras.h
@@ -14,7 +14,6 @@
#include "src/__support/CPP/limits.h" // CHAR_BIT, numeric_limits
#include "src/__support/CPP/type_traits.h" // is_unsigned_v
#include "src/__support/macros/attributes.h" // LIBC_INLINE
-#include "src/__support/macros/config.h" // LIBC_HAS_BUILTIN
namespace LIBC_NAMESPACE {
@@ -61,7 +60,7 @@ add_with_carry(T a, T b, T carry_in) {
return add_with_carry_const<T>(a, b, carry_in);
}
-#if LIBC_HAS_BUILTIN(__builtin_addc)
+#if __has_builtin(__builtin_addc)
// https://clang.llvm.org/docs/LanguageExtensions.html#multiprecision-arithmetic-builtins
template <>
@@ -129,7 +128,7 @@ add_with_carry<unsigned long long>(unsigned long long a, unsigned long long b,
}
}
-#endif // LIBC_HAS_BUILTIN(__builtin_addc)
+#endif // __has_builtin(__builtin_addc)
// Subtract with borrow
template <typename T> struct DiffBorrow {
@@ -157,7 +156,7 @@ sub_with_borrow(T a, T b, T borrow_in) {
return sub_with_borrow_const<T>(a, b, borrow_in);
}
-#if LIBC_HAS_BUILTIN(__builtin_subc)
+#if __has_builtin(__builtin_subc)
// https://clang.llvm.org/docs/LanguageExtensions.html#multiprecision-arithmetic-builtins
template <>
@@ -225,7 +224,7 @@ sub_with_borrow<unsigned long long>(unsigned long long a, unsigned long long b,
}
}
-#endif // LIBC_HAS_BUILTIN(__builtin_subc)
+#endif // __has_builtin(__builtin_subc)
template <typename T>
[[nodiscard]] LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_unsigned_v<T>, int>
diff --git a/libc/src/__support/memory_size.h b/libc/src/__support/memory_size.h
index 7bd16a1695be..491123bbabf3 100644
--- a/libc/src/__support/memory_size.h
+++ b/libc/src/__support/memory_size.h
@@ -19,7 +19,7 @@
namespace LIBC_NAMESPACE {
namespace internal {
template <class T> LIBC_INLINE bool mul_overflow(T a, T b, T *res) {
-#if LIBC_HAS_BUILTIN(__builtin_mul_overflow)
+#if __has_builtin(__builtin_mul_overflow)
return __builtin_mul_overflow(a, b, res);
#else
T max = cpp::numeric_limits<T>::max();
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 5e2e6e699d0e..b67ee3a8e0df 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -59,6 +59,11 @@ add_math_entrypoint_object(atan2f)
add_math_entrypoint_object(atanh)
add_math_entrypoint_object(atanhf)
+add_math_entrypoint_object(canonicalize)
+add_math_entrypoint_object(canonicalizef)
+add_math_entrypoint_object(canonicalizef128)
+add_math_entrypoint_object(canonicalizel)
+
add_math_entrypoint_object(ceil)
add_math_entrypoint_object(ceilf)
add_math_entrypoint_object(ceill)
@@ -117,6 +122,46 @@ add_math_entrypoint_object(fminf)
add_math_entrypoint_object(fminl)
add_math_entrypoint_object(fminf128)
+add_math_entrypoint_object(fmaximum)
+add_math_entrypoint_object(fmaximumf)
+add_math_entrypoint_object(fmaximuml)
+add_math_entrypoint_object(fmaximumf128)
+
+add_math_entrypoint_object(fmaximum_num)
+add_math_entrypoint_object(fmaximum_numf)
+add_math_entrypoint_object(fmaximum_numl)
+add_math_entrypoint_object(fmaximum_numf128)
+
+add_math_entrypoint_object(fmaximum_mag)
+add_math_entrypoint_object(fmaximum_magf)
+add_math_entrypoint_object(fmaximum_magl)
+add_math_entrypoint_object(fmaximum_magf128)
+
+add_math_entrypoint_object(fmaximum_mag_num)
+add_math_entrypoint_object(fmaximum_mag_numf)
+add_math_entrypoint_object(fmaximum_mag_numl)
+add_math_entrypoint_object(fmaximum_mag_numf128)
+
+add_math_entrypoint_object(fminimum)
+add_math_entrypoint_object(fminimumf)
+add_math_entrypoint_object(fminimuml)
+add_math_entrypoint_object(fminimumf128)
+
+add_math_entrypoint_object(fminimum_num)
+add_math_entrypoint_object(fminimum_numf)
+add_math_entrypoint_object(fminimum_numl)
+add_math_entrypoint_object(fminimum_numf128)
+
+add_math_entrypoint_object(fminimum_mag)
+add_math_entrypoint_object(fminimum_magf)
+add_math_entrypoint_object(fminimum_magl)
+add_math_entrypoint_object(fminimum_magf128)
+
+add_math_entrypoint_object(fminimum_mag_num)
+add_math_entrypoint_object(fminimum_mag_numf)
+add_math_entrypoint_object(fminimum_mag_numl)
+add_math_entrypoint_object(fminimum_mag_numf128)
+
add_math_entrypoint_object(fmod)
add_math_entrypoint_object(fmodf)
add_math_entrypoint_object(fmodl)
@@ -127,6 +172,16 @@ add_math_entrypoint_object(frexpf)
add_math_entrypoint_object(frexpl)
add_math_entrypoint_object(frexpf128)
+add_math_entrypoint_object(fromfp)
+add_math_entrypoint_object(fromfpf)
+add_math_entrypoint_object(fromfpl)
+add_math_entrypoint_object(fromfpf128)
+
+add_math_entrypoint_object(fromfpx)
+add_math_entrypoint_object(fromfpxf)
+add_math_entrypoint_object(fromfpxl)
+add_math_entrypoint_object(fromfpxf128)
+
add_math_entrypoint_object(hypot)
add_math_entrypoint_object(hypotf)
@@ -267,3 +322,13 @@ add_math_entrypoint_object(trunc)
add_math_entrypoint_object(truncf)
add_math_entrypoint_object(truncl)
add_math_entrypoint_object(truncf128)
+
+add_math_entrypoint_object(ufromfp)
+add_math_entrypoint_object(ufromfpf)
+add_math_entrypoint_object(ufromfpl)
+add_math_entrypoint_object(ufromfpf128)
+
+add_math_entrypoint_object(ufromfpx)
+add_math_entrypoint_object(ufromfpxf)
+add_math_entrypoint_object(ufromfpxl)
+add_math_entrypoint_object(ufromfpxf128)
diff --git a/libc/src/math/canonicalize.h b/libc/src/math/canonicalize.h
new file mode 100644
index 000000000000..b7b5959fb667
--- /dev/null
+++ b/libc/src/math/canonicalize.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for canonicalize -------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_CANONICALIZE_H
+#define LLVM_LIBC_SRC_MATH_CANONICALIZE_H
+
+namespace LIBC_NAMESPACE {
+
+int canonicalize(double *cx, const double *x);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_CANONICALIZE_H
diff --git a/libc/src/math/canonicalizef.h b/libc/src/math/canonicalizef.h
new file mode 100644
index 000000000000..556607f13349
--- /dev/null
+++ b/libc/src/math/canonicalizef.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for canonicalizef ------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_CANONICALIZEF_H
+#define LLVM_LIBC_SRC_MATH_CANONICALIZEF_H
+
+namespace LIBC_NAMESPACE {
+
+int canonicalizef(float *cx, const float *x);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_CANONICALIZEF_H
diff --git a/libc/src/math/canonicalizef128.h b/libc/src/math/canonicalizef128.h
new file mode 100644
index 000000000000..6db800947537
--- /dev/null
+++ b/libc/src/math/canonicalizef128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for canonicalizef128 ---------------*-C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_CANONICALIZEF128_H
+#define LLVM_LIBC_SRC_MATH_CANONICALIZEF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+int canonicalizef128(float128 *cx, const float128 *x);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_CANONICALIZEF128_H
diff --git a/libc/src/math/canonicalizel.h b/libc/src/math/canonicalizel.h
new file mode 100644
index 000000000000..1cab29e8e8b1
--- /dev/null
+++ b/libc/src/math/canonicalizel.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for canonicalizel ------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_CANONICALIZEL_H
+#define LLVM_LIBC_SRC_MATH_CANONICALIZEL_H
+
+namespace LIBC_NAMESPACE {
+
+int canonicalizel(long double *cx, const long double *x);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_CANONICALIZEL_H
diff --git a/libc/src/math/docs/add_math_function.md b/libc/src/math/docs/add_math_function.md
index f8bc8a3bdd8b..d1bca222b428 100644
--- a/libc/src/math/docs/add_math_function.md
+++ b/libc/src/math/docs/add_math_function.md
@@ -177,7 +177,7 @@ implementation (which is very often glibc).
- Build and Run a specific unit test:
```
- $ ninja libc.test.src.math.<func>_test
+ $ ninja libc.test.src.math.<func>_test.__unit__
$ projects/libc/test/src/math/libc.test.src.math.<func>_test
```
diff --git a/libc/src/math/fmaximum.h b/libc/src/math/fmaximum.h
new file mode 100644
index 000000000000..8eac02b17b79
--- /dev/null
+++ b/libc/src/math/fmaximum.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum --------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_H
+
+namespace LIBC_NAMESPACE {
+
+double fmaximum(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_H
diff --git a/libc/src/math/fmaximum_mag.h b/libc/src/math/fmaximum_mag.h
new file mode 100644
index 000000000000..31b7c0fcf7ee
--- /dev/null
+++ b/libc/src/math/fmaximum_mag.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_mag------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_H
+
+namespace LIBC_NAMESPACE {
+
+double fmaximum_mag(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_H
diff --git a/libc/src/__support/OSUtil/gpu/quick_exit.h b/libc/src/math/fmaximum_mag_num.h
index b51385defbc0..c4ff243846e0 100644
--- a/libc/src/__support/OSUtil/gpu/quick_exit.h
+++ b/libc/src/math/fmaximum_mag_num.h
@@ -1,4 +1,4 @@
-//===---------- GPU implementation of a quick exit function -----*- C++ -*-===//
+//===-- Implementation header for fmaximum_mag_num---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_QUICK_EXIT_H
-#define LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_QUICK_EXIT_H
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUM_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUM_H
namespace LIBC_NAMESPACE {
-void quick_exit(int status);
+double fmaximum_mag_num(double x, double y);
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC___SUPPORT_OSUTIL_GPU_QUICK_EXIT_H
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUM_H
diff --git a/libc/src/math/fmaximum_mag_numf.h b/libc/src/math/fmaximum_mag_numf.h
new file mode 100644
index 000000000000..702903ab8bcf
--- /dev/null
+++ b/libc/src/math/fmaximum_mag_numf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_mag_numf ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUMF_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUMF_H
+
+namespace LIBC_NAMESPACE {
+
+float fmaximum_mag_numf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUMF_H
diff --git a/libc/src/math/fmaximum_mag_numf128.h b/libc/src/math/fmaximum_mag_numf128.h
new file mode 100644
index 000000000000..2afae7fc37c4
--- /dev/null
+++ b/libc/src/math/fmaximum_mag_numf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fmaximum_mag_numf128 -------------------*- C++
+// -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUMF128_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUMF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fmaximum_mag_numf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUMF128_H
diff --git a/libc/src/math/fmaximum_mag_numl.h b/libc/src/math/fmaximum_mag_numl.h
new file mode 100644
index 000000000000..32f9ae9708a6
--- /dev/null
+++ b/libc/src/math/fmaximum_mag_numl.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_mag_numl ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUML_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUML_H
+
+namespace LIBC_NAMESPACE {
+
+long double fmaximum_mag_numl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAG_NUML_H
diff --git a/libc/src/math/fmaximum_magf.h b/libc/src/math/fmaximum_magf.h
new file mode 100644
index 000000000000..1bfcc795f8f8
--- /dev/null
+++ b/libc/src/math/fmaximum_magf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_magf -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGF_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGF_H
+
+namespace LIBC_NAMESPACE {
+
+float fmaximum_magf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGF_H
diff --git a/libc/src/math/fmaximum_magf128.h b/libc/src/math/fmaximum_magf128.h
new file mode 100644
index 000000000000..23c466b74cce
--- /dev/null
+++ b/libc/src/math/fmaximum_magf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fmaximum_magf128 ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGF128_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fmaximum_magf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGF128_H
diff --git a/libc/src/math/fmaximum_magl.h b/libc/src/math/fmaximum_magl.h
new file mode 100644
index 000000000000..23b283cb5dbc
--- /dev/null
+++ b/libc/src/math/fmaximum_magl.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_magl -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGL_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGL_H
+
+namespace LIBC_NAMESPACE {
+
+long double fmaximum_magl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_MAGL_H
diff --git a/libc/src/math/fmaximum_num.h b/libc/src/math/fmaximum_num.h
new file mode 100644
index 000000000000..ce3ce12ca64b
--- /dev/null
+++ b/libc/src/math/fmaximum_num.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_num--------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_NUM_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_NUM_H
+
+namespace LIBC_NAMESPACE {
+
+double fmaximum_num(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_NUM_H
diff --git a/libc/src/math/fmaximum_numf.h b/libc/src/math/fmaximum_numf.h
new file mode 100644
index 000000000000..b3243ed16be9
--- /dev/null
+++ b/libc/src/math/fmaximum_numf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_numf -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_NUMF_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_NUMF_H
+
+namespace LIBC_NAMESPACE {
+
+float fmaximum_numf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_NUMF_H
diff --git a/libc/src/math/fmaximum_numf128.h b/libc/src/math/fmaximum_numf128.h
new file mode 100644
index 000000000000..d55183ce88cc
--- /dev/null
+++ b/libc/src/math/fmaximum_numf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fmaximum_numf128 ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_NUMF128_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_NUMF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fmaximum_numf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_NUMF128_H
diff --git a/libc/src/math/fmaximum_numl.h b/libc/src/math/fmaximum_numl.h
new file mode 100644
index 000000000000..f668cbdf7315
--- /dev/null
+++ b/libc/src/math/fmaximum_numl.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximum_numl -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUM_NUML_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUM_NUML_H
+
+namespace LIBC_NAMESPACE {
+
+long double fmaximum_numl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUM_NUML_H
diff --git a/libc/src/math/fmaximumf.h b/libc/src/math/fmaximumf.h
new file mode 100644
index 000000000000..4eee696ce152
--- /dev/null
+++ b/libc/src/math/fmaximumf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximumf -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUMF_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUMF_H
+
+namespace LIBC_NAMESPACE {
+
+float fmaximumf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUMF_H
diff --git a/libc/src/math/fmaximumf128.h b/libc/src/math/fmaximumf128.h
new file mode 100644
index 000000000000..4a214ef096e1
--- /dev/null
+++ b/libc/src/math/fmaximumf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fmaximumf128 ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUMF128_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUMF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fmaximumf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUMF128_H
diff --git a/libc/src/math/fmaximuml.h b/libc/src/math/fmaximuml.h
new file mode 100644
index 000000000000..ba8a8b12110c
--- /dev/null
+++ b/libc/src/math/fmaximuml.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fmaximuml -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMAXIMUML_H
+#define LLVM_LIBC_SRC_MATH_FMAXIMUML_H
+
+namespace LIBC_NAMESPACE {
+
+long double fmaximuml(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMAXIMUML_H
diff --git a/libc/src/math/fminimum.h b/libc/src/math/fminimum.h
new file mode 100644
index 000000000000..9d39b1bdd3db
--- /dev/null
+++ b/libc/src/math/fminimum.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_H
+
+namespace LIBC_NAMESPACE {
+
+double fminimum(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_H
diff --git a/libc/src/math/fminimum_mag.h b/libc/src/math/fminimum_mag.h
new file mode 100644
index 000000000000..10b242e6026e
--- /dev/null
+++ b/libc/src/math/fminimum_mag.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_mag--------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_H
+
+namespace LIBC_NAMESPACE {
+
+double fminimum_mag(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_H
diff --git a/libc/src/math/fminimum_mag_num.h b/libc/src/math/fminimum_mag_num.h
new file mode 100644
index 000000000000..eb1823018851
--- /dev/null
+++ b/libc/src/math/fminimum_mag_num.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_mag_num------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUM_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUM_H
+
+namespace LIBC_NAMESPACE {
+
+double fminimum_mag_num(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUM_H
diff --git a/libc/src/math/fminimum_mag_numf.h b/libc/src/math/fminimum_mag_numf.h
new file mode 100644
index 000000000000..809199091139
--- /dev/null
+++ b/libc/src/math/fminimum_mag_numf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_mag_numf ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUMF_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUMF_H
+
+namespace LIBC_NAMESPACE {
+
+float fminimum_mag_numf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUMF_H
diff --git a/libc/src/math/fminimum_mag_numf128.h b/libc/src/math/fminimum_mag_numf128.h
new file mode 100644
index 000000000000..803c5e641d17
--- /dev/null
+++ b/libc/src/math/fminimum_mag_numf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fminimum_mag_numf128 -------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUMF128_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUMF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fminimum_mag_numf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUMF128_H
diff --git a/libc/src/math/fminimum_mag_numl.h b/libc/src/math/fminimum_mag_numl.h
new file mode 100644
index 000000000000..fdbb18328069
--- /dev/null
+++ b/libc/src/math/fminimum_mag_numl.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_mag_numl ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUML_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUML_H
+
+namespace LIBC_NAMESPACE {
+
+long double fminimum_mag_numl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAG_NUML_H
diff --git a/libc/src/math/fminimum_magf.h b/libc/src/math/fminimum_magf.h
new file mode 100644
index 000000000000..6209340074d2
--- /dev/null
+++ b/libc/src/math/fminimum_magf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_magf -------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAGF_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAGF_H
+
+namespace LIBC_NAMESPACE {
+
+float fminimum_magf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAGF_H
diff --git a/libc/src/math/fminimum_magf128.h b/libc/src/math/fminimum_magf128.h
new file mode 100644
index 000000000000..05bd163be97c
--- /dev/null
+++ b/libc/src/math/fminimum_magf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fminimum_magf128 ----------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAGF128_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAGF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fminimum_magf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAGF128_H
diff --git a/libc/src/math/fminimum_magl.h b/libc/src/math/fminimum_magl.h
new file mode 100644
index 000000000000..bcda35ce3bd2
--- /dev/null
+++ b/libc/src/math/fminimum_magl.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_magl ----------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_MAGL_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_MAGL_H
+
+namespace LIBC_NAMESPACE {
+
+long double fminimum_magl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_MAGL_H
diff --git a/libc/src/math/fminimum_num.h b/libc/src/math/fminimum_num.h
new file mode 100644
index 000000000000..4c864cba8487
--- /dev/null
+++ b/libc/src/math/fminimum_num.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_num -----------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_NUM_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_NUM_H
+
+namespace LIBC_NAMESPACE {
+
+double fminimum_num(double x, double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_NUM_H
diff --git a/libc/src/math/fminimum_numf.h b/libc/src/math/fminimum_numf.h
new file mode 100644
index 000000000000..ac4b08b292be
--- /dev/null
+++ b/libc/src/math/fminimum_numf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_numf ----------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_NUMF_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_NUMF_H
+
+namespace LIBC_NAMESPACE {
+
+float fminimum_numf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_NUMF_H
diff --git a/libc/src/math/fminimum_numf128.h b/libc/src/math/fminimum_numf128.h
new file mode 100644
index 000000000000..00f8960ff127
--- /dev/null
+++ b/libc/src/math/fminimum_numf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fminimum_numf128 -------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_NUMF128_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_NUMF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fminimum_numf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_NUMF128_H
diff --git a/libc/src/math/fminimum_numl.h b/libc/src/math/fminimum_numl.h
new file mode 100644
index 000000000000..0da204e72c74
--- /dev/null
+++ b/libc/src/math/fminimum_numl.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimum_numl ----------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUM_NUML_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUM_NUML_H
+
+namespace LIBC_NAMESPACE {
+
+long double fminimum_numl(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUM_NUML_H
diff --git a/libc/src/math/fminimumf.h b/libc/src/math/fminimumf.h
new file mode 100644
index 000000000000..424309f3a531
--- /dev/null
+++ b/libc/src/math/fminimumf.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimumf --------------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUMF_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUMF_H
+
+namespace LIBC_NAMESPACE {
+
+float fminimumf(float x, float y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUMF_H
diff --git a/libc/src/math/fminimumf128.h b/libc/src/math/fminimumf128.h
new file mode 100644
index 000000000000..7ff019072737
--- /dev/null
+++ b/libc/src/math/fminimumf128.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for fminimumf128 -----------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUMF128_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUMF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fminimumf128(float128 x, float128 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUMF128_H
diff --git a/libc/src/math/fminimuml.h b/libc/src/math/fminimuml.h
new file mode 100644
index 000000000000..b9cc321354a2
--- /dev/null
+++ b/libc/src/math/fminimuml.h
@@ -0,0 +1,19 @@
+//===-- Implementation header for fminimuml --------------------*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMINIMUML_H
+#define LLVM_LIBC_SRC_MATH_FMINIMUML_H
+
+namespace LIBC_NAMESPACE {
+
+long double fminimuml(long double x, long double y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMINIMUML_H
diff --git a/libc/src/math/fromfp.h b/libc/src/math/fromfp.h
new file mode 100644
index 000000000000..d3de2dd34608
--- /dev/null
+++ b/libc/src/math/fromfp.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fromfp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFP_H
+#define LLVM_LIBC_SRC_MATH_FROMFP_H
+
+namespace LIBC_NAMESPACE {
+
+double fromfp(double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFP_H
diff --git a/libc/src/math/fromfpf.h b/libc/src/math/fromfpf.h
new file mode 100644
index 000000000000..11d432148d01
--- /dev/null
+++ b/libc/src/math/fromfpf.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fromfpf -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPF_H
+#define LLVM_LIBC_SRC_MATH_FROMFPF_H
+
+namespace LIBC_NAMESPACE {
+
+float fromfpf(float x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPF_H
diff --git a/libc/src/math/fromfpf128.h b/libc/src/math/fromfpf128.h
new file mode 100644
index 000000000000..5f85fde570a0
--- /dev/null
+++ b/libc/src/math/fromfpf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fromfpf128 --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPF128_H
+#define LLVM_LIBC_SRC_MATH_FROMFPF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fromfpf128(float128 x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPF128_H
diff --git a/libc/src/math/fromfpl.h b/libc/src/math/fromfpl.h
new file mode 100644
index 000000000000..dd8e1eebdea9
--- /dev/null
+++ b/libc/src/math/fromfpl.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fromfpl -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPL_H
+#define LLVM_LIBC_SRC_MATH_FROMFPL_H
+
+namespace LIBC_NAMESPACE {
+
+long double fromfpl(long double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPL_H
diff --git a/libc/src/math/fromfpx.h b/libc/src/math/fromfpx.h
new file mode 100644
index 000000000000..3fc96e1e648b
--- /dev/null
+++ b/libc/src/math/fromfpx.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fromfpx -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPX_H
+#define LLVM_LIBC_SRC_MATH_FROMFPX_H
+
+namespace LIBC_NAMESPACE {
+
+double fromfpx(double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPX_H
diff --git a/libc/src/math/fromfpxf.h b/libc/src/math/fromfpxf.h
new file mode 100644
index 000000000000..b55bc4c75139
--- /dev/null
+++ b/libc/src/math/fromfpxf.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fromfpxf ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPXF_H
+#define LLVM_LIBC_SRC_MATH_FROMFPXF_H
+
+namespace LIBC_NAMESPACE {
+
+float fromfpxf(float x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPXF_H
diff --git a/libc/src/math/fromfpxf128.h b/libc/src/math/fromfpxf128.h
new file mode 100644
index 000000000000..88932646cdcc
--- /dev/null
+++ b/libc/src/math/fromfpxf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fromfpxf128 -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPXF128_H
+#define LLVM_LIBC_SRC_MATH_FROMFPXF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 fromfpxf128(float128 x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPXF128_H
diff --git a/libc/src/math/fromfpxl.h b/libc/src/math/fromfpxl.h
new file mode 100644
index 000000000000..545288834b8f
--- /dev/null
+++ b/libc/src/math/fromfpxl.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for fromfpxl ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FROMFPXL_H
+#define LLVM_LIBC_SRC_MATH_FROMFPXL_H
+
+namespace LIBC_NAMESPACE {
+
+long double fromfpxl(long double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FROMFPXL_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 176c32e47768..4d9b91150d02 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -1,4 +1,52 @@
add_entrypoint_object(
+ canonicalize
+ SRCS
+ canonicalize.cpp
+ HDRS
+ ../canonicalize.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+)
+
+add_entrypoint_object(
+ canonicalizef
+ SRCS
+ canonicalizef.cpp
+ HDRS
+ ../canonicalizef.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+)
+
+add_entrypoint_object(
+ canonicalizef128
+ SRCS
+ canonicalizef128.cpp
+ HDRS
+ ../canonicalizef128.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+)
+
+add_entrypoint_object(
+ canonicalizel
+ SRCS
+ canonicalizel.cpp
+ HDRS
+ ../canonicalizel.h
+ COMPILE_OPTIONS
+ -O3
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+)
+
+add_entrypoint_object(
ceil
SRCS
ceil.cpp
@@ -1546,6 +1594,400 @@ add_entrypoint_object(
)
add_entrypoint_object(
+ fmaximum
+ SRCS
+ fmaximum.cpp
+ HDRS
+ ../fmaximum.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximumf
+ SRCS
+ fmaximumf.cpp
+ HDRS
+ ../fmaximumf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximuml
+ SRCS
+ fmaximuml.cpp
+ HDRS
+ ../fmaximuml.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximumf128
+ SRCS
+ fmaximumf128.cpp
+ HDRS
+ ../fmaximumf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fmaximum_num
+ SRCS
+ fmaximum_num.cpp
+ HDRS
+ ../fmaximum_num.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_numf
+ SRCS
+ fmaximum_numf.cpp
+ HDRS
+ ../fmaximum_numf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_numl
+ SRCS
+ fmaximum_numl.cpp
+ HDRS
+ ../fmaximum_numl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_numf128
+ SRCS
+ fmaximum_numf128.cpp
+ HDRS
+ ../fmaximum_numf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fmaximum_mag
+ SRCS
+ fmaximum_mag.cpp
+ HDRS
+ ../fmaximum_mag.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_magf
+ SRCS
+ fmaximum_magf.cpp
+ HDRS
+ ../fmaximum_magf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_magl
+ SRCS
+ fmaximum_magl.cpp
+ HDRS
+ ../fmaximum_magl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_magf128
+ SRCS
+ fmaximum_magf128.cpp
+ HDRS
+ ../fmaximum_magf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+
+add_entrypoint_object(
+ fmaximum_mag_num
+ SRCS
+ fmaximum_mag_num.cpp
+ HDRS
+ ../fmaximum_mag_num.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_mag_numf
+ SRCS
+ fmaximum_mag_numf.cpp
+ HDRS
+ ../fmaximum_mag_numf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_mag_numl
+ SRCS
+ fmaximum_mag_numl.cpp
+ HDRS
+ ../fmaximum_mag_numl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fmaximum_mag_numf128
+ SRCS
+ fmaximum_mag_numf128.cpp
+ HDRS
+ ../fmaximum_mag_numf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fminimum
+ SRCS
+ fminimum.cpp
+ HDRS
+ ../fminimum.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimumf
+ SRCS
+ fminimumf.cpp
+ HDRS
+ ../fminimumf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimuml
+ SRCS
+ fminimuml.cpp
+ HDRS
+ ../fminimuml.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimumf128
+ SRCS
+ fminimumf128.cpp
+ HDRS
+ ../fminimumf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fminimum_num
+ SRCS
+ fminimum_num.cpp
+ HDRS
+ ../fminimum_num.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_numf
+ SRCS
+ fminimum_numf.cpp
+ HDRS
+ ../fminimum_numf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_numl
+ SRCS
+ fminimum_numl.cpp
+ HDRS
+ ../fminimum_numl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_numf128
+ SRCS
+ fminimum_numf128.cpp
+ HDRS
+ ../fminimum_numf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fminimum_mag
+ SRCS
+ fminimum_mag.cpp
+ HDRS
+ ../fminimum_mag.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_magf
+ SRCS
+ fminimum_magf.cpp
+ HDRS
+ ../fminimum_magf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_magl
+ SRCS
+ fminimum_magl.cpp
+ HDRS
+ ../fminimum_magl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_magf128
+ SRCS
+ fminimum_magf128.cpp
+ HDRS
+ ../fminimum_magf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+
+add_entrypoint_object(
+ fminimum_mag_num
+ SRCS
+ fminimum_mag_num.cpp
+ HDRS
+ ../fminimum_mag_num.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_mag_numf
+ SRCS
+ fminimum_mag_numf.cpp
+ HDRS
+ ../fminimum_mag_numf.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_mag_numl
+ SRCS
+ fminimum_mag_numl.cpp
+ HDRS
+ ../fminimum_mag_numl.h
+ DEPENDS
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
+add_entrypoint_object(
+ fminimum_mag_numf128
+ SRCS
+ fminimum_mag_numf128.cpp
+ HDRS
+ ../fminimum_mag_numf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.basic_operations
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
sqrt
SRCS
sqrt.cpp
@@ -1557,6 +1999,7 @@ add_entrypoint_object(
-O3
)
+
add_entrypoint_object(
sqrtf
SRCS
@@ -2025,6 +2468,202 @@ add_entrypoint_object(
-O3
)
+add_entrypoint_object(
+ fromfp
+ SRCS
+ fromfp.cpp
+ HDRS
+ ../fromfp.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpf
+ SRCS
+ fromfpf.cpp
+ HDRS
+ ../fromfpf.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpl
+ SRCS
+ fromfpl.cpp
+ HDRS
+ ../fromfpl.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpf128
+ SRCS
+ fromfpf128.cpp
+ HDRS
+ ../fromfpf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpx
+ SRCS
+ fromfpx.cpp
+ HDRS
+ ../fromfpx.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpxf
+ SRCS
+ fromfpxf.cpp
+ HDRS
+ ../fromfpxf.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpxl
+ SRCS
+ fromfpxl.cpp
+ HDRS
+ ../fromfpxl.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ fromfpxf128
+ SRCS
+ fromfpxf128.cpp
+ HDRS
+ ../fromfpxf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfp
+ SRCS
+ ufromfp.cpp
+ HDRS
+ ../ufromfp.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpf
+ SRCS
+ ufromfpf.cpp
+ HDRS
+ ../ufromfpf.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpl
+ SRCS
+ ufromfpl.cpp
+ HDRS
+ ../ufromfpl.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpf128
+ SRCS
+ ufromfpf128.cpp
+ HDRS
+ ../ufromfpf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpx
+ SRCS
+ ufromfpx.cpp
+ HDRS
+ ../ufromfpx.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpxf
+ SRCS
+ ufromfpxf.cpp
+ HDRS
+ ../ufromfpxf.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpxl
+ SRCS
+ ufromfpxl.cpp
+ HDRS
+ ../ufromfpxl.h
+ DEPENDS
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
+add_entrypoint_object(
+ ufromfpxf128
+ SRCS
+ ufromfpxf128.cpp
+ HDRS
+ ../ufromfpxf128.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.nearest_integer
+ COMPILE_OPTIONS
+ -O3
+)
+
#TODO: Add errno include to the hyperbolic functions.
add_object_library(
explogxf
diff --git a/libc/src/math/generic/canonicalize.cpp b/libc/src/math/generic/canonicalize.cpp
new file mode 100644
index 000000000000..f38ca01e157f
--- /dev/null
+++ b/libc/src/math/generic/canonicalize.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of canonicalize function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/canonicalize.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, canonicalize, (double *cx, const double *x)) {
+ return fputil::canonicalize(*cx, *x);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/canonicalizef.cpp b/libc/src/math/generic/canonicalizef.cpp
new file mode 100644
index 000000000000..dce601de1491
--- /dev/null
+++ b/libc/src/math/generic/canonicalizef.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of canonicalizef function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/canonicalizef.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, canonicalizef, (float *cx, const float *x)) {
+ return fputil::canonicalize(*cx, *x);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/canonicalizef128.cpp b/libc/src/math/generic/canonicalizef128.cpp
new file mode 100644
index 000000000000..0078b478238c
--- /dev/null
+++ b/libc/src/math/generic/canonicalizef128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of canonicalizef128 function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/canonicalizef128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, canonicalizef128, (float128 * cx, const float128 *x)) {
+ return fputil::canonicalize(*cx, *x);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/canonicalizel.cpp b/libc/src/math/generic/canonicalizel.cpp
new file mode 100644
index 000000000000..5310a316acdd
--- /dev/null
+++ b/libc/src/math/generic/canonicalizel.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of canonicalizel function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/canonicalizel.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, canonicalizel,
+ (long double *cx, const long double *x)) {
+ return fputil::canonicalize(*cx, *x);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum.cpp b/libc/src/math/generic/fmaximum.cpp
new file mode 100644
index 000000000000..ac9593b325d4
--- /dev/null
+++ b/libc/src/math/generic/fmaximum.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum function--------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fmaximum, (double x, double y)) {
+ return fputil::fmaximum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_mag.cpp b/libc/src/math/generic/fmaximum_mag.cpp
new file mode 100644
index 000000000000..0deb0c2835f9
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_mag.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_mag function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_mag.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fmaximum_mag, (double x, double y)) {
+ return fputil::fmaximum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_mag_num.cpp b/libc/src/math/generic/fmaximum_mag_num.cpp
new file mode 100644
index 000000000000..d0b1096b88bc
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_mag_num.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_mag_num function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_mag_num.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fmaximum_mag_num, (double x, double y)) {
+ return fputil::fmaximum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_mag_numf.cpp b/libc/src/math/generic/fmaximum_mag_numf.cpp
new file mode 100644
index 000000000000..672d3fd3b263
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_mag_numf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_mag_numf function-----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_mag_numf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fmaximum_mag_numf, (float x, float y)) {
+ return fputil::fmaximum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_mag_numf128.cpp b/libc/src/math/generic/fmaximum_mag_numf128.cpp
new file mode 100644
index 000000000000..e7d13f13a098
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_mag_numf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_mag_numf128 function--------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_mag_numf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fmaximum_mag_numf128, (float128 x, float128 y)) {
+ return fputil::fmaximum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_mag_numl.cpp b/libc/src/math/generic/fmaximum_mag_numl.cpp
new file mode 100644
index 000000000000..a8499ca473b3
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_mag_numl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fmaximum_mag_numl function-----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_mag_numl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fmaximum_mag_numl,
+ (long double x, long double y)) {
+ return fputil::fmaximum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_magf.cpp b/libc/src/math/generic/fmaximum_magf.cpp
new file mode 100644
index 000000000000..380aca05a525
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_magf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_magf function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_magf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fmaximum_magf, (float x, float y)) {
+ return fputil::fmaximum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_magf128.cpp b/libc/src/math/generic/fmaximum_magf128.cpp
new file mode 100644
index 000000000000..301938fb7ffd
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_magf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_magf128 function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_magf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fmaximum_magf128, (float128 x, float128 y)) {
+ return fputil::fmaximum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_magl.cpp b/libc/src/math/generic/fmaximum_magl.cpp
new file mode 100644
index 000000000000..283a11eda9aa
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_magl.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_magl function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_magl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fmaximum_magl, (long double x, long double y)) {
+ return fputil::fmaximum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_num.cpp b/libc/src/math/generic/fmaximum_num.cpp
new file mode 100644
index 000000000000..23553dbcae7e
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_num.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_num function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_num.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fmaximum_num, (double x, double y)) {
+ return fputil::fmaximum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_numf.cpp b/libc/src/math/generic/fmaximum_numf.cpp
new file mode 100644
index 000000000000..f946ff43f543
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_numf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_numf function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_numf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fmaximum_numf, (float x, float y)) {
+ return fputil::fmaximum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_numf128.cpp b/libc/src/math/generic/fmaximum_numf128.cpp
new file mode 100644
index 000000000000..f33a5e195bf2
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_numf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_numf128 function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_numf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fmaximum_numf128, (float128 x, float128 y)) {
+ return fputil::fmaximum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximum_numl.cpp b/libc/src/math/generic/fmaximum_numl.cpp
new file mode 100644
index 000000000000..503fc41409f6
--- /dev/null
+++ b/libc/src/math/generic/fmaximum_numl.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximum_numl function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximum_numl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fmaximum_numl, (long double x, long double y)) {
+ return fputil::fmaximum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximumf.cpp b/libc/src/math/generic/fmaximumf.cpp
new file mode 100644
index 000000000000..3b2a60931bf6
--- /dev/null
+++ b/libc/src/math/generic/fmaximumf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximumf function-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximumf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fmaximumf, (float x, float y)) {
+ return fputil::fmaximum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximumf128.cpp b/libc/src/math/generic/fmaximumf128.cpp
new file mode 100644
index 000000000000..0099c913b052
--- /dev/null
+++ b/libc/src/math/generic/fmaximumf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximumf128 function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximumf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fmaximumf128, (float128 x, float128 y)) {
+ return fputil::fmaximum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fmaximuml.cpp b/libc/src/math/generic/fmaximuml.cpp
new file mode 100644
index 000000000000..ecd698300458
--- /dev/null
+++ b/libc/src/math/generic/fmaximuml.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmaximuml function-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmaximuml.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fmaximuml, (long double x, long double y)) {
+ return fputil::fmaximum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum.cpp b/libc/src/math/generic/fminimum.cpp
new file mode 100644
index 000000000000..28b257d950f4
--- /dev/null
+++ b/libc/src/math/generic/fminimum.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum function--------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fminimum, (double x, double y)) {
+ return fputil::fminimum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_mag.cpp b/libc/src/math/generic/fminimum_mag.cpp
new file mode 100644
index 000000000000..6af99570e1ea
--- /dev/null
+++ b/libc/src/math/generic/fminimum_mag.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_mag function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_mag.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fminimum_mag, (double x, double y)) {
+ return fputil::fminimum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_mag_num.cpp b/libc/src/math/generic/fminimum_mag_num.cpp
new file mode 100644
index 000000000000..fc5431ae2799
--- /dev/null
+++ b/libc/src/math/generic/fminimum_mag_num.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_mag_num function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_mag_num.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fminimum_mag_num, (double x, double y)) {
+ return fputil::fminimum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_mag_numf.cpp b/libc/src/math/generic/fminimum_mag_numf.cpp
new file mode 100644
index 000000000000..71179a6f03c2
--- /dev/null
+++ b/libc/src/math/generic/fminimum_mag_numf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_mag_numf function-----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_mag_numf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fminimum_mag_numf, (float x, float y)) {
+ return fputil::fminimum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_mag_numf128.cpp b/libc/src/math/generic/fminimum_mag_numf128.cpp
new file mode 100644
index 000000000000..109ce7e4e011
--- /dev/null
+++ b/libc/src/math/generic/fminimum_mag_numf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_mag_numf128 function--------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_mag_numf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fminimum_mag_numf128, (float128 x, float128 y)) {
+ return fputil::fminimum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_mag_numl.cpp b/libc/src/math/generic/fminimum_mag_numl.cpp
new file mode 100644
index 000000000000..c97ce6ead042
--- /dev/null
+++ b/libc/src/math/generic/fminimum_mag_numl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fminimum_mag_numl function-----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_mag_numl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fminimum_mag_numl,
+ (long double x, long double y)) {
+ return fputil::fminimum_mag_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_magf.cpp b/libc/src/math/generic/fminimum_magf.cpp
new file mode 100644
index 000000000000..834f6a4a9710
--- /dev/null
+++ b/libc/src/math/generic/fminimum_magf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_magf function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_magf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fminimum_magf, (float x, float y)) {
+ return fputil::fminimum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_magf128.cpp b/libc/src/math/generic/fminimum_magf128.cpp
new file mode 100644
index 000000000000..2828e28bb0a4
--- /dev/null
+++ b/libc/src/math/generic/fminimum_magf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_magf128 function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_magf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fminimum_magf128, (float128 x, float128 y)) {
+ return fputil::fminimum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_magl.cpp b/libc/src/math/generic/fminimum_magl.cpp
new file mode 100644
index 000000000000..50e328fd92d1
--- /dev/null
+++ b/libc/src/math/generic/fminimum_magl.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_magl function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_magl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fminimum_magl, (long double x, long double y)) {
+ return fputil::fminimum_mag(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_num.cpp b/libc/src/math/generic/fminimum_num.cpp
new file mode 100644
index 000000000000..e89c7f5acf9b
--- /dev/null
+++ b/libc/src/math/generic/fminimum_num.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_num function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_num.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fminimum_num, (double x, double y)) {
+ return fputil::fminimum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_numf.cpp b/libc/src/math/generic/fminimum_numf.cpp
new file mode 100644
index 000000000000..c37c8bd423a1
--- /dev/null
+++ b/libc/src/math/generic/fminimum_numf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_numf function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_numf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fminimum_numf, (float x, float y)) {
+ return fputil::fminimum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_numf128.cpp b/libc/src/math/generic/fminimum_numf128.cpp
new file mode 100644
index 000000000000..6b1f77bb447c
--- /dev/null
+++ b/libc/src/math/generic/fminimum_numf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_numf128 function------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_numf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fminimum_numf128, (float128 x, float128 y)) {
+ return fputil::fminimum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimum_numl.cpp b/libc/src/math/generic/fminimum_numl.cpp
new file mode 100644
index 000000000000..22045f83f2a7
--- /dev/null
+++ b/libc/src/math/generic/fminimum_numl.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimum_numl function---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimum_numl.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fminimum_numl, (long double x, long double y)) {
+ return fputil::fminimum_num(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimumf.cpp b/libc/src/math/generic/fminimumf.cpp
new file mode 100644
index 000000000000..c937fb0ea01d
--- /dev/null
+++ b/libc/src/math/generic/fminimumf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimumf function-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimumf.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fminimumf, (float x, float y)) {
+ return fputil::fminimum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimumf128.cpp b/libc/src/math/generic/fminimumf128.cpp
new file mode 100644
index 000000000000..24e02b8ff537
--- /dev/null
+++ b/libc/src/math/generic/fminimumf128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimumf128 function----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimumf128.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fminimumf128, (float128 x, float128 y)) {
+ return fputil::fminimum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fminimuml.cpp b/libc/src/math/generic/fminimuml.cpp
new file mode 100644
index 000000000000..43319503661a
--- /dev/null
+++ b/libc/src/math/generic/fminimuml.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fminimuml function-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fminimuml.h"
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fminimuml, (long double x, long double y)) {
+ return fputil::fminimum(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfp.cpp b/libc/src/math/generic/fromfp.cpp
new file mode 100644
index 000000000000..ba3f0a133cbc
--- /dev/null
+++ b/libc/src/math/generic/fromfp.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fromfp function ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfp.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fromfp, (double x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpf.cpp b/libc/src/math/generic/fromfpf.cpp
new file mode 100644
index 000000000000..fd058a13201c
--- /dev/null
+++ b/libc/src/math/generic/fromfpf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fromfpf function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpf.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fromfpf, (float x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpf128.cpp b/libc/src/math/generic/fromfpf128.cpp
new file mode 100644
index 000000000000..440a5da75693
--- /dev/null
+++ b/libc/src/math/generic/fromfpf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fromfpf128 function -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpf128.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fromfpf128,
+ (float128 x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpl.cpp b/libc/src/math/generic/fromfpl.cpp
new file mode 100644
index 000000000000..ee3abeaf98f8
--- /dev/null
+++ b/libc/src/math/generic/fromfpl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fromfpl function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpl.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fromfpl,
+ (long double x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpx.cpp b/libc/src/math/generic/fromfpx.cpp
new file mode 100644
index 000000000000..b9e7e4a7aa12
--- /dev/null
+++ b/libc/src/math/generic/fromfpx.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fromfpx function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpx.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, fromfpx, (double x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpxf.cpp b/libc/src/math/generic/fromfpxf.cpp
new file mode 100644
index 000000000000..1473499244d3
--- /dev/null
+++ b/libc/src/math/generic/fromfpxf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fromfpxf function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpxf.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, fromfpxf, (float x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpxf128.cpp b/libc/src/math/generic/fromfpxf128.cpp
new file mode 100644
index 000000000000..5d930d22ae5e
--- /dev/null
+++ b/libc/src/math/generic/fromfpxf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fromfpxf128 function ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpxf128.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, fromfpxf128,
+ (float128 x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/fromfpxl.cpp b/libc/src/math/generic/fromfpxl.cpp
new file mode 100644
index 000000000000..c3db055246f2
--- /dev/null
+++ b/libc/src/math/generic/fromfpxl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of fromfpxl function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fromfpxl.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, fromfpxl,
+ (long double x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/true>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfp.cpp b/libc/src/math/generic/ufromfp.cpp
new file mode 100644
index 000000000000..15800d67fd8d
--- /dev/null
+++ b/libc/src/math/generic/ufromfp.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of ufromfp function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfp.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, ufromfp, (double x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpf.cpp b/libc/src/math/generic/ufromfpf.cpp
new file mode 100644
index 000000000000..898446ec45aa
--- /dev/null
+++ b/libc/src/math/generic/ufromfpf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of ufromfpf function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpf.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, ufromfpf, (float x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpf128.cpp b/libc/src/math/generic/ufromfpf128.cpp
new file mode 100644
index 000000000000..cc728f35551c
--- /dev/null
+++ b/libc/src/math/generic/ufromfpf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of ufromfpf128 function ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpf128.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, ufromfpf128,
+ (float128 x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpl.cpp b/libc/src/math/generic/ufromfpl.cpp
new file mode 100644
index 000000000000..bd353e9ebbb5
--- /dev/null
+++ b/libc/src/math/generic/ufromfpl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of ufromfpl function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpl.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, ufromfpl,
+ (long double x, int rnd, unsigned int width)) {
+ return fputil::fromfp</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpx.cpp b/libc/src/math/generic/ufromfpx.cpp
new file mode 100644
index 000000000000..5ad95ff3061e
--- /dev/null
+++ b/libc/src/math/generic/ufromfpx.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of ufromfpx function -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpx.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(double, ufromfpx, (double x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpxf.cpp b/libc/src/math/generic/ufromfpxf.cpp
new file mode 100644
index 000000000000..7c878489e8d2
--- /dev/null
+++ b/libc/src/math/generic/ufromfpxf.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of ufromfpxf function ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpxf.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float, ufromfpxf, (float x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpxf128.cpp b/libc/src/math/generic/ufromfpxf128.cpp
new file mode 100644
index 000000000000..57c290365e69
--- /dev/null
+++ b/libc/src/math/generic/ufromfpxf128.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of ufromfpxf128 function ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpxf128.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, ufromfpxf128,
+ (float128 x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/generic/ufromfpxl.cpp b/libc/src/math/generic/ufromfpxl.cpp
new file mode 100644
index 000000000000..9a8ba7aa5b91
--- /dev/null
+++ b/libc/src/math/generic/ufromfpxl.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of ufromfpxl function ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/ufromfpxl.h"
+#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(long double, ufromfpxl,
+ (long double x, int rnd, unsigned int width)) {
+ return fputil::fromfpx</*IsSigned=*/false>(x, rnd, width);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/ufromfp.h b/libc/src/math/ufromfp.h
new file mode 100644
index 000000000000..f4667486440c
--- /dev/null
+++ b/libc/src/math/ufromfp.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for ufromfp -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFP_H
+#define LLVM_LIBC_SRC_MATH_UFROMFP_H
+
+namespace LIBC_NAMESPACE {
+
+double ufromfp(double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFP_H
diff --git a/libc/src/math/ufromfpf.h b/libc/src/math/ufromfpf.h
new file mode 100644
index 000000000000..40c6773d143e
--- /dev/null
+++ b/libc/src/math/ufromfpf.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for ufromfpf ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPF_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPF_H
+
+namespace LIBC_NAMESPACE {
+
+float ufromfpf(float x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPF_H
diff --git a/libc/src/math/ufromfpf128.h b/libc/src/math/ufromfpf128.h
new file mode 100644
index 000000000000..785fa82becbc
--- /dev/null
+++ b/libc/src/math/ufromfpf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for ufromfpf128 -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPF128_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 ufromfpf128(float128 x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPF128_H
diff --git a/libc/src/math/ufromfpl.h b/libc/src/math/ufromfpl.h
new file mode 100644
index 000000000000..f05a77dc2f10
--- /dev/null
+++ b/libc/src/math/ufromfpl.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for ufromfpl ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPL_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPL_H
+
+namespace LIBC_NAMESPACE {
+
+long double ufromfpl(long double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPL_H
diff --git a/libc/src/math/ufromfpx.h b/libc/src/math/ufromfpx.h
new file mode 100644
index 000000000000..79c413af968a
--- /dev/null
+++ b/libc/src/math/ufromfpx.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for ufromfpx ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPX_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPX_H
+
+namespace LIBC_NAMESPACE {
+
+double ufromfpx(double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPX_H
diff --git a/libc/src/math/ufromfpxf.h b/libc/src/math/ufromfpxf.h
new file mode 100644
index 000000000000..f6bd8f7d5995
--- /dev/null
+++ b/libc/src/math/ufromfpxf.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for ufromfpxf ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPXF_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPXF_H
+
+namespace LIBC_NAMESPACE {
+
+float ufromfpxf(float x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPXF_H
diff --git a/libc/src/math/ufromfpxf128.h b/libc/src/math/ufromfpxf128.h
new file mode 100644
index 000000000000..f3b43ff54f37
--- /dev/null
+++ b/libc/src/math/ufromfpxf128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for ufromfpxf128 ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPXF128_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPXF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 ufromfpxf128(float128 x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPXF128_H
diff --git a/libc/src/math/ufromfpxl.h b/libc/src/math/ufromfpxl.h
new file mode 100644
index 000000000000..180b8f93d218
--- /dev/null
+++ b/libc/src/math/ufromfpxl.h
@@ -0,0 +1,18 @@
+//===-- Implementation header for ufromfpxl ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_UFROMFPXL_H
+#define LLVM_LIBC_SRC_MATH_UFROMFPXL_H
+
+namespace LIBC_NAMESPACE {
+
+long double ufromfpxl(long double x, int rnd, unsigned int width);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_UFROMFPXL_H
diff --git a/libc/src/stdio/CMakeLists.txt b/libc/src/stdio/CMakeLists.txt
index ece93fd56ef0..11e15c917351 100644
--- a/libc/src/stdio/CMakeLists.txt
+++ b/libc/src/stdio/CMakeLists.txt
@@ -256,6 +256,13 @@ add_entrypoint_object(
.${LIBC_TARGET_OS}.remove
)
+add_entrypoint_object(
+ rename
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.rename
+)
+
# These entrypoints have multiple potential implementations.
add_stdio_entrypoint_object(feof)
add_stdio_entrypoint_object(feof_unlocked)
diff --git a/libc/src/stdio/linux/CMakeLists.txt b/libc/src/stdio/linux/CMakeLists.txt
index 774f24b2db0b..a08ff0ba4832 100644
--- a/libc/src/stdio/linux/CMakeLists.txt
+++ b/libc/src/stdio/linux/CMakeLists.txt
@@ -12,3 +12,15 @@ add_entrypoint_object(
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
+
+add_entrypoint_object(
+ rename
+ SRCS
+ rename.cpp
+ HDRS
+ ../rename.h
+ DEPENDS
+ libc.include.sys_syscall
+ libc.src.__support.OSUtil.osutil
+ libc.src.errno.errno
+)
diff --git a/libc/src/stdio/linux/rename.cpp b/libc/src/stdio/linux/rename.cpp
new file mode 100644
index 000000000000..379a6ef3c0c8
--- /dev/null
+++ b/libc/src/stdio/linux/rename.cpp
@@ -0,0 +1,28 @@
+//===-- Linux implementation of rename ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdio/rename.h"
+#include "include/llvm-libc-macros/linux/fcntl-macros.h"
+#include "src/__support/OSUtil/syscall.h" // For internal syscall function.
+#include "src/__support/common.h"
+#include "src/errno/libc_errno.h"
+#include <sys/syscall.h> // For syscall numbers.
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, rename, (const char *oldpath, const char *newpath)) {
+ int ret = LIBC_NAMESPACE::syscall_impl<int>(SYS_renameat2, AT_FDCWD, oldpath,
+ AT_FDCWD, newpath, 0);
+
+ if (ret >= 0)
+ return 0;
+ libc_errno = -ret;
+ return -1;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdio/printf_core/converter_utils.h b/libc/src/stdio/printf_core/converter_utils.h
index 948fe816e9b7..a0e96a11be5b 100644
--- a/libc/src/stdio/printf_core/converter_utils.h
+++ b/libc/src/stdio/printf_core/converter_utils.h
@@ -18,7 +18,9 @@
namespace LIBC_NAMESPACE {
namespace printf_core {
-LIBC_INLINE uintmax_t apply_length_modifier(uintmax_t num, LengthModifier lm) {
+LIBC_INLINE uintmax_t apply_length_modifier(uintmax_t num,
+ LengthSpec length_spec) {
+ auto [lm, bw] = length_spec;
switch (lm) {
case LengthModifier::none:
return num & cpp::numeric_limits<unsigned int>::max();
@@ -40,6 +42,18 @@ LIBC_INLINE uintmax_t apply_length_modifier(uintmax_t num, LengthModifier lm) {
return num & cpp::numeric_limits<uintptr_t>::max();
case LengthModifier::j:
return num; // j is intmax, so no mask is necessary.
+ case LengthModifier::w:
+ case LengthModifier::wf: {
+ uintmax_t mask;
+ if (bw == 0) {
+ mask = 0;
+ } else if (bw < sizeof(uintmax_t) * CHAR_BIT) {
+ mask = (static_cast<uintmax_t>(1) << bw) - 1;
+ } else {
+ mask = UINTMAX_MAX;
+ }
+ return num & mask;
+ }
}
__builtin_unreachable();
}
diff --git a/libc/src/stdio/printf_core/core_structs.h b/libc/src/stdio/printf_core/core_structs.h
index d3718b49d1b1..1e78f195a75e 100644
--- a/libc/src/stdio/printf_core/core_structs.h
+++ b/libc/src/stdio/printf_core/core_structs.h
@@ -22,7 +22,12 @@ namespace printf_core {
// These length modifiers match the length modifiers in the format string, which
// is why they are formatted differently from the rest of the file.
-enum class LengthModifier { hh, h, l, ll, j, z, t, L, none };
+enum class LengthModifier { hh, h, l, ll, j, z, t, L, w, wf, none };
+
+struct LengthSpec {
+ LengthModifier lm;
+ size_t bit_width;
+};
enum FormatFlags : uint8_t {
LEFT_JUSTIFIED = 0x01, // -
@@ -44,6 +49,7 @@ struct FormatSection {
// Format Specifier Values
FormatFlags flags = FormatFlags(0);
LengthModifier length_modifier = LengthModifier::none;
+ size_t bit_width = 0;
int min_width = 0;
int precision = -1;
@@ -66,6 +72,7 @@ struct FormatSection {
if (!((static_cast<uint8_t>(flags) ==
static_cast<uint8_t>(other.flags)) &&
(min_width == other.min_width) && (precision == other.precision) &&
+ (bit_width == other.bit_width) &&
(length_modifier == other.length_modifier) &&
(conv_name == other.conv_name)))
return false;
diff --git a/libc/src/stdio/printf_core/int_converter.h b/libc/src/stdio/printf_core/int_converter.h
index 2efbf53d4093..496e7bd1a56d 100644
--- a/libc/src/stdio/printf_core/int_converter.h
+++ b/libc/src/stdio/printf_core/int_converter.h
@@ -71,7 +71,6 @@ LIBC_INLINE int convert_int(Writer *writer, const FormatSection &to_conv) {
uintmax_t num = static_cast<uintmax_t>(to_conv.conv_val_raw);
bool is_negative = false;
FormatFlags flags = to_conv.flags;
-
const char a = is_lower(to_conv.conv_name) ? 'a' : 'A';
// If the conversion is signed, then handle negative values.
@@ -89,8 +88,8 @@ LIBC_INLINE int convert_int(Writer *writer, const FormatSection &to_conv) {
~(FormatFlags::FORCE_SIGN | FormatFlags::SPACE_PREFIX));
}
- num = apply_length_modifier(num, to_conv.length_modifier);
-
+ num =
+ apply_length_modifier(num, {to_conv.length_modifier, to_conv.bit_width});
cpp::array<char, details::num_buf_size()> buf;
auto str = details::num_to_strview(num, buf, to_conv.conv_name);
if (!str)
diff --git a/libc/src/stdio/printf_core/parser.h b/libc/src/stdio/printf_core/parser.h
index 0876116a0bac..8e8c77e219fa 100644
--- a/libc/src/stdio/printf_core/parser.h
+++ b/libc/src/stdio/printf_core/parser.h
@@ -150,10 +150,10 @@ public:
}
}
- LengthModifier lm = parse_length_modifier(&cur_pos);
-
+ auto [lm, bw] = parse_length_modifier(&cur_pos);
section.length_modifier = lm;
section.conv_name = str[cur_pos];
+ section.bit_width = bw;
switch (str[cur_pos]) {
case ('%'):
// Regardless of options, a % conversion is always safe. The standard
@@ -202,6 +202,21 @@ public:
WRITE_ARG_VAL_SIMPLEST(section.conv_val_raw, ptrdiff_t, conv_index);
break;
+
+ case (LengthModifier::w):
+ case (LengthModifier::wf):
+ if (bw == 0) {
+ section.has_conv = false;
+ } else if (bw <= INT_WIDTH) {
+ WRITE_ARG_VAL_SIMPLEST(section.conv_val_raw, int, conv_index);
+ } else if (bw <= LONG_WIDTH) {
+ WRITE_ARG_VAL_SIMPLEST(section.conv_val_raw, long, conv_index);
+ } else if (bw <= LLONG_WIDTH) {
+ WRITE_ARG_VAL_SIMPLEST(section.conv_val_raw, long long, conv_index);
+ } else {
+ WRITE_ARG_VAL_SIMPLEST(section.conv_val_raw, intmax_t, conv_index);
+ }
+ break;
}
break;
#ifndef LIBC_COPT_PRINTF_DISABLE_FLOAT
@@ -306,38 +321,54 @@ private:
// assumes that str[*local_pos] is inside a format specifier. It returns a
// LengthModifier with the length modifier it found. It will advance local_pos
// after the format specifier if one is found.
- LIBC_INLINE LengthModifier parse_length_modifier(size_t *local_pos) {
+ LIBC_INLINE LengthSpec parse_length_modifier(size_t *local_pos) {
switch (str[*local_pos]) {
case ('l'):
if (str[*local_pos + 1] == 'l') {
*local_pos += 2;
- return LengthModifier::ll;
+ return {LengthModifier::ll, 0};
+ } else {
+ ++*local_pos;
+ return {LengthModifier::l, 0};
+ }
+ case ('w'): {
+ LengthModifier lm;
+ if (str[*local_pos + 1] == 'f') {
+ *local_pos += 2;
+ lm = LengthModifier::wf;
} else {
++*local_pos;
- return LengthModifier::l;
+ lm = LengthModifier::w;
}
+ if (internal::isdigit(str[*local_pos])) {
+ const auto result = internal::strtointeger<int>(str + *local_pos, 10);
+ *local_pos += result.parsed_len;
+ return {lm, static_cast<size_t>(cpp::max(0, result.value))};
+ }
+ return {lm, 0};
+ }
case ('h'):
if (str[*local_pos + 1] == 'h') {
*local_pos += 2;
- return LengthModifier::hh;
+ return {LengthModifier::hh, 0};
} else {
++*local_pos;
- return LengthModifier::h;
+ return {LengthModifier::h, 0};
}
case ('L'):
++*local_pos;
- return LengthModifier::L;
+ return {LengthModifier::L, 0};
case ('j'):
++*local_pos;
- return LengthModifier::j;
+ return {LengthModifier::j, 0};
case ('z'):
++*local_pos;
- return LengthModifier::z;
+ return {LengthModifier::z, 0};
case ('t'):
++*local_pos;
- return LengthModifier::t;
+ return {LengthModifier::t, 0};
default:
- return LengthModifier::none;
+ return {LengthModifier::none, 0};
}
}
@@ -509,7 +540,7 @@ private:
}
}
- LengthModifier lm = parse_length_modifier(&local_pos);
+ auto [lm, bw] = parse_length_modifier(&local_pos);
// if we don't have an index for this conversion, then its position is
// unknown and all this information is irrelevant. The rest of this
@@ -560,6 +591,18 @@ private:
case (LengthModifier::t):
conv_size = type_desc_from_type<ptrdiff_t>();
break;
+ case (LengthModifier::w):
+ case (LengthModifier::wf):
+ if (bw <= INT_WIDTH) {
+ conv_size = type_desc_from_type<int>();
+ } else if (bw <= LONG_WIDTH) {
+ conv_size = type_desc_from_type<long>();
+ } else if (bw <= LLONG_WIDTH) {
+ conv_size = type_desc_from_type<long long>();
+ } else {
+ conv_size = type_desc_from_type<intmax_t>();
+ }
+ break;
}
break;
#ifndef LIBC_COPT_PRINTF_DISABLE_FLOAT
diff --git a/libc/src/stdio/printf_core/write_int_converter.h b/libc/src/stdio/printf_core/write_int_converter.h
index 0310905f36f1..18aa5c79897e 100644
--- a/libc/src/stdio/printf_core/write_int_converter.h
+++ b/libc/src/stdio/printf_core/write_int_converter.h
@@ -55,6 +55,8 @@ LIBC_INLINE int convert_write_int(Writer *writer,
*reinterpret_cast<ptrdiff_t *>(to_conv.conv_val_ptr) = written;
break;
case LengthModifier::j:
+ case LengthModifier::w:
+ case LengthModifier::wf:
*reinterpret_cast<uintmax_t *>(to_conv.conv_val_ptr) = written;
break;
}
diff --git a/libc/src/stdio/rename.h b/libc/src/stdio/rename.h
new file mode 100644
index 000000000000..eadda7c3eac9
--- /dev/null
+++ b/libc/src/stdio/rename.h
@@ -0,0 +1,18 @@
+//===-- Implementation header of rename -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDIO_RENAME_H
+#define LLVM_LIBC_SRC_STDIO_RENAME_H
+
+namespace LIBC_NAMESPACE {
+
+int rename(const char *oldpath, const char *newpath);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDIO_RENAME_H
diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index 22f7f990fb08..e526ba040bef 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -62,6 +62,26 @@ add_entrypoint_object(
.str_from_util
)
+add_entrypoint_object(
+ strfromd
+ SRCS
+ strfromd.cpp
+ HDRS
+ strfromd.h
+ DEPENDS
+ .str_from_util
+)
+
+add_entrypoint_object(
+ strfroml
+ SRCS
+ strfroml.cpp
+ HDRS
+ strfroml.h
+ DEPENDS
+ .str_from_util
+)
+
add_header_library(
str_from_util
HDRS
@@ -394,10 +414,11 @@ add_entrypoint_object(
CXX_STANDARD
20 # For constinit of the atexit callback list.
DEPENDS
- libc.src.__support.fixedvector
+ libc.src.__support.CPP.new
+ libc.src.__support.OSUtil.osutil
libc.src.__support.blockstore
+ libc.src.__support.fixedvector
libc.src.__support.threads.mutex
- libc.src.__support.CPP.new
)
add_entrypoint_object(
diff --git a/libc/src/stdlib/_Exit.cpp b/libc/src/stdlib/_Exit.cpp
index 85684d1e9087..233af2097392 100644
--- a/libc/src/stdlib/_Exit.cpp
+++ b/libc/src/stdlib/_Exit.cpp
@@ -13,9 +13,8 @@
namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(void, _Exit, (int status)) {
+[[noreturn]] LLVM_LIBC_FUNCTION(void, _Exit, (int status)) {
quick_exit(status);
- __builtin_unreachable();
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdlib/atexit.cpp b/libc/src/stdlib/atexit.cpp
index 741ea4f25103..fa072b2fdf8d 100644
--- a/libc/src/stdlib/atexit.cpp
+++ b/libc/src/stdlib/atexit.cpp
@@ -55,14 +55,10 @@ void stdc_at_exit_func(void *payload) {
reinterpret_cast<StdCAtExitCallback *>(payload)();
}
-} // namespace
-
-namespace internal {
-
void call_exit_callbacks() {
handler_list_mtx.lock();
while (!exit_callbacks.empty()) {
- auto unit = exit_callbacks.back();
+ AtExitUnit &unit = exit_callbacks.back();
exit_callbacks.pop_back();
handler_list_mtx.unlock();
unit.callback(unit.payload);
@@ -71,20 +67,31 @@ void call_exit_callbacks() {
ExitCallbackList::destroy(&exit_callbacks);
}
-} // namespace internal
-
-static int add_atexit_unit(const AtExitUnit &unit) {
+int add_atexit_unit(const AtExitUnit &unit) {
MutexLock lock(&handler_list_mtx);
- if (!exit_callbacks.push_back(unit))
- return -1;
- return 0;
+ if (exit_callbacks.push_back(unit))
+ return 0;
+ return -1;
}
+} // namespace
+
+extern "C" {
+
// TODO: Handle the last dso handle argument.
-extern "C" int __cxa_atexit(AtExitCallback *callback, void *payload, void *) {
+int __cxa_atexit(AtExitCallback *callback, void *payload, void *) {
return add_atexit_unit({callback, payload});
}
+// TODO: Handle the dso handle argument. call_exit_callbacks should only invoke
+// the callbacks from this DSO. Requires adding support for __dso_handle.
+void __cxa_finalize(void *dso) {
+ if (!dso)
+ call_exit_callbacks();
+}
+
+} // extern "C"
+
LLVM_LIBC_FUNCTION(int, atexit, (StdCAtExitCallback * callback)) {
return add_atexit_unit(
{&stdc_at_exit_func, reinterpret_cast<void *>(callback)});
diff --git a/libc/src/stdlib/exit.cpp b/libc/src/stdlib/exit.cpp
index cc5ae6648d11..ba87bffaeb54 100644
--- a/libc/src/stdlib/exit.cpp
+++ b/libc/src/stdlib/exit.cpp
@@ -10,16 +10,13 @@
#include "src/__support/OSUtil/quick_exit.h"
#include "src/__support/common.h"
-namespace LIBC_NAMESPACE {
+extern "C" void __cxa_finalize(void *);
-namespace internal {
-void call_exit_callbacks();
-}
+namespace LIBC_NAMESPACE {
-LLVM_LIBC_FUNCTION(void, exit, (int status)) {
- internal::call_exit_callbacks();
+[[noreturn]] LLVM_LIBC_FUNCTION(void, exit, (int status)) {
+ __cxa_finalize(nullptr);
quick_exit(status);
- __builtin_unreachable();
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdlib/str_from_util.h b/libc/src/stdlib/str_from_util.h
index c4c1c0a0ba4e..58afa98afc08 100644
--- a/libc/src/stdlib/str_from_util.h
+++ b/libc/src/stdlib/str_from_util.h
@@ -11,7 +11,7 @@
// %{a,A,e,E,f,F,g,G}, are not allowed and any code that does otherwise results
// in undefined behaviour(including use of a '%%' conversion specifier); which
// in this case is that the buffer string is simply populated with the format
-// string. The case of the input being NULL should be handled in the calling
+// string. The case of the input being nullptr should be handled in the calling
// function (strfromf, strfromd, strfroml) itself.
#ifndef LLVM_LIBC_SRC_STDLIB_STRFROM_UTIL_H
diff --git a/libc/src/stdlib/strfromd.cpp b/libc/src/stdlib/strfromd.cpp
new file mode 100644
index 000000000000..329f6fdcaff7
--- /dev/null
+++ b/libc/src/stdlib/strfromd.cpp
@@ -0,0 +1,39 @@
+//===-- Implementation of strfromd ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdlib/strfromd.h"
+#include "src/stdlib/str_from_util.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, strfromd,
+ (char *__restrict s, size_t n, const char *__restrict format,
+ double fp)) {
+ LIBC_ASSERT(s != nullptr);
+
+ printf_core::FormatSection section =
+ internal::parse_format_string(format, fp);
+ printf_core::WriteBuffer wb(s, (n > 0 ? n - 1 : 0));
+ printf_core::Writer writer(&wb);
+
+ int result = 0;
+ if (section.has_conv)
+ result = internal::strfromfloat_convert<double>(&writer, section);
+ else
+ result = writer.write(section.raw_string);
+
+ if (result < 0)
+ return result;
+
+ if (n > 0)
+ wb.buff[wb.buff_cur] = '\0';
+
+ return writer.get_chars_written();
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdlib/strfromd.h b/libc/src/stdlib/strfromd.h
new file mode 100644
index 000000000000..d2c3fefb6300
--- /dev/null
+++ b/libc/src/stdlib/strfromd.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for strfromd ------------------------*- C++--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_STRFROMD_H
+#define LLVM_LIBC_SRC_STDLIB_STRFROMD_H
+
+#include <stddef.h>
+
+namespace LIBC_NAMESPACE {
+
+int strfromd(char *__restrict s, size_t n, const char *__restrict format,
+ double fp);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_STRFROMD_H
diff --git a/libc/src/stdlib/strfromf.cpp b/libc/src/stdlib/strfromf.cpp
index 40eff87eb454..80e1d74797c8 100644
--- a/libc/src/stdlib/strfromf.cpp
+++ b/libc/src/stdlib/strfromf.cpp
@@ -9,9 +9,6 @@
#include "src/stdlib/strfromf.h"
#include "src/stdlib/str_from_util.h"
-#include <stdarg.h>
-#include <stddef.h>
-
namespace LIBC_NAMESPACE {
LLVM_LIBC_FUNCTION(int, strfromf,
diff --git a/libc/src/stdlib/strfromf.h b/libc/src/stdlib/strfromf.h
index b551a58af05a..492c2c33cf08 100644
--- a/libc/src/stdlib/strfromf.h
+++ b/libc/src/stdlib/strfromf.h
@@ -18,4 +18,4 @@ int strfromf(char *__restrict s, size_t n, const char *__restrict format,
} // namespace LIBC_NAMESPACE
-#endif // LLVM_LIBC_SRC_STDLIB_STRTOF_H
+#endif // LLVM_LIBC_SRC_STDLIB_STRFROMF_H
diff --git a/libc/src/stdlib/strfroml.cpp b/libc/src/stdlib/strfroml.cpp
new file mode 100644
index 000000000000..f0bc9354c7ad
--- /dev/null
+++ b/libc/src/stdlib/strfroml.cpp
@@ -0,0 +1,44 @@
+//===-- Implementation of strfroml ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/stdlib/strfroml.h"
+#include "src/stdlib/str_from_util.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, strfroml,
+ (char *__restrict s, size_t n, const char *__restrict format,
+ long double fp)) {
+ LIBC_ASSERT(s != nullptr);
+
+ printf_core::FormatSection section =
+ internal::parse_format_string(format, fp);
+
+ // To ensure that the conversion function actually uses long double,
+  // the length modifier has to be set to LengthModifier::L
+ section.length_modifier = printf_core::LengthModifier::L;
+
+ printf_core::WriteBuffer wb(s, (n > 0 ? n - 1 : 0));
+ printf_core::Writer writer(&wb);
+
+ int result = 0;
+ if (section.has_conv)
+ result = internal::strfromfloat_convert<long double>(&writer, section);
+ else
+ result = writer.write(section.raw_string);
+
+ if (result < 0)
+ return result;
+
+ if (n > 0)
+ wb.buff[wb.buff_cur] = '\0';
+
+ return writer.get_chars_written();
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/stdlib/strfroml.h b/libc/src/stdlib/strfroml.h
new file mode 100644
index 000000000000..e99d035e4da6
--- /dev/null
+++ b/libc/src/stdlib/strfroml.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for strfroml -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_STDLIB_STRFROML_H
+#define LLVM_LIBC_SRC_STDLIB_STRFROML_H
+
+#include <stddef.h>
+
+namespace LIBC_NAMESPACE {
+
+int strfroml(char *__restrict s, size_t n, const char *__restrict format,
+ long double fp);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_STDLIB_STRFROML_H
diff --git a/libc/src/stdlib/strtod.cpp b/libc/src/stdlib/strtod.cpp
index db5e0edefb5b..461f7feb5bf6 100644
--- a/libc/src/stdlib/strtod.cpp
+++ b/libc/src/stdlib/strtod.cpp
@@ -19,7 +19,7 @@ LLVM_LIBC_FUNCTION(double, strtod,
if (result.has_error())
libc_errno = result.error;
- if (str_end != NULL)
+ if (str_end != nullptr)
*str_end = const_cast<char *>(str + result.parsed_len);
return result.value;
diff --git a/libc/src/stdlib/strtof.cpp b/libc/src/stdlib/strtof.cpp
index 2cc8829f63d3..554d096879c5 100644
--- a/libc/src/stdlib/strtof.cpp
+++ b/libc/src/stdlib/strtof.cpp
@@ -19,7 +19,7 @@ LLVM_LIBC_FUNCTION(float, strtof,
if (result.has_error())
libc_errno = result.error;
- if (str_end != NULL)
+ if (str_end != nullptr)
*str_end = const_cast<char *>(str + result.parsed_len);
return result.value;
diff --git a/libc/src/stdlib/strtold.cpp b/libc/src/stdlib/strtold.cpp
index 7378963f21b2..9c3e1db90067 100644
--- a/libc/src/stdlib/strtold.cpp
+++ b/libc/src/stdlib/strtold.cpp
@@ -19,7 +19,7 @@ LLVM_LIBC_FUNCTION(long double, strtold,
if (result.has_error())
libc_errno = result.error;
- if (str_end != NULL)
+ if (str_end != nullptr)
*str_end = const_cast<char *>(str + result.parsed_len);
return result.value;
diff --git a/libc/src/string/memory_utils/generic/builtin.h b/libc/src/string/memory_utils/generic/builtin.h
index 5239329f653b..ba4f4b898408 100644
--- a/libc/src/string/memory_utils/generic/builtin.h
+++ b/libc/src/string/memory_utils/generic/builtin.h
@@ -10,16 +10,16 @@
#define LLVM_LIBC_SRC_STRING_MEMORY_UTILS_GENERIC_BUILTIN_H
#include "src/__support/macros/attributes.h" // LIBC_INLINE
-#include "src/__support/macros/config.h" // LIBC_HAS_BUILTIN
#include "src/string/memory_utils/utils.h" // Ptr, CPtr
#include <stddef.h> // size_t
namespace LIBC_NAMESPACE {
-static_assert(LIBC_HAS_BUILTIN(__builtin_memcpy), "Builtin not defined");
-static_assert(LIBC_HAS_BUILTIN(__builtin_memset), "Builtin not defined");
-static_assert(LIBC_HAS_BUILTIN(__builtin_memmove), "Builtin not defined");
+#if !__has_builtin(__builtin_memcpy) || !__has_builtin(__builtin_memset) || \
+ !__has_builtin(__builtin_memmove)
+#error "Builtin not defined"
+#endif
[[maybe_unused]] LIBC_INLINE void
inline_memcpy_builtin(Ptr dst, CPtr src, size_t count, size_t offset = 0) {
diff --git a/libc/src/string/memory_utils/utils.h b/libc/src/string/memory_utils/utils.h
index 79526d19c6b3..b3e1a26ad996 100644
--- a/libc/src/string/memory_utils/utils.h
+++ b/libc/src/string/memory_utils/utils.h
@@ -14,7 +14,6 @@
#include "src/__support/CPP/type_traits.h"
#include "src/__support/endian.h"
#include "src/__support/macros/attributes.h" // LIBC_INLINE
-#include "src/__support/macros/config.h" // LIBC_HAS_BUILTIN
#include "src/__support/macros/properties/architectures.h"
#include <stddef.h> // size_t
@@ -71,11 +70,11 @@ LIBC_INLINE bool is_disjoint(const void *p1, const void *p2, size_t size) {
return sdiff >= 0 ? size <= udiff : size <= neg_udiff;
}
-#if LIBC_HAS_BUILTIN(__builtin_memcpy_inline)
+#if __has_builtin(__builtin_memcpy_inline)
#define LLVM_LIBC_HAS_BUILTIN_MEMCPY_INLINE
#endif
-#if LIBC_HAS_BUILTIN(__builtin_memset_inline)
+#if __has_builtin(__builtin_memset_inline)
#define LLVM_LIBC_HAS_BUILTIN_MEMSET_INLINE
#endif
diff --git a/libc/src/sys/CMakeLists.txt b/libc/src/sys/CMakeLists.txt
index 57ea7b4beaca..adc666b94202 100644
--- a/libc/src/sys/CMakeLists.txt
+++ b/libc/src/sys/CMakeLists.txt
@@ -7,6 +7,7 @@ add_subdirectory(select)
add_subdirectory(socket)
add_subdirectory(sendfile)
add_subdirectory(stat)
+add_subdirectory(statvfs)
add_subdirectory(utsname)
add_subdirectory(wait)
add_subdirectory(prctl)
diff --git a/libc/src/sys/statvfs/CMakeLists.txt b/libc/src/sys/statvfs/CMakeLists.txt
new file mode 100644
index 000000000000..6a10187a135a
--- /dev/null
+++ b/libc/src/sys/statvfs/CMakeLists.txt
@@ -0,0 +1,17 @@
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
+ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
+endif()
+
+add_entrypoint_object(
+ statvfs
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.statvfs
+)
+
+add_entrypoint_object(
+ fstatvfs
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.fstatvfs
+)
diff --git a/libc/src/sys/statvfs/fstatvfs.h b/libc/src/sys/statvfs/fstatvfs.h
new file mode 100644
index 000000000000..6ca76a459ae5
--- /dev/null
+++ b/libc/src/sys/statvfs/fstatvfs.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fstatvfs ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_SYS_STATVFS_FSTATVFS_H
+#define LLVM_LIBC_SRC_SYS_STATVFS_FSTATVFS_H
+
+#include "llvm-libc-types/struct_statvfs.h"
+
+namespace LIBC_NAMESPACE {
+
+int fstatvfs(int fd, struct statvfs *buf);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_SYS_STATVFS_FSTATVFS_H
diff --git a/libc/src/sys/statvfs/linux/CMakeLists.txt b/libc/src/sys/statvfs/linux/CMakeLists.txt
new file mode 100644
index 000000000000..f818863bb470
--- /dev/null
+++ b/libc/src/sys/statvfs/linux/CMakeLists.txt
@@ -0,0 +1,36 @@
+add_header_library(
+ statfs_utils
+ HDRS
+ statfs_utils.h
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.__support.OSUtil.osutil
+ libc.src.__support.common
+ libc.src.__support.CPP.optional
+ libc.include.sys_syscall
+)
+
+add_entrypoint_object(
+ statvfs
+ SRCS
+ statvfs.cpp
+ HDRS
+ ../statvfs.h
+ DEPENDS
+ libc.src.__support.libc_assert
+ libc.include.llvm-libc-types.struct_statvfs
+ .statfs_utils
+)
+
+add_entrypoint_object(
+ fstatvfs
+ SRCS
+ fstatvfs.cpp
+ HDRS
+ ../fstatvfs.h
+ DEPENDS
+ libc.src.__support.libc_assert
+ libc.include.llvm-libc-types.struct_statvfs
+ .statfs_utils
+)
+
diff --git a/libc/src/sys/statvfs/linux/fstatvfs.cpp b/libc/src/sys/statvfs/linux/fstatvfs.cpp
new file mode 100644
index 000000000000..488989abbad7
--- /dev/null
+++ b/libc/src/sys/statvfs/linux/fstatvfs.cpp
@@ -0,0 +1,26 @@
+//===-- Linux implementation of fstatvfs ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/sys/statvfs/fstatvfs.h"
+#include "src/__support/common.h"
+#include "src/__support/libc_assert.h"
+#include "src/sys/statvfs/linux/statfs_utils.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, fstatvfs, (int fd, struct statvfs *buf)) {
+ using namespace statfs_utils;
+ cpp::optional<LinuxStatFs> result = linux_fstatfs(fd);
+ if (result) {
+ LIBC_ASSERT(buf != nullptr);
+ *buf = statfs_to_statvfs(*result);
+ }
+ return result ? 0 : -1;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/sys/statvfs/linux/statfs_utils.h b/libc/src/sys/statvfs/linux/statfs_utils.h
new file mode 100644
index 000000000000..606786a57183
--- /dev/null
+++ b/libc/src/sys/statvfs/linux/statfs_utils.h
@@ -0,0 +1,95 @@
+//===-- Convert Statfs to Statvfs -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_SYS_STATVFS_LINUX_STATFS_TO_STATVFS_H
+#define LLVM_LIBC_SRC_SYS_STATVFS_LINUX_STATFS_TO_STATVFS_H
+
+#include "llvm-libc-types/struct_statvfs.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/OSUtil/syscall.h"
+#include "src/__support/macros/attributes.h"
+#include "src/errno/libc_errno.h"
+#include <asm/statfs.h>
+#include <sys/syscall.h>
+namespace LIBC_NAMESPACE {
+
+namespace statfs_utils {
+#ifdef SYS_statfs64
+using LinuxStatFs = statfs64;
+#else
+using LinuxStatFs = statfs;
+#endif
+
+// The Linux kernel sets an additional flag in f_flags; libc should mask it out.
+LIBC_INLINE_VAR constexpr decltype(LinuxStatFs::f_flags) ST_VALID = 0x0020;
+
+LIBC_INLINE cpp::optional<LinuxStatFs> linux_statfs(const char *path) {
+ // The kernel syscall routine checks the validity of the path before filling
+ // the statfs structure. So, it is possible that the result is not initialized
+  // after the syscall. Since the struct is trivial, the compiler will generate
+ // pattern filling for the struct.
+ LinuxStatFs result;
+ // On 32-bit platforms, original statfs cannot handle large file systems.
+ // In such cases, SYS_statfs64 is defined and should be used.
+#ifdef SYS_statfs64
+ int ret = syscall_impl<int>(SYS_statfs64, path, sizeof(result), &result);
+#else
+ int ret = syscall_impl<int>(SYS_statfs, path, &result);
+#endif
+ if (ret < 0) {
+ libc_errno = -ret;
+ return cpp::nullopt;
+ }
+ result.f_flags &= ~ST_VALID;
+ return result;
+}
+
+LIBC_INLINE cpp::optional<LinuxStatFs> linux_fstatfs(int fd) {
+  // The kernel syscall routine checks the validity of the file descriptor
+  // before filling the statfs structure. So, it is possible that the result is
+  // not initialized after the syscall. Since the struct is trivial, the
+  // compiler will generate pattern filling for the struct.
+ LinuxStatFs result;
+ // On 32-bit platforms, original fstatfs cannot handle large file systems.
+ // In such cases, SYS_fstatfs64 is defined and should be used.
+#ifdef SYS_fstatfs64
+ int ret = syscall_impl<int>(SYS_fstatfs64, fd, sizeof(result), &result);
+#else
+ int ret = syscall_impl<int>(SYS_fstatfs, fd, &result);
+#endif
+ if (ret < 0) {
+ libc_errno = -ret;
+ return cpp::nullopt;
+ }
+ result.f_flags &= ~ST_VALID;
+ return result;
+}
+
+// must use 'struct' tag to refer to type 'statvfs' in this scope. There will be
+// a function in the same namespace with the same name. For consistency, we use
+// struct prefix for all statvfs/statfs related types.
+LIBC_INLINE struct statvfs statfs_to_statvfs(const LinuxStatFs &in) {
+ struct statvfs out;
+ out.f_bsize = in.f_bsize;
+ out.f_frsize = in.f_frsize;
+ out.f_blocks = in.f_blocks;
+ out.f_bfree = in.f_bfree;
+ out.f_bavail = in.f_bavail;
+ out.f_files = in.f_files;
+ out.f_ffree = in.f_ffree;
+ out.f_favail = in.f_ffree;
+ out.f_fsid = in.f_fsid.val[0] |
+ static_cast<decltype(out.f_fsid)>(in.f_fsid.val[1]) << 32;
+ out.f_flag = in.f_flags;
+ out.f_namemax = in.f_namelen;
+ return out;
+}
+} // namespace statfs_utils
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_SYS_STATVFS_LINUX_STATFS_TO_STATVFS_H
diff --git a/libc/src/sys/statvfs/linux/statvfs.cpp b/libc/src/sys/statvfs/linux/statvfs.cpp
new file mode 100644
index 000000000000..a438ef1f0117
--- /dev/null
+++ b/libc/src/sys/statvfs/linux/statvfs.cpp
@@ -0,0 +1,28 @@
+//===-- Linux implementation of statvfs -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/sys/statvfs/statvfs.h"
+#include "src/__support/common.h"
+#include "src/__support/libc_assert.h"
+#include "src/sys/statvfs/linux/statfs_utils.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(int, statvfs,
+ (const char *__restrict path,
+ struct statvfs *__restrict buf)) {
+ using namespace statfs_utils;
+ cpp::optional<LinuxStatFs> result = linux_statfs(path);
+ if (result) {
+ LIBC_ASSERT(buf != nullptr);
+ *buf = statfs_to_statvfs(*result);
+ }
+ return result ? 0 : -1;
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/sys/statvfs/statvfs.h b/libc/src/sys/statvfs/statvfs.h
new file mode 100644
index 000000000000..792c7ddd0164
--- /dev/null
+++ b/libc/src/sys/statvfs/statvfs.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for statvfs -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_SYS_STATVFS_STATVFS_H
+#define LLVM_LIBC_SRC_SYS_STATVFS_STATVFS_H
+
+#include "llvm-libc-types/struct_statvfs.h"
+
+namespace LIBC_NAMESPACE {
+
+int statvfs(const char *__restrict path, struct statvfs *__restrict buf);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_SYS_STATVFS_STATVFS_H
diff --git a/libc/test/UnitTest/CMakeLists.txt b/libc/test/UnitTest/CMakeLists.txt
index f7a6f4a91fab..d830d22bb540 100644
--- a/libc/test/UnitTest/CMakeLists.txt
+++ b/libc/test/UnitTest/CMakeLists.txt
@@ -118,6 +118,7 @@ add_unittest_framework_library(
DEPENDS
LibcTest
libc.test.UnitTest.string_utils
+ libc.src.__support.CPP.array
libc.src.__support.FPUtil.fp_bits
libc.src.__support.FPUtil.fpbits_str
libc.src.__support.FPUtil.fenv_impl
diff --git a/libc/test/UnitTest/FPMatcher.h b/libc/test/UnitTest/FPMatcher.h
index ee618a623efe..f4553eac5c8a 100644
--- a/libc/test/UnitTest/FPMatcher.h
+++ b/libc/test/UnitTest/FPMatcher.h
@@ -9,6 +9,7 @@
#ifndef LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
#define LLVM_LIBC_TEST_UNITTEST_FPMATCHER_H
+#include "src/__support/CPP/array.h"
#include "src/__support/CPP/type_traits.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
@@ -105,7 +106,14 @@ template <typename T> struct FPTest : public Test {
const T neg_max_normal = FPBits::max_normal(Sign::NEG).get_val(); \
const T min_denormal = FPBits::min_subnormal(Sign::POS).get_val(); \
const T neg_min_denormal = FPBits::min_subnormal(Sign::NEG).get_val(); \
- const T max_denormal = FPBits::max_subnormal().get_val();
+ const T max_denormal = FPBits::max_subnormal().get_val(); \
+ static constexpr int UNKNOWN_MATH_ROUNDING_DIRECTION = 99; \
+ static constexpr LIBC_NAMESPACE::cpp::array<int, 6> \
+ MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN = { \
+ FP_INT_UPWARD, FP_INT_DOWNWARD, \
+ FP_INT_TOWARDZERO, FP_INT_TONEARESTFROMZERO, \
+ FP_INT_TONEAREST, UNKNOWN_MATH_ROUNDING_DIRECTION, \
+ };
#define EXPECT_FP_EQ(expected, actual) \
EXPECT_THAT(actual, LIBC_NAMESPACE::testing::getMatcher< \
diff --git a/libc/test/UnitTest/PrintfMatcher.cpp b/libc/test/UnitTest/PrintfMatcher.cpp
index 32f3be73307e..c8303815c922 100644
--- a/libc/test/UnitTest/PrintfMatcher.cpp
+++ b/libc/test/UnitTest/PrintfMatcher.cpp
@@ -39,6 +39,10 @@ namespace {
case (LengthModifier::lm): \
tlog << #lm; \
break
+#define CASE_LM_BIT_WIDTH(lm, bw) \
+ case (LengthModifier::lm): \
+    tlog << #lm << "\n\tbit width: " << bw;                                    \
+ break
static void display(FormatSection form) {
tlog << "Raw String (len " << form.raw_string.size() << "): \"";
@@ -67,6 +71,8 @@ static void display(FormatSection form) {
CASE_LM(z);
CASE_LM(t);
CASE_LM(L);
+ CASE_LM_BIT_WIDTH(w, form.bit_width);
+ CASE_LM_BIT_WIDTH(wf, form.bit_width);
}
tlog << "\n";
tlog << "\tconversion name: " << form.conv_name << "\n";
diff --git a/libc/test/src/__support/CPP/bit_test.cpp b/libc/test/src/__support/CPP/bit_test.cpp
index cee5b90c8f4b..875b47e6a198 100644
--- a/libc/test/src/__support/CPP/bit_test.cpp
+++ b/libc/test/src/__support/CPP/bit_test.cpp
@@ -15,13 +15,6 @@
namespace LIBC_NAMESPACE::cpp {
-using UnsignedTypesNoBigInt = testing::TypeList<
-#if defined(LIBC_TYPES_HAS_INT128)
- __uint128_t,
-#endif // LIBC_TYPES_HAS_INT128
- unsigned char, unsigned short, unsigned int, unsigned long,
- unsigned long long>;
-
using UnsignedTypes = testing::TypeList<
#if defined(LIBC_TYPES_HAS_INT128)
__uint128_t,
@@ -228,7 +221,7 @@ TEST(LlvmLibcBitTest, Rotr) {
rotr<uint64_t>(0x12345678deadbeefULL, -19));
}
-TYPED_TEST(LlvmLibcBitTest, CountOnes, UnsignedTypesNoBigInt) {
+TYPED_TEST(LlvmLibcBitTest, CountOnes, UnsignedTypes) {
EXPECT_EQ(popcount(T(0)), 0);
for (int i = 0; i != cpp::numeric_limits<T>::digits; ++i)
EXPECT_EQ(popcount<T>(cpp::numeric_limits<T>::max() >> i),
diff --git a/libc/test/src/__support/arg_list_test.cpp b/libc/test/src/__support/arg_list_test.cpp
index 1876cf7f70b4..4f229e2bfe69 100644
--- a/libc/test/src/__support/arg_list_test.cpp
+++ b/libc/test/src/__support/arg_list_test.cpp
@@ -120,7 +120,7 @@ TEST(LlvmLibcArgListTest, TestStructTypes) {
}
// Test vector extensions from clang.
-#if LIBC_HAS_ATTRIBUTE(ext_vector_type)
+#if __has_attribute(ext_vector_type)
using int1 = int __attribute__((ext_vector_type(1)));
using int2 = int __attribute__((ext_vector_type(2)));
diff --git a/libc/test/src/__support/fixedvector_test.cpp b/libc/test/src/__support/fixedvector_test.cpp
index a70ebfabed22..4e92081321de 100644
--- a/libc/test/src/__support/fixedvector_test.cpp
+++ b/libc/test/src/__support/fixedvector_test.cpp
@@ -43,3 +43,19 @@ TEST(LlvmLibcFixedVectorTest, Destroy) {
LIBC_NAMESPACE::FixedVector<int, 20>::destroy(&fixed_vector);
ASSERT_TRUE(fixed_vector.empty());
}
+
+TEST(LlvmLibcFixedVectorTest, Iteration) {
+ LIBC_NAMESPACE::FixedVector<int, 20> v;
+ for (int i = 0; i < 3; i++)
+ v.push_back(i);
+ auto it = v.rbegin();
+ ASSERT_EQ(*it, 2);
+ ASSERT_EQ(*++it, 1);
+ ASSERT_EQ(*++it, 0);
+ // TODO: need an overload of Test::test for iterators?
+ // ASSERT_EQ(++it, v.rend());
+ // ASSERT_EQ(v.rbegin(), v.rbegin());
+ ASSERT_TRUE(++it == v.rend());
+ for (auto it = v.rbegin(), e = v.rend(); it != e; ++it)
+ ASSERT_GT(*it, -1);
+}
diff --git a/libc/test/src/__support/math_extras_test.cpp b/libc/test/src/__support/math_extras_test.cpp
index e642248881a4..e88b3e1d6b68 100644
--- a/libc/test/src/__support/math_extras_test.cpp
+++ b/libc/test/src/__support/math_extras_test.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/__support/UInt128.h" // UInt128
+#include "src/__support/UInt128.h" // UInt<128>
#include "src/__support/integer_literals.h"
#include "src/__support/math_extras.h"
#include "test/UnitTest/Test.h"
@@ -19,7 +19,7 @@ using UnsignedTypesNoBigInt = testing::TypeList<
__uint128_t,
#endif // LIBC_TYPES_HAS_INT128
unsigned char, unsigned short, unsigned int, unsigned long,
- unsigned long long>;
+ unsigned long long, UInt<128>>;
TEST(LlvmLibcBlockMathExtrasTest, mask_trailing_ones) {
EXPECT_EQ(0_u8, (mask_leading_ones<uint8_t, 0>()));
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index 85dacce3b21d..ae2cbad7d5a7 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -166,6 +166,70 @@ add_fp_unittest(
)
add_fp_unittest(
+ canonicalize_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ canonicalize_test.cpp
+ HDRS
+ CanonicalizeTest.h
+ DEPENDS
+ libc.include.math
+ libc.src.math.canonicalize
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.integer_literals
+)
+
+add_fp_unittest(
+ canonicalizef_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ canonicalizef_test.cpp
+ HDRS
+ CanonicalizeTest.h
+ DEPENDS
+ libc.include.math
+ libc.src.math.canonicalizef
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.integer_literals
+)
+
+add_fp_unittest(
+ canonicalizef128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ canonicalizef128_test.cpp
+ HDRS
+ CanonicalizeTest.h
+ DEPENDS
+ libc.include.math
+ libc.src.math.canonicalizef128
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.integer_literals
+)
+
+add_fp_unittest(
+ canonicalizel_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ canonicalizel_test.cpp
+ HDRS
+ CanonicalizeTest.h
+ DEPENDS
+ libc.include.math
+ libc.src.math.canonicalizel
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.integer_literals
+)
+
+add_fp_unittest(
ceil_test
SUITE
libc-math-smoke-tests
@@ -840,6 +904,198 @@ add_fp_unittest(
)
add_fp_unittest(
+ fromfp_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfp_test.cpp
+ HDRS
+ FromfpTest.h
+ DEPENDS
+ libc.src.math.fromfp
+)
+
+add_fp_unittest(
+ fromfpf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpf_test.cpp
+ HDRS
+ FromfpTest.h
+ DEPENDS
+ libc.src.math.fromfpf
+)
+
+add_fp_unittest(
+ fromfpl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpl_test.cpp
+ HDRS
+ FromfpTest.h
+ DEPENDS
+ libc.src.math.fromfpl
+)
+
+add_fp_unittest(
+ fromfpf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpf128_test.cpp
+ HDRS
+ FromfpTest.h
+ DEPENDS
+ libc.src.math.fromfpf128
+)
+
+add_fp_unittest(
+ fromfpx_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpx_test.cpp
+ HDRS
+ FromfpxTest.h
+ DEPENDS
+ libc.src.math.fromfpx
+)
+
+add_fp_unittest(
+ fromfpxf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpxf_test.cpp
+ HDRS
+ FromfpxTest.h
+ DEPENDS
+ libc.src.math.fromfpxf
+)
+
+add_fp_unittest(
+ fromfpxl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpxl_test.cpp
+ HDRS
+ FromfpxTest.h
+ DEPENDS
+ libc.src.math.fromfpxl
+)
+
+add_fp_unittest(
+ fromfpxf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fromfpxf128_test.cpp
+ HDRS
+ FromfpxTest.h
+ DEPENDS
+ libc.src.math.fromfpxf128
+)
+
+add_fp_unittest(
+ ufromfp_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfp_test.cpp
+ HDRS
+ UfromfpTest.h
+ DEPENDS
+ libc.src.math.ufromfp
+)
+
+add_fp_unittest(
+ ufromfpf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpf_test.cpp
+ HDRS
+ UfromfpTest.h
+ DEPENDS
+ libc.src.math.ufromfpf
+)
+
+add_fp_unittest(
+ ufromfpl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpl_test.cpp
+ HDRS
+ UfromfpTest.h
+ DEPENDS
+ libc.src.math.ufromfpl
+)
+
+add_fp_unittest(
+ ufromfpf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpf128_test.cpp
+ HDRS
+ UfromfpTest.h
+ DEPENDS
+ libc.src.math.ufromfpf128
+)
+
+add_fp_unittest(
+ ufromfpx_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpx_test.cpp
+ HDRS
+ UfromfpxTest.h
+ DEPENDS
+ libc.src.math.ufromfpx
+)
+
+add_fp_unittest(
+ ufromfpxf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpxf_test.cpp
+ HDRS
+ UfromfpxTest.h
+ DEPENDS
+ libc.src.math.ufromfpxf
+)
+
+add_fp_unittest(
+ ufromfpxl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpxl_test.cpp
+ HDRS
+ UfromfpxTest.h
+ DEPENDS
+ libc.src.math.ufromfpxl
+)
+
+add_fp_unittest(
+ ufromfpxf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ ufromfpxf128_test.cpp
+ HDRS
+ UfromfpxTest.h
+ DEPENDS
+ libc.src.math.ufromfpxf128
+)
+
+add_fp_unittest(
ilogb_test
SUITE
libc-math-smoke-tests
@@ -1286,6 +1542,424 @@ add_fp_unittest(
)
add_fp_unittest(
+ fmaximuml_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximuml_test.cpp
+ HDRS
+ FMaximumTest.h
+ DEPENDS
+ libc.src.math.fmaximuml
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximumf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximumf128_test.cpp
+ HDRS
+ FMaximumTest.h
+ DEPENDS
+ libc.src.math.fmaximumf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_test.cpp
+ HDRS
+ FMaximumTest.h
+ DEPENDS
+ libc.src.math.fmaximum
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximumf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximumf_test.cpp
+ HDRS
+ FMaximumTest.h
+ DEPENDS
+ libc.src.math.fmaximumf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_numf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_numf_test.cpp
+ HDRS
+ FMaximumNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_numf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_num_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_num_test.cpp
+ HDRS
+ FMaximumNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_num
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_numl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_numl_test.cpp
+ HDRS
+ FMaximumNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_numl
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_numf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_numf128_test.cpp
+ HDRS
+ FMaximumNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_numf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_magf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_magf_test.cpp
+ HDRS
+ FMaximumMagTest.h
+ DEPENDS
+ libc.src.math.fmaximum_magf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_mag_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_mag_test.cpp
+ HDRS
+ FMaximumMagTest.h
+ DEPENDS
+ libc.src.math.fmaximum_mag
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_magl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_magl_test.cpp
+ HDRS
+ FMaximumMagTest.h
+ DEPENDS
+ libc.src.math.fmaximum_magl
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_magf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_magf128_test.cpp
+ HDRS
+ FMaximumMagTest.h
+ DEPENDS
+ libc.src.math.fmaximum_magf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+
+add_fp_unittest(
+ fmaximum_mag_numf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_mag_numf_test.cpp
+ HDRS
+ FMaximumMagNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_mag_numf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_mag_num_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_mag_num_test.cpp
+ HDRS
+ FMaximumMagNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_mag_num
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_mag_numl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_mag_numl_test.cpp
+ HDRS
+ FMaximumMagNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_mag_numl
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fmaximum_mag_numf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmaximum_mag_numf128_test.cpp
+ HDRS
+ FMaximumMagNumTest.h
+ DEPENDS
+ libc.src.math.fmaximum_mag_numf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimuml_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimuml_test.cpp
+ HDRS
+ FMinimumTest.h
+ DEPENDS
+ libc.src.math.fminimuml
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimumf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimumf128_test.cpp
+ HDRS
+ FMinimumTest.h
+ DEPENDS
+ libc.src.math.fminimumf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_test.cpp
+ HDRS
+ FMinimumTest.h
+ DEPENDS
+ libc.src.math.fminimum
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimumf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimumf_test.cpp
+ HDRS
+ FMinimumTest.h
+ DEPENDS
+ libc.src.math.fminimumf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_numf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_numf_test.cpp
+ HDRS
+ FMinimumNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_numf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_num_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_num_test.cpp
+ HDRS
+ FMinimumNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_num
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_numl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_numl_test.cpp
+ HDRS
+ FMinimumNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_numl
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_numf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_numf128_test.cpp
+ HDRS
+ FMinimumNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_numf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_magf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_magf_test.cpp
+ HDRS
+ FMinimumMagTest.h
+ DEPENDS
+ libc.src.math.fminimum_magf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_mag_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_mag_test.cpp
+ HDRS
+ FMinimumMagTest.h
+ DEPENDS
+ libc.src.math.fminimum_mag
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_magl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_magl_test.cpp
+ HDRS
+ FMinimumMagTest.h
+ DEPENDS
+ libc.src.math.fminimum_magl
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_magf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_magf128_test.cpp
+ HDRS
+ FMinimumMagTest.h
+ DEPENDS
+ libc.src.math.fminimum_magf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+
+add_fp_unittest(
+ fminimum_mag_numf_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_mag_numf_test.cpp
+ HDRS
+ FMinimumMagNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_mag_numf
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_mag_num_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_mag_num_test.cpp
+ HDRS
+ FMinimumMagNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_mag_num
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_mag_numl_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_mag_numl_test.cpp
+ HDRS
+ FMinimumMagNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_mag_numl
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
+ fminimum_mag_numf128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fminimum_mag_numf128_test.cpp
+ HDRS
+ FMinimumMagNumTest.h
+ DEPENDS
+ libc.src.math.fminimum_mag_numf128
+ libc.src.__support.FPUtil.fp_bits
+)
+
+add_fp_unittest(
sqrtf_test
SUITE
libc-math-smoke-tests
diff --git a/libc/test/src/math/smoke/CanonicalizeTest.h b/libc/test/src/math/smoke/CanonicalizeTest.h
new file mode 100644
index 000000000000..4361f7d8ac7a
--- /dev/null
+++ b/libc/test/src/math/smoke/CanonicalizeTest.h
@@ -0,0 +1,209 @@
+//===-- Utility class to test canonicalize[f|l] -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_CANONICALIZETEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_CANONICALIZETEST_H
+
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/integer_literals.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+#include "include/llvm-libc-macros/math-macros.h"
+
+#define TEST_SPECIAL(x, y, expected, expected_exception) \
+ EXPECT_EQ(expected, f(&x, &y)); \
+ EXPECT_FP_EXCEPTION(expected_exception); \
+ LIBC_NAMESPACE::fputil::clear_except(FE_ALL_EXCEPT)
+
+#define TEST_REGULAR(x, y, expected) TEST_SPECIAL(x, y, expected, 0)
+
+using LIBC_NAMESPACE::operator""_u128;
+
+template <typename T>
+class CanonicalizeTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef int (*CanonicalizeFunc)(T *, const T *);
+
+ void testSpecialNumbers(CanonicalizeFunc f) {
+ T cx;
+
+ TEST_SPECIAL(cx, zero, 0, 0);
+ EXPECT_FP_EQ(cx, zero);
+
+ TEST_SPECIAL(cx, neg_zero, 0, 0);
+ EXPECT_FP_EQ(cx, neg_zero);
+
+ TEST_SPECIAL(cx, inf, 0, 0);
+ EXPECT_FP_EQ(cx, inf);
+
+ TEST_SPECIAL(cx, neg_inf, 0, 0);
+ EXPECT_FP_EQ(cx, neg_inf);
+
+ TEST_SPECIAL(cx, sNaN, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+ }
+
+ void testX64_80SpecialNumbers(CanonicalizeFunc f) {
+ if constexpr (LIBC_NAMESPACE::fputil::get_fp_type<T>() ==
+ LIBC_NAMESPACE::fputil::FPType::X86_Binary80) {
+ T cx;
+ // Exponent | Significand | Meaning
+ // | Bits 63-62 | Bits 61-0 |
+ // All Ones | 00 | Zero | Pseudo Infinity, Value = SNaN
+ FPBits test1(0x00000000'00007FFF'00000000'00000000_u128);
+ const T test1_val = test1.get_val();
+ TEST_SPECIAL(cx, test1_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ // Exponent | Significand | Meaning
+ // | Bits 63-62 | Bits 61-0 |
+ // All Ones | 00 | Non-Zero | Pseudo NaN, Value = SNaN
+ FPBits test2_1(0x00000000'00007FFF'00000000'00000001_u128);
+ const T test2_1_val = test2_1.get_val();
+ TEST_SPECIAL(cx, test2_1_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test2_2(0x00000000'00007FFF'00000042'70000001_u128);
+ const T test2_2_val = test2_2.get_val();
+ TEST_SPECIAL(cx, test2_2_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test2_3(0x00000000'00007FFF'00000000'08261001_u128);
+ const T test2_3_val = test2_3.get_val();
+ TEST_SPECIAL(cx, test2_3_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test2_4(0x00000000'00007FFF'00007800'08261001_u128);
+ const T test2_4_val = test2_4.get_val();
+ TEST_SPECIAL(cx, test2_4_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ // Exponent | Significand | Meaning
+ // | Bits 63-62 | Bits 61-0 |
+ // All Ones | 01 | Anything | Pseudo NaN, Value = SNaN
+ FPBits test3_1(0x00000000'00007FFF'40000000'00000000_u128);
+ const T test3_1_val = test3_1.get_val();
+ TEST_SPECIAL(cx, test3_1_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test3_2(0x00000000'00007FFF'40000042'70000001_u128);
+ const T test3_2_val = test3_2.get_val();
+ TEST_SPECIAL(cx, test3_2_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test3_3(0x00000000'00007FFF'40000000'08261001_u128);
+ const T test3_3_val = test3_3.get_val();
+ TEST_SPECIAL(cx, test3_3_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test3_4(0x00000000'00007FFF'40007800'08261001_u128);
+ const T test3_4_val = test3_4.get_val();
+ TEST_SPECIAL(cx, test3_4_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ // Exponent | Significand | Meaning
+ // | Bit 63 | Bits 62-0 |
+ // All zeroes | One | Anything | Pseudo Denormal, Value =
+ // | | | (−1)**s × m × 2**−16382
+ FPBits test4_1(0x00000000'00000000'80000000'00000000_u128);
+ const T test4_1_val = test4_1.get_val();
+ TEST_SPECIAL(cx, test4_1_val, 0, 0);
+ EXPECT_FP_EQ(
+ cx, FPBits::make_value(test4_1.get_explicit_mantissa(), 0).get_val());
+
+ FPBits test4_2(0x00000000'00000000'80000042'70000001_u128);
+ const T test4_2_val = test4_2.get_val();
+ TEST_SPECIAL(cx, test4_2_val, 0, 0);
+ EXPECT_FP_EQ(
+ cx, FPBits::make_value(test4_2.get_explicit_mantissa(), 0).get_val());
+
+ FPBits test4_3(0x00000000'00000000'80000000'08261001_u128);
+ const T test4_3_val = test4_3.get_val();
+ TEST_SPECIAL(cx, test4_3_val, 0, 0);
+ EXPECT_FP_EQ(
+ cx, FPBits::make_value(test4_3.get_explicit_mantissa(), 0).get_val());
+
+ // Exponent | Significand | Meaning
+ // | Bit 63 | Bits 62-0 |
+ // All Other | Zero | Anything | Unnormal, Value = SNaN
+ // Values | | |
+ FPBits test5_1(0x00000000'00000040'00000000'00000001_u128);
+ const T test5_1_val = test5_1.get_val();
+ TEST_SPECIAL(cx, test5_1_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test5_2(0x00000000'00000230'00000042'70000001_u128);
+ const T test5_2_val = test5_2.get_val();
+ TEST_SPECIAL(cx, test5_2_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test5_3(0x00000000'00000560'00000000'08261001_u128);
+ const T test5_3_val = test5_3.get_val();
+ TEST_SPECIAL(cx, test5_3_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test5_4(0x00000000'00000780'00000028'16000000_u128);
+ const T test5_4_val = test5_4.get_val();
+ TEST_SPECIAL(cx, test5_4_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test5_5(0x00000000'00000900'00000042'70000001_u128);
+ const T test5_5_val = test5_5.get_val();
+ TEST_SPECIAL(cx, test5_5_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+
+ FPBits test5_6(0x00000000'00000AB0'00000000'08261001_u128);
+ const T test5_6_val = test5_6.get_val();
+ TEST_SPECIAL(cx, test5_6_val, 1, FE_INVALID);
+ EXPECT_FP_EQ(cx, aNaN);
+ }
+ }
+
+ void testRegularNumbers(CanonicalizeFunc f) {
+ T cx;
+ const T test_var_1 = T(1.0);
+ TEST_REGULAR(cx, test_var_1, 0);
+ EXPECT_FP_EQ(cx, test_var_1);
+ const T test_var_2 = T(-1.0);
+ TEST_REGULAR(cx, test_var_2, 0);
+ EXPECT_FP_EQ(cx, test_var_2);
+ const T test_var_3 = T(10.0);
+ TEST_REGULAR(cx, test_var_3, 0);
+ EXPECT_FP_EQ(cx, test_var_3);
+ const T test_var_4 = T(-10.0);
+ TEST_REGULAR(cx, test_var_4, 0);
+ EXPECT_FP_EQ(cx, test_var_4);
+ const T test_var_5 = T(1234.0);
+ TEST_REGULAR(cx, test_var_5, 0);
+ EXPECT_FP_EQ(cx, test_var_5);
+ const T test_var_6 = T(-1234.0);
+ TEST_REGULAR(cx, test_var_6, 0);
+ EXPECT_FP_EQ(cx, test_var_6);
+ }
+};
+
+#define LIST_CANONICALIZE_TESTS(T, func)                                       \
+  using LlvmLibcCanonicalizeTest = CanonicalizeTest<T>;                        \
+  TEST_F(LlvmLibcCanonicalizeTest, SpecialNumbers) {                           \
+    testSpecialNumbers(&func);                                                 \
+  }                                                                            \
+  TEST_F(LlvmLibcCanonicalizeTest, RegularNumbers) {                           \
+    testRegularNumbers(&func);                                                 \
+  }
+
+#define X86_80_SPECIAL_CANONICALIZE_TEST(T, func) \
+ using LlvmLibcCanonicalizeTest = CanonicalizeTest<T>; \
+ TEST_F(LlvmLibcCanonicalizeTest, X64_80SpecialNumbers) { \
+ testX64_80SpecialNumbers(&func); \
+ }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_CANONICALIZETEST_H
diff --git a/libc/test/src/math/smoke/FMaximumMagNumTest.h b/libc/test/src/math/smoke/FMaximumMagNumTest.h
new file mode 100644
index 000000000000..715dd4ed913f
--- /dev/null
+++ b/libc/test/src/math/smoke/FMaximumMagNumTest.h
@@ -0,0 +1,101 @@
+//===-- Utility class to test fmaximum_mag_num[f|l] -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMMAG_NUMTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMMAG_NUMTEST_H
+
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMaximumMagNumTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMaximumMagNumFunc)(T, T);
+
+ void testNaN(FMaximumMagNumFunc func) {
+ EXPECT_FP_EQ(inf, func(aNaN, inf));
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(sNaN, inf), FE_INVALID);
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(neg_inf, sNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, aNaN)).uintval());
+ EXPECT_FP_EQ(0.0, func(aNaN, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(0.0, func(sNaN, 0.0), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(-0.0, func(-0.0, sNaN), FE_INVALID);
+ EXPECT_FP_EQ(T(-1.2345), func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.2345), func(sNaN, T(-1.2345)), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.2345), func(T(1.2345), sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(aNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, aNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, sNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, aNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, sNaN)).uintval());
+ }
+
+ void testInfArg(FMaximumMagNumFunc func) {
+ EXPECT_FP_EQ(inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(inf, func(inf, 0.0));
+ EXPECT_FP_EQ(inf, func(-0.0, inf));
+ EXPECT_FP_EQ(inf, func(inf, T(1.2345)));
+ EXPECT_FP_EQ(inf, func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMaximumMagNumFunc func) {
+ EXPECT_FP_EQ(inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(neg_inf, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(neg_inf, func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMaximumMagNumFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMaximumMagNumFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (LIBC_NAMESPACE::fputil::abs(x) > LIBC_NAMESPACE::fputil::abs(y)) {
+ EXPECT_FP_EQ(x, func(x, y));
+ } else {
+ EXPECT_FP_EQ(y, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMAXIMUM_MAG_NUM_TESTS(T, func) \
+ using LlvmLibcFMaximumMagNumTest = FMaximumMagNumTest<T>; \
+ TEST_F(LlvmLibcFMaximumMagNumTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMaximumMagNumTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumMagNumTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumMagNumTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMaximumMagNumTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMMAG_NUMTEST_H
diff --git a/libc/test/src/math/smoke/FMaximumMagTest.h b/libc/test/src/math/smoke/FMaximumMagTest.h
new file mode 100644
index 000000000000..38276e0fe2fd
--- /dev/null
+++ b/libc/test/src/math/smoke/FMaximumMagTest.h
@@ -0,0 +1,89 @@
+//===-- Utility class to test fmaximum_mag[f|l] -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUM_MAGTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUM_MAGTEST_H
+
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMaximumMagTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMaximumMagFunc)(T, T);
+
+ void testNaN(FMaximumMagFunc func) {
+ EXPECT_FP_EQ(aNaN, func(aNaN, inf));
+ EXPECT_FP_EQ(aNaN, func(neg_inf, aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, 0.0));
+ EXPECT_FP_EQ(aNaN, func(-0.0, aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(aNaN, func(T(1.2345), aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, aNaN));
+ }
+
+ void testInfArg(FMaximumMagFunc func) {
+ EXPECT_FP_EQ(inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(inf, func(inf, 0.0));
+ EXPECT_FP_EQ(inf, func(-0.0, inf));
+ EXPECT_FP_EQ(inf, func(inf, T(1.2345)));
+ EXPECT_FP_EQ(inf, func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMaximumMagFunc func) {
+ EXPECT_FP_EQ(inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(neg_inf, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(neg_inf, func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMaximumMagFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMaximumMagFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (LIBC_NAMESPACE::fputil::abs(x) > LIBC_NAMESPACE::fputil::abs(y)) {
+ EXPECT_FP_EQ(x, func(x, y));
+ } else {
+ EXPECT_FP_EQ(y, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMAXIMUM_MAG_TESTS(T, func) \
+ using LlvmLibcFMaximumMagTest = FMaximumMagTest<T>; \
+ TEST_F(LlvmLibcFMaximumMagTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMaximumMagTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumMagTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumMagTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMaximumMagTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUM_MAGTEST_H
diff --git a/libc/test/src/math/smoke/FMaximumNumTest.h b/libc/test/src/math/smoke/FMaximumNumTest.h
new file mode 100644
index 000000000000..57096f6b614a
--- /dev/null
+++ b/libc/test/src/math/smoke/FMaximumNumTest.h
@@ -0,0 +1,100 @@
+//===-- Utility class to test fmaximum_num[f|l] -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMNUMTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMNUMTEST_H
+
+#include "src/__support/FPUtil/FPBits.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMaximumNumTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMaximumNumFunc)(T, T);
+
+ void testNaN(FMaximumNumFunc func) {
+ EXPECT_FP_EQ(inf, func(aNaN, inf));
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(sNaN, inf), FE_INVALID);
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(neg_inf, sNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, aNaN)).uintval());
+ EXPECT_FP_EQ(0.0, func(aNaN, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(0.0, func(sNaN, 0.0), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(-0.0, func(-0.0, sNaN), FE_INVALID);
+ EXPECT_FP_EQ(T(-1.2345), func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.2345), func(sNaN, T(-1.2345)), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.2345), func(T(1.2345), sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(aNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, aNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, sNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, aNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, sNaN)).uintval());
+ }
+
+ void testInfArg(FMaximumNumFunc func) {
+ EXPECT_FP_EQ(inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(inf, func(inf, 0.0));
+ EXPECT_FP_EQ(inf, func(-0.0, inf));
+ EXPECT_FP_EQ(inf, func(inf, T(1.2345)));
+ EXPECT_FP_EQ(inf, func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMaximumNumFunc func) {
+ EXPECT_FP_EQ(inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(0.0, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(T(-1.2345), func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMaximumNumFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMaximumNumFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (x > y) {
+ EXPECT_FP_EQ(x, func(x, y));
+ } else {
+ EXPECT_FP_EQ(y, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMAXIMUM_NUM_TESTS(T, func) \
+ using LlvmLibcFMaximumNumTest = FMaximumNumTest<T>; \
+ TEST_F(LlvmLibcFMaximumNumTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMaximumNumTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumNumTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumNumTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMaximumNumTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMNUMTEST_H
diff --git a/libc/test/src/math/smoke/FMaximumTest.h b/libc/test/src/math/smoke/FMaximumTest.h
new file mode 100644
index 000000000000..4db8bb93baae
--- /dev/null
+++ b/libc/test/src/math/smoke/FMaximumTest.h
@@ -0,0 +1,88 @@
+//===-- Utility class to test fmaximum[f|l] ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMTEST_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMaximumTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMaximumFunc)(T, T);
+
+ void testNaN(FMaximumFunc func) {
+ EXPECT_FP_EQ(aNaN, func(aNaN, inf));
+ EXPECT_FP_EQ(aNaN, func(neg_inf, aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, 0.0));
+ EXPECT_FP_EQ(aNaN, func(-0.0, aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(aNaN, func(T(1.2345), aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, aNaN));
+ }
+
+ void testInfArg(FMaximumFunc func) {
+ EXPECT_FP_EQ(inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(inf, func(inf, 0.0));
+ EXPECT_FP_EQ(inf, func(-0.0, inf));
+ EXPECT_FP_EQ(inf, func(inf, T(1.2345)));
+ EXPECT_FP_EQ(inf, func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMaximumFunc func) {
+ EXPECT_FP_EQ(inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(0.0, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(T(-1.2345), func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMaximumFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMaximumFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (x > y) {
+ EXPECT_FP_EQ(x, func(x, y));
+ } else {
+ EXPECT_FP_EQ(y, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMAXIMUM_TESTS(T, func) \
+ using LlvmLibcFMaximumTest = FMaximumTest<T>; \
+ TEST_F(LlvmLibcFMaximumTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMaximumTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMaximumTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMaximumTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMAXIMUMTEST_H
diff --git a/libc/test/src/math/smoke/FMinimumMagNumTest.h b/libc/test/src/math/smoke/FMinimumMagNumTest.h
new file mode 100644
index 000000000000..dec8b70740ca
--- /dev/null
+++ b/libc/test/src/math/smoke/FMinimumMagNumTest.h
@@ -0,0 +1,101 @@
+//===-- Utility class to test fminimum_mag_num[f|l] -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMMAG_NUMTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMMAG_NUMTEST_H
+
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMinimumMagNumTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMinimumMagNumFunc)(T, T);
+
+ void testNaN(FMinimumMagNumFunc func) {
+ EXPECT_FP_EQ(inf, func(aNaN, inf));
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(sNaN, inf), FE_INVALID);
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(neg_inf, sNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, aNaN)).uintval());
+ EXPECT_FP_EQ(0.0, func(aNaN, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(0.0, func(sNaN, 0.0), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(-0.0, func(-0.0, sNaN), FE_INVALID);
+ EXPECT_FP_EQ(T(-1.2345), func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.2345), func(sNaN, T(-1.2345)), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.2345), func(T(1.2345), sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(aNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, aNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, sNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, aNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, sNaN)).uintval());
+ }
+
+ void testInfArg(FMinimumMagNumFunc func) {
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(0.0, func(inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, inf));
+ EXPECT_FP_EQ(T(1.2345), func(inf, T(1.2345)));
+ EXPECT_FP_EQ(T(-1.2345), func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMinimumMagNumFunc func) {
+ EXPECT_FP_EQ(neg_inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(0.0, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(T(-1.2345), func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMinimumMagNumFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(-0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMinimumMagNumFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (LIBC_NAMESPACE::fputil::abs(x) > LIBC_NAMESPACE::fputil::abs(y)) {
+ EXPECT_FP_EQ(y, func(x, y));
+ } else {
+ EXPECT_FP_EQ(x, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMINIMUM_MAG_NUM_TESTS(T, func) \
+ using LlvmLibcFMinimumMagNumTest = FMinimumMagNumTest<T>; \
+ TEST_F(LlvmLibcFMinimumMagNumTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMinimumMagNumTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMinimumMagNumTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMinimumMagNumTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMinimumMagNumTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMMAG_NUMTEST_H
diff --git a/libc/test/src/math/smoke/FMinimumMagTest.h b/libc/test/src/math/smoke/FMinimumMagTest.h
new file mode 100644
index 000000000000..b11092e5379b
--- /dev/null
+++ b/libc/test/src/math/smoke/FMinimumMagTest.h
@@ -0,0 +1,89 @@
+//===-- Utility class to test fminimum_mag[f|l] -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUM_MAGTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUM_MAGTEST_H
+
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMinimumMagTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMinimumMagFunc)(T, T);
+
+ void testNaN(FMinimumMagFunc func) {
+ EXPECT_FP_EQ(aNaN, func(aNaN, inf));
+ EXPECT_FP_EQ(aNaN, func(neg_inf, aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, 0.0));
+ EXPECT_FP_EQ(aNaN, func(-0.0, aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(aNaN, func(T(1.2345), aNaN));
+ EXPECT_FP_EQ(aNaN, func(aNaN, aNaN));
+ }
+
+ void testInfArg(FMinimumMagFunc func) {
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(0.0, func(inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, inf));
+ EXPECT_FP_EQ(T(1.2345), func(inf, T(1.2345)));
+ EXPECT_FP_EQ(T(-1.2345), func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMinimumMagFunc func) {
+ EXPECT_FP_EQ(neg_inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(0.0, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(T(-1.2345), func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMinimumMagFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(-0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMinimumMagFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (LIBC_NAMESPACE::fputil::abs(x) < LIBC_NAMESPACE::fputil::abs(y)) {
+ EXPECT_FP_EQ(x, func(x, y));
+ } else {
+ EXPECT_FP_EQ(y, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMINIMUM_MAG_TESTS(T, func) \
+ using LlvmLibcFMinimumMagTest = FMinimumMagTest<T>; \
+ TEST_F(LlvmLibcFMinimumMagTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMinimumMagTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMinimumMagTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMinimumMagTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMinimumMagTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUM_MAGTEST_H
diff --git a/libc/test/src/math/smoke/FMinimumNumTest.h b/libc/test/src/math/smoke/FMinimumNumTest.h
new file mode 100644
index 000000000000..7fcc291b4c00
--- /dev/null
+++ b/libc/test/src/math/smoke/FMinimumNumTest.h
@@ -0,0 +1,100 @@
+//===-- Utility class to test fminimum_num[f|l] -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMNUMTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMNUMTEST_H
+
+#include "src/__support/FPUtil/FPBits.h"
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FMinimumNumTest : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FMinimumNumFunc)(T, T);
+
+ void testNaN(FMinimumNumFunc func) {
+ EXPECT_FP_EQ(inf, func(aNaN, inf));
+ EXPECT_FP_EQ_WITH_EXCEPTION(inf, func(sNaN, inf), FE_INVALID);
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(neg_inf, func(neg_inf, sNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, aNaN)).uintval());
+ EXPECT_FP_EQ(0.0, func(aNaN, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(0.0, func(sNaN, 0.0), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(-0.0, func(-0.0, sNaN), FE_INVALID);
+ EXPECT_FP_EQ(T(-1.2345), func(aNaN, T(-1.2345)));
+ EXPECT_FP_EQ(T(1.2345), func(T(1.2345), aNaN));
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.2345), func(sNaN, T(-1.2345)), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.2345), func(T(1.2345), sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(aNaN, sNaN), FE_INVALID);
+ EXPECT_FP_IS_NAN_WITH_EXCEPTION(func(sNaN, aNaN), FE_INVALID);
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(aNaN, sNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, aNaN)).uintval());
+ EXPECT_EQ(FPBits(aNaN).uintval(), FPBits(func(sNaN, sNaN)).uintval());
+ }
+
+ void testInfArg(FMinimumNumFunc func) {
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, inf));
+ EXPECT_FP_EQ(0.0, func(inf, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, inf));
+ EXPECT_FP_EQ(T(1.2345), func(inf, T(1.2345)));
+ EXPECT_FP_EQ(T(-1.2345), func(T(-1.2345), inf));
+ }
+
+ void testNegInfArg(FMinimumNumFunc func) {
+ EXPECT_FP_EQ(neg_inf, func(inf, neg_inf));
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, 0.0));
+ EXPECT_FP_EQ(neg_inf, func(-0.0, neg_inf));
+ EXPECT_FP_EQ(neg_inf, func(neg_inf, T(-1.2345)));
+ EXPECT_FP_EQ(neg_inf, func(T(1.2345), neg_inf));
+ }
+
+ void testBothZero(FMinimumNumFunc func) {
+ EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, 0.0));
+ EXPECT_FP_EQ(-0.0, func(0.0, -0.0));
+ EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+ }
+
+ void testRange(FMinimumNumFunc func) {
+ constexpr StorageType COUNT = 100'001;
+ constexpr StorageType STEP = STORAGE_MAX / COUNT;
+ for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+ ++i, v += STEP, w -= STEP) {
+ FPBits xbits(v), ybits(w);
+ if (xbits.is_inf_or_nan())
+ continue;
+ if (ybits.is_inf_or_nan())
+ continue;
+ T x = xbits.get_val();
+ T y = ybits.get_val();
+ if ((x == 0) && (y == 0))
+ continue;
+
+ if (x > y) {
+ EXPECT_FP_EQ(y, func(x, y));
+ } else {
+ EXPECT_FP_EQ(x, func(x, y));
+ }
+ }
+ }
+};
+
+#define LIST_FMINIMUM_NUM_TESTS(T, func) \
+ using LlvmLibcFMinimumNumTest = FMinimumNumTest<T>; \
+ TEST_F(LlvmLibcFMinimumNumTest, NaN) { testNaN(&func); } \
+ TEST_F(LlvmLibcFMinimumNumTest, InfArg) { testInfArg(&func); } \
+ TEST_F(LlvmLibcFMinimumNumTest, NegInfArg) { testNegInfArg(&func); } \
+ TEST_F(LlvmLibcFMinimumNumTest, BothZero) { testBothZero(&func); } \
+ TEST_F(LlvmLibcFMinimumNumTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMNUMTEST_H
diff --git a/libc/test/src/math/smoke/FMinimumTest.h b/libc/test/src/math/smoke/FMinimumTest.h
new file mode 100644
index 000000000000..bc04a6d99356
--- /dev/null
+++ b/libc/test/src/math/smoke/FMinimumTest.h
@@ -0,0 +1,88 @@
+//===-- Utility class to test fminimum[f|l] ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMTEST_H
+#define LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMTEST_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+// Utility test class for fminimum-style functions. Per the expectations
+// below: a NaN operand propagates to the result, and -0.0 orders below +0.0.
+template <typename T>
+class FMinimumTest : public LIBC_NAMESPACE::testing::Test {
+
+  DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+  typedef T (*FMinimumFunc)(T, T);
+
+  // NaN propagates: the result is NaN whenever either input is NaN.
+  void testNaN(FMinimumFunc func) {
+    EXPECT_FP_EQ(aNaN, func(aNaN, inf));
+    EXPECT_FP_EQ(aNaN, func(neg_inf, aNaN));
+    EXPECT_FP_EQ(aNaN, func(aNaN, 0.0));
+    EXPECT_FP_EQ(aNaN, func(-0.0, aNaN));
+    EXPECT_FP_EQ(aNaN, func(aNaN, T(-1.2345)));
+    EXPECT_FP_EQ(aNaN, func(T(1.2345), aNaN));
+    EXPECT_FP_EQ(aNaN, func(aNaN, aNaN));
+  }
+
+  // +inf as one operand: the other operand is the minimum.
+  void testInfArg(FMinimumFunc func) {
+    EXPECT_FP_EQ(neg_inf, func(neg_inf, inf));
+    EXPECT_FP_EQ(0.0, func(inf, 0.0));
+    EXPECT_FP_EQ(-0.0, func(-0.0, inf));
+    EXPECT_FP_EQ(T(1.2345), func(inf, T(1.2345)));
+    EXPECT_FP_EQ(T(1.2345), func(T(1.2345), inf));
+  }
+
+  // -inf as one operand: -inf is always the minimum.
+  void testNegInfArg(FMinimumFunc func) {
+    EXPECT_FP_EQ(neg_inf, func(inf, neg_inf));
+    EXPECT_FP_EQ(neg_inf, func(neg_inf, 0.0));
+    EXPECT_FP_EQ(neg_inf, func(-0.0, neg_inf));
+    EXPECT_FP_EQ(neg_inf, func(neg_inf, T(-1.2345)));
+    EXPECT_FP_EQ(neg_inf, func(T(1.2345), neg_inf));
+  }
+
+  // Signed zeros: -0.0 is treated as smaller than +0.0.
+  void testBothZero(FMinimumFunc func) {
+    EXPECT_FP_EQ(0.0, func(0.0, 0.0));
+    EXPECT_FP_EQ(-0.0, func(-0.0, 0.0));
+    EXPECT_FP_EQ(-0.0, func(0.0, -0.0));
+    EXPECT_FP_EQ(-0.0, func(-0.0, -0.0));
+  }
+
+  // Sweep ~COUNT pairs of bit patterns from opposite ends of the storage
+  // range; for each finite pair (not both zero) the result must equal the
+  // smaller of the two values.
+  void testRange(FMinimumFunc func) {
+    constexpr StorageType COUNT = 100'001;
+    constexpr StorageType STEP = STORAGE_MAX / COUNT;
+    for (StorageType i = 0, v = 0, w = STORAGE_MAX; i <= COUNT;
+         ++i, v += STEP, w -= STEP) {
+      FPBits xbits(v), ybits(w);
+      // Non-finite and all-zero pairs are covered by the dedicated tests.
+      if (xbits.is_inf_or_nan())
+        continue;
+      if (ybits.is_inf_or_nan())
+        continue;
+      T x = xbits.get_val();
+      T y = ybits.get_val();
+      if ((x == 0) && (y == 0))
+        continue;
+
+      if (x > y) {
+        EXPECT_FP_EQ(y, func(x, y));
+      } else {
+        EXPECT_FP_EQ(x, func(x, y));
+      }
+    }
+  }
+};
+
+// Instantiates the fminimum test suite for a given type T and function.
+#define LIST_FMINIMUM_TESTS(T, func)                                          \
+  using LlvmLibcFMinimumTest = FMinimumTest<T>;                               \
+  TEST_F(LlvmLibcFMinimumTest, NaN) { testNaN(&func); }                       \
+  TEST_F(LlvmLibcFMinimumTest, InfArg) { testInfArg(&func); }                 \
+  TEST_F(LlvmLibcFMinimumTest, NegInfArg) { testNegInfArg(&func); }           \
+  TEST_F(LlvmLibcFMinimumTest, BothZero) { testBothZero(&func); }             \
+  TEST_F(LlvmLibcFMinimumTest, Range) { testRange(&func); }
+
+#endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_FMINIMUMTEST_H
diff --git a/libc/test/src/math/smoke/FromfpTest.h b/libc/test/src/math/smoke/FromfpTest.h
new file mode 100644
index 000000000000..d3a61baafda1
--- /dev/null
+++ b/libc/test/src/math/smoke/FromfpTest.h
@@ -0,0 +1,528 @@
+//===-- Utility class to test different flavors of fromfp -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_TEST_SRC_MATH_SMOKE_FROMFPTEST_H
+#define LIBC_TEST_SRC_MATH_SMOKE_FROMFPTEST_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+// Shared test logic for the fromfp family: per the expectations below, the
+// function rounds its first argument to an integral value in the rounding
+// direction given by the second argument, and the result must be
+// representable in `width` signed bits (third argument); otherwise it
+// returns NaN and raises FE_INVALID.
+template <typename T>
+class FromfpTestTemplate : public LIBC_NAMESPACE::testing::Test {
+
+  DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+  typedef T (*FromfpFunc)(T, int, unsigned int);
+
+  // Zeros pass through unchanged; inf/-inf/NaN yield NaN + FE_INVALID, for
+  // every rounding direction, when the width is nonzero.
+  void testSpecialNumbersNonzeroWidth(FromfpFunc func) {
+    for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+      EXPECT_FP_EQ(zero, func(zero, rnd, 32U));
+      EXPECT_FP_EQ(neg_zero, func(neg_zero, rnd, 32U));
+
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 32U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 32U), FE_INVALID);
+
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 32U), FE_INVALID);
+    }
+  }
+
+  // A width of 0 is invalid: every input yields NaN + FE_INVALID.
+  void testSpecialNumbersZeroWidth(FromfpFunc func) {
+    for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(zero, rnd, 0U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_zero, rnd, 0U), FE_INVALID);
+
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 0U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 0U), FE_INVALID);
+
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 0U), FE_INVALID);
+    }
+  }
+
+  // Already-integral values that fit in `width` signed bits are returned
+  // unchanged in every rounding direction.
+  void testRoundedNumbersWithinRange(FromfpFunc func) {
+    for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+      EXPECT_FP_EQ(T(1.0), func(T(1.0), rnd, 2U));
+      EXPECT_FP_EQ(T(-1.0), func(T(-1.0), rnd, 1U));
+      EXPECT_FP_EQ(T(10.0), func(T(10.0), rnd, 5U));
+      EXPECT_FP_EQ(T(-10.0), func(T(-10.0), rnd, 5U));
+      EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 12U));
+      EXPECT_FP_EQ(T(-1234.0), func(T(-1234.0), rnd, 12U));
+      EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 65U));
+      EXPECT_FP_EQ(T(-1234.0), func(T(-1234.0), rnd, 65U));
+    }
+  }
+
+  // Integral values that do NOT fit in `width` signed bits yield NaN +
+  // FE_INVALID regardless of the rounding direction.
+  void testRoundedNumbersOutsideRange(FromfpFunc func) {
+    for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.0), rnd, 1U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.0), rnd, 4U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.0), rnd, 4U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.0), rnd, 11U), FE_INVALID);
+      EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.0), rnd, 11U), FE_INVALID);
+    }
+  }
+
+  // FP_INT_UPWARD (round toward +inf) on fractional inputs whose rounded
+  // result fits in `width` bits.
+  void testFractionsUpwardWithinRange(FromfpFunc func) {
+    EXPECT_FP_EQ(T(1.0), func(T(0.5), FP_INT_UPWARD, 2U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.5), FP_INT_UPWARD, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(0.115), FP_INT_UPWARD, 2U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_UPWARD, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(0.715), FP_INT_UPWARD, 2U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.715), FP_INT_UPWARD, 1U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.3), FP_INT_UPWARD, 3U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.3), FP_INT_UPWARD, 1U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.5), FP_INT_UPWARD, 3U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.5), FP_INT_UPWARD, 1U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.75), FP_INT_UPWARD, 3U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.75), FP_INT_UPWARD, 1U));
+    EXPECT_FP_EQ(T(11.0), func(T(10.32), FP_INT_UPWARD, 5U));
+    EXPECT_FP_EQ(T(-10.0), func(T(-10.32), FP_INT_UPWARD, 5U));
+    EXPECT_FP_EQ(T(11.0), func(T(10.65), FP_INT_UPWARD, 5U));
+    EXPECT_FP_EQ(T(-10.0), func(T(-10.65), FP_INT_UPWARD, 5U));
+    EXPECT_FP_EQ(T(1235.0), func(T(1234.38), FP_INT_UPWARD, 12U));
+    EXPECT_FP_EQ(T(-1234.0), func(T(-1234.38), FP_INT_UPWARD, 12U));
+    EXPECT_FP_EQ(T(1235.0), func(T(1234.96), FP_INT_UPWARD, 12U));
+    EXPECT_FP_EQ(T(-1234.0), func(T(-1234.96), FP_INT_UPWARD, 12U));
+  }
+
+  // FP_INT_UPWARD results that overflow `width` bits yield NaN + FE_INVALID.
+  void testFractionsUpwardOutsideRange(FromfpFunc func) {
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.5), FP_INT_UPWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.115), FP_INT_UPWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.715), FP_INT_UPWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_UPWARD, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_UPWARD, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_UPWARD, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_UPWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_UPWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_UPWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_UPWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_UPWARD, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_UPWARD, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_UPWARD, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_UPWARD, 11U),
+                                FE_INVALID);
+  }
+
+  // FP_INT_DOWNWARD (round toward -inf) on fractional inputs whose rounded
+  // result fits in `width` bits.
+  void testFractionsDownwardWithinRange(FromfpFunc func) {
+    EXPECT_FP_EQ(T(0.0), func(T(0.5), FP_INT_DOWNWARD, 1U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.5), FP_INT_DOWNWARD, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_DOWNWARD, 1U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.115), FP_INT_DOWNWARD, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.715), FP_INT_DOWNWARD, 1U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.715), FP_INT_DOWNWARD, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_DOWNWARD, 2U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.3), FP_INT_DOWNWARD, 2U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.5), FP_INT_DOWNWARD, 2U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.5), FP_INT_DOWNWARD, 2U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.75), FP_INT_DOWNWARD, 2U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.75), FP_INT_DOWNWARD, 2U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_DOWNWARD, 5U));
+    EXPECT_FP_EQ(T(-11.0), func(T(-10.32), FP_INT_DOWNWARD, 5U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.65), FP_INT_DOWNWARD, 5U));
+    EXPECT_FP_EQ(T(-11.0), func(T(-10.65), FP_INT_DOWNWARD, 5U));
+    EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_DOWNWARD, 12U));
+    EXPECT_FP_EQ(T(-1235.0), func(T(-1234.38), FP_INT_DOWNWARD, 12U));
+    EXPECT_FP_EQ(T(1234.0), func(T(1234.96), FP_INT_DOWNWARD, 12U));
+    EXPECT_FP_EQ(T(-1235.0), func(T(-1234.96), FP_INT_DOWNWARD, 12U));
+  }
+
+  // FP_INT_DOWNWARD results that overflow `width` bits yield NaN + FE_INVALID.
+  void testFractionsDownwardOutsideRange(FromfpFunc func) {
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_DOWNWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_DOWNWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_DOWNWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_DOWNWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_DOWNWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_DOWNWARD, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_DOWNWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_DOWNWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_DOWNWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_DOWNWARD, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_DOWNWARD, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_DOWNWARD, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_DOWNWARD, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_DOWNWARD, 11U),
+                                FE_INVALID);
+  }
+
+  // FP_INT_TOWARDZERO (truncation) on fractional inputs whose rounded result
+  // fits in `width` bits. Note width 1U admits -1..0, so negative truncations
+  // to -1 remain in range.
+  void testFractionsTowardZeroWithinRange(FromfpFunc func) {
+    EXPECT_FP_EQ(T(0.0), func(T(0.5), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.5), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.715), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.715), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_TOWARDZERO, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.3), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.5), FP_INT_TOWARDZERO, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.5), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.75), FP_INT_TOWARDZERO, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.75), FP_INT_TOWARDZERO, 1U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_TOWARDZERO, 5U));
+    EXPECT_FP_EQ(T(-10.0), func(T(-10.32), FP_INT_TOWARDZERO, 5U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.65), FP_INT_TOWARDZERO, 5U));
+    EXPECT_FP_EQ(T(-10.0), func(T(-10.65), FP_INT_TOWARDZERO, 5U));
+    EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_TOWARDZERO, 12U));
+    EXPECT_FP_EQ(T(-1234.0), func(T(-1234.38), FP_INT_TOWARDZERO, 12U));
+    EXPECT_FP_EQ(T(1234.0), func(T(1234.96), FP_INT_TOWARDZERO, 12U));
+    EXPECT_FP_EQ(T(-1234.0), func(T(-1234.96), FP_INT_TOWARDZERO, 12U));
+  }
+
+  // FP_INT_TOWARDZERO results that overflow `width` bits yield NaN +
+  // FE_INVALID.
+  void testFractionsTowardZeroOutsideRange(FromfpFunc func) {
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_TOWARDZERO, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_TOWARDZERO, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_TOWARDZERO, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TOWARDZERO, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TOWARDZERO, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TOWARDZERO, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TOWARDZERO, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TOWARDZERO, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TOWARDZERO, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TOWARDZERO, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TOWARDZERO, 11U),
+                                FE_INVALID);
+  }
+
+  // FP_INT_TONEARESTFROMZERO (round to nearest, ties away from zero) within
+  // range: e.g. 0.5 -> 1.0 and -0.5 -> -1.0.
+  void testFractionsToNearestFromZeroWithinRange(FromfpFunc func) {
+    EXPECT_FP_EQ(T(1.0), func(T(0.5), FP_INT_TONEARESTFROMZERO, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.5), FP_INT_TONEARESTFROMZERO, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_TONEARESTFROMZERO, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_TONEARESTFROMZERO, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(0.715), FP_INT_TONEARESTFROMZERO, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.715), FP_INT_TONEARESTFROMZERO, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_TONEARESTFROMZERO, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.3), FP_INT_TONEARESTFROMZERO, 1U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.5), FP_INT_TONEARESTFROMZERO, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.5), FP_INT_TONEARESTFROMZERO, 2U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.75), FP_INT_TONEARESTFROMZERO, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.75), FP_INT_TONEARESTFROMZERO, 2U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_TONEARESTFROMZERO, 5U));
+    EXPECT_FP_EQ(T(-10.0), func(T(-10.32), FP_INT_TONEARESTFROMZERO, 5U));
+    EXPECT_FP_EQ(T(11.0), func(T(10.65), FP_INT_TONEARESTFROMZERO, 5U));
+    EXPECT_FP_EQ(T(-11.0), func(T(-10.65), FP_INT_TONEARESTFROMZERO, 5U));
+    EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_TONEARESTFROMZERO, 12U));
+    EXPECT_FP_EQ(T(-1234.0), func(T(-1234.38), FP_INT_TONEARESTFROMZERO, 12U));
+    EXPECT_FP_EQ(T(1235.0), func(T(1234.96), FP_INT_TONEARESTFROMZERO, 12U));
+    EXPECT_FP_EQ(T(-1235.0), func(T(-1234.96), FP_INT_TONEARESTFROMZERO, 12U));
+  }
+
+  // FP_INT_TONEARESTFROMZERO results that overflow `width` bits yield NaN +
+  // FE_INVALID.
+  void testFractionsToNearestFromZeroOutsideRange(FromfpFunc func) {
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(0.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(0.715), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1.3), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1.5), FP_INT_TONEARESTFROMZERO, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1.75), FP_INT_TONEARESTFROMZERO, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1.75), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(10.32), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-10.32), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(10.65), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-10.65), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1234.38), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1234.38), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1234.96), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1234.96), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+  }
+
+  // FP_INT_TONEAREST within range; the tie cases below (0.5 -> 0.0,
+  // 2.5 -> 2.0, 5.5 -> 6.0) show ties resolved to the even integer.
+  void testFractionsToNearestWithinRange(FromfpFunc func) {
+    EXPECT_FP_EQ(T(0.0), func(T(0.5), FP_INT_TONEAREST, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.5), FP_INT_TONEAREST, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_TONEAREST, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_TONEAREST, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(0.715), FP_INT_TONEAREST, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.715), FP_INT_TONEAREST, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_TONEAREST, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.3), FP_INT_TONEAREST, 1U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.5), FP_INT_TONEAREST, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.5), FP_INT_TONEAREST, 2U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.75), FP_INT_TONEAREST, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.75), FP_INT_TONEAREST, 2U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_TONEAREST, 5U));
+    EXPECT_FP_EQ(T(-10.0), func(T(-10.32), FP_INT_TONEAREST, 5U));
+    EXPECT_FP_EQ(T(11.0), func(T(10.65), FP_INT_TONEAREST, 5U));
+    EXPECT_FP_EQ(T(-11.0), func(T(-10.65), FP_INT_TONEAREST, 5U));
+    EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_TONEAREST, 12U));
+    EXPECT_FP_EQ(T(-1234.0), func(T(-1234.38), FP_INT_TONEAREST, 12U));
+    EXPECT_FP_EQ(T(1235.0), func(T(1234.96), FP_INT_TONEAREST, 12U));
+    EXPECT_FP_EQ(T(-1235.0), func(T(-1234.96), FP_INT_TONEAREST, 12U));
+
+    EXPECT_FP_EQ(T(2.0), func(T(2.3), FP_INT_TONEAREST, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-2.3), FP_INT_TONEAREST, 2U));
+    EXPECT_FP_EQ(T(2.0), func(T(2.5), FP_INT_TONEAREST, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-2.5), FP_INT_TONEAREST, 2U));
+    EXPECT_FP_EQ(T(3.0), func(T(2.75), FP_INT_TONEAREST, 3U));
+    EXPECT_FP_EQ(T(-3.0), func(T(-2.75), FP_INT_TONEAREST, 3U));
+    EXPECT_FP_EQ(T(5.0), func(T(5.3), FP_INT_TONEAREST, 4U));
+    EXPECT_FP_EQ(T(-5.0), func(T(-5.3), FP_INT_TONEAREST, 4U));
+    EXPECT_FP_EQ(T(6.0), func(T(5.5), FP_INT_TONEAREST, 4U));
+    EXPECT_FP_EQ(T(-6.0), func(T(-5.5), FP_INT_TONEAREST, 4U));
+    EXPECT_FP_EQ(T(6.0), func(T(5.75), FP_INT_TONEAREST, 4U));
+    EXPECT_FP_EQ(T(-6.0), func(T(-5.75), FP_INT_TONEAREST, 4U));
+  }
+
+  // FP_INT_TONEAREST results that overflow `width` bits yield NaN +
+  // FE_INVALID.
+  void testFractionsToNearestOutsideRange(FromfpFunc func) {
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.715), FP_INT_TONEAREST, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_TONEAREST, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_TONEAREST, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_TONEAREST, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_TONEAREST, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_TONEAREST, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TONEAREST, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TONEAREST, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TONEAREST, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TONEAREST, 4U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TONEAREST, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TONEAREST, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TONEAREST, 11U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TONEAREST, 11U),
+                                FE_INVALID);
+
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.3), FP_INT_TONEAREST, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.3), FP_INT_TONEAREST, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.5), FP_INT_TONEAREST, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.5), FP_INT_TONEAREST, 1U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.75), FP_INT_TONEAREST, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.75), FP_INT_TONEAREST, 2U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.3), FP_INT_TONEAREST, 3U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.3), FP_INT_TONEAREST, 3U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.5), FP_INT_TONEAREST, 3U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.5), FP_INT_TONEAREST, 3U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.75), FP_INT_TONEAREST, 3U),
+                                FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.75), FP_INT_TONEAREST, 3U),
+                                FE_INVALID);
+  }
+
+  // An unknown rounding-direction value: expectations here mirror
+  // FP_INT_TONEAREST, i.e. the fallback behaves as round-to-nearest.
+  void testFractionsToNearestFallbackWithinRange(FromfpFunc func) {
+    EXPECT_FP_EQ(T(0.0), func(T(0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+    EXPECT_FP_EQ(T(0.0), func(T(0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+    EXPECT_FP_EQ(T(-0.0), func(T(-0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+    EXPECT_FP_EQ(T(1.0), func(T(1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+    EXPECT_FP_EQ(T(-1.0), func(T(-1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+    EXPECT_FP_EQ(T(2.0), func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+    EXPECT_FP_EQ(T(10.0), func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U));
+    EXPECT_FP_EQ(T(-10.0),
+                 func(T(-10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U));
+    EXPECT_FP_EQ(T(11.0), func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U));
+    EXPECT_FP_EQ(T(-11.0),
+                 func(T(-10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U));
+    EXPECT_FP_EQ(T(1234.0),
+                 func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U));
+    EXPECT_FP_EQ(T(-1234.0),
+                 func(T(-1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U));
+    EXPECT_FP_EQ(T(1235.0),
+                 func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U));
+    EXPECT_FP_EQ(T(-1235.0),
+                 func(T(-1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U));
+
+    EXPECT_FP_EQ(T(2.0), func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+    EXPECT_FP_EQ(T(2.0), func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+    EXPECT_FP_EQ(T(-2.0), func(T(-2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+    EXPECT_FP_EQ(T(3.0), func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+    EXPECT_FP_EQ(T(-3.0), func(T(-2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+    EXPECT_FP_EQ(T(5.0), func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+    EXPECT_FP_EQ(T(-5.0), func(T(-5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+    EXPECT_FP_EQ(T(6.0), func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+    EXPECT_FP_EQ(T(-6.0), func(T(-5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+    EXPECT_FP_EQ(T(6.0), func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+    EXPECT_FP_EQ(T(-6.0), func(T(-5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+  }
+
+  // Unknown rounding direction, results that overflow `width` bits: NaN +
+  // FE_INVALID (same cases as the FP_INT_TONEAREST out-of-range test).
+  void testFractionsToNearestFallbackOutsideRange(FromfpFunc func) {
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+        FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+        FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+        FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+        FE_INVALID);
+
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+    EXPECT_FP_EQ_WITH_EXCEPTION(
+        aNaN, func(T(-5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+  }
+};
+
+// Instantiates the full fromfp test suite for a given type T and function.
+#define LIST_FROMFP_TESTS(T, func)                                            \
+  using LlvmLibcFromfpTest = FromfpTestTemplate<T>;                           \
+  TEST_F(LlvmLibcFromfpTest, SpecialNumbersNonzeroWidth) {                    \
+    testSpecialNumbersNonzeroWidth(&func);                                    \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, SpecialNumbersZeroWidth) {                       \
+    testSpecialNumbersZeroWidth(&func);                                       \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, RoundedNumbersWithinRange) {                     \
+    testRoundedNumbersWithinRange(&func);                                     \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, RoundedNumbersOutsideRange) {                    \
+    testRoundedNumbersOutsideRange(&func);                                    \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsUpwardWithinRange) {                    \
+    testFractionsUpwardWithinRange(&func);                                    \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsUpwardOutsideRange) {                   \
+    testFractionsUpwardOutsideRange(&func);                                   \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsDownwardWithinRange) {                  \
+    testFractionsDownwardWithinRange(&func);                                  \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsDownwardOutsideRange) {                 \
+    testFractionsDownwardOutsideRange(&func);                                 \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsTowardZeroWithinRange) {                \
+    testFractionsTowardZeroWithinRange(&func);                                \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsTowardZeroOutsideRange) {               \
+    testFractionsTowardZeroOutsideRange(&func);                               \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsToNearestFromZeroWithinRange) {         \
+    testFractionsToNearestFromZeroWithinRange(&func);                         \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsToNearestFromZeroOutsideRange) {        \
+    testFractionsToNearestFromZeroOutsideRange(&func);                        \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsToNearestWithinRange) {                 \
+    testFractionsToNearestWithinRange(&func);                                 \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsToNearestOutsideRange) {                \
+    testFractionsToNearestOutsideRange(&func);                                \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsToNearestFallbackWithinRange) {         \
+    testFractionsToNearestFallbackWithinRange(&func);                         \
+  }                                                                           \
+  TEST_F(LlvmLibcFromfpTest, FractionsToNearestFallbackOutsideRange) {        \
+    testFractionsToNearestFallbackOutsideRange(&func);                        \
+  }
+
+#endif // LIBC_TEST_SRC_MATH_SMOKE_FROMFPTEST_H
diff --git a/libc/test/src/math/smoke/FromfpxTest.h b/libc/test/src/math/smoke/FromfpxTest.h
new file mode 100644
index 000000000000..f3a1680b05aa
--- /dev/null
+++ b/libc/test/src/math/smoke/FromfpxTest.h
@@ -0,0 +1,690 @@
+//===-- Utility class to test different flavors of fromfpx ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_TEST_SRC_MATH_SMOKE_FROMFPXTEST_H
+#define LIBC_TEST_SRC_MATH_SMOKE_FROMFPXTEST_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class FromfpxTestTemplate : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*FromfpxFunc)(T, int, unsigned int);
+
+ void testSpecialNumbersNonzeroWidth(FromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ(zero, func(zero, rnd, 32U));
+ EXPECT_FP_EQ(neg_zero, func(neg_zero, rnd, 32U));
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 32U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 32U), FE_INVALID);
+ }
+ }
+
+ void testSpecialNumbersZeroWidth(FromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(zero, rnd, 0U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_zero, rnd, 0U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 0U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 0U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 0U), FE_INVALID);
+ }
+ }
+
+ void testRoundedNumbersWithinRange(FromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ(T(1.0), func(T(1.0), rnd, 2U));
+ EXPECT_FP_EQ(T(-1.0), func(T(-1.0), rnd, 1U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.0), rnd, 5U));
+ EXPECT_FP_EQ(T(-10.0), func(T(-10.0), rnd, 5U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 12U));
+ EXPECT_FP_EQ(T(-1234.0), func(T(-1234.0), rnd, 12U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 65U));
+ EXPECT_FP_EQ(T(-1234.0), func(T(-1234.0), rnd, 65U));
+ }
+ }
+
+ void testRoundedNumbersOutsideRange(FromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.0), rnd, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.0), rnd, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.0), rnd, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.0), rnd, 11U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.0), rnd, 11U), FE_INVALID);
+ }
+ }
+
+ void testFractionsUpwardWithinRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.5), FP_INT_UPWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.5), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.115), FP_INT_UPWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.115), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.715), FP_INT_UPWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.715), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.3), FP_INT_UPWARD, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.3), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.5), FP_INT_UPWARD, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.5), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.75), FP_INT_UPWARD, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.75), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(11.0), func(T(10.32), FP_INT_UPWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-10.0), func(T(-10.32), FP_INT_UPWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(11.0), func(T(10.65), FP_INT_UPWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-10.0), func(T(-10.65), FP_INT_UPWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1235.0), func(T(1234.38), FP_INT_UPWARD, 12U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.38), FP_INT_UPWARD, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1235.0), func(T(1234.96), FP_INT_UPWARD, 12U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.96), FP_INT_UPWARD, 12U), FE_INEXACT);
+ }
+
+ void testFractionsUpwardOutsideRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.5), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.115), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.715), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_UPWARD, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_UPWARD, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_UPWARD, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_UPWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_UPWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_UPWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_UPWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_UPWARD, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_UPWARD, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_UPWARD, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_UPWARD, 11U),
+ FE_INVALID);
+ }
+
+ void testFractionsDownwardWithinRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.5), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-0.5), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.115), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-0.115), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.715), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-0.715), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.3), FP_INT_DOWNWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-1.3), FP_INT_DOWNWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.5), FP_INT_DOWNWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-1.5), FP_INT_DOWNWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.75), FP_INT_DOWNWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-1.75), FP_INT_DOWNWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.32), FP_INT_DOWNWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-11.0), func(T(-10.32), FP_INT_DOWNWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.65), FP_INT_DOWNWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-11.0), func(T(-10.65), FP_INT_DOWNWARD, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_DOWNWARD, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1235.0), func(T(-1234.38), FP_INT_DOWNWARD, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.96), FP_INT_DOWNWARD, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1235.0), func(T(-1234.96), FP_INT_DOWNWARD, 12U), FE_INEXACT);
+ }
+
+ void testFractionsDownwardOutsideRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_DOWNWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_DOWNWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_DOWNWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_DOWNWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_DOWNWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_DOWNWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_DOWNWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_DOWNWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_DOWNWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_DOWNWARD, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_DOWNWARD, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_DOWNWARD, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_DOWNWARD, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_DOWNWARD, 11U),
+ FE_INVALID);
+ }
+
+ void testFractionsTowardZeroWithinRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.5), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.5), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.115), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.115), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.715), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.715), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.3), FP_INT_TOWARDZERO, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.3), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.5), FP_INT_TOWARDZERO, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.5), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.75), FP_INT_TOWARDZERO, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.75), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.32), FP_INT_TOWARDZERO, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-10.0), func(T(-10.32), FP_INT_TOWARDZERO, 5U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.65), FP_INT_TOWARDZERO, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-10.0), func(T(-10.65), FP_INT_TOWARDZERO, 5U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_TOWARDZERO, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.38), FP_INT_TOWARDZERO, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.96), FP_INT_TOWARDZERO, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.96), FP_INT_TOWARDZERO, 12U), FE_INEXACT);
+ }
+
+ void testFractionsTowardZeroOutsideRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_TOWARDZERO, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_TOWARDZERO, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_TOWARDZERO, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TOWARDZERO, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TOWARDZERO, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TOWARDZERO, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TOWARDZERO, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TOWARDZERO, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TOWARDZERO, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TOWARDZERO, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TOWARDZERO, 11U),
+ FE_INVALID);
+ }
+
+ void testFractionsToNearestFromZeroWithinRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(0.5), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1.0), func(T(-0.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(0.0), func(T(0.115), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-0.0), func(T(-0.115), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(0.715), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1.0), func(T(-0.715), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(1.3), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1.0), func(T(-1.3), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.5), FP_INT_TONEARESTFROMZERO, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-2.0), func(T(-1.5), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.75), FP_INT_TONEARESTFROMZERO, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-2.0), func(T(-1.75), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(10.0), func(T(10.32), FP_INT_TONEARESTFROMZERO, 5U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-10.0), func(T(-10.32), FP_INT_TONEARESTFROMZERO, 5U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(11.0), func(T(10.65), FP_INT_TONEARESTFROMZERO, 5U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-11.0), func(T(-10.65), FP_INT_TONEARESTFROMZERO, 5U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_TONEARESTFROMZERO, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.38), FP_INT_TONEARESTFROMZERO, 12U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1235.0), func(T(1234.96), FP_INT_TONEARESTFROMZERO, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1235.0), func(T(-1234.96), FP_INT_TONEARESTFROMZERO, 12U),
+ FE_INEXACT);
+ }
+
+ void testFractionsToNearestFromZeroOutsideRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(0.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(0.715), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.3), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.5), FP_INT_TONEARESTFROMZERO, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.75), FP_INT_TONEARESTFROMZERO, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.75), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.32), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.32), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.65), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.65), FP_INT_TONEARESTFROMZERO, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.38), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.38), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.96), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.96), FP_INT_TONEARESTFROMZERO, 11U), FE_INVALID);
+ }
+
+ void testFractionsToNearestWithinRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.5), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.5), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.115), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.115), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.715), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-0.715), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.3), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-1.0), func(T(-1.3), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.5), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-1.5), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.75), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-1.75), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.32), FP_INT_TONEAREST, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-10.0), func(T(-10.32), FP_INT_TONEAREST, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(11.0), func(T(10.65), FP_INT_TONEAREST, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-11.0), func(T(-10.65), FP_INT_TONEAREST, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_TONEAREST, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.38), FP_INT_TONEAREST, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1235.0), func(T(1234.96), FP_INT_TONEAREST, 12U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1235.0), func(T(-1234.96), FP_INT_TONEAREST, 12U), FE_INEXACT);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(2.3), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-2.3), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(2.5), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-2.0), func(T(-2.5), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(3.0), func(T(2.75), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-3.0), func(T(-2.75), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(5.0), func(T(5.3), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-5.0), func(T(-5.3), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(6.0), func(T(5.5), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-6.0), func(T(-5.5), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(6.0), func(T(5.75), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-6.0), func(T(-5.75), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ }
+
+ void testFractionsToNearestOutsideRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(0.715), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TONEAREST, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TONEAREST, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TONEAREST, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TONEAREST, 4U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TONEAREST, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TONEAREST, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TONEAREST, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TONEAREST, 11U),
+ FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.3), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.3), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.5), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.5), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.75), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.75), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.3), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.3), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.5), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.5), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.75), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.75), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ }
+
+ void testFractionsToNearestFallbackWithinRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(0.0), func(T(0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-0.0), func(T(-0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(0.0), func(T(0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-0.0), func(T(-0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1.0), func(T(-0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1.0), func(T(-1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-2.0), func(T(-1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-2.0), func(T(-1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(10.0), func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-10.0), func(T(-10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(11.0), func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-11.0), func(T(-10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 5U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1234.0), func(T(-1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1235.0), func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-1235.0), func(T(-1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 12U),
+ FE_INEXACT);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-2.0), func(T(-2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-2.0), func(T(-2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(3.0), func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-3.0), func(T(-2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(5.0), func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-5.0), func(T(-5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(6.0), func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-6.0), func(T(-5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(6.0), func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-6.0), func(T(-5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U),
+ FE_INEXACT);
+ }
+
+ void testFractionsToNearestFallbackOutsideRange(FromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+ FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ }
+};
+
+#define LIST_FROMFPX_TESTS(T, func) \
+ using LlvmLibcFromfpxTest = FromfpxTestTemplate<T>; \
+ TEST_F(LlvmLibcFromfpxTest, SpecialNumbersNonzeroWidth) { \
+ testSpecialNumbersNonzeroWidth(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, SpecialNumbersZeroWidth) { \
+ testSpecialNumbersZeroWidth(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, RoundedNumbersWithinRange) { \
+ testRoundedNumbersWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, RoundedNumbersOutsideRange) { \
+ testRoundedNumbersOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsUpwardWithinRange) { \
+ testFractionsUpwardWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsUpwardOutsideRange) { \
+ testFractionsUpwardOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsDownwardWithinRange) { \
+ testFractionsDownwardWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsDownwardOutsideRange) { \
+ testFractionsDownwardOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsTowardZeroWithinRange) { \
+ testFractionsTowardZeroWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsTowardZeroOutsideRange) { \
+ testFractionsTowardZeroOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsToNearestFromZeroWithinRange) { \
+ testFractionsToNearestFromZeroWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsToNearestFromZeroOutsideRange) { \
+ testFractionsToNearestFromZeroOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsToNearestWithinRange) { \
+ testFractionsToNearestWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsToNearestOutsideRange) { \
+ testFractionsToNearestOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsToNearestFallbackWithinRange) { \
+ testFractionsToNearestFallbackWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcFromfpxTest, FractionsToNearestFallbackOutsideRange) { \
+ testFractionsToNearestFallbackOutsideRange(&func); \
+ }
+
+#endif // LIBC_TEST_SRC_MATH_SMOKE_FROMFPXTEST_H
diff --git a/libc/test/src/math/smoke/UfromfpTest.h b/libc/test/src/math/smoke/UfromfpTest.h
new file mode 100644
index 000000000000..9ad1e6dce945
--- /dev/null
+++ b/libc/test/src/math/smoke/UfromfpTest.h
@@ -0,0 +1,462 @@
+//===-- Utility class to test different flavors of ufromfp ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_TEST_SRC_MATH_SMOKE_UFROMFPTEST_H
+#define LIBC_TEST_SRC_MATH_SMOKE_UFROMFPTEST_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class UfromfpTestTemplate : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*UfromfpFunc)(T, int, unsigned int);
+
+ void testSpecialNumbersNonzeroWidth(UfromfpFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ(zero, func(zero, rnd, 32U));
+ EXPECT_FP_EQ(neg_zero, func(neg_zero, rnd, 32U));
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 32U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 32U), FE_INVALID);
+ }
+ }
+
+ void testSpecialNumbersZeroWidth(UfromfpFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(zero, rnd, 0U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_zero, rnd, 0U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 0U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 0U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 0U), FE_INVALID);
+ }
+ }
+
+ void testRoundedNumbersWithinRange(UfromfpFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ(T(1.0), func(T(1.0), rnd, 1U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.0), rnd, 4U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 11U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 64U));
+ }
+ }
+
+ void testRoundedNumbersOutsideRange(UfromfpFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.0), rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.0), rnd, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.0), rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.0), rnd, 10U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.0), rnd, 32U), FE_INVALID);
+ }
+ }
+
+ void testFractionsUpwardWithinRange(UfromfpFunc func) {
+ EXPECT_FP_EQ(T(1.0), func(T(0.5), FP_INT_UPWARD, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.5), FP_INT_UPWARD, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(0.115), FP_INT_UPWARD, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_UPWARD, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(0.715), FP_INT_UPWARD, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.715), FP_INT_UPWARD, 1U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.3), FP_INT_UPWARD, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.5), FP_INT_UPWARD, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.75), FP_INT_UPWARD, 2U));
+ EXPECT_FP_EQ(T(11.0), func(T(10.32), FP_INT_UPWARD, 4U));
+ EXPECT_FP_EQ(T(11.0), func(T(10.65), FP_INT_UPWARD, 4U));
+ EXPECT_FP_EQ(T(1235.0), func(T(1234.38), FP_INT_UPWARD, 11U));
+ EXPECT_FP_EQ(T(1235.0), func(T(1234.96), FP_INT_UPWARD, 11U));
+ }
+
+ void testFractionsUpwardOutsideRange(UfromfpFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_UPWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_UPWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_UPWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_UPWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_UPWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsDownwardWithinRange(UfromfpFunc func) {
+ EXPECT_FP_EQ(T(0.0), func(T(0.5), FP_INT_DOWNWARD, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_DOWNWARD, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.715), FP_INT_DOWNWARD, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_DOWNWARD, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.5), FP_INT_DOWNWARD, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.75), FP_INT_DOWNWARD, 1U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_DOWNWARD, 4U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.65), FP_INT_DOWNWARD, 4U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_DOWNWARD, 11U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.96), FP_INT_DOWNWARD, 11U));
+ }
+
+ void testFractionsDownwardOutsideRange(UfromfpFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.5), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.115), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.715), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_DOWNWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_DOWNWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_DOWNWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_DOWNWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsTowardZeroWithinRange(UfromfpFunc func) {
+ EXPECT_FP_EQ(T(0.0), func(T(0.5), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.5), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.715), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.715), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.5), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.75), FP_INT_TOWARDZERO, 1U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_TOWARDZERO, 4U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.65), FP_INT_TOWARDZERO, 4U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_TOWARDZERO, 11U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.96), FP_INT_TOWARDZERO, 11U));
+ }
+
+ void testFractionsTowardZeroOutsideRange(UfromfpFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TOWARDZERO, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TOWARDZERO, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TOWARDZERO, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TOWARDZERO, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsToNearestFromZeroWithinRange(UfromfpFunc func) {
+ EXPECT_FP_EQ(T(1.0), func(T(0.5), FP_INT_TONEARESTFROMZERO, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_TONEARESTFROMZERO, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_TONEARESTFROMZERO, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(0.715), FP_INT_TONEARESTFROMZERO, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_TONEARESTFROMZERO, 1U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.5), FP_INT_TONEARESTFROMZERO, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.75), FP_INT_TONEARESTFROMZERO, 2U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_TONEARESTFROMZERO, 4U));
+ EXPECT_FP_EQ(T(11.0), func(T(10.65), FP_INT_TONEARESTFROMZERO, 4U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_TONEARESTFROMZERO, 11U));
+ EXPECT_FP_EQ(T(1235.0), func(T(1234.96), FP_INT_TONEARESTFROMZERO, 11U));
+ }
+
+ void testFractionsToNearestFromZeroOutsideRange(UfromfpFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-0.5), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-0.715), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.3), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.5), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.75), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.75), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.32), FP_INT_TONEARESTFROMZERO, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.32), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.65), FP_INT_TONEARESTFROMZERO, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.65), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.38), FP_INT_TONEARESTFROMZERO, 10U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.38), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.96), FP_INT_TONEARESTFROMZERO, 10U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.96), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ }
+
+ void testFractionsToNearestWithinRange(UfromfpFunc func) {
+ EXPECT_FP_EQ(T(0.0), func(T(0.5), FP_INT_TONEAREST, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.5), FP_INT_TONEAREST, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.115), FP_INT_TONEAREST, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.115), FP_INT_TONEAREST, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(0.715), FP_INT_TONEAREST, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.3), FP_INT_TONEAREST, 1U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.5), FP_INT_TONEAREST, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.75), FP_INT_TONEAREST, 2U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.32), FP_INT_TONEAREST, 4U));
+ EXPECT_FP_EQ(T(11.0), func(T(10.65), FP_INT_TONEAREST, 4U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.38), FP_INT_TONEAREST, 11U));
+ EXPECT_FP_EQ(T(1235.0), func(T(1234.96), FP_INT_TONEAREST, 11U));
+
+ EXPECT_FP_EQ(T(2.0), func(T(2.3), FP_INT_TONEAREST, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(2.5), FP_INT_TONEAREST, 2U));
+ EXPECT_FP_EQ(T(3.0), func(T(2.75), FP_INT_TONEAREST, 2U));
+ EXPECT_FP_EQ(T(5.0), func(T(5.3), FP_INT_TONEAREST, 3U));
+ EXPECT_FP_EQ(T(6.0), func(T(5.5), FP_INT_TONEAREST, 3U));
+ EXPECT_FP_EQ(T(6.0), func(T(5.75), FP_INT_TONEAREST, 3U));
+ }
+
+ void testFractionsToNearestOutsideRange(UfromfpFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.715), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TONEAREST, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TONEAREST, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.3), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.3), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.5), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.5), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.75), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.75), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.3), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.3), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.5), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.5), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.75), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.75), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsToNearestFallbackWithinRange(UfromfpFunc func) {
+ EXPECT_FP_EQ(T(0.0), func(T(0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+ EXPECT_FP_EQ(T(0.0), func(T(0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+ EXPECT_FP_EQ(T(-0.0), func(T(-0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+ EXPECT_FP_EQ(T(1.0), func(T(1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+ EXPECT_FP_EQ(T(11.0), func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U));
+ EXPECT_FP_EQ(T(1234.0),
+ func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U));
+ EXPECT_FP_EQ(T(1235.0),
+ func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U));
+
+ EXPECT_FP_EQ(T(2.0), func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+ EXPECT_FP_EQ(T(2.0), func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+ EXPECT_FP_EQ(T(3.0), func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U));
+ EXPECT_FP_EQ(T(5.0), func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+ EXPECT_FP_EQ(T(6.0), func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+ EXPECT_FP_EQ(T(6.0), func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U));
+ }
+
+ void testFractionsToNearestFallbackOutsideRange(UfromfpFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ }
+};
+
+#define LIST_UFROMFP_TESTS(T, func) \
+ using LlvmLibcUfromfpTest = UfromfpTestTemplate<T>; \
+ TEST_F(LlvmLibcUfromfpTest, SpecialNumbersNonzeroWidth) { \
+ testSpecialNumbersNonzeroWidth(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, SpecialNumbersZeroWidth) { \
+ testSpecialNumbersZeroWidth(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, RoundedNumbersWithinRange) { \
+ testRoundedNumbersWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, RoundedNumbersOutsideRange) { \
+ testRoundedNumbersOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsUpwardWithinRange) { \
+ testFractionsUpwardWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsUpwardOutsideRange) { \
+ testFractionsUpwardOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsDownwardWithinRange) { \
+ testFractionsDownwardWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsDownwardOutsideRange) { \
+ testFractionsDownwardOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsTowardZeroWithinRange) { \
+ testFractionsTowardZeroWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsTowardZeroOutsideRange) { \
+ testFractionsTowardZeroOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsToNearestFromZeroWithinRange) { \
+ testFractionsToNearestFromZeroWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsToNearestFromZeroOutsideRange) { \
+ testFractionsToNearestFromZeroOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsToNearestWithinRange) { \
+ testFractionsToNearestWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsToNearestOutsideRange) { \
+ testFractionsToNearestOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsToNearestFallbackWithinRange) { \
+ testFractionsToNearestFallbackWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpTest, FractionsToNearestFallbackOutsideRange) { \
+ testFractionsToNearestFallbackOutsideRange(&func); \
+ }
+
+#endif // LIBC_TEST_SRC_MATH_SMOKE_UFROMFPTEST_H
diff --git a/libc/test/src/math/smoke/UfromfpxTest.h b/libc/test/src/math/smoke/UfromfpxTest.h
new file mode 100644
index 000000000000..09163b8adfa5
--- /dev/null
+++ b/libc/test/src/math/smoke/UfromfpxTest.h
@@ -0,0 +1,551 @@
+//===-- Utility class to test different flavors of ufromfpx -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_TEST_SRC_MATH_SMOKE_UFROMFPXTEST_H
+#define LIBC_TEST_SRC_MATH_SMOKE_UFROMFPXTEST_H
+
+#include "test/UnitTest/FPMatcher.h"
+#include "test/UnitTest/Test.h"
+
+template <typename T>
+class UfromfpxTestTemplate : public LIBC_NAMESPACE::testing::Test {
+
+ DECLARE_SPECIAL_CONSTANTS(T)
+
+public:
+ typedef T (*UfromfpxFunc)(T, int, unsigned int);
+
+ void testSpecialNumbersNonzeroWidth(UfromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ(zero, func(zero, rnd, 32U));
+ EXPECT_FP_EQ(neg_zero, func(neg_zero, rnd, 32U));
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 32U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 32U), FE_INVALID);
+ }
+ }
+
+ void testSpecialNumbersZeroWidth(UfromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(zero, rnd, 0U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_zero, rnd, 0U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(inf, rnd, 0U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(neg_inf, rnd, 0U), FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(aNaN, rnd, 0U), FE_INVALID);
+ }
+ }
+
+ void testRoundedNumbersWithinRange(UfromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ(T(1.0), func(T(1.0), rnd, 1U));
+ EXPECT_FP_EQ(T(10.0), func(T(10.0), rnd, 4U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 11U));
+ EXPECT_FP_EQ(T(1234.0), func(T(1234.0), rnd, 64U));
+ }
+ }
+
+ void testRoundedNumbersOutsideRange(UfromfpxFunc func) {
+ for (int rnd : MATH_ROUNDING_DIRECTIONS_INCLUDING_UNKNOWN) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.0), rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.0), rnd, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.0), rnd, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.0), rnd, 10U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.0), rnd, 32U), FE_INVALID);
+ }
+ }
+
+ void testFractionsUpwardWithinRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.5), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.5), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.115), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.115), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.715), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.715), FP_INT_UPWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.3), FP_INT_UPWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.5), FP_INT_UPWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.75), FP_INT_UPWARD, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(11.0), func(T(10.32), FP_INT_UPWARD, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(11.0), func(T(10.65), FP_INT_UPWARD, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1235.0), func(T(1234.38), FP_INT_UPWARD, 11U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1235.0), func(T(1234.96), FP_INT_UPWARD, 11U),
+ FE_INEXACT);
+ }
+
+ void testFractionsUpwardOutsideRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.3), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_UPWARD, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_UPWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_UPWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_UPWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_UPWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_UPWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_UPWARD, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsDownwardWithinRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.5), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.115), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.715), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.3), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.5), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.75), FP_INT_DOWNWARD, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.32), FP_INT_DOWNWARD, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.65), FP_INT_DOWNWARD, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_DOWNWARD, 11U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.96), FP_INT_DOWNWARD, 11U), FE_INEXACT);
+ }
+
+ void testFractionsDownwardOutsideRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.5), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.115), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.715), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_DOWNWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_DOWNWARD, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_DOWNWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_DOWNWARD, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_DOWNWARD, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsTowardZeroWithinRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.5), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.5), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.115), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.115), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.715), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.715), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.3), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.5), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.75), FP_INT_TOWARDZERO, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.32), FP_INT_TOWARDZERO, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.65), FP_INT_TOWARDZERO, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_TOWARDZERO, 11U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.96), FP_INT_TOWARDZERO, 11U), FE_INEXACT);
+ }
+
+ void testFractionsTowardZeroOutsideRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TOWARDZERO, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TOWARDZERO, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TOWARDZERO, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TOWARDZERO, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TOWARDZERO, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsToNearestFromZeroWithinRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(0.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(0.0), func(T(0.115), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-0.0), func(T(-0.115), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(0.715), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(1.3), FP_INT_TONEARESTFROMZERO, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.5), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.75), FP_INT_TONEARESTFROMZERO, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(10.0), func(T(10.32), FP_INT_TONEARESTFROMZERO, 4U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(11.0), func(T(10.65), FP_INT_TONEARESTFROMZERO, 4U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_TONEARESTFROMZERO, 11U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1235.0), func(T(1234.96), FP_INT_TONEARESTFROMZERO, 11U), FE_INEXACT);
+ }
+
+ void testFractionsToNearestFromZeroOutsideRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-0.5), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-0.715), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.3), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.5), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.5), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.75), FP_INT_TONEARESTFROMZERO, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.75), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.32), FP_INT_TONEARESTFROMZERO, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.32), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.65), FP_INT_TONEARESTFROMZERO, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.65), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.38), FP_INT_TONEARESTFROMZERO, 10U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.38), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.96), FP_INT_TONEARESTFROMZERO, 10U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.96), FP_INT_TONEARESTFROMZERO, 32U), FE_INVALID);
+ }
+
+ void testFractionsToNearestWithinRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.5), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.5), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(0.0), func(T(0.115), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(-0.0), func(T(-0.115), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(0.715), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(1.0), func(T(1.3), FP_INT_TONEAREST, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.5), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(1.75), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(10.0), func(T(10.32), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(11.0), func(T(10.65), FP_INT_TONEAREST, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), FP_INT_TONEAREST, 11U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1235.0), func(T(1234.96), FP_INT_TONEAREST, 11U), FE_INEXACT);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(2.3), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(2.0), func(T(2.5), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(3.0), func(T(2.75), FP_INT_TONEAREST, 2U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(5.0), func(T(5.3), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(6.0), func(T(5.5), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(T(6.0), func(T(5.75), FP_INT_TONEAREST, 3U),
+ FE_INEXACT);
+ }
+
+ void testFractionsToNearestOutsideRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-0.715), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.3), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.5), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.5), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1.75), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1.75), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.32), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.32), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(10.65), FP_INT_TONEAREST, 3U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-10.65), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.38), FP_INT_TONEAREST, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.38), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(1234.96), FP_INT_TONEAREST, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-1234.96), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.3), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.3), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.5), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.5), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(2.75), FP_INT_TONEAREST, 1U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-2.75), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.3), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.3), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.5), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.5), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(5.75), FP_INT_TONEAREST, 2U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, func(T(-5.75), FP_INT_TONEAREST, 32U),
+ FE_INVALID);
+ }
+
+ void testFractionsToNearestFallbackWithinRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(0.0), func(T(0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-0.0), func(T(-0.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(0.0), func(T(0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(-0.0), func(T(-0.115), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1.0), func(T(1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(10.0), func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(11.0), func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 4U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1234.0), func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+ FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(1235.0), func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 11U),
+ FE_INEXACT);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(2.0), func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(3.0), func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(5.0), func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(6.0), func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ T(6.0), func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INEXACT);
+ }
+
+ void testFractionsToNearestFallbackOutsideRange(UfromfpxFunc func) {
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-0.715), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.32), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 3U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-10.65), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.38), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 10U),
+ FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-1234.96), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U),
+ FE_INVALID);
+
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 1U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-2.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.3), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.5), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 2U), FE_INVALID);
+ EXPECT_FP_EQ_WITH_EXCEPTION(
+ aNaN, func(T(-5.75), UNKNOWN_MATH_ROUNDING_DIRECTION, 32U), FE_INVALID);
+ }
+};
+
+#define LIST_UFROMFPX_TESTS(T, func) \
+ using LlvmLibcUfromfpxTest = UfromfpxTestTemplate<T>; \
+ TEST_F(LlvmLibcUfromfpxTest, SpecialNumbersNonzeroWidth) { \
+ testSpecialNumbersNonzeroWidth(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, SpecialNumbersZeroWidth) { \
+ testSpecialNumbersZeroWidth(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, RoundedNumbersWithinRange) { \
+ testRoundedNumbersWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, RoundedNumbersOutsideRange) { \
+ testRoundedNumbersOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsUpwardWithinRange) { \
+ testFractionsUpwardWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsUpwardOutsideRange) { \
+ testFractionsUpwardOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsDownwardWithinRange) { \
+ testFractionsDownwardWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsDownwardOutsideRange) { \
+ testFractionsDownwardOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsTowardZeroWithinRange) { \
+ testFractionsTowardZeroWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsTowardZeroOutsideRange) { \
+ testFractionsTowardZeroOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsToNearestFromZeroWithinRange) { \
+ testFractionsToNearestFromZeroWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsToNearestFromZeroOutsideRange) { \
+ testFractionsToNearestFromZeroOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsToNearestWithinRange) { \
+ testFractionsToNearestWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsToNearestOutsideRange) { \
+ testFractionsToNearestOutsideRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsToNearestFallbackWithinRange) { \
+ testFractionsToNearestFallbackWithinRange(&func); \
+ } \
+ TEST_F(LlvmLibcUfromfpxTest, FractionsToNearestFallbackOutsideRange) { \
+ testFractionsToNearestFallbackOutsideRange(&func); \
+ }
+
+#endif // LIBC_TEST_SRC_MATH_SMOKE_UFROMFPXTEST_H
diff --git a/libc/test/src/math/smoke/canonicalize_test.cpp b/libc/test/src/math/smoke/canonicalize_test.cpp
new file mode 100644
index 000000000000..54a1ddd49ca4
--- /dev/null
+++ b/libc/test/src/math/smoke/canonicalize_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for canonicalize ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CanonicalizeTest.h"
+
+#include "src/math/canonicalize.h"
+
+LIST_CANONICALIZE_TESTS(double, LIBC_NAMESPACE::canonicalize)
diff --git a/libc/test/src/math/smoke/canonicalizef128_test.cpp b/libc/test/src/math/smoke/canonicalizef128_test.cpp
new file mode 100644
index 000000000000..242e2331a29e
--- /dev/null
+++ b/libc/test/src/math/smoke/canonicalizef128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for canonicalizef128 ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CanonicalizeTest.h"
+
+#include "src/math/canonicalizef128.h"
+
+LIST_CANONICALIZE_TESTS(float128, LIBC_NAMESPACE::canonicalizef128)
diff --git a/libc/test/src/math/smoke/canonicalizef_test.cpp b/libc/test/src/math/smoke/canonicalizef_test.cpp
new file mode 100644
index 000000000000..17cf3c3639a4
--- /dev/null
+++ b/libc/test/src/math/smoke/canonicalizef_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for canonicalizef ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CanonicalizeTest.h"
+
+#include "src/math/canonicalizef.h"
+
+LIST_CANONICALIZE_TESTS(float, LIBC_NAMESPACE::canonicalizef)
diff --git a/libc/test/src/math/smoke/canonicalizel_test.cpp b/libc/test/src/math/smoke/canonicalizel_test.cpp
new file mode 100644
index 000000000000..23cb21dce84e
--- /dev/null
+++ b/libc/test/src/math/smoke/canonicalizel_test.cpp
@@ -0,0 +1,19 @@
+//===-- Unittests for canonicalizel ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CanonicalizeTest.h"
+
+#include "src/math/canonicalizel.h"
+
+LIST_CANONICALIZE_TESTS(long double, LIBC_NAMESPACE::canonicalizel)
+
+#ifdef LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80
+
+X86_80_SPECIAL_CANONICALIZE_TEST(long double, LIBC_NAMESPACE::canonicalizel)
+
+#endif // LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80
diff --git a/libc/test/src/math/smoke/fmaximum_mag_num_test.cpp b/libc/test/src/math/smoke/fmaximum_mag_num_test.cpp
new file mode 100644
index 000000000000..16ec1b0ea343
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_mag_num_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_mag_num-------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "FMaximumMagNumTest.h"
+
+#include "src/math/fmaximum_mag_num.h"
+
+LIST_FMAXIMUM_MAG_NUM_TESTS(double, LIBC_NAMESPACE::fmaximum_mag_num)
diff --git a/libc/test/src/math/smoke/fmaximum_mag_numf128_test.cpp b/libc/test/src/math/smoke/fmaximum_mag_numf128_test.cpp
new file mode 100644
index 000000000000..17f2a24f7fd0
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_mag_numf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_mag_numf128---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumMagNumTest.h"
+
+#include "src/math/fmaximum_mag_numf128.h"
+
+LIST_FMAXIMUM_MAG_NUM_TESTS(float128, LIBC_NAMESPACE::fmaximum_mag_numf128)
diff --git a/libc/test/src/math/smoke/fmaximum_mag_numf_test.cpp b/libc/test/src/math/smoke/fmaximum_mag_numf_test.cpp
new file mode 100644
index 000000000000..a8a46f96970e
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_mag_numf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_mag_numf------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumMagNumTest.h"
+
+#include "src/math/fmaximum_mag_numf.h"
+
+LIST_FMAXIMUM_MAG_NUM_TESTS(float, LIBC_NAMESPACE::fmaximum_mag_numf)
diff --git a/libc/test/src/math/smoke/fmaximum_mag_numl_test.cpp b/libc/test/src/math/smoke/fmaximum_mag_numl_test.cpp
new file mode 100644
index 000000000000..c03fa20bd367
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_mag_numl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_mag_numl------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumMagNumTest.h"
+
+#include "src/math/fmaximum_mag_numl.h"
+
+LIST_FMAXIMUM_MAG_NUM_TESTS(long double, LIBC_NAMESPACE::fmaximum_mag_numl)
diff --git a/libc/test/src/math/smoke/fmaximum_mag_test.cpp b/libc/test/src/math/smoke/fmaximum_mag_test.cpp
new file mode 100644
index 000000000000..e70602f044ec
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_mag_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_mag-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "FMaximumMagTest.h"
+
+#include "src/math/fmaximum_mag.h"
+
+LIST_FMAXIMUM_MAG_TESTS(double, LIBC_NAMESPACE::fmaximum_mag)
diff --git a/libc/test/src/math/smoke/fmaximum_magf128_test.cpp b/libc/test/src/math/smoke/fmaximum_magf128_test.cpp
new file mode 100644
index 000000000000..d7ae8ec8d7a0
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_magf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_magf128-------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumMagTest.h"
+
+#include "src/math/fmaximum_magf128.h"
+
+LIST_FMAXIMUM_MAG_TESTS(float128, LIBC_NAMESPACE::fmaximum_magf128)
diff --git a/libc/test/src/math/smoke/fmaximum_magf_test.cpp b/libc/test/src/math/smoke/fmaximum_magf_test.cpp
new file mode 100644
index 000000000000..efca320b2b37
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_magf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_magf----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumMagTest.h"
+
+#include "src/math/fmaximum_magf.h"
+
+LIST_FMAXIMUM_MAG_TESTS(float, LIBC_NAMESPACE::fmaximum_magf)
diff --git a/libc/test/src/math/smoke/fmaximum_magl_test.cpp b/libc/test/src/math/smoke/fmaximum_magl_test.cpp
new file mode 100644
index 000000000000..16b420b8714b
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_magl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_magl----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumMagTest.h"
+
+#include "src/math/fmaximum_magl.h"
+
+LIST_FMAXIMUM_MAG_TESTS(long double, LIBC_NAMESPACE::fmaximum_magl)
diff --git a/libc/test/src/math/smoke/fmaximum_num_test.cpp b/libc/test/src/math/smoke/fmaximum_num_test.cpp
new file mode 100644
index 000000000000..cb9afdf78b83
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_num_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_num-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "FMaximumNumTest.h"
+
+#include "src/math/fmaximum_num.h"
+
+LIST_FMAXIMUM_NUM_TESTS(double, LIBC_NAMESPACE::fmaximum_num)
diff --git a/libc/test/src/math/smoke/fmaximum_numf128_test.cpp b/libc/test/src/math/smoke/fmaximum_numf128_test.cpp
new file mode 100644
index 000000000000..6855ea3b39a9
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_numf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_numf128-------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumNumTest.h"
+
+#include "src/math/fmaximum_numf128.h"
+
+LIST_FMAXIMUM_NUM_TESTS(float128, LIBC_NAMESPACE::fmaximum_numf128)
diff --git a/libc/test/src/math/smoke/fmaximum_numf_test.cpp b/libc/test/src/math/smoke/fmaximum_numf_test.cpp
new file mode 100644
index 000000000000..053d18803e07
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_numf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_numf----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumNumTest.h"
+
+#include "src/math/fmaximum_numf.h"
+
+LIST_FMAXIMUM_NUM_TESTS(float, LIBC_NAMESPACE::fmaximum_numf)
diff --git a/libc/test/src/math/smoke/fmaximum_numl_test.cpp b/libc/test/src/math/smoke/fmaximum_numl_test.cpp
new file mode 100644
index 000000000000..bf9612c4111d
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_numl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum_numl----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumNumTest.h"
+
+#include "src/math/fmaximum_numl.h"
+
+LIST_FMAXIMUM_NUM_TESTS(long double, LIBC_NAMESPACE::fmaximum_numl)
diff --git a/libc/test/src/math/smoke/fmaximum_test.cpp b/libc/test/src/math/smoke/fmaximum_test.cpp
new file mode 100644
index 000000000000..990e0177d868
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximum_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximum---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "FMaximumTest.h"
+
+#include "src/math/fmaximum.h"
+
+LIST_FMAXIMUM_TESTS(double, LIBC_NAMESPACE::fmaximum)
diff --git a/libc/test/src/math/smoke/fmaximumf128_test.cpp b/libc/test/src/math/smoke/fmaximumf128_test.cpp
new file mode 100644
index 000000000000..7e0b97bcdd4d
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximumf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximumf128-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumTest.h"
+
+#include "src/math/fmaximumf128.h"
+
+LIST_FMAXIMUM_TESTS(float128, LIBC_NAMESPACE::fmaximumf128)
diff --git a/libc/test/src/math/smoke/fmaximumf_test.cpp b/libc/test/src/math/smoke/fmaximumf_test.cpp
new file mode 100644
index 000000000000..a92bbd1446e7
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximumf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximumf--------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumTest.h"
+
+#include "src/math/fmaximumf.h"
+
+LIST_FMAXIMUM_TESTS(float, LIBC_NAMESPACE::fmaximumf)
diff --git a/libc/test/src/math/smoke/fmaximuml_test.cpp b/libc/test/src/math/smoke/fmaximuml_test.cpp
new file mode 100644
index 000000000000..080847b9f4de
--- /dev/null
+++ b/libc/test/src/math/smoke/fmaximuml_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmaximuml--------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMaximumTest.h"
+
+#include "src/math/fmaximuml.h"
+
+LIST_FMAXIMUM_TESTS(long double, LIBC_NAMESPACE::fmaximuml)
diff --git a/libc/test/src/math/smoke/fminimum_mag_num_test.cpp b/libc/test/src/math/smoke/fminimum_mag_num_test.cpp
new file mode 100644
index 000000000000..471f2ceb3c2c
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_mag_num_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_mag_num-------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "FMinimumMagNumTest.h"
+
+#include "src/math/fminimum_mag_num.h"
+
+LIST_FMINIMUM_MAG_NUM_TESTS(double, LIBC_NAMESPACE::fminimum_mag_num)
diff --git a/libc/test/src/math/smoke/fminimum_mag_numf128_test.cpp b/libc/test/src/math/smoke/fminimum_mag_numf128_test.cpp
new file mode 100644
index 000000000000..f1db025f4895
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_mag_numf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_mag_numf128---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumMagNumTest.h"
+
+#include "src/math/fminimum_mag_numf128.h"
+
+LIST_FMINIMUM_MAG_NUM_TESTS(float128, LIBC_NAMESPACE::fminimum_mag_numf128)
diff --git a/libc/test/src/math/smoke/fminimum_mag_numf_test.cpp b/libc/test/src/math/smoke/fminimum_mag_numf_test.cpp
new file mode 100644
index 000000000000..773ba806b99e
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_mag_numf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_mag_numf------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumMagNumTest.h"
+
+#include "src/math/fminimum_mag_numf.h"
+
+LIST_FMINIMUM_MAG_NUM_TESTS(float, LIBC_NAMESPACE::fminimum_mag_numf)
diff --git a/libc/test/src/math/smoke/fminimum_mag_numl_test.cpp b/libc/test/src/math/smoke/fminimum_mag_numl_test.cpp
new file mode 100644
index 000000000000..b4152779db73
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_mag_numl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_mag_numl------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumMagNumTest.h"
+
+#include "src/math/fminimum_mag_numl.h"
+
+LIST_FMINIMUM_MAG_NUM_TESTS(long double, LIBC_NAMESPACE::fminimum_mag_numl)
diff --git a/libc/test/src/math/smoke/fminimum_mag_test.cpp b/libc/test/src/math/smoke/fminimum_mag_test.cpp
new file mode 100644
index 000000000000..f4138f3353d1
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_mag_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_mag-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "FMinimumMagTest.h"
+
+#include "src/math/fminimum_mag.h"
+
+LIST_FMINIMUM_MAG_TESTS(double, LIBC_NAMESPACE::fminimum_mag)
diff --git a/libc/test/src/math/smoke/fminimum_magf128_test.cpp b/libc/test/src/math/smoke/fminimum_magf128_test.cpp
new file mode 100644
index 000000000000..010ee6e73bee
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_magf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_magf128-------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumMagTest.h"
+
+#include "src/math/fminimum_magf128.h"
+
+LIST_FMINIMUM_MAG_TESTS(float128, LIBC_NAMESPACE::fminimum_magf128)
diff --git a/libc/test/src/math/smoke/fminimum_magf_test.cpp b/libc/test/src/math/smoke/fminimum_magf_test.cpp
new file mode 100644
index 000000000000..aa2743148930
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_magf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_magf----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumMagTest.h"
+
+#include "src/math/fminimum_magf.h"
+
+LIST_FMINIMUM_MAG_TESTS(float, LIBC_NAMESPACE::fminimum_magf)
diff --git a/libc/test/src/math/smoke/fminimum_magl_test.cpp b/libc/test/src/math/smoke/fminimum_magl_test.cpp
new file mode 100644
index 000000000000..c6fbf7df0e70
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_magl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_magl----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumMagTest.h"
+
+#include "src/math/fminimum_magl.h"
+
+LIST_FMINIMUM_MAG_TESTS(long double, LIBC_NAMESPACE::fminimum_magl)
diff --git a/libc/test/src/math/smoke/fminimum_num_test.cpp b/libc/test/src/math/smoke/fminimum_num_test.cpp
new file mode 100644
index 000000000000..1be7ebb5fe8e
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_num_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_num-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumNumTest.h"
+
+#include "src/math/fminimum_num.h"
+
+LIST_FMINIMUM_NUM_TESTS(double, LIBC_NAMESPACE::fminimum_num)
diff --git a/libc/test/src/math/smoke/fminimum_numf128_test.cpp b/libc/test/src/math/smoke/fminimum_numf128_test.cpp
new file mode 100644
index 000000000000..d773d8973286
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_numf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_numf128-------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumNumTest.h"
+
+#include "src/math/fminimum_numf128.h"
+
+LIST_FMINIMUM_NUM_TESTS(float128, LIBC_NAMESPACE::fminimum_numf128)
diff --git a/libc/test/src/math/smoke/fminimum_numf_test.cpp b/libc/test/src/math/smoke/fminimum_numf_test.cpp
new file mode 100644
index 000000000000..9b60f39d55a8
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_numf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_numf----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumNumTest.h"
+
+#include "src/math/fminimum_numf.h"
+
+LIST_FMINIMUM_NUM_TESTS(float, LIBC_NAMESPACE::fminimum_numf)
diff --git a/libc/test/src/math/smoke/fminimum_numl_test.cpp b/libc/test/src/math/smoke/fminimum_numl_test.cpp
new file mode 100644
index 000000000000..8a72cd8adc5b
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_numl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum_numl----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumNumTest.h"
+
+#include "src/math/fminimum_numl.h"
+
+LIST_FMINIMUM_NUM_TESTS(long double, LIBC_NAMESPACE::fminimum_numl)
diff --git a/libc/test/src/math/smoke/fminimum_test.cpp b/libc/test/src/math/smoke/fminimum_test.cpp
new file mode 100644
index 000000000000..7778f1c2311c
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimum_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimum---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumTest.h"
+
+#include "src/math/fminimum.h"
+
+LIST_FMINIMUM_TESTS(double, LIBC_NAMESPACE::fminimum)
diff --git a/libc/test/src/math/smoke/fminimumf128_test.cpp b/libc/test/src/math/smoke/fminimumf128_test.cpp
new file mode 100644
index 000000000000..163090b8a9ec
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimumf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimumf128-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumTest.h"
+
+#include "src/math/fminimumf128.h"
+
+LIST_FMINIMUM_TESTS(float128, LIBC_NAMESPACE::fminimumf128)
diff --git a/libc/test/src/math/smoke/fminimumf_test.cpp b/libc/test/src/math/smoke/fminimumf_test.cpp
new file mode 100644
index 000000000000..2ca0f2f35a7f
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimumf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimumf--------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumTest.h"
+
+#include "src/math/fminimumf.h"
+
+LIST_FMINIMUM_TESTS(float, LIBC_NAMESPACE::fminimumf)
diff --git a/libc/test/src/math/smoke/fminimuml_test.cpp b/libc/test/src/math/smoke/fminimuml_test.cpp
new file mode 100644
index 000000000000..3c067ae23f45
--- /dev/null
+++ b/libc/test/src/math/smoke/fminimuml_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fminimuml--------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FMinimumTest.h"
+
+#include "src/math/fminimuml.h"
+
+LIST_FMINIMUM_TESTS(long double, LIBC_NAMESPACE::fminimuml)
diff --git a/libc/test/src/math/smoke/fromfp_test.cpp b/libc/test/src/math/smoke/fromfp_test.cpp
new file mode 100644
index 000000000000..147a9df9afcf
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfp_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpTest.h"
+
+#include "src/math/fromfp.h"
+
+LIST_FROMFP_TESTS(double, LIBC_NAMESPACE::fromfp)
diff --git a/libc/test/src/math/smoke/fromfpf128_test.cpp b/libc/test/src/math/smoke/fromfpf128_test.cpp
new file mode 100644
index 000000000000..288aadb359bf
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpf128 ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpTest.h"
+
+#include "src/math/fromfpf128.h"
+
+LIST_FROMFP_TESTS(float128, LIBC_NAMESPACE::fromfpf128)
diff --git a/libc/test/src/math/smoke/fromfpf_test.cpp b/libc/test/src/math/smoke/fromfpf_test.cpp
new file mode 100644
index 000000000000..63f3f624716e
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpf ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpTest.h"
+
+#include "src/math/fromfpf.h"
+
+LIST_FROMFP_TESTS(float, LIBC_NAMESPACE::fromfpf)
diff --git a/libc/test/src/math/smoke/fromfpl_test.cpp b/libc/test/src/math/smoke/fromfpl_test.cpp
new file mode 100644
index 000000000000..c0072768870b
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpl ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpTest.h"
+
+#include "src/math/fromfpl.h"
+
+LIST_FROMFP_TESTS(long double, LIBC_NAMESPACE::fromfpl)
diff --git a/libc/test/src/math/smoke/fromfpx_test.cpp b/libc/test/src/math/smoke/fromfpx_test.cpp
new file mode 100644
index 000000000000..10b1eee726e1
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpx_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpx ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpxTest.h"
+
+#include "src/math/fromfpx.h"
+
+LIST_FROMFPX_TESTS(double, LIBC_NAMESPACE::fromfpx)
diff --git a/libc/test/src/math/smoke/fromfpxf128_test.cpp b/libc/test/src/math/smoke/fromfpxf128_test.cpp
new file mode 100644
index 000000000000..2839bed30cb7
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpxf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpxf128 -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpxTest.h"
+
+#include "src/math/fromfpxf128.h"
+
+LIST_FROMFPX_TESTS(float128, LIBC_NAMESPACE::fromfpxf128)
diff --git a/libc/test/src/math/smoke/fromfpxf_test.cpp b/libc/test/src/math/smoke/fromfpxf_test.cpp
new file mode 100644
index 000000000000..42e47aba40ae
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpxf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpxf --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpxTest.h"
+
+#include "src/math/fromfpxf.h"
+
+LIST_FROMFPX_TESTS(float, LIBC_NAMESPACE::fromfpxf)
diff --git a/libc/test/src/math/smoke/fromfpxl_test.cpp b/libc/test/src/math/smoke/fromfpxl_test.cpp
new file mode 100644
index 000000000000..cbe8d750ff2a
--- /dev/null
+++ b/libc/test/src/math/smoke/fromfpxl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fromfpxl --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FromfpxTest.h"
+
+#include "src/math/fromfpxl.h"
+
+LIST_FROMFPX_TESTS(long double, LIBC_NAMESPACE::fromfpxl)
diff --git a/libc/test/src/math/smoke/ufromfp_test.cpp b/libc/test/src/math/smoke/ufromfp_test.cpp
new file mode 100644
index 000000000000..ff4762112400
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfp_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpTest.h"
+
+#include "src/math/ufromfp.h"
+
+LIST_UFROMFP_TESTS(double, LIBC_NAMESPACE::ufromfp)
diff --git a/libc/test/src/math/smoke/ufromfpf128_test.cpp b/libc/test/src/math/smoke/ufromfpf128_test.cpp
new file mode 100644
index 000000000000..9ba3034f6e61
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpf128 -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpTest.h"
+
+#include "src/math/ufromfpf128.h"
+
+LIST_UFROMFP_TESTS(float128, LIBC_NAMESPACE::ufromfpf128)
diff --git a/libc/test/src/math/smoke/ufromfpf_test.cpp b/libc/test/src/math/smoke/ufromfpf_test.cpp
new file mode 100644
index 000000000000..2913b31e20bd
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpf --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpTest.h"
+
+#include "src/math/ufromfpf.h"
+
+LIST_UFROMFP_TESTS(float, LIBC_NAMESPACE::ufromfpf)
diff --git a/libc/test/src/math/smoke/ufromfpl_test.cpp b/libc/test/src/math/smoke/ufromfpl_test.cpp
new file mode 100644
index 000000000000..8976056756e4
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpl --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpTest.h"
+
+#include "src/math/ufromfpl.h"
+
+LIST_UFROMFP_TESTS(long double, LIBC_NAMESPACE::ufromfpl)
diff --git a/libc/test/src/math/smoke/ufromfpx_test.cpp b/libc/test/src/math/smoke/ufromfpx_test.cpp
new file mode 100644
index 000000000000..3bb45e428f28
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpx_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpx --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpxTest.h"
+
+#include "src/math/ufromfpx.h"
+
+LIST_UFROMFPX_TESTS(double, LIBC_NAMESPACE::ufromfpx)
diff --git a/libc/test/src/math/smoke/ufromfpxf128_test.cpp b/libc/test/src/math/smoke/ufromfpxf128_test.cpp
new file mode 100644
index 000000000000..6defaf7fee3d
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpxf128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpxf128 ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpxTest.h"
+
+#include "src/math/ufromfpxf128.h"
+
+LIST_UFROMFPX_TESTS(float128, LIBC_NAMESPACE::ufromfpxf128)
diff --git a/libc/test/src/math/smoke/ufromfpxf_test.cpp b/libc/test/src/math/smoke/ufromfpxf_test.cpp
new file mode 100644
index 000000000000..862fd4c2cdac
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpxf_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpxf -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpxTest.h"
+
+#include "src/math/ufromfpxf.h"
+
+LIST_UFROMFPX_TESTS(float, LIBC_NAMESPACE::ufromfpxf)
diff --git a/libc/test/src/math/smoke/ufromfpxl_test.cpp b/libc/test/src/math/smoke/ufromfpxl_test.cpp
new file mode 100644
index 000000000000..b6b93b38e6be
--- /dev/null
+++ b/libc/test/src/math/smoke/ufromfpxl_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for ufromfpxl -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UfromfpxTest.h"
+
+#include "src/math/ufromfpxl.h"
+
+LIST_UFROMFPX_TESTS(long double, LIBC_NAMESPACE::ufromfpxl)
diff --git a/libc/test/src/stdio/CMakeLists.txt b/libc/test/src/stdio/CMakeLists.txt
index 3ccce16a76a2..4c38e8aba7d7 100644
--- a/libc/test/src/stdio/CMakeLists.txt
+++ b/libc/test/src/stdio/CMakeLists.txt
@@ -354,6 +354,21 @@ if(${LIBC_TARGET_OS} STREQUAL "linux")
libc.src.unistd.access
libc.src.unistd.close
)
+
+ add_libc_test(
+ rename_test
+ SUITE
+ libc_stdio_unittests
+ SRCS
+ rename_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.fcntl.open
+ libc.src.stdio.rename
+ libc.src.unistd.access
+ libc.src.unistd.close
+ libc.test.UnitTest.ErrnoSetterMatcher
+ )
endif()
add_libc_test(
diff --git a/libc/test/src/stdio/printf_core/parser_test.cpp b/libc/test/src/stdio/printf_core/parser_test.cpp
index 0134277c4a1b..66d6dd0a86c4 100644
--- a/libc/test/src/stdio/printf_core/parser_test.cpp
+++ b/libc/test/src/stdio/printf_core/parser_test.cpp
@@ -223,6 +223,42 @@ TEST(LlvmLibcPrintfParserTest, EvalOneArgWithLongLengthModifier) {
ASSERT_PFORMAT_EQ(expected, format_arr[0]);
}
+TEST(LlvmLibcPrintfParserTest, EvalOneArgWithBitWidthLengthModifier) {
+ LIBC_NAMESPACE::printf_core::FormatSection format_arr[10];
+ const char *str = "%w32d";
+ long long arg1 = 12345;
+ evaluate(format_arr, str, arg1);
+
+ LIBC_NAMESPACE::printf_core::FormatSection expected;
+ expected.has_conv = true;
+
+ expected.raw_string = {str, 5};
+ expected.length_modifier = LIBC_NAMESPACE::printf_core::LengthModifier::w;
+ expected.bit_width = 32;
+ expected.conv_val_raw = arg1;
+ expected.conv_name = 'd';
+
+ ASSERT_PFORMAT_EQ(expected, format_arr[0]);
+}
+
+TEST(LlvmLibcPrintfParserTest, EvalOneArgWithFastBitWidthLengthModifier) {
+ LIBC_NAMESPACE::printf_core::FormatSection format_arr[10];
+ const char *str = "%wf32d";
+ long long arg1 = 12345;
+ evaluate(format_arr, str, arg1);
+
+ LIBC_NAMESPACE::printf_core::FormatSection expected;
+ expected.has_conv = true;
+
+ expected.raw_string = {str, 6};
+ expected.length_modifier = LIBC_NAMESPACE::printf_core::LengthModifier::wf;
+ expected.bit_width = 32;
+ expected.conv_val_raw = arg1;
+ expected.conv_name = 'd';
+
+ ASSERT_PFORMAT_EQ(expected, format_arr[0]);
+}
+
TEST(LlvmLibcPrintfParserTest, EvalOneArgWithAllOptions) {
LIBC_NAMESPACE::printf_core::FormatSection format_arr[10];
const char *str = "% -056.78jd";
diff --git a/libc/test/src/stdio/rename_test.cpp b/libc/test/src/stdio/rename_test.cpp
new file mode 100644
index 000000000000..a5dd734c6361
--- /dev/null
+++ b/libc/test/src/stdio/rename_test.cpp
@@ -0,0 +1,51 @@
+//===-- Unittests for rename ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "include/llvm-libc-macros/linux/sys-stat-macros.h"
+#include "include/llvm-libc-macros/linux/unistd-macros.h"
+#include "src/errno/libc_errno.h"
+#include "src/fcntl/open.h"
+#include "src/stdio/rename.h"
+#include "src/unistd/access.h"
+#include "src/unistd/close.h"
+#include "test/UnitTest/ErrnoSetterMatcher.h"
+#include "test/UnitTest/Test.h"
+
+TEST(LlvmLibcRenameTest, CreateAndRenameFile) {
+ // The test strategy is to create a file and rename it, and also verify that
+ // it was renamed.
+ LIBC_NAMESPACE::libc_errno = 0;
+ using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails;
+ using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds;
+
+ constexpr const char *FILENAME0 = "rename.test.file0";
+ auto TEST_FILEPATH0 = libc_make_test_file_path(FILENAME0);
+
+ int fd = LIBC_NAMESPACE::open(TEST_FILEPATH0, O_WRONLY | O_CREAT, S_IRWXU);
+ ASSERT_ERRNO_SUCCESS();
+ ASSERT_GT(fd, 0);
+ ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::access(TEST_FILEPATH0, F_OK), Succeeds(0));
+
+ constexpr const char *FILENAME1 = "rename.test.file1";
+ auto TEST_FILEPATH1 = libc_make_test_file_path(FILENAME1);
+ ASSERT_THAT(LIBC_NAMESPACE::rename(TEST_FILEPATH0, TEST_FILEPATH1),
+ Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::access(TEST_FILEPATH1, F_OK), Succeeds(0));
+ ASSERT_THAT(LIBC_NAMESPACE::access(TEST_FILEPATH0, F_OK), Fails(ENOENT));
+}
+
+TEST(LlvmLibcRenameTest, RenameNonExistent) {
+ using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails;
+
+ constexpr const char *FILENAME1 = "rename.test.file1";
+ auto TEST_FILEPATH1 = libc_make_test_file_path(FILENAME1);
+
+ ASSERT_THAT(LIBC_NAMESPACE::rename("non-existent", TEST_FILEPATH1),
+ Fails(ENOENT));
+}
diff --git a/libc/test/src/stdio/sprintf_test.cpp b/libc/test/src/stdio/sprintf_test.cpp
index 8dde95d02a96..8e9870f71a95 100644
--- a/libc/test/src/stdio/sprintf_test.cpp
+++ b/libc/test/src/stdio/sprintf_test.cpp
@@ -169,6 +169,93 @@ TEST(LlvmLibcSPrintfTest, IntConv) {
EXPECT_EQ(written, 20);
ASSERT_STREQ(buff, "-9223372036854775808"); // ll min
+ written = LIBC_NAMESPACE::sprintf(buff, "%w3d", 5807);
+ EXPECT_EQ(written, 1);
+ ASSERT_STREQ(buff, "7");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w3d", 1);
+ EXPECT_EQ(written, 1);
+ ASSERT_STREQ(buff, "1");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w64d", 9223372036854775807ll);
+ EXPECT_EQ(written, 19);
+ ASSERT_STREQ(buff, "9223372036854775807");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w-1d", 5807);
+ EXPECT_EQ(written, 5);
+ ASSERT_STREQ(buff, "%w-1d");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w0d", 5807);
+ EXPECT_EQ(written, 4);
+ ASSERT_STREQ(buff, "%w0d");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w999d", 9223372036854775807ll);
+ EXPECT_EQ(written, 19);
+ ASSERT_STREQ(buff, "9223372036854775807");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%winvalid%w1d", 5807, 5807);
+ EXPECT_EQ(written, 10);
+ ASSERT_STREQ(buff, "%winvalid1");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w-1d%w1d", 5807, 5807);
+ EXPECT_EQ(written, 6);
+ ASSERT_STREQ(buff, "%w-1d1");
+
+ char format[64];
+ char uintmax[128];
+  LIBC_NAMESPACE::sprintf(format, "%%w%zuu", sizeof(uintmax_t) * CHAR_BIT);
+  const int uintmax_len =
+      LIBC_NAMESPACE::sprintf(uintmax, "%zu", sizeof(uintmax_t) * CHAR_BIT);
+ written = LIBC_NAMESPACE::sprintf(buff, format, sizeof(uintmax_t) * CHAR_BIT);
+ EXPECT_EQ(written, uintmax_len);
+ ASSERT_STREQ(buff, uintmax);
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%w64u", 18446744073709551615ull);
+ EXPECT_EQ(written, 20);
+ ASSERT_STREQ(buff, "18446744073709551615"); // ull max
+
+ written =
+ LIBC_NAMESPACE::sprintf(buff, "%w64d", -9223372036854775807ll - 1ll);
+ EXPECT_EQ(written, 20);
+ ASSERT_STREQ(buff, "-9223372036854775808"); // ll min
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf3d", 5807);
+ EXPECT_EQ(written, 1);
+ ASSERT_STREQ(buff, "7");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf3d", 1);
+ EXPECT_EQ(written, 1);
+ ASSERT_STREQ(buff, "1");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf64u", 18446744073709551615ull);
+ EXPECT_EQ(written, 20);
+ ASSERT_STREQ(buff, "18446744073709551615"); // ull max
+
+ written =
+ LIBC_NAMESPACE::sprintf(buff, "%wf64d", -9223372036854775807ll - 1ll);
+ EXPECT_EQ(written, 20);
+ ASSERT_STREQ(buff, "-9223372036854775808"); // ll min
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf0d", 5807);
+ EXPECT_EQ(written, 5);
+ ASSERT_STREQ(buff, "%wf0d");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf-1d", 5807);
+ EXPECT_EQ(written, 6);
+ ASSERT_STREQ(buff, "%wf-1d");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wfinvalid%wf1d", 5807, 5807);
+ EXPECT_EQ(written, 11);
+ ASSERT_STREQ(buff, "%wfinvalid1");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf-1d%wf1d", 5807, 5807);
+ EXPECT_EQ(written, 7);
+ ASSERT_STREQ(buff, "%wf-1d1");
+
+ written = LIBC_NAMESPACE::sprintf(buff, "%wf999d", 9223372036854775807ll);
+ EXPECT_EQ(written, 19);
+ ASSERT_STREQ(buff, "9223372036854775807");
+
// Min Width Tests.
written = LIBC_NAMESPACE::sprintf(buff, "%4d", 789);
diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt
index cb42bc56f51c..28c5b566cc47 100644
--- a/libc/test/src/stdlib/CMakeLists.txt
+++ b/libc/test/src/stdlib/CMakeLists.txt
@@ -168,6 +168,15 @@ add_libc_test(
.strtol_test_support
)
+add_header_library(
+ strfrom_test_support
+ HDRS
+ StrfromTest.h
+ DEPENDS
+ libc.src.__support.CPP.type_traits
+ libc.src.__support.FPUtil.fp_bits
+)
+
add_libc_test(
strfromf_test
SUITE
@@ -175,10 +184,33 @@ add_libc_test(
SRCS
strfromf_test.cpp
DEPENDS
+ .strfrom_test_support
libc.src.stdlib.strfromf
)
add_libc_test(
+ strfromd_test
+ SUITE
+ libc-stdlib-tests
+ SRCS
+ strfromd_test.cpp
+ DEPENDS
+ .strfrom_test_support
+ libc.src.stdlib.strfromd
+)
+
+add_libc_test(
+ strfroml_test
+ SUITE
+ libc-stdlib-tests
+ SRCS
+ strfroml_test.cpp
+ DEPENDS
+ .strfrom_test_support
+ libc.src.stdlib.strfroml
+)
+
+add_libc_test(
abs_test
SUITE
libc-stdlib-tests
diff --git a/libc/test/src/stdlib/StrfromTest.h b/libc/test/src/stdlib/StrfromTest.h
new file mode 100644
index 000000000000..0db507ef0716
--- /dev/null
+++ b/libc/test/src/stdlib/StrfromTest.h
@@ -0,0 +1,500 @@
+//===-- A template class for testing strfrom functions ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/CPP/type_traits.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "test/UnitTest/Test.h"
+
+#define ASSERT_STREQ_LEN(actual_written, actual_str, expected_str) \
+ EXPECT_EQ(actual_written, static_cast<int>(sizeof(expected_str) - 1)); \
+ EXPECT_STREQ(actual_str, expected_str);
+
+template <typename InputT>
+class StrfromTest : public LIBC_NAMESPACE::testing::Test {
+
+ static const bool is_single_prec =
+ LIBC_NAMESPACE::cpp::is_same<InputT, float>::value;
+ static const bool is_double_prec =
+ LIBC_NAMESPACE::cpp::is_same<InputT, double>::value;
+
+ using FunctionT = int (*)(char *, size_t, const char *, InputT fp);
+
+public:
+ void floatDecimalFormat(FunctionT func) {
+ if (is_single_prec)
+ floatDecimalSinglePrec(func);
+ else if (is_double_prec)
+ floatDecimalDoublePrec(func);
+ else
+ floatDecimalLongDoublePrec(func);
+ }
+
+ void floatHexExpFormat(FunctionT func) {
+ if (is_single_prec)
+ floatHexExpSinglePrec(func);
+ else if (is_double_prec)
+ floatHexExpDoublePrec(func);
+ else
+ floatHexExpLongDoublePrec(func);
+ }
+
+ void floatDecimalExpFormat(FunctionT func) {
+ if (is_single_prec)
+ floatDecimalExpSinglePrec(func);
+ else if (is_double_prec)
+ floatDecimalExpDoublePrec(func);
+ else
+ floatDecimalExpLongDoublePrec(func);
+ }
+
+ void floatDecimalAutoFormat(FunctionT func) {
+ if (is_single_prec)
+ floatDecimalAutoSinglePrec(func);
+ else if (is_double_prec)
+ floatDecimalAutoDoublePrec(func);
+ else
+ floatDecimalAutoLongDoublePrec(func);
+ }
+
+ void improperFormatString(FunctionT func) {
+ char buff[100];
+ int written;
+ const bool is_long_double = !is_single_prec && !is_double_prec;
+
+ written = func(buff, 37, "A simple string with no conversions.", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "A simple string with no conversions.");
+
+ written =
+ func(buff, 37,
+ "%A simple string with one conversion, should overwrite.", 1.0);
+ if (is_long_double) {
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ ASSERT_STREQ_LEN(written, buff, "0X8P-3");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
+ ASSERT_STREQ_LEN(written, buff, "0X1P+0");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT128)
+ ASSERT_STREQ_LEN(written, buff, "0X1P+0");
+#endif
+ } else {
+ // not long double
+ ASSERT_STREQ_LEN(written, buff, "0X1P+0");
+ }
+ written = func(buff, 74,
+ "A simple string with one conversion in %A "
+ "between, writes string as it is",
+ 1.0);
+ ASSERT_STREQ_LEN(written, buff,
+ "A simple string with one conversion in %A between, "
+ "writes string as it is");
+
+ written = func(buff, 36, "A simple string with one conversion", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "A simple string with one conversion");
+
+ written = func(buff, 20, "%1f", 1234567890.0);
+ ASSERT_STREQ_LEN(written, buff, "%1f");
+ }
+
+ void insufficentBufsize(FunctionT func) {
+ char buff[20];
+ int written;
+
+ written = func(buff, 5, "%f", 1234567890.0);
+ EXPECT_EQ(written, 17);
+ ASSERT_STREQ(buff, "1234");
+
+ written = func(buff, 5, "%.5f", 1.05);
+ EXPECT_EQ(written, 7);
+ ASSERT_STREQ(buff, "1.05");
+
+ written = func(buff, 0, "%g", 1.0);
+ EXPECT_EQ(written, 1);
+ ASSERT_STREQ(buff, "1.05"); // Make sure that buff has not changed
+ }
+
+ void infNanValues(FunctionT func) {
+ if (is_double_prec)
+ doublePrecInfNan(func);
+ else if (!is_single_prec)
+ longDoublePrecInfNan(func);
+ }
+
+ void floatDecimalSinglePrec(FunctionT func) {
+ char buff[70];
+ int written;
+
+ written = func(buff, 16, "%f", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "1.000000");
+
+ written = func(buff, 20, "%f", 1234567890.0);
+ ASSERT_STREQ_LEN(written, buff, "1234567936.000000");
+
+ written = func(buff, 67, "%.3f", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "1.000");
+ }
+
+ void floatDecimalDoublePrec(FunctionT func) {
+ char buff[500];
+ int written;
+
+ written = func(buff, 99, "%f", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "1.000000");
+
+ written = func(buff, 99, "%F", -1.0);
+ ASSERT_STREQ_LEN(written, buff, "-1.000000");
+
+ written = func(buff, 99, "%f", -1.234567);
+ ASSERT_STREQ_LEN(written, buff, "-1.234567");
+
+ written = func(buff, 99, "%f", 0.0);
+ ASSERT_STREQ_LEN(written, buff, "0.000000");
+
+ written = func(buff, 99, "%f", 1.5);
+ ASSERT_STREQ_LEN(written, buff, "1.500000");
+
+ written = func(buff, 499, "%f", 1e300);
+ ASSERT_STREQ_LEN(written, buff,
+ "100000000000000005250476025520442024870446858110815915491"
+ "585411551180245"
+ "798890819578637137508044786404370444383288387817694252323"
+ "536043057564479"
+ "218478670698284838720092657580373783023379478809005936895"
+ "323497079994508"
+ "111903896764088007465274278014249457925878882005684283811"
+ "566947219638686"
+ "5459400540160.000000");
+
+ written = func(buff, 99, "%f", 0.1);
+ ASSERT_STREQ_LEN(written, buff, "0.100000");
+
+ written = func(buff, 99, "%f", 1234567890123456789.0);
+ ASSERT_STREQ_LEN(written, buff, "1234567890123456768.000000");
+
+ written = func(buff, 99, "%f", 9999999999999.99);
+ ASSERT_STREQ_LEN(written, buff, "9999999999999.990234");
+
+ written = func(buff, 99, "%f", 0.1);
+ ASSERT_STREQ_LEN(written, buff, "0.100000");
+
+ written = func(buff, 99, "%f", 1234567890123456789.0);
+ ASSERT_STREQ_LEN(written, buff, "1234567890123456768.000000");
+
+ written = func(buff, 99, "%f", 9999999999999.99);
+ ASSERT_STREQ_LEN(written, buff, "9999999999999.990234");
+
+ // Precision Tests
+ written = func(buff, 100, "%.2f", 9999999999999.99);
+ ASSERT_STREQ_LEN(written, buff, "9999999999999.99");
+
+ written = func(buff, 100, "%.1f", 9999999999999.99);
+ ASSERT_STREQ_LEN(written, buff, "10000000000000.0");
+
+ written = func(buff, 100, "%.5f", 1.25);
+ ASSERT_STREQ_LEN(written, buff, "1.25000");
+
+ written = func(buff, 100, "%.0f", 1.25);
+ ASSERT_STREQ_LEN(written, buff, "1");
+
+ written = func(buff, 100, "%.20f", 1.234e-10);
+ ASSERT_STREQ_LEN(written, buff, "0.00000000012340000000");
+ }
+
+ void floatDecimalLongDoublePrec(FunctionT func) {
+ char buff[45];
+ int written;
+
+ written = func(buff, 40, "%f", 1.0L);
+ ASSERT_STREQ_LEN(written, buff, "1.000000");
+
+ written = func(buff, 10, "%.f", -2.5L);
+ ASSERT_STREQ_LEN(written, buff, "-2");
+ }
+
+ void floatHexExpSinglePrec(FunctionT func) {
+ char buff[25];
+ int written;
+
+ written = func(buff, 0, "%a", 1234567890.0);
+ EXPECT_EQ(written, 14);
+
+ written = func(buff, 20, "%a", 1234567890.0);
+ EXPECT_EQ(written, 14);
+ ASSERT_STREQ(buff, "0x1.26580cp+30");
+
+ written = func(buff, 20, "%A", 1234567890.0);
+ EXPECT_EQ(written, 14);
+ ASSERT_STREQ(buff, "0X1.26580CP+30");
+ }
+
+ void floatHexExpDoublePrec(FunctionT func) {
+ char buff[60];
+ int written;
+
+ written = func(buff, 10, "%a", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "0x1p+0");
+
+ written = func(buff, 10, "%A", -1.0);
+ ASSERT_STREQ_LEN(written, buff, "-0X1P+0");
+
+ written = func(buff, 30, "%a", -0x1.abcdef12345p0);
+ ASSERT_STREQ_LEN(written, buff, "-0x1.abcdef12345p+0");
+
+ written = func(buff, 50, "%A", 0x1.abcdef12345p0);
+ ASSERT_STREQ_LEN(written, buff, "0X1.ABCDEF12345P+0");
+
+ written = func(buff, 10, "%a", 0.0);
+ ASSERT_STREQ_LEN(written, buff, "0x0p+0");
+
+ written = func(buff, 40, "%a", 1.0e100);
+ ASSERT_STREQ_LEN(written, buff, "0x1.249ad2594c37dp+332");
+
+ written = func(buff, 30, "%a", 0.1);
+ ASSERT_STREQ_LEN(written, buff, "0x1.999999999999ap-4");
+ }
+
+ void floatHexExpLongDoublePrec(FunctionT func) {
+ char buff[55];
+ int written;
+
+ written = func(buff, 50, "%a", 0.1L);
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ ASSERT_STREQ_LEN(written, buff, "0xc.ccccccccccccccdp-7");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
+ ASSERT_STREQ_LEN(written, buff, "0x1.999999999999ap-4");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT128)
+ ASSERT_STREQ_LEN(written, buff, "0x1.999999999999999999999999999ap-4");
+#endif
+
+ written = func(buff, 20, "%.1a", 0.1L);
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ ASSERT_STREQ_LEN(written, buff, "0xc.dp-7");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
+ ASSERT_STREQ_LEN(written, buff, "0x1.ap-4");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT128)
+ ASSERT_STREQ_LEN(written, buff, "0x1.ap-4");
+#endif
+
+ written = func(buff, 50, "%a", 1.0e1000L);
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ ASSERT_STREQ_LEN(written, buff, "0xf.38db1f9dd3dac05p+3318");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
+ ASSERT_STREQ_LEN(written, buff, "inf");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT128)
+ ASSERT_STREQ_LEN(written, buff, "0x1.e71b63f3ba7b580af1a52d2a7379p+3321");
+#endif
+
+ written = func(buff, 50, "%a", 1.0e-1000L);
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ ASSERT_STREQ_LEN(written, buff, "0x8.68a9188a89e1467p-3325");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
+ ASSERT_STREQ_LEN(written, buff, "0x0p+0");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT128)
+ ASSERT_STREQ_LEN(written, buff, "0x1.0d152311513c28ce202627c06ec2p-3322");
+#endif
+
+ written = func(buff, 50, "%.1a", 0xf.fffffffffffffffp16380L);
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ ASSERT_STREQ_LEN(written, buff, "0x1.0p+16384");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
+ ASSERT_STREQ_LEN(written, buff, "inf");
+#elif defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT128)
+ ASSERT_STREQ_LEN(written, buff, "0x2.0p+16383");
+#endif
+ }
+
+ void floatDecimalExpSinglePrec(FunctionT func) {
+ char buff[25];
+ int written;
+
+ written = func(buff, 20, "%.9e", 1234567890.0);
+ ASSERT_STREQ_LEN(written, buff, "1.234567936e+09");
+
+ written = func(buff, 20, "%.9E", 1234567890.0);
+ ASSERT_STREQ_LEN(written, buff, "1.234567936E+09");
+ }
+
+ void floatDecimalExpDoublePrec(FunctionT func) {
+ char buff[101];
+ int written;
+
+ written = func(buff, 100, "%e", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "1.000000e+00");
+
+ written = func(buff, 100, "%E", -1.0);
+ ASSERT_STREQ_LEN(written, buff, "-1.000000E+00");
+
+ written = func(buff, 100, "%e", -1.234567);
+ ASSERT_STREQ_LEN(written, buff, "-1.234567e+00");
+
+ written = func(buff, 100, "%e", 0.0);
+ ASSERT_STREQ_LEN(written, buff, "0.000000e+00");
+
+ written = func(buff, 100, "%e", 1.5);
+ ASSERT_STREQ_LEN(written, buff, "1.500000e+00");
+
+ written = func(buff, 100, "%e", 1e300);
+ ASSERT_STREQ_LEN(written, buff, "1.000000e+300");
+
+ written = func(buff, 100, "%e", 1234567890123456789.0);
+ ASSERT_STREQ_LEN(written, buff, "1.234568e+18");
+
+ // Precision Tests
+ written = func(buff, 100, "%.1e", 1.0);
+ ASSERT_STREQ_LEN(written, buff, "1.0e+00");
+
+ written = func(buff, 100, "%.1e", 1.99);
+ ASSERT_STREQ_LEN(written, buff, "2.0e+00");
+
+ written = func(buff, 100, "%.1e", 9.99);
+ ASSERT_STREQ_LEN(written, buff, "1.0e+01");
+ }
+
+ void floatDecimalExpLongDoublePrec(FunctionT func) {
+ // Mark as maybe_unused to silence unused variable
+ // warning when long double is not 80-bit
+ [[maybe_unused]] char buff[100];
+ [[maybe_unused]] int written;
+
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ written = func(buff, 90, "%.9e", 1000000000500000000.1L);
+ ASSERT_STREQ_LEN(written, buff, "1.000000001e+18");
+
+ written = func(buff, 90, "%.9e", 1000000000500000000.0L);
+ ASSERT_STREQ_LEN(written, buff, "1.000000000e+18");
+
+ written = func(buff, 90, "%e", 0xf.fffffffffffffffp+16380L);
+ ASSERT_STREQ_LEN(written, buff, "1.189731e+4932");
+#endif // LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80
+ }
+
+ void floatDecimalAutoSinglePrec(FunctionT func) {
+ char buff[25];
+ int written;
+
+ written = func(buff, 20, "%.9g", 1234567890.0);
+ ASSERT_STREQ_LEN(written, buff, "1.23456794e+09");
+
+ written = func(buff, 20, "%.9G", 1234567890.0);
+ ASSERT_STREQ_LEN(written, buff, "1.23456794E+09");
+ }
+
+ void floatDecimalAutoDoublePrec(FunctionT func) {
+ char buff[120];
+ int written;
+
+ written = func(buff, 100, "%g", 1234567890123456789.0);
+ ASSERT_STREQ_LEN(written, buff, "1.23457e+18");
+
+ written = func(buff, 100, "%g", 9999990000000.00);
+ ASSERT_STREQ_LEN(written, buff, "9.99999e+12");
+
+ written = func(buff, 100, "%g", 9999999000000.00);
+ ASSERT_STREQ_LEN(written, buff, "1e+13");
+
+ written = func(buff, 100, "%g", 0xa.aaaaaaaaaaaaaabp-7);
+ ASSERT_STREQ_LEN(written, buff, "0.0833333");
+
+ written = func(buff, 100, "%g", 0.00001);
+ ASSERT_STREQ_LEN(written, buff, "1e-05");
+
+ // Precision Tests
+ written = func(buff, 100, "%.0g", 0.0);
+ ASSERT_STREQ_LEN(written, buff, "0");
+
+ written = func(buff, 100, "%.2g", 0.1);
+ ASSERT_STREQ_LEN(written, buff, "0.1");
+
+ written = func(buff, 100, "%.2g", 1.09);
+ ASSERT_STREQ_LEN(written, buff, "1.1");
+
+ written = func(buff, 100, "%.15g", 22.25);
+ ASSERT_STREQ_LEN(written, buff, "22.25");
+
+ written = func(buff, 100, "%.20g", 1.234e-10);
+ ASSERT_STREQ_LEN(written, buff, "1.2340000000000000814e-10");
+ }
+
+ void floatDecimalAutoLongDoublePrec(FunctionT func) {
+ // Mark as maybe_unused to silence unused variable
+ // warning when long double is not 80-bit
+ [[maybe_unused]] char buff[100];
+ [[maybe_unused]] int written;
+
+#if defined(LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80)
+ written = func(buff, 99, "%g", 0xf.fffffffffffffffp+16380L);
+ ASSERT_STREQ_LEN(written, buff, "1.18973e+4932");
+
+ written = func(buff, 99, "%g", 0xa.aaaaaaaaaaaaaabp-7L);
+ ASSERT_STREQ_LEN(written, buff, "0.0833333");
+
+ written = func(buff, 99, "%g", 9.99999999999e-100L);
+ ASSERT_STREQ_LEN(written, buff, "1e-99");
+#endif // LIBC_TYPES_LONG_DOUBLE_IS_X86_FLOAT80
+ }
+
+ void doublePrecInfNan(FunctionT func) {
+ char buff[15];
+ int written;
+
+ double inf = LIBC_NAMESPACE::fputil::FPBits<double>::inf().get_val();
+ double nan = LIBC_NAMESPACE::fputil::FPBits<double>::quiet_nan().get_val();
+
+ written = func(buff, 10, "%f", inf);
+ ASSERT_STREQ_LEN(written, buff, "inf");
+
+ written = func(buff, 10, "%A", -inf);
+ ASSERT_STREQ_LEN(written, buff, "-INF");
+
+ written = func(buff, 10, "%f", nan);
+ ASSERT_STREQ_LEN(written, buff, "nan");
+
+ written = func(buff, 10, "%A", -nan);
+ ASSERT_STREQ_LEN(written, buff, "-NAN");
+ }
+
+ void longDoublePrecInfNan(FunctionT func) {
+ char buff[15];
+ int written;
+
+ long double ld_inf =
+ LIBC_NAMESPACE::fputil::FPBits<long double>::inf().get_val();
+ long double ld_nan =
+ LIBC_NAMESPACE::fputil::FPBits<long double>::quiet_nan().get_val();
+
+ written = func(buff, 10, "%f", ld_inf);
+ ASSERT_STREQ_LEN(written, buff, "inf");
+
+ written = func(buff, 10, "%A", -ld_inf);
+ ASSERT_STREQ_LEN(written, buff, "-INF");
+
+ written = func(buff, 10, "%f", ld_nan);
+ ASSERT_STREQ_LEN(written, buff, "nan");
+
+ written = func(buff, 10, "%A", -ld_nan);
+ ASSERT_STREQ_LEN(written, buff, "-NAN");
+ }
+};
+
+#define STRFROM_TEST(InputType, name, func) \
+ using LlvmLibc##name##Test = StrfromTest<InputType>; \
+ TEST_F(LlvmLibc##name##Test, FloatDecimalFormat) { \
+ floatDecimalFormat(func); \
+ } \
+ TEST_F(LlvmLibc##name##Test, FloatHexExpFormat) { floatHexExpFormat(func); } \
+ TEST_F(LlvmLibc##name##Test, FloatDecimalAutoFormat) { \
+ floatDecimalAutoFormat(func); \
+ } \
+ TEST_F(LlvmLibc##name##Test, FloatDecimalExpFormat) { \
+ floatDecimalExpFormat(func); \
+ } \
+ TEST_F(LlvmLibc##name##Test, ImproperFormatString) { \
+ improperFormatString(func); \
+ } \
+ TEST_F(LlvmLibc##name##Test, InsufficientBufferSize) { \
+ insufficentBufsize(func); \
+ } \
+ TEST_F(LlvmLibc##name##Test, InfAndNanValues) { infNanValues(func); }
diff --git a/libc/test/src/stdlib/strfromd_test.cpp b/libc/test/src/stdlib/strfromd_test.cpp
new file mode 100644
index 000000000000..55724d7e902b
--- /dev/null
+++ b/libc/test/src/stdlib/strfromd_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for strfromd --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "StrfromTest.h"
+#include "src/stdlib/strfromd.h"
+#include "test/UnitTest/Test.h"
+
+STRFROM_TEST(double, Strfromd, LIBC_NAMESPACE::strfromd)
diff --git a/libc/test/src/stdlib/strfromf_test.cpp b/libc/test/src/stdlib/strfromf_test.cpp
index c5489f5f3af2..8b987fd434ac 100644
--- a/libc/test/src/stdlib/strfromf_test.cpp
+++ b/libc/test/src/stdlib/strfromf_test.cpp
@@ -6,102 +6,8 @@
//
//===----------------------------------------------------------------------===//
+#include "StrfromTest.h"
#include "src/stdlib/strfromf.h"
#include "test/UnitTest/Test.h"
-TEST(LlvmLibcStrfromfTest, DecimalFloatFormat) {
- char buff[100];
- int written;
-
- written = LIBC_NAMESPACE::strfromf(buff, 16, "%f", 1.0);
- EXPECT_EQ(written, 8);
- ASSERT_STREQ(buff, "1.000000");
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%f", 1234567890.0);
- EXPECT_EQ(written, 17);
- ASSERT_STREQ(buff, "1234567936.000000");
-
- written = LIBC_NAMESPACE::strfromf(buff, 5, "%f", 1234567890.0);
- EXPECT_EQ(written, 17);
- ASSERT_STREQ(buff, "1234");
-
- written = LIBC_NAMESPACE::strfromf(buff, 67, "%.3f", 1.0);
- EXPECT_EQ(written, 5);
- ASSERT_STREQ(buff, "1.000");
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%1f", 1234567890.0);
- EXPECT_EQ(written, 3);
- ASSERT_STREQ(buff, "%1f");
-}
-
-TEST(LlvmLibcStrfromfTest, HexExpFloatFormat) {
- char buff[100];
- int written;
-
- written = LIBC_NAMESPACE::strfromf(buff, 0, "%a", 1234567890.0);
- EXPECT_EQ(written, 14);
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%a", 1234567890.0);
- EXPECT_EQ(written, 14);
- ASSERT_STREQ(buff, "0x1.26580cp+30");
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%A", 1234567890.0);
- EXPECT_EQ(written, 14);
- ASSERT_STREQ(buff, "0X1.26580CP+30");
-}
-
-TEST(LlvmLibcStrfromfTest, DecimalExpFloatFormat) {
- char buff[100];
- int written;
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%.9e", 1234567890.0);
- EXPECT_EQ(written, 15);
- ASSERT_STREQ(buff, "1.234567936e+09");
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%.9E", 1234567890.0);
- EXPECT_EQ(written, 15);
- ASSERT_STREQ(buff, "1.234567936E+09");
-}
-
-TEST(LlvmLibcStrfromfTest, AutoDecimalFloatFormat) {
- char buff[100];
- int written;
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%.9g", 1234567890.0);
- EXPECT_EQ(written, 14);
- ASSERT_STREQ(buff, "1.23456794e+09");
-
- written = LIBC_NAMESPACE::strfromf(buff, 20, "%.9G", 1234567890.0);
- EXPECT_EQ(written, 14);
- ASSERT_STREQ(buff, "1.23456794E+09");
-
- written = LIBC_NAMESPACE::strfromf(buff, 0, "%G", 1.0);
- EXPECT_EQ(written, 1);
-}
-
-TEST(LlvmLibcStrfromfTest, ImproperFormatString) {
-
- char buff[100];
- int retval;
- retval = LIBC_NAMESPACE::strfromf(
- buff, 37, "A simple string with no conversions.", 1.0);
- EXPECT_EQ(retval, 36);
- ASSERT_STREQ(buff, "A simple string with no conversions.");
-
- retval = LIBC_NAMESPACE::strfromf(
- buff, 37, "%A simple string with one conversion, should overwrite.", 1.0);
- EXPECT_EQ(retval, 6);
- ASSERT_STREQ(buff, "0X1P+0");
-
- retval = LIBC_NAMESPACE::strfromf(buff, 74,
- "A simple string with one conversion in %A "
- "between, writes string as it is",
- 1.0);
- EXPECT_EQ(retval, 73);
- ASSERT_STREQ(buff, "A simple string with one conversion in %A between, "
- "writes string as it is");
-
- retval = LIBC_NAMESPACE::strfromf(buff, 36,
- "A simple string with one conversion", 1.0);
- EXPECT_EQ(retval, 35);
- ASSERT_STREQ(buff, "A simple string with one conversion");
-}
+STRFROM_TEST(float, StrFromf, LIBC_NAMESPACE::strfromf)
diff --git a/libc/test/src/stdlib/strfroml_test.cpp b/libc/test/src/stdlib/strfroml_test.cpp
new file mode 100644
index 000000000000..cf472a39a5bf
--- /dev/null
+++ b/libc/test/src/stdlib/strfroml_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for strfroml --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "StrfromTest.h"
+#include "src/stdlib/strfroml.h"
+#include "test/UnitTest/Test.h"
+
+STRFROM_TEST(long double, Strfroml, LIBC_NAMESPACE::strfroml)
diff --git a/libc/test/src/sys/CMakeLists.txt b/libc/test/src/sys/CMakeLists.txt
index 7f228e709046..dc0aa8bf7b75 100644
--- a/libc/test/src/sys/CMakeLists.txt
+++ b/libc/test/src/sys/CMakeLists.txt
@@ -5,6 +5,7 @@ add_subdirectory(select)
add_subdirectory(sendfile)
add_subdirectory(socket)
add_subdirectory(stat)
+add_subdirectory(statvfs)
add_subdirectory(utsname)
add_subdirectory(wait)
add_subdirectory(prctl)
diff --git a/libc/test/src/sys/statvfs/CMakeLists.txt b/libc/test/src/sys/statvfs/CMakeLists.txt
new file mode 100644
index 000000000000..b4bbe81c92ff
--- /dev/null
+++ b/libc/test/src/sys/statvfs/CMakeLists.txt
@@ -0,0 +1,3 @@
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
+ add_subdirectory(${LIBC_TARGET_OS})
+endif()
diff --git a/libc/test/src/sys/statvfs/linux/CMakeLists.txt b/libc/test/src/sys/statvfs/linux/CMakeLists.txt
new file mode 100644
index 000000000000..1f8688868e04
--- /dev/null
+++ b/libc/test/src/sys/statvfs/linux/CMakeLists.txt
@@ -0,0 +1,29 @@
+add_custom_target(libc_sys_statvfs_unittests)
+
+add_libc_unittest(
+ statvfs_test
+ SUITE
+ libc_sys_statvfs_unittests
+ SRCS
+ statvfs_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.sys.statvfs.linux.statfs_utils
+ libc.src.sys.statvfs.statvfs
+ libc.test.UnitTest.ErrnoSetterMatcher
+)
+
+add_libc_unittest(
+ fstatvfs_test
+ SUITE
+ libc_sys_statvfs_unittests
+ SRCS
+ fstatvfs_test.cpp
+ DEPENDS
+ libc.src.errno.errno
+ libc.src.sys.statvfs.linux.statfs_utils
+ libc.src.sys.statvfs.fstatvfs
+ libc.src.fcntl.open
+ libc.src.unistd.close
+ libc.test.UnitTest.ErrnoSetterMatcher
+)
diff --git a/libc/test/src/sys/statvfs/linux/fstatvfs_test.cpp b/libc/test/src/sys/statvfs/linux/fstatvfs_test.cpp
new file mode 100644
index 000000000000..bd5195c7969b
--- /dev/null
+++ b/libc/test/src/sys/statvfs/linux/fstatvfs_test.cpp
@@ -0,0 +1,42 @@
+#include "llvm-libc-macros/linux/fcntl-macros.h"
+#include "src/fcntl/open.h"
+#include "src/sys/statvfs/fstatvfs.h"
+#include "src/sys/statvfs/linux/statfs_utils.h"
+#include "src/unistd/close.h"
+#include "test/UnitTest/ErrnoSetterMatcher.h"
+#include "test/UnitTest/LibcTest.h"
+#include <linux/magic.h>
+using namespace LIBC_NAMESPACE::testing::ErrnoSetterMatcher;
+
+namespace LIBC_NAMESPACE {
+static int fstatfs(int fd, struct statfs *buf) {
+ using namespace statfs_utils;
+ if (cpp::optional<LinuxStatFs> result = linux_fstatfs(fd)) {
+ *buf = *result;
+ return 0;
+ }
+ return -1;
+}
+} // namespace LIBC_NAMESPACE
+
+struct PathFD {
+ int fd;
+ explicit PathFD(const char *path)
+ : fd(LIBC_NAMESPACE::open(path, O_CLOEXEC | O_PATH)) {}
+ ~PathFD() { LIBC_NAMESPACE::close(fd); }
+ operator int() const { return fd; }
+};
+
+TEST(LlvmLibcSysStatvfsTest, FstatfsBasic) {
+ struct statfs buf;
+ ASSERT_THAT(LIBC_NAMESPACE::fstatfs(PathFD("/"), &buf), Succeeds());
+ ASSERT_THAT(LIBC_NAMESPACE::fstatfs(PathFD("/proc"), &buf), Succeeds());
+ ASSERT_EQ(buf.f_type, static_cast<decltype(buf.f_type)>(PROC_SUPER_MAGIC));
+ ASSERT_THAT(LIBC_NAMESPACE::fstatfs(PathFD("/sys"), &buf), Succeeds());
+ ASSERT_EQ(buf.f_type, static_cast<decltype(buf.f_type)>(SYSFS_MAGIC));
+}
+
+TEST(LlvmLibcSysStatvfsTest, FstatvfsInvalidFD) {
+ struct statvfs buf;
+ ASSERT_THAT(LIBC_NAMESPACE::fstatvfs(-1, &buf), Fails(EBADF));
+}
diff --git a/libc/test/src/sys/statvfs/linux/statvfs_test.cpp b/libc/test/src/sys/statvfs/linux/statvfs_test.cpp
new file mode 100644
index 000000000000..695d2c0d5e98
--- /dev/null
+++ b/libc/test/src/sys/statvfs/linux/statvfs_test.cpp
@@ -0,0 +1,47 @@
+#include "src/sys/statvfs/linux/statfs_utils.h"
+#include "src/sys/statvfs/statvfs.h"
+#include "test/UnitTest/ErrnoSetterMatcher.h"
+#include "test/UnitTest/LibcTest.h"
+#include <linux/magic.h>
+using namespace LIBC_NAMESPACE::testing::ErrnoSetterMatcher;
+
+namespace LIBC_NAMESPACE {
+static int statfs(const char *path, struct statfs *buf) {
+ using namespace statfs_utils;
+ if (cpp::optional<LinuxStatFs> result = linux_statfs(path)) {
+ *buf = *result;
+ return 0;
+ }
+ return -1;
+}
+} // namespace LIBC_NAMESPACE
+
+TEST(LlvmLibcSysStatfsTest, StatfsBasic) {
+ struct statfs buf;
+ ASSERT_THAT(LIBC_NAMESPACE::statfs("/", &buf), Succeeds());
+ ASSERT_THAT(LIBC_NAMESPACE::statfs("/proc", &buf), Succeeds());
+ ASSERT_EQ(buf.f_type, static_cast<decltype(buf.f_type)>(PROC_SUPER_MAGIC));
+ ASSERT_THAT(LIBC_NAMESPACE::statfs("/sys", &buf), Succeeds());
+ ASSERT_EQ(buf.f_type, static_cast<decltype(buf.f_type)>(SYSFS_MAGIC));
+}
+
+TEST(LlvmLibcSysStatfsTest, StatvfsInvalidPath) {
+ struct statvfs buf;
+ ASSERT_THAT(LIBC_NAMESPACE::statvfs("", &buf), Fails(ENOENT));
+ ASSERT_THAT(LIBC_NAMESPACE::statvfs("/nonexistent", &buf), Fails(ENOENT));
+ ASSERT_THAT(LIBC_NAMESPACE::statvfs("/dev/null/whatever", &buf),
+ Fails(ENOTDIR));
+ ASSERT_THAT(LIBC_NAMESPACE::statvfs(nullptr, &buf), Fails(EFAULT));
+}
+
+TEST(LlvmLibcSysStatfsTest, StatvfsNameTooLong) {
+ struct statvfs buf;
+ ASSERT_THAT(LIBC_NAMESPACE::statvfs("/", &buf), Succeeds());
+ char *name = static_cast<char *>(__builtin_alloca(buf.f_namemax + 3));
+ name[0] = '/';
+ name[buf.f_namemax + 2] = '\0';
+ for (unsigned i = 1; i < buf.f_namemax + 2; ++i) {
+ name[i] = 'a';
+ }
+ ASSERT_THAT(LIBC_NAMESPACE::statvfs(name, &buf), Fails(ENAMETOOLONG));
+}
diff --git a/libc/utils/gpu/loader/Loader.h b/libc/utils/gpu/loader/Loader.h
index 933803837019..9c7d328930c2 100644
--- a/libc/utils/gpu/loader/Loader.h
+++ b/libc/utils/gpu/loader/Loader.h
@@ -108,11 +108,11 @@ inline void handle_error(rpc_status_t) {
}
template <uint32_t lane_size>
-inline void register_rpc_callbacks(uint32_t device_id) {
+inline void register_rpc_callbacks(rpc_device_t device) {
static_assert(lane_size == 32 || lane_size == 64, "Invalid Lane size");
// Register the ping test for the `libc` tests.
rpc_register_callback(
- device_id, static_cast<rpc_opcode_t>(RPC_TEST_INCREMENT),
+ device, static_cast<rpc_opcode_t>(RPC_TEST_INCREMENT),
[](rpc_port_t port, void *data) {
rpc_recv_and_send(
port,
@@ -125,7 +125,7 @@ inline void register_rpc_callbacks(uint32_t device_id) {
// Register the interface test callbacks.
rpc_register_callback(
- device_id, static_cast<rpc_opcode_t>(RPC_TEST_INTERFACE),
+ device, static_cast<rpc_opcode_t>(RPC_TEST_INTERFACE),
[](rpc_port_t port, void *data) {
uint64_t cnt = 0;
bool end_with_recv;
@@ -207,7 +207,7 @@ inline void register_rpc_callbacks(uint32_t device_id) {
// Register the stream test handler.
rpc_register_callback(
- device_id, static_cast<rpc_opcode_t>(RPC_TEST_STREAM),
+ device, static_cast<rpc_opcode_t>(RPC_TEST_STREAM),
[](rpc_port_t port, void *data) {
uint64_t sizes[lane_size] = {0};
void *dst[lane_size] = {nullptr};
diff --git a/libc/utils/gpu/loader/amdgpu/Loader.cpp b/libc/utils/gpu/loader/amdgpu/Loader.cpp
index e3911eda2bd8..35840c6910bd 100644
--- a/libc/utils/gpu/loader/amdgpu/Loader.cpp
+++ b/libc/utils/gpu/loader/amdgpu/Loader.cpp
@@ -153,7 +153,8 @@ template <typename args_t>
hsa_status_t launch_kernel(hsa_agent_t dev_agent, hsa_executable_t executable,
hsa_amd_memory_pool_t kernargs_pool,
hsa_amd_memory_pool_t coarsegrained_pool,
- hsa_queue_t *queue, const LaunchParameters &params,
+ hsa_queue_t *queue, rpc_device_t device,
+ const LaunchParameters &params,
const char *kernel_name, args_t kernel_args) {
// Look up the '_start' kernel in the loaded executable.
hsa_executable_symbol_t symbol;
@@ -162,10 +163,9 @@ hsa_status_t launch_kernel(hsa_agent_t dev_agent, hsa_executable_t executable,
return err;
// Register RPC callbacks for the malloc and free functions on HSA.
- uint32_t device_id = 0;
auto tuple = std::make_tuple(dev_agent, coarsegrained_pool);
rpc_register_callback(
- device_id, RPC_MALLOC,
+ device, RPC_MALLOC,
[](rpc_port_t port, void *data) {
auto malloc_handler = [](rpc_buffer_t *buffer, void *data) -> void {
auto &[dev_agent, pool] = *static_cast<decltype(tuple) *>(data);
@@ -182,7 +182,7 @@ hsa_status_t launch_kernel(hsa_agent_t dev_agent, hsa_executable_t executable,
},
&tuple);
rpc_register_callback(
- device_id, RPC_FREE,
+ device, RPC_FREE,
[](rpc_port_t port, void *data) {
auto free_handler = [](rpc_buffer_t *buffer, void *) {
if (hsa_status_t err = hsa_amd_memory_pool_free(
@@ -284,12 +284,12 @@ hsa_status_t launch_kernel(hsa_agent_t dev_agent, hsa_executable_t executable,
while (hsa_signal_wait_scacquire(
packet->completion_signal, HSA_SIGNAL_CONDITION_EQ, 0,
/*timeout_hint=*/1024, HSA_WAIT_STATE_ACTIVE) != 0)
- if (rpc_status_t err = rpc_handle_server(device_id))
+ if (rpc_status_t err = rpc_handle_server(device))
handle_error(err);
// Handle the server one more time in case the kernel exited with a pending
// send still in flight.
- if (rpc_status_t err = rpc_handle_server(device_id))
+ if (rpc_status_t err = rpc_handle_server(device))
handle_error(err);
// Destroy the resources acquired to launch the kernel and return.
@@ -342,8 +342,6 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
// Obtain a single agent for the device and host to use the HSA memory model.
- uint32_t num_devices = 1;
- uint32_t device_id = 0;
hsa_agent_t dev_agent;
hsa_agent_t host_agent;
if (hsa_status_t err = get_agent<HSA_DEVICE_TYPE_GPU>(&dev_agent))
@@ -433,8 +431,6 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
// Set up the RPC server.
- if (rpc_status_t err = rpc_init(num_devices))
- handle_error(err);
auto tuple = std::make_tuple(dev_agent, finegrained_pool);
auto rpc_alloc = [](uint64_t size, void *data) {
auto &[dev_agent, finegrained_pool] = *static_cast<decltype(tuple) *>(data);
@@ -445,15 +441,16 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
hsa_amd_agents_allow_access(1, &dev_agent, nullptr, dev_ptr);
return dev_ptr;
};
- if (rpc_status_t err = rpc_server_init(device_id, RPC_MAXIMUM_PORT_COUNT,
+ rpc_device_t device;
+ if (rpc_status_t err = rpc_server_init(&device, RPC_MAXIMUM_PORT_COUNT,
wavefront_size, rpc_alloc, &tuple))
handle_error(err);
// Register callbacks for the RPC unit tests.
if (wavefront_size == 32)
- register_rpc_callbacks<32>(device_id);
+ register_rpc_callbacks<32>(device);
else if (wavefront_size == 64)
- register_rpc_callbacks<64>(device_id);
+ register_rpc_callbacks<64>(device);
else
handle_error("Invalid wavefront size");
@@ -483,10 +480,10 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
void *rpc_client_buffer;
- if (hsa_status_t err = hsa_amd_memory_lock(
- const_cast<void *>(rpc_get_client_buffer(device_id)),
- rpc_get_client_size(),
- /*agents=*/nullptr, 0, &rpc_client_buffer))
+ if (hsa_status_t err =
+ hsa_amd_memory_lock(const_cast<void *>(rpc_get_client_buffer(device)),
+ rpc_get_client_size(),
+ /*agents=*/nullptr, 0, &rpc_client_buffer))
handle_error(err);
// Copy the RPC client buffer to the address pointed to by the symbol.
@@ -496,7 +493,7 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
if (hsa_status_t err = hsa_amd_memory_unlock(
- const_cast<void *>(rpc_get_client_buffer(device_id))))
+ const_cast<void *>(rpc_get_client_buffer(device))))
handle_error(err);
if (hsa_status_t err = hsa_amd_memory_pool_free(rpc_client_host))
handle_error(err);
@@ -549,13 +546,13 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
begin_args_t init_args = {argc, dev_argv, dev_envp};
if (hsa_status_t err = launch_kernel(
dev_agent, executable, kernargs_pool, coarsegrained_pool, queue,
- single_threaded_params, "_begin.kd", init_args))
+ device, single_threaded_params, "_begin.kd", init_args))
handle_error(err);
start_args_t args = {argc, dev_argv, dev_envp, dev_ret};
- if (hsa_status_t err =
- launch_kernel(dev_agent, executable, kernargs_pool,
- coarsegrained_pool, queue, params, "_start.kd", args))
+ if (hsa_status_t err = launch_kernel(dev_agent, executable, kernargs_pool,
+ coarsegrained_pool, queue, device,
+ params, "_start.kd", args))
handle_error(err);
void *host_ret;
@@ -575,11 +572,11 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
end_args_t fini_args = {ret};
if (hsa_status_t err = launch_kernel(
dev_agent, executable, kernargs_pool, coarsegrained_pool, queue,
- single_threaded_params, "_end.kd", fini_args))
+ device, single_threaded_params, "_end.kd", fini_args))
handle_error(err);
if (rpc_status_t err = rpc_server_shutdown(
- device_id, [](void *ptr, void *) { hsa_amd_memory_pool_free(ptr); },
+ device, [](void *ptr, void *) { hsa_amd_memory_pool_free(ptr); },
nullptr))
handle_error(err);
@@ -600,8 +597,6 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
if (hsa_status_t err = hsa_code_object_destroy(object))
handle_error(err);
- if (rpc_status_t err = rpc_shutdown())
- handle_error(err);
if (hsa_status_t err = hsa_shut_down())
handle_error(err);
diff --git a/libc/utils/gpu/loader/nvptx/Loader.cpp b/libc/utils/gpu/loader/nvptx/Loader.cpp
index 5388f287063b..1818932f0a96 100644
--- a/libc/utils/gpu/loader/nvptx/Loader.cpp
+++ b/libc/utils/gpu/loader/nvptx/Loader.cpp
@@ -154,8 +154,8 @@ Expected<void *> get_ctor_dtor_array(const void *image, const size_t size,
template <typename args_t>
CUresult launch_kernel(CUmodule binary, CUstream stream,
- const LaunchParameters &params, const char *kernel_name,
- args_t kernel_args) {
+ rpc_device_t rpc_device, const LaunchParameters &params,
+ const char *kernel_name, args_t kernel_args) {
// look up the '_start' kernel in the loaded module.
CUfunction function;
if (CUresult err = cuModuleGetFunction(&function, binary, kernel_name))
@@ -175,11 +175,10 @@ CUresult launch_kernel(CUmodule binary, CUstream stream,
handle_error(err);
// Register RPC callbacks for the malloc and free functions on HSA.
- uint32_t device_id = 0;
- register_rpc_callbacks<32>(device_id);
+ register_rpc_callbacks<32>(rpc_device);
rpc_register_callback(
- device_id, RPC_MALLOC,
+ rpc_device, RPC_MALLOC,
[](rpc_port_t port, void *data) {
auto malloc_handler = [](rpc_buffer_t *buffer, void *data) -> void {
CUstream memory_stream = *static_cast<CUstream *>(data);
@@ -197,7 +196,7 @@ CUresult launch_kernel(CUmodule binary, CUstream stream,
},
&memory_stream);
rpc_register_callback(
- device_id, RPC_FREE,
+ rpc_device, RPC_FREE,
[](rpc_port_t port, void *data) {
auto free_handler = [](rpc_buffer_t *buffer, void *data) {
CUstream memory_stream = *static_cast<CUstream *>(data);
@@ -219,12 +218,12 @@ CUresult launch_kernel(CUmodule binary, CUstream stream,
// Wait until the kernel has completed execution on the device. Periodically
// check the RPC client for work to be performed on the server.
while (cuStreamQuery(stream) == CUDA_ERROR_NOT_READY)
- if (rpc_status_t err = rpc_handle_server(device_id))
+ if (rpc_status_t err = rpc_handle_server(rpc_device))
handle_error(err);
// Handle the server one more time in case the kernel exited with a pending
// send still in flight.
- if (rpc_status_t err = rpc_handle_server(device_id))
+ if (rpc_status_t err = rpc_handle_server(rpc_device))
handle_error(err);
return CUDA_SUCCESS;
@@ -235,7 +234,6 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
if (CUresult err = cuInit(0))
handle_error(err);
// Obtain the first device found on the system.
- uint32_t num_devices = 1;
uint32_t device_id = 0;
CUdevice device;
if (CUresult err = cuDeviceGet(&device, device_id))
@@ -294,9 +292,6 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
if (CUresult err = cuMemsetD32(dev_ret, 0, 1))
handle_error(err);
- if (rpc_status_t err = rpc_init(num_devices))
- handle_error(err);
-
uint32_t warp_size = 32;
auto rpc_alloc = [](uint64_t size, void *) -> void * {
void *dev_ptr;
@@ -304,7 +299,8 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
return dev_ptr;
};
- if (rpc_status_t err = rpc_server_init(device_id, RPC_MAXIMUM_PORT_COUNT,
+ rpc_device_t rpc_device;
+ if (rpc_status_t err = rpc_server_init(&rpc_device, RPC_MAXIMUM_PORT_COUNT,
warp_size, rpc_alloc, nullptr))
handle_error(err);
@@ -321,19 +317,20 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
cuMemcpyDtoH(&rpc_client_host, rpc_client_dev, sizeof(void *)))
handle_error(err);
if (CUresult err =
- cuMemcpyHtoD(rpc_client_host, rpc_get_client_buffer(device_id),
+ cuMemcpyHtoD(rpc_client_host, rpc_get_client_buffer(rpc_device),
rpc_get_client_size()))
handle_error(err);
LaunchParameters single_threaded_params = {1, 1, 1, 1, 1, 1};
begin_args_t init_args = {argc, dev_argv, dev_envp};
- if (CUresult err = launch_kernel(binary, stream, single_threaded_params,
- "_begin", init_args))
+ if (CUresult err = launch_kernel(binary, stream, rpc_device,
+ single_threaded_params, "_begin", init_args))
handle_error(err);
start_args_t args = {argc, dev_argv, dev_envp,
reinterpret_cast<void *>(dev_ret)};
- if (CUresult err = launch_kernel(binary, stream, params, "_start", args))
+ if (CUresult err =
+ launch_kernel(binary, stream, rpc_device, params, "_start", args))
handle_error(err);
// Copy the return value back from the kernel and wait.
@@ -345,8 +342,8 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
end_args_t fini_args = {host_ret};
- if (CUresult err = launch_kernel(binary, stream, single_threaded_params,
- "_end", fini_args))
+ if (CUresult err = launch_kernel(binary, stream, rpc_device,
+ single_threaded_params, "_end", fini_args))
handle_error(err);
// Free the memory allocated for the device.
@@ -357,7 +354,7 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
if (CUresult err = cuMemFreeHost(dev_argv))
handle_error(err);
if (rpc_status_t err = rpc_server_shutdown(
- device_id, [](void *ptr, void *) { cuMemFreeHost(ptr); }, nullptr))
+ rpc_device, [](void *ptr, void *) { cuMemFreeHost(ptr); }, nullptr))
handle_error(err);
// Destroy the context and the loaded binary.
@@ -365,7 +362,5 @@ int load(int argc, char **argv, char **envp, void *image, size_t size,
handle_error(err);
if (CUresult err = cuDevicePrimaryCtxRelease(device))
handle_error(err);
- if (rpc_status_t err = rpc_shutdown())
- handle_error(err);
return host_ret;
}
diff --git a/libc/utils/gpu/server/llvmlibc_rpc_server.h b/libc/utils/gpu/server/llvmlibc_rpc_server.h
index b7f2a463b1f5..b0cf2f916b38 100644
--- a/libc/utils/gpu/server/llvmlibc_rpc_server.h
+++ b/libc/utils/gpu/server/llvmlibc_rpc_server.h
@@ -27,10 +27,8 @@ typedef enum {
RPC_STATUS_SUCCESS = 0x0,
RPC_STATUS_CONTINUE = 0x1,
RPC_STATUS_ERROR = 0x1000,
- RPC_STATUS_OUT_OF_RANGE = 0x1001,
- RPC_STATUS_UNHANDLED_OPCODE = 0x1002,
- RPC_STATUS_INVALID_LANE_SIZE = 0x1003,
- RPC_STATUS_NOT_INITIALIZED = 0x1004,
+ RPC_STATUS_UNHANDLED_OPCODE = 0x1001,
+ RPC_STATUS_INVALID_LANE_SIZE = 0x1002,
} rpc_status_t;
/// A struct containing an opaque handle to an RPC port. This is what allows the
@@ -45,6 +43,11 @@ typedef struct rpc_buffer_s {
uint64_t data[8];
} rpc_buffer_t;
+/// An opaque handle to an RPC server that can be attached to a device.
+typedef struct rpc_device_s {
+ uintptr_t handle;
+} rpc_device_t;
+
/// A function used to allocate \p bytes for use by the RPC server and client.
/// The memory should support asynchronous and atomic access from both the
/// client and server.
@@ -60,34 +63,28 @@ typedef void (*rpc_opcode_callback_ty)(rpc_port_t port, void *data);
/// A callback function to use the port to receive or send a \p buffer.
typedef void (*rpc_port_callback_ty)(rpc_buffer_t *buffer, void *data);
-/// Initialize the rpc library for general use on \p num_devices.
-rpc_status_t rpc_init(uint32_t num_devices);
-
-/// Shut down the rpc interface.
-rpc_status_t rpc_shutdown(void);
-
-/// Initialize the server for a given device.
-rpc_status_t rpc_server_init(uint32_t device_id, uint64_t num_ports,
+/// Initialize the server for a given device and return it in \p device.
+rpc_status_t rpc_server_init(rpc_device_t *rpc_device, uint64_t num_ports,
uint32_t lane_size, rpc_alloc_ty alloc,
void *data);
/// Shut down the server for a given device.
-rpc_status_t rpc_server_shutdown(uint32_t device_id, rpc_free_ty dealloc,
+rpc_status_t rpc_server_shutdown(rpc_device_t rpc_device, rpc_free_ty dealloc,
void *data);
/// Queries the RPC clients at least once and performs server-side work if there
/// are any active requests. Runs until all work on the server is completed.
-rpc_status_t rpc_handle_server(uint32_t device_id);
+rpc_status_t rpc_handle_server(rpc_device_t rpc_device);
/// Register a callback to handle an opcode from the RPC client. The associated
/// data must remain accessible as long as the user intends to handle the server
/// with this callback.
-rpc_status_t rpc_register_callback(uint32_t device_id, uint16_t opcode,
+rpc_status_t rpc_register_callback(rpc_device_t rpc_device, uint16_t opcode,
rpc_opcode_callback_ty callback, void *data);
/// Obtain a pointer to a local client buffer that can be copied directly to the
/// other process using the address stored at the rpc client symbol name.
-const void *rpc_get_client_buffer(uint32_t device_id);
+const void *rpc_get_client_buffer(rpc_device_t device);
/// Returns the size of the client in bytes to be used for a memory copy.
uint64_t rpc_get_client_size();
diff --git a/libc/utils/gpu/server/rpc_server.cpp b/libc/utils/gpu/server/rpc_server.cpp
index 90af1569c4c5..fd306642fdcc 100644
--- a/libc/utils/gpu/server/rpc_server.cpp
+++ b/libc/utils/gpu/server/rpc_server.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// Workaround for missing __has_builtin in < GCC 10.
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
#include "llvmlibc_rpc_server.h"
#include "src/__support/RPC/rpc.h"
@@ -243,127 +248,75 @@ struct Device {
std::unordered_map<uint16_t, void *> callback_data;
};
-// A struct containing all the runtime state required to run the RPC server.
-struct State {
- State(uint32_t num_devices)
- : num_devices(num_devices), devices(num_devices), reference_count(0u) {}
- uint32_t num_devices;
- std::vector<std::unique_ptr<Device>> devices;
- std::atomic_uint32_t reference_count;
-};
-
-static std::mutex startup_mutex;
-
-static State *state;
-
-rpc_status_t rpc_init(uint32_t num_devices) {
- std::scoped_lock<decltype(startup_mutex)> lock(startup_mutex);
- if (!state)
- state = new State(num_devices);
-
- if (state->reference_count == std::numeric_limits<uint32_t>::max())
- return RPC_STATUS_ERROR;
-
- state->reference_count++;
-
- return RPC_STATUS_SUCCESS;
-}
-
-rpc_status_t rpc_shutdown(void) {
- if (state && state->reference_count-- == 1)
- delete state;
-
- return RPC_STATUS_SUCCESS;
-}
-
-rpc_status_t rpc_server_init(uint32_t device_id, uint64_t num_ports,
+rpc_status_t rpc_server_init(rpc_device_t *rpc_device, uint64_t num_ports,
uint32_t lane_size, rpc_alloc_ty alloc,
void *data) {
- if (!state)
- return RPC_STATUS_NOT_INITIALIZED;
- if (device_id >= state->num_devices)
- return RPC_STATUS_OUT_OF_RANGE;
+ if (!rpc_device)
+ return RPC_STATUS_ERROR;
if (lane_size != 1 && lane_size != 32 && lane_size != 64)
return RPC_STATUS_INVALID_LANE_SIZE;
- if (!state->devices[device_id]) {
- uint64_t size = rpc::Server::allocation_size(lane_size, num_ports);
- void *buffer = alloc(size, data);
+ uint64_t size = rpc::Server::allocation_size(lane_size, num_ports);
+ void *buffer = alloc(size, data);
- if (!buffer)
- return RPC_STATUS_ERROR;
+ if (!buffer)
+ return RPC_STATUS_ERROR;
- state->devices[device_id] =
- std::make_unique<Device>(lane_size, num_ports, buffer);
- if (!state->devices[device_id])
- return RPC_STATUS_ERROR;
- }
+ Device *device = new Device(lane_size, num_ports, buffer);
+ if (!device)
+ return RPC_STATUS_ERROR;
+ rpc_device->handle = reinterpret_cast<uintptr_t>(device);
return RPC_STATUS_SUCCESS;
}
-rpc_status_t rpc_server_shutdown(uint32_t device_id, rpc_free_ty dealloc,
+rpc_status_t rpc_server_shutdown(rpc_device_t rpc_device, rpc_free_ty dealloc,
void *data) {
- if (!state)
- return RPC_STATUS_NOT_INITIALIZED;
- if (device_id >= state->num_devices)
- return RPC_STATUS_OUT_OF_RANGE;
- if (!state->devices[device_id])
+ if (!rpc_device.handle)
return RPC_STATUS_ERROR;
- dealloc(state->devices[device_id]->buffer, data);
- if (state->devices[device_id])
- state->devices[device_id].release();
+ Device *device = reinterpret_cast<Device *>(rpc_device.handle);
+ dealloc(device->buffer, data);
+ delete device;
return RPC_STATUS_SUCCESS;
}
-rpc_status_t rpc_handle_server(uint32_t device_id) {
- if (!state)
- return RPC_STATUS_NOT_INITIALIZED;
- if (device_id >= state->num_devices)
- return RPC_STATUS_OUT_OF_RANGE;
- if (!state->devices[device_id])
+rpc_status_t rpc_handle_server(rpc_device_t rpc_device) {
+ if (!rpc_device.handle)
return RPC_STATUS_ERROR;
+ Device *device = reinterpret_cast<Device *>(rpc_device.handle);
uint32_t index = 0;
for (;;) {
- Device &device = *state->devices[device_id];
- rpc_status_t status = device.handle_server(index);
+ rpc_status_t status = device->handle_server(index);
if (status != RPC_STATUS_CONTINUE)
return status;
}
}
-rpc_status_t rpc_register_callback(uint32_t device_id, uint16_t opcode,
+rpc_status_t rpc_register_callback(rpc_device_t rpc_device, uint16_t opcode,
rpc_opcode_callback_ty callback,
void *data) {
- if (!state)
- return RPC_STATUS_NOT_INITIALIZED;
- if (device_id >= state->num_devices)
- return RPC_STATUS_OUT_OF_RANGE;
- if (!state->devices[device_id])
+ if (!rpc_device.handle)
return RPC_STATUS_ERROR;
- state->devices[device_id]->callbacks[opcode] = callback;
- state->devices[device_id]->callback_data[opcode] = data;
+ Device *device = reinterpret_cast<Device *>(rpc_device.handle);
+
+ device->callbacks[opcode] = callback;
+ device->callback_data[opcode] = data;
return RPC_STATUS_SUCCESS;
}
-const void *rpc_get_client_buffer(uint32_t device_id) {
- if (!state || device_id >= state->num_devices || !state->devices[device_id])
+const void *rpc_get_client_buffer(rpc_device_t rpc_device) {
+ if (!rpc_device.handle)
return nullptr;
- return &state->devices[device_id]->client;
+ Device *device = reinterpret_cast<Device *>(rpc_device.handle);
+ return &device->client;
}
uint64_t rpc_get_client_size() { return sizeof(rpc::Client); }
-using ServerPort = std::variant<rpc::Server::Port *>;
-
-ServerPort get_port(rpc_port_t ref) {
- return reinterpret_cast<rpc::Server::Port *>(ref.handle);
-}
-
void rpc_send(rpc_port_t ref, rpc_port_callback_ty callback, void *data) {
auto port = reinterpret_cast<rpc::Server::Port *>(ref.handle);
port->send([=](rpc::Buffer *buffer) {
diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt
index 745b848fba49..9236f09d3667 100644
--- a/libclc/CMakeLists.txt
+++ b/libclc/CMakeLists.txt
@@ -45,7 +45,7 @@ option( ENABLE_RUNTIME_SUBNORMAL "Enable runtime linking of subnormal support."
find_package(LLVM REQUIRED HINTS "${LLVM_CMAKE_DIR}")
include(AddLLVM)
-message( "LLVM version: ${LLVM_PACKAGE_VERSION}" )
+message( STATUS "libclc LLVM version: ${LLVM_PACKAGE_VERSION}" )
if( ${LLVM_PACKAGE_VERSION} VERSION_LESS ${LIBCLC_MIN_LLVM} )
message( FATAL_ERROR "libclc needs at least LLVM ${LIBCLC_MIN_LLVM}" )
@@ -67,14 +67,13 @@ find_program( LLVM_OPT opt PATHS ${LLVM_TOOLS_BINARY_DIR} NO_DEFAULT_PATH )
find_program( LLVM_SPIRV llvm-spirv PATHS ${LLVM_TOOLS_BINARY_DIR} NO_DEFAULT_PATH )
# Print toolchain
-message( "clang: ${LLVM_CLANG}" )
-message( "llvm-as: ${LLVM_AS}" )
-message( "llvm-link: ${LLVM_LINK}" )
-message( "opt: ${LLVM_OPT}" )
-message( "llvm-spirv: ${LLVM_SPIRV}" )
-message( "" )
+message( STATUS "libclc toolchain - clang: ${LLVM_CLANG}" )
+message( STATUS "libclc toolchain - llvm-as: ${LLVM_AS}" )
+message( STATUS "libclc toolchain - llvm-link: ${LLVM_LINK}" )
+message( STATUS "libclc toolchain - opt: ${LLVM_OPT}" )
+message( STATUS "libclc toolchain - llvm-spirv: ${LLVM_SPIRV}" )
if( NOT LLVM_CLANG OR NOT LLVM_OPT OR NOT LLVM_AS OR NOT LLVM_LINK )
- message( FATAL_ERROR "toolchain incomplete!" )
+ message( FATAL_ERROR "libclc toolchain incomplete!" )
endif()
list( SORT LIBCLC_TARGETS_TO_BUILD )
@@ -182,7 +181,7 @@ add_custom_target( "clspv-generate_convert.cl" DEPENDS clspv-convert.cl )
enable_testing()
foreach( t ${LIBCLC_TARGETS_TO_BUILD} )
- message( "BUILDING ${t}" )
+ message( STATUS "libclc target '${t}' is enabled" )
string( REPLACE "-" ";" TRIPLE ${t} )
list( GET TRIPLE 0 ARCH )
list( GET TRIPLE 1 VENDOR )
@@ -265,7 +264,7 @@ foreach( t ${LIBCLC_TARGETS_TO_BUILD} )
set( mcpu "-mcpu=${d}" )
set( arch_suffix "${d}-${t}" )
endif()
- message( " DEVICE: ${d} ( ${${d}_aliases} )" )
+ message( STATUS " device: ${d} ( ${${d}_aliases} )" )
if ( ${ARCH} STREQUAL "spirv" OR ${ARCH} STREQUAL "spirv64" )
if( ${ARCH} STREQUAL "spirv" )
diff --git a/libclc/cmake/CMakeCLCInformation.cmake b/libclc/cmake/CMakeCLCInformation.cmake
index 6eecf4edf0e7..95327e443972 100644
--- a/libclc/cmake/CMakeCLCInformation.cmake
+++ b/libclc/cmake/CMakeCLCInformation.cmake
@@ -9,3 +9,4 @@ if(NOT CMAKE_CLC_CREATE_STATIC_LIBRARY)
endif()
set(CMAKE_INCLUDE_FLAG_CLC "-I")
+set(CMAKE_DEPFILE_FLAGS_CLC "-MD -MT <DEP_TARGET> -MF <DEP_FILE>")
diff --git a/libcxx/benchmarks/CMakeLists.txt b/libcxx/benchmarks/CMakeLists.txt
index 3dec6faea13a..387e013afeb6 100644
--- a/libcxx/benchmarks/CMakeLists.txt
+++ b/libcxx/benchmarks/CMakeLists.txt
@@ -183,6 +183,7 @@ set(BENCHMARK_TESTS
algorithms/make_heap_then_sort_heap.bench.cpp
algorithms/min.bench.cpp
algorithms/min_max_element.bench.cpp
+ algorithms/mismatch.bench.cpp
algorithms/pop_heap.bench.cpp
algorithms/pstl.stable_sort.bench.cpp
algorithms/push_heap.bench.cpp
diff --git a/libcxx/benchmarks/algorithms/mismatch.bench.cpp b/libcxx/benchmarks/algorithms/mismatch.bench.cpp
new file mode 100644
index 000000000000..06289068bb04
--- /dev/null
+++ b/libcxx/benchmarks/algorithms/mismatch.bench.cpp
@@ -0,0 +1,40 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <algorithm>
+#include <benchmark/benchmark.h>
+#include <random>
+
+void BenchmarkSizes(benchmark::internal::Benchmark* Benchmark) {
+ Benchmark->DenseRange(1, 8);
+ for (size_t i = 16; i != 1 << 20; i *= 2) {
+ Benchmark->Arg(i - 1);
+ Benchmark->Arg(i);
+ Benchmark->Arg(i + 1);
+ }
+}
+
+// TODO: Look into benchmarking aligned and unaligned memory explicitly
+// (currently things happen to be aligned because they are malloced that way)
+template <class T>
+static void bm_mismatch(benchmark::State& state) {
+ std::vector<T> vec1(state.range(), '1');
+ std::vector<T> vec2(state.range(), '1');
+ std::mt19937_64 rng(std::random_device{}());
+
+ vec1.back() = '2';
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(vec1);
+ benchmark::DoNotOptimize(std::mismatch(vec1.begin(), vec1.end(), vec2.begin()));
+ }
+}
+BENCHMARK(bm_mismatch<char>)->Apply(BenchmarkSizes);
+BENCHMARK(bm_mismatch<short>)->Apply(BenchmarkSizes);
+BENCHMARK(bm_mismatch<int>)->Apply(BenchmarkSizes);
+
+BENCHMARK_MAIN();
diff --git a/libcxx/docs/DesignDocs/NodiscardPolicy.rst b/libcxx/docs/DesignDocs/NodiscardPolicy.rst
new file mode 100644
index 000000000000..afbb18b0096d
--- /dev/null
+++ b/libcxx/docs/DesignDocs/NodiscardPolicy.rst
@@ -0,0 +1,42 @@
+===================================================
+Guidelines for applying ``[[nodiscard]]`` in libc++
+===================================================
+
+Libc++ adds ``[[nodiscard]]`` to functions in a lot of places. The standards
+committee has decided to not have a recommended practice where to put them, so
+this document lists where ``[[nodiscard]]`` should be applied in libc++.
+
+When should ``[[nodiscard]]`` be added to functions?
+====================================================
+
+``[[nodiscard]]`` should be applied to functions
+
+- where discarding the return value is most likely a correctness issue.
+ For example a locking constructor in ``unique_lock``.
+
+- where discarding the return value likely points to the user wanting to do
+ something different. For example ``vector::empty()``, which probably should
+ have been ``vector::clear()``.
+
+ This can help spotting bugs easily which otherwise may take a very long time
+ to find.
+
+- which return a constant. For example ``numeric_limits::min()``.
+- which only observe a value. For example ``string::size()``.
+
+ Code that discards values from these kinds of functions is dead code. It can
+ either be removed, or the programmer meant to do something different.
+
+- where discarding the value is most likely a misuse of the function. For
+ example ``find``.
+
+ This protects programmers from assuming too much about how the internals of
+ a function work, making code more robust in the presence of future
+ optimizations.
+
+What should be done when adding ``[[nodiscard]]`` to a function?
+================================================================
+
+Applications of ``[[nodiscard]]`` are code like any other code, so we aim to
+test them. This can be done with a ``.verify.cpp`` test. Many examples are
+available. Just look for tests with the suffix ``.nodiscard.verify.cpp``.
diff --git a/libcxx/docs/ReleaseNotes/19.rst b/libcxx/docs/ReleaseNotes/19.rst
index c70ae477fafc..dd39c1bbbc78 100644
--- a/libcxx/docs/ReleaseNotes/19.rst
+++ b/libcxx/docs/ReleaseNotes/19.rst
@@ -51,6 +51,8 @@ Improvements and New Features
- The performance of growing ``std::vector`` has been improved for trivially relocatable types.
- The performance of ``ranges::fill`` and ``ranges::fill_n`` has been improved for ``vector<bool>::iterator``\s,
resulting in a performance increase of up to 1400x.
+- The ``std::mismatch`` algorithm has been optimized for integral types, which can lead up to 40x performance
+ improvements.
Deprecations and Removals
-------------------------
@@ -70,7 +72,8 @@ Deprecations and Removals
- The ``_LIBCPP_ENABLE_NARROWING_CONVERSIONS_IN_VARIANT`` macro that changed the behavior for narrowing conversions
in ``std::variant`` has been removed in LLVM 19.
-- TODO: The ``_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS`` macro has been removed in LLVM 19.
+- The ``_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS`` and ``_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION``
+ macros have been removed in LLVM 19.
- TODO: The ``_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES`` and ``_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES`` macros have
been removed in LLVM 19. C++17 and C++20 removed features can still be re-enabled individually.
diff --git a/libcxx/docs/Status/Cxx23Issues.csv b/libcxx/docs/Status/Cxx23Issues.csv
index a10319235006..ebdc4a745c9f 100644
--- a/libcxx/docs/Status/Cxx23Issues.csv
+++ b/libcxx/docs/Status/Cxx23Issues.csv
@@ -77,7 +77,7 @@
`3523 <https://wg21.link/LWG3523>`__,"``iota_view::sentinel`` is not always ``iota_view``'s sentinel","June 2021","","","|ranges|"
`3526 <https://wg21.link/LWG3526>`__,"Return types of ``uses_allocator_construction_args`` unspecified","June 2021","",""
`3527 <https://wg21.link/LWG3527>`__,"``uses_allocator_construction_args`` handles rvalue pairs of rvalue references incorrectly","June 2021","",""
-`3528 <https://wg21.link/LWG3528>`__,"``make_from_tuple`` can perform (the equivalent of) a C-style cast","June 2021","",""
+`3528 <https://wg21.link/LWG3528>`__,"``make_from_tuple`` can perform (the equivalent of) a C-style cast","June 2021","|Complete|","19.0"
`3529 <https://wg21.link/LWG3529>`__,"``priority_queue(first, last)`` should construct ``c`` with ``(first, last)``","June 2021","|Complete|","14.0"
`3530 <https://wg21.link/LWG3530>`__,"``BUILTIN-PTR-MEOW`` should not opt the type out of syntactic checks","June 2021","",""
`3532 <https://wg21.link/LWG3532>`__,"``split_view<V, P>::inner-iterator<true>::operator++(int)`` should depend on ``Base``","June 2021","","","|ranges|"
@@ -295,7 +295,7 @@
"`3847 <https://wg21.link/LWG3847>`__","``ranges::to`` can still return views","February 2023","|Complete|","17.0","|ranges|"
"`3862 <https://wg21.link/LWG3862>`__","``basic_const_iterator``'s ``common_type`` specialization is underconstrained","February 2023","","",""
"`3865 <https://wg21.link/LWG3865>`__","Sorting a range of ``pairs``","February 2023","|Complete|","17.0","|ranges|"
-"`3869 <https://wg21.link/LWG3869>`__","Deprecate ``std::errc`` constants related to UNIX STREAMS","February 2023","","",""
+"`3869 <https://wg21.link/LWG3869>`__","Deprecate ``std::errc`` constants related to UNIX STREAMS","February 2023","|Complete|","19.0",""
"`3870 <https://wg21.link/LWG3870>`__","Remove ``voidify``","February 2023","","",""
"`3871 <https://wg21.link/LWG3871>`__","Adjust note about ``terminate``","February 2023","","",""
"`3872 <https://wg21.link/LWG3872>`__","``basic_const_iterator`` should have custom ``iter_move``","February 2023","","",""
diff --git a/libcxx/docs/UsingLibcxx.rst b/libcxx/docs/UsingLibcxx.rst
index 3b1be286c169..ac12b0b96950 100644
--- a/libcxx/docs/UsingLibcxx.rst
+++ b/libcxx/docs/UsingLibcxx.rst
@@ -244,18 +244,6 @@ C++20 Specific Configuration Macros
This macro is deprecated and will be removed in LLVM-19. Use the
individual macros listed below.
-**_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS**:
- This macro is used to re-enable redundant members of `allocator<T>`,
- including `pointer`, `reference`, `rebind`, `address`, `max_size`,
- `construct`, `destroy`, and the two-argument overload of `allocate`.
- This macro has been deprecated and will be removed in LLVM-19.
-
-**_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION**:
- This macro is used to re-enable the library-provided specializations of
- `allocator<void>` and `allocator<const void>`.
- Use it in conjunction with `_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS`
- to ensure that removed members of `allocator<void>` can be accessed.
-
**_LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS**:
This macro is used to re-enable the `argument_type`, `result_type`,
`first_argument_type`, and `second_argument_type` members of class
diff --git a/libcxx/docs/index.rst b/libcxx/docs/index.rst
index aa1bd4b83b26..2a7e47dfe6d8 100644
--- a/libcxx/docs/index.rst
+++ b/libcxx/docs/index.rst
@@ -189,6 +189,7 @@ Design Documents
DesignDocs/FeatureTestMacros
DesignDocs/FileTimeType
DesignDocs/HeaderRemovalPolicy
+ DesignDocs/NodiscardPolicy
DesignDocs/NoexceptPolicy
DesignDocs/PSTLIntegration
DesignDocs/ThreadingSupportAPI
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 6ed8d21d98a1..07b5e974eaf5 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -217,6 +217,7 @@ set(files
__algorithm/shift_right.h
__algorithm/shuffle.h
__algorithm/sift_down.h
+ __algorithm/simd_utils.h
__algorithm/sort.h
__algorithm/sort_heap.h
__algorithm/stable_partition.h
@@ -429,22 +430,27 @@ set(files
__fwd/array.h
__fwd/bit_reference.h
__fwd/complex.h
+ __fwd/deque.h
__fwd/format.h
__fwd/fstream.h
__fwd/functional.h
__fwd/ios.h
__fwd/istream.h
__fwd/mdspan.h
+ __fwd/memory.h
__fwd/memory_resource.h
__fwd/ostream.h
__fwd/pair.h
+ __fwd/queue.h
__fwd/span.h
__fwd/sstream.h
+ __fwd/stack.h
__fwd/streambuf.h
__fwd/string.h
__fwd/string_view.h
__fwd/subrange.h
__fwd/tuple.h
+ __fwd/vector.h
__hash_table
__ios/fpos.h
__iterator/access.h
diff --git a/libcxx/include/__algorithm/copy.h b/libcxx/include/__algorithm/copy.h
index 4c3815405af0..0890b895f540 100644
--- a/libcxx/include/__algorithm/copy.h
+++ b/libcxx/include/__algorithm/copy.h
@@ -32,7 +32,7 @@ template <class, class _InIter, class _Sent, class _OutIter>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> __copy(_InIter, _Sent, _OutIter);
template <class _AlgPolicy>
-struct __copy_loop {
+struct __copy_impl {
template <class _InIter, class _Sent, class _OutIter>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter>
operator()(_InIter __first, _Sent __last, _OutIter __result) const {
@@ -94,9 +94,7 @@ struct __copy_loop {
__local_first = _Traits::__begin(++__segment_iterator);
}
}
-};
-struct __copy_trivial {
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_copy_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>
@@ -108,7 +106,7 @@ struct __copy_trivial {
template <class _AlgPolicy, class _InIter, class _Sent, class _OutIter>
pair<_InIter, _OutIter> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14
__copy(_InIter __first, _Sent __last, _OutIter __result) {
- return std::__dispatch_copy_or_move<_AlgPolicy, __copy_loop<_AlgPolicy>, __copy_trivial>(
+ return std::__copy_move_unwrap_iters<__copy_impl<_AlgPolicy> >(
std::move(__first), std::move(__last), std::move(__result));
}
diff --git a/libcxx/include/__algorithm/copy_backward.h b/libcxx/include/__algorithm/copy_backward.h
index 591dd21e2b03..73dc846a975a 100644
--- a/libcxx/include/__algorithm/copy_backward.h
+++ b/libcxx/include/__algorithm/copy_backward.h
@@ -33,7 +33,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InIter, _OutIter>
__copy_backward(_InIter __first, _Sent __last, _OutIter __result);
template <class _AlgPolicy>
-struct __copy_backward_loop {
+struct __copy_backward_impl {
template <class _InIter, class _Sent, class _OutIter>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter>
operator()(_InIter __first, _Sent __last, _OutIter __result) const {
@@ -104,9 +104,7 @@ struct __copy_backward_loop {
__local_last = _Traits::__end(__segment_iterator);
}
}
-};
-struct __copy_backward_trivial {
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_copy_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>
@@ -118,7 +116,7 @@ struct __copy_backward_trivial {
template <class _AlgPolicy, class _BidirectionalIterator1, class _Sentinel, class _BidirectionalIterator2>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_BidirectionalIterator1, _BidirectionalIterator2>
__copy_backward(_BidirectionalIterator1 __first, _Sentinel __last, _BidirectionalIterator2 __result) {
- return std::__dispatch_copy_or_move<_AlgPolicy, __copy_backward_loop<_AlgPolicy>, __copy_backward_trivial>(
+ return std::__copy_move_unwrap_iters<__copy_backward_impl<_AlgPolicy> >(
std::move(__first), std::move(__last), std::move(__result));
}
diff --git a/libcxx/include/__algorithm/copy_move_common.h b/libcxx/include/__algorithm/copy_move_common.h
index 845967b05038..12a26c6d6a64 100644
--- a/libcxx/include/__algorithm/copy_move_common.h
+++ b/libcxx/include/__algorithm/copy_move_common.h
@@ -81,30 +81,17 @@ __copy_backward_trivial_impl(_In* __first, _In* __last, _Out* __result) {
// Iterator unwrapping and dispatching to the correct overload.
-template <class _F1, class _F2>
-struct __overload : _F1, _F2 {
- using _F1::operator();
- using _F2::operator();
-};
-
-template <class _InIter, class _Sent, class _OutIter, class = void>
-struct __can_rewrap : false_type {};
-
-template <class _InIter, class _Sent, class _OutIter>
-struct __can_rewrap<_InIter,
- _Sent,
- _OutIter,
- // Note that sentinels are always copy-constructible.
- __enable_if_t< is_copy_constructible<_InIter>::value && is_copy_constructible<_OutIter>::value > >
- : true_type {};
+template <class _InIter, class _OutIter>
+struct __can_rewrap
+ : integral_constant<bool, is_copy_constructible<_InIter>::value && is_copy_constructible<_OutIter>::value> {};
template <class _Algorithm,
class _InIter,
class _Sent,
class _OutIter,
- __enable_if_t<__can_rewrap<_InIter, _Sent, _OutIter>::value, int> = 0>
+ __enable_if_t<__can_rewrap<_InIter, _OutIter>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter>
-__unwrap_and_dispatch(_InIter __first, _Sent __last, _OutIter __out_first) {
+__copy_move_unwrap_iters(_InIter __first, _Sent __last, _OutIter __out_first) {
auto __range = std::__unwrap_range(__first, std::move(__last));
auto __result = _Algorithm()(std::move(__range.first), std::move(__range.second), std::__unwrap_iter(__out_first));
return std::make_pair(std::__rewrap_range<_Sent>(std::move(__first), std::move(__result.first)),
@@ -115,24 +102,12 @@ template <class _Algorithm,
class _InIter,
class _Sent,
class _OutIter,
- __enable_if_t<!__can_rewrap<_InIter, _Sent, _OutIter>::value, int> = 0>
+ __enable_if_t<!__can_rewrap<_InIter, _OutIter>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter>
-__unwrap_and_dispatch(_InIter __first, _Sent __last, _OutIter __out_first) {
+__copy_move_unwrap_iters(_InIter __first, _Sent __last, _OutIter __out_first) {
return _Algorithm()(std::move(__first), std::move(__last), std::move(__out_first));
}
-template <class _AlgPolicy,
- class _NaiveAlgorithm,
- class _OptimizedAlgorithm,
- class _InIter,
- class _Sent,
- class _OutIter>
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter>
-__dispatch_copy_or_move(_InIter __first, _Sent __last, _OutIter __out_first) {
- using _Algorithm = __overload<_NaiveAlgorithm, _OptimizedAlgorithm>;
- return std::__unwrap_and_dispatch<_Algorithm>(std::move(__first), std::move(__last), std::move(__out_first));
-}
-
_LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
diff --git a/libcxx/include/__algorithm/mismatch.h b/libcxx/include/__algorithm/mismatch.h
index d345b6048a7e..d933a84cada9 100644
--- a/libcxx/include/__algorithm/mismatch.h
+++ b/libcxx/include/__algorithm/mismatch.h
@@ -11,23 +11,120 @@
#define _LIBCPP___ALGORITHM_MISMATCH_H
#include <__algorithm/comp.h>
+#include <__algorithm/simd_utils.h>
+#include <__algorithm/unwrap_iter.h>
#include <__config>
-#include <__iterator/iterator_traits.h>
+#include <__functional/identity.h>
+#include <__type_traits/invoke.h>
+#include <__type_traits/is_constant_evaluated.h>
+#include <__type_traits/is_equality_comparable.h>
+#include <__type_traits/operation_traits.h>
+#include <__utility/move.h>
#include <__utility/pair.h>
+#include <__utility/unreachable.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
_LIBCPP_BEGIN_NAMESPACE_STD
+template <class _Iter1, class _Sent1, class _Iter2, class _Pred, class _Proj1, class _Proj2>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2>
+__mismatch_loop(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) {
+ while (__first1 != __last1) {
+ if (!std::__invoke(__pred, std::__invoke(__proj1, *__first1), std::__invoke(__proj2, *__first2)))
+ break;
+ ++__first1;
+ ++__first2;
+ }
+ return std::make_pair(std::move(__first1), std::move(__first2));
+}
+
+template <class _Iter1, class _Sent1, class _Iter2, class _Pred, class _Proj1, class _Proj2>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2>
+__mismatch(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) {
+ return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj1, __proj2);
+}
+
+#if _LIBCPP_VECTORIZE_ALGORITHMS
+
+template <class _Tp,
+ class _Pred,
+ class _Proj1,
+ class _Proj2,
+ __enable_if_t<is_integral<_Tp>::value && __desugars_to<__equal_tag, _Pred, _Tp, _Tp>::value &&
+ __is_identity<_Proj1>::value && __is_identity<_Proj2>::value,
+ int> = 0>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*>
+__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) {
+ constexpr size_t __unroll_count = 4;
+ constexpr size_t __vec_size = __native_vector_size<_Tp>;
+ using __vec = __simd_vector<_Tp, __vec_size>;
+
+ if (!__libcpp_is_constant_evaluated()) {
+ auto __orig_first1 = __first1;
+ auto __last2 = __first2 + (__last1 - __first1);
+ while (static_cast<size_t>(__last1 - __first1) >= __unroll_count * __vec_size) [[__unlikely__]] {
+ __vec __lhs[__unroll_count];
+ __vec __rhs[__unroll_count];
+
+ for (size_t __i = 0; __i != __unroll_count; ++__i) {
+ __lhs[__i] = std::__load_vector<__vec>(__first1 + __i * __vec_size);
+ __rhs[__i] = std::__load_vector<__vec>(__first2 + __i * __vec_size);
+ }
+
+ for (size_t __i = 0; __i != __unroll_count; ++__i) {
+ if (auto __cmp_res = __lhs[__i] == __rhs[__i]; !std::__all_of(__cmp_res)) {
+ auto __offset = __i * __vec_size + std::__find_first_not_set(__cmp_res);
+ return {__first1 + __offset, __first2 + __offset};
+ }
+ }
+
+ __first1 += __unroll_count * __vec_size;
+ __first2 += __unroll_count * __vec_size;
+ }
+
+ // check the remaining 0-3 vectors
+ while (static_cast<size_t>(__last1 - __first1) >= __vec_size) {
+ if (auto __cmp_res = std::__load_vector<__vec>(__first1) == std::__load_vector<__vec>(__first2);
+ !std::__all_of(__cmp_res)) {
+ auto __offset = std::__find_first_not_set(__cmp_res);
+ return {__first1 + __offset, __first2 + __offset};
+ }
+ __first1 += __vec_size;
+ __first2 += __vec_size;
+ }
+
+ if (__last1 - __first1 == 0)
+ return {__first1, __first2};
+
+ // Check if we can load elements in front of the current pointer. If that's the case load a vector at
+ // (last - vector_size) to check the remaining elements
+ if (static_cast<size_t>(__first1 - __orig_first1) >= __vec_size) {
+ __first1 = __last1 - __vec_size;
+ __first2 = __last2 - __vec_size;
+ auto __offset =
+ std::__find_first_not_set(std::__load_vector<__vec>(__first1) == std::__load_vector<__vec>(__first2));
+ return {__first1 + __offset, __first2 + __offset};
+ } // else loop over the elements individually
+ }
+
+ return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj1, __proj2);
+}
+
+#endif // _LIBCPP_VECTORIZE_ALGORITHMS
+
template <class _InputIterator1, class _InputIterator2, class _BinaryPredicate>
_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2>
mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate __pred) {
- for (; __first1 != __last1; ++__first1, (void)++__first2)
- if (!__pred(*__first1, *__first2))
- break;
- return pair<_InputIterator1, _InputIterator2>(__first1, __first2);
+ __identity __proj;
+ auto __res = std::__mismatch(
+ std::__unwrap_iter(__first1), std::__unwrap_iter(__last1), std::__unwrap_iter(__first2), __pred, __proj, __proj);
+ return std::make_pair(std::__rewrap_iter(__first1, __res.first), std::__rewrap_iter(__first2, __res.second));
}
template <class _InputIterator1, class _InputIterator2>
@@ -59,4 +156,6 @@ mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __fi
_LIBCPP_END_NAMESPACE_STD
+_LIBCPP_POP_MACROS
+
#endif // _LIBCPP___ALGORITHM_MISMATCH_H
diff --git a/libcxx/include/__algorithm/move.h b/libcxx/include/__algorithm/move.h
index bf574b527409..1716d43e2a61 100644
--- a/libcxx/include/__algorithm/move.h
+++ b/libcxx/include/__algorithm/move.h
@@ -34,7 +34,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIte
__move(_InIter __first, _Sent __last, _OutIter __result);
template <class _AlgPolicy>
-struct __move_loop {
+struct __move_impl {
template <class _InIter, class _Sent, class _OutIter>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter>
operator()(_InIter __first, _Sent __last, _OutIter __result) const {
@@ -95,9 +95,7 @@ struct __move_loop {
__local_first = _Traits::__begin(++__segment_iterator);
}
}
-};
-struct __move_trivial {
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_move_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>
@@ -109,7 +107,7 @@ struct __move_trivial {
template <class _AlgPolicy, class _InIter, class _Sent, class _OutIter>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter>
__move(_InIter __first, _Sent __last, _OutIter __result) {
- return std::__dispatch_copy_or_move<_AlgPolicy, __move_loop<_AlgPolicy>, __move_trivial>(
+ return std::__copy_move_unwrap_iters<__move_impl<_AlgPolicy> >(
std::move(__first), std::move(__last), std::move(__result));
}
diff --git a/libcxx/include/__algorithm/move_backward.h b/libcxx/include/__algorithm/move_backward.h
index 6bb7c91d66c7..4beb7bdbaac0 100644
--- a/libcxx/include/__algorithm/move_backward.h
+++ b/libcxx/include/__algorithm/move_backward.h
@@ -33,7 +33,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_BidirectionalIterator1
__move_backward(_BidirectionalIterator1 __first, _Sentinel __last, _BidirectionalIterator2 __result);
template <class _AlgPolicy>
-struct __move_backward_loop {
+struct __move_backward_impl {
template <class _InIter, class _Sent, class _OutIter>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter>
operator()(_InIter __first, _Sent __last, _OutIter __result) const {
@@ -104,9 +104,7 @@ struct __move_backward_loop {
__local_last = _Traits::__end(--__segment_iterator);
}
}
-};
-struct __move_backward_trivial {
// At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer.
template <class _In, class _Out, __enable_if_t<__can_lower_move_assignment_to_memmove<_In, _Out>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*>
@@ -122,7 +120,7 @@ __move_backward(_BidirectionalIterator1 __first, _Sentinel __last, _Bidirectiona
std::is_copy_constructible<_BidirectionalIterator1>::value,
"Iterators must be copy constructible.");
- return std::__dispatch_copy_or_move<_AlgPolicy, __move_backward_loop<_AlgPolicy>, __move_backward_trivial>(
+ return std::__copy_move_unwrap_iters<__move_backward_impl<_AlgPolicy> >(
std::move(__first), std::move(__last), std::move(__result));
}
diff --git a/libcxx/include/__algorithm/ranges_ends_with.h b/libcxx/include/__algorithm/ranges_ends_with.h
index c2a3cae9f3b1..bb01918326b8 100644
--- a/libcxx/include/__algorithm/ranges_ends_with.h
+++ b/libcxx/include/__algorithm/ranges_ends_with.h
@@ -39,7 +39,7 @@ namespace ranges {
namespace __ends_with {
struct __fn {
template <class _Iter1, class _Sent1, class _Iter2, class _Sent2, class _Pred, class _Proj1, class _Proj2>
- static _LIBCPP_HIDE_FROM_ABI constexpr bool __ends_with_fn_impl_bidirectional(
+ _LIBCPP_HIDE_FROM_ABI static constexpr bool __ends_with_fn_impl_bidirectional(
_Iter1 __first1,
_Sent1 __last1,
_Iter2 __first2,
@@ -56,7 +56,7 @@ struct __fn {
}
template <class _Iter1, class _Sent1, class _Iter2, class _Sent2, class _Pred, class _Proj1, class _Proj2>
- static _LIBCPP_HIDE_FROM_ABI constexpr bool __ends_with_fn_impl(
+ _LIBCPP_HIDE_FROM_ABI static constexpr bool __ends_with_fn_impl(
_Iter1 __first1,
_Sent1 __last1,
_Iter2 __first2,
@@ -65,7 +65,7 @@ struct __fn {
_Proj1& __proj1,
_Proj2& __proj2) {
if constexpr (std::bidirectional_iterator<_Sent1> && std::bidirectional_iterator<_Sent2> &&
- (!std::random_access_iterator<_Sent1>)&&(!std::random_access_iterator<_Sent2>)) {
+ (!std::random_access_iterator<_Sent1>) && (!std::random_access_iterator<_Sent2>)) {
return __ends_with_fn_impl_bidirectional(__first1, __last1, __first2, __last2, __pred, __proj1, __proj2);
} else {
diff --git a/libcxx/include/__algorithm/ranges_starts_with.h b/libcxx/include/__algorithm/ranges_starts_with.h
index 90e184aa9bcc..7ba8af13a8d1 100644
--- a/libcxx/include/__algorithm/ranges_starts_with.h
+++ b/libcxx/include/__algorithm/ranges_starts_with.h
@@ -42,14 +42,14 @@ struct __fn {
class _Proj1 = identity,
class _Proj2 = identity>
requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2>
- _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr bool operator()(
_Iter1 __first1,
_Sent1 __last1,
_Iter2 __first2,
_Sent2 __last2,
_Pred __pred = {},
_Proj1 __proj1 = {},
- _Proj2 __proj2 = {}) const {
+ _Proj2 __proj2 = {}) {
return __mismatch::__fn::__go(
std::move(__first1),
std::move(__last1),
@@ -67,8 +67,8 @@ struct __fn {
class _Proj1 = identity,
class _Proj2 = identity>
requires indirectly_comparable<iterator_t<_Range1>, iterator_t<_Range2>, _Pred, _Proj1, _Proj2>
- _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(
- _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const {
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr bool
+ operator()(_Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) {
return __mismatch::__fn::__go(
ranges::begin(__range1),
ranges::end(__range1),
diff --git a/libcxx/include/__algorithm/simd_utils.h b/libcxx/include/__algorithm/simd_utils.h
new file mode 100644
index 000000000000..1aedb3db010f
--- /dev/null
+++ b/libcxx/include/__algorithm/simd_utils.h
@@ -0,0 +1,123 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ALGORITHM_SIMD_UTILS_H
+#define _LIBCPP___ALGORITHM_SIMD_UTILS_H
+
+#include <__bit/bit_cast.h>
+#include <__bit/countr.h>
+#include <__config>
+#include <__type_traits/is_arithmetic.h>
+#include <__type_traits/is_same.h>
+#include <__utility/integer_sequence.h>
+#include <cstddef>
+#include <cstdint>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+// TODO: Find out how altivec changes things and allow vectorizations there too.
+#if _LIBCPP_STD_VER >= 14 && defined(_LIBCPP_CLANG_VER) && _LIBCPP_CLANG_VER >= 1700 && !defined(__ALTIVEC__)
+# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 1
+#else
+# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 0
+#endif
+
+#if _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS && !defined(__OPTIMIZE_SIZE__)
+# define _LIBCPP_VECTORIZE_ALGORITHMS 1
+#else
+# define _LIBCPP_VECTORIZE_ALGORITHMS 0
+#endif
+
+#if _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// This isn't specialized for 64 byte vectors on purpose. They have the potential to significantly reduce performance
+// in mixed simd/non-simd workloads and don't provide any performance improvement for currently vectorized algorithms
+// as far as benchmarks are concerned.
+# if defined(__AVX__)
+template <class _Tp>
+inline constexpr size_t __native_vector_size = 32 / sizeof(_Tp);
+# elif defined(__SSE__) || defined(__ARM_NEON__)
+template <class _Tp>
+inline constexpr size_t __native_vector_size = 16 / sizeof(_Tp);
+# elif defined(__MMX__)
+template <class _Tp>
+inline constexpr size_t __native_vector_size = 8 / sizeof(_Tp);
+# else
+template <class _Tp>
+inline constexpr size_t __native_vector_size = 1;
+# endif
+
+template <class _ArithmeticT, size_t _Np>
+using __simd_vector __attribute__((__ext_vector_type__(_Np))) = _ArithmeticT;
+
+template <class _VecT>
+inline constexpr size_t __simd_vector_size_v = []<bool _False = false>() -> size_t {
+ static_assert(_False, "Not a vector!");
+}();
+
+template <class _Tp, size_t _Np>
+inline constexpr size_t __simd_vector_size_v<__simd_vector<_Tp, _Np>> = _Np;
+
+template <class _Tp, size_t _Np>
+_LIBCPP_HIDE_FROM_ABI _Tp __simd_vector_underlying_type_impl(__simd_vector<_Tp, _Np>) {
+ return _Tp{};
+}
+
+template <class _VecT>
+using __simd_vector_underlying_type_t = decltype(std::__simd_vector_underlying_type_impl(_VecT{}));
+
+// This isn't inlined without always_inline when loading chars.
+template <class _VecT, class _Tp>
+_LIBCPP_NODISCARD _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI _VecT __load_vector(const _Tp* __ptr) noexcept {
+ return [=]<size_t... _Indices>(index_sequence<_Indices...>) _LIBCPP_ALWAYS_INLINE noexcept {
+ return _VecT{__ptr[_Indices]...};
+ }(make_index_sequence<__simd_vector_size_v<_VecT>>{});
+}
+
+template <class _Tp, size_t _Np>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool __all_of(__simd_vector<_Tp, _Np> __vec) noexcept {
+ return __builtin_reduce_and(__builtin_convertvector(__vec, __simd_vector<bool, _Np>));
+}
+
+template <class _Tp, size_t _Np>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI size_t __find_first_set(__simd_vector<_Tp, _Np> __vec) noexcept {
+ using __mask_vec = __simd_vector<bool, _Np>;
+
+ // This has MSan disabled due to https://github.com/llvm/llvm-project/issues/85876
+ auto __impl = [&]<class _MaskT>(_MaskT) _LIBCPP_NO_SANITIZE("memory") noexcept {
+ return std::__countr_zero(__builtin_bit_cast(_MaskT, __builtin_convertvector(__vec, __mask_vec)));
+ };
+
+ if constexpr (sizeof(__mask_vec) == sizeof(uint8_t)) {
+ return __impl(uint8_t{});
+ } else if constexpr (sizeof(__mask_vec) == sizeof(uint16_t)) {
+ return __impl(uint16_t{});
+ } else if constexpr (sizeof(__mask_vec) == sizeof(uint32_t)) {
+ return __impl(uint32_t{});
+ } else if constexpr (sizeof(__mask_vec) == sizeof(uint64_t)) {
+ return __impl(uint64_t{});
+ } else {
+ static_assert(sizeof(__mask_vec) == 0, "unexpected required size for mask integer type");
+ return 0;
+ }
+}
+
+template <class _Tp, size_t _Np>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI size_t __find_first_not_set(__simd_vector<_Tp, _Np> __vec) noexcept {
+ return std::__find_first_set(~__vec);
+}
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS
+
+#endif // _LIBCPP___ALGORITHM_SIMD_UTILS_H
diff --git a/libcxx/include/__bit/bit_cast.h b/libcxx/include/__bit/bit_cast.h
index f20b39ae748b..6298810f3733 100644
--- a/libcxx/include/__bit/bit_cast.h
+++ b/libcxx/include/__bit/bit_cast.h
@@ -19,6 +19,15 @@
_LIBCPP_BEGIN_NAMESPACE_STD
+#ifndef _LIBCPP_CXX03_LANG
+
+template <class _ToType, class _FromType>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr _ToType __bit_cast(const _FromType& __from) noexcept {
+ return __builtin_bit_cast(_ToType, __from);
+}
+
+#endif // _LIBCPP_CXX03_LANG
+
#if _LIBCPP_STD_VER >= 20
template <class _ToType, class _FromType>
diff --git a/libcxx/include/__bit/countr.h b/libcxx/include/__bit/countr.h
index 0cc679f87a99..b6b3ac52ca4e 100644
--- a/libcxx/include/__bit/countr.h
+++ b/libcxx/include/__bit/countr.h
@@ -35,10 +35,8 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_ct
return __builtin_ctzll(__x);
}
-#if _LIBCPP_STD_VER >= 20
-
-template <__libcpp_unsigned_integer _Tp>
-_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) noexcept {
+template <class _Tp>
+_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countr_zero(_Tp __t) _NOEXCEPT {
if (__t == 0)
return numeric_limits<_Tp>::digits;
@@ -59,6 +57,13 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) n
}
}
+#if _LIBCPP_STD_VER >= 20
+
+template <__libcpp_unsigned_integer _Tp>
+_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) noexcept {
+ return std::__countr_zero(__t);
+}
+
template <__libcpp_unsigned_integer _Tp>
_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept {
return __t != numeric_limits<_Tp>::max() ? std::countr_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits;
diff --git a/libcxx/include/__chrono/tzdb_list.h b/libcxx/include/__chrono/tzdb_list.h
index 112e04ff2ee6..e8aaf31e3631 100644
--- a/libcxx/include/__chrono/tzdb_list.h
+++ b/libcxx/include/__chrono/tzdb_list.h
@@ -52,19 +52,29 @@ public:
using const_iterator = forward_list<tzdb>::const_iterator;
- _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const tzdb& front() const noexcept;
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI const tzdb& front() const noexcept { return __front(); }
- _LIBCPP_EXPORTED_FROM_ABI const_iterator erase_after(const_iterator __p);
+ _LIBCPP_HIDE_FROM_ABI const_iterator erase_after(const_iterator __p) { return __erase_after(__p); }
- _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator begin() const noexcept;
- _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator end() const noexcept;
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI const_iterator begin() const noexcept { return __begin(); }
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI const_iterator end() const noexcept { return __end(); }
- _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator cbegin() const noexcept;
- _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator cend() const noexcept;
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const noexcept { return __cbegin(); }
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI const_iterator cend() const noexcept { return __cend(); }
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI __impl& __implementation() { return *__impl_; }
private:
+ [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const tzdb& __front() const noexcept;
+
+ _LIBCPP_EXPORTED_FROM_ABI const_iterator __erase_after(const_iterator __p);
+
+ [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __begin() const noexcept;
+ [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __end() const noexcept;
+
+ [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __cbegin() const noexcept;
+ [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __cend() const noexcept;
+
__impl* __impl_;
};
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 11e13e0c2498..8550b1da4a27 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -805,6 +805,12 @@ typedef __char32_t char32_t;
// the implementation of a virtual function in an ABI-incompatible way in the first place,
// since that would be an ABI break anyway. Hence, the lack of ABI tag should not be noticeable.
//
+// The macro can be applied to record and enum types. When the tagged type is nested in
+// a record this "parent" record needs to have the macro too. Another use case for applying
+// this macro to records and unions is to apply an ABI tag to inline constexpr variables.
+// This can be useful for inline variables that are implementation details which are expected
+// to change in the future.
+//
// TODO: We provide a escape hatch with _LIBCPP_NO_ABI_TAG for folks who want to avoid increasing
// the length of symbols with an ABI tag. In practice, we should remove the escape hatch and
// use compression mangling instead, see https://github.com/itanium-cxx-abi/cxx-abi/issues/70.
@@ -838,21 +844,33 @@ typedef __char32_t char32_t;
# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++2b-extensions")
#endif
+// Clang modules take a significant compile time hit when pushing and popping diagnostics.
+// Since all the headers are marked as system headers in the modulemap, we can simply disable this
+// pushing and popping when building with clang modules.
+# if !__has_feature(modules)
+# define _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS \
+ _LIBCPP_DIAGNOSTIC_PUSH \
+ _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++11-extensions") \
+ _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \
+ _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \
+ _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \
+ _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION \
+ _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \
+ _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \
+ _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \
+ _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++23-extensions")
+# define _LIBCPP_POP_EXTENSION_DIAGNOSTICS _LIBCPP_DIAGNOSTIC_POP
+# else
+# define _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS
+# define _LIBCPP_POP_EXTENSION_DIAGNOSTICS
+# endif
+
// Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect.
// clang-format off
-# define _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_DIAGNOSTIC_PUSH \
- _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++11-extensions") \
- _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \
- _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \
- _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \
- _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION \
- _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \
- _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \
- _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \
- _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++23-extensions") \
+# define _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS \
namespace _LIBCPP_TYPE_VISIBILITY_DEFAULT std { \
inline namespace _LIBCPP_ABI_NAMESPACE {
-# define _LIBCPP_END_NAMESPACE_STD }} _LIBCPP_DIAGNOSTIC_POP
+# define _LIBCPP_END_NAMESPACE_STD }} _LIBCPP_POP_EXTENSION_DIAGNOSTICS
# define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM _LIBCPP_BEGIN_NAMESPACE_STD \
inline namespace __fs { namespace filesystem {
@@ -1238,8 +1256,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c
# endif // _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES
# if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES)
-# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION
# define _LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS
# define _LIBCPP_ENABLE_CXX20_REMOVED_NEGATORS
# define _LIBCPP_ENABLE_CXX20_REMOVED_RAW_STORAGE_ITERATOR
diff --git a/libcxx/include/__format/container_adaptor.h b/libcxx/include/__format/container_adaptor.h
index ec806ef16bf5..9f49ca03bf4f 100644
--- a/libcxx/include/__format/container_adaptor.h
+++ b/libcxx/include/__format/container_adaptor.h
@@ -18,11 +18,11 @@
#include <__format/concepts.h>
#include <__format/formatter.h>
#include <__format/range_default_formatter.h>
+#include <__fwd/queue.h>
+#include <__fwd/stack.h>
#include <__ranges/ref_view.h>
#include <__type_traits/is_const.h>
#include <__type_traits/maybe_const.h>
-#include <queue>
-#include <stack>
_LIBCPP_BEGIN_NAMESPACE_STD
diff --git a/libcxx/include/__format/escaped_output_table.h b/libcxx/include/__format/escaped_output_table.h
index 495a2fbc7b03..e9f4a6e4f63f 100644
--- a/libcxx/include/__format/escaped_output_table.h
+++ b/libcxx/include/__format/escaped_output_table.h
@@ -110,7 +110,7 @@ namespace __escaped_output_table {
/// - bits [0, 10] The size of the range, allowing 2048 elements.
/// - bits [11, 31] The lower bound code point of the range. The upper bound of
/// the range is lower bound + size.
-inline constexpr uint32_t __entries[893] = {
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[893] = {
0x00000020,
0x0003f821,
0x00056800,
diff --git a/libcxx/include/__format/extended_grapheme_cluster_table.h b/libcxx/include/__format/extended_grapheme_cluster_table.h
index 9616dfecd604..48581d8a5dde 100644
--- a/libcxx/include/__format/extended_grapheme_cluster_table.h
+++ b/libcxx/include/__format/extended_grapheme_cluster_table.h
@@ -125,7 +125,7 @@ enum class __property : uint8_t {
/// following benchmark.
/// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp
// clang-format off
-inline constexpr uint32_t __entries[1496] = {
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[1496] = {
0x00000091,
0x00005005,
0x00005811,
diff --git a/libcxx/include/__format/parser_std_format_spec.h b/libcxx/include/__format/parser_std_format_spec.h
index a4b47abff40d..9818f37b518e 100644
--- a/libcxx/include/__format/parser_std_format_spec.h
+++ b/libcxx/include/__format/parser_std_format_spec.h
@@ -129,8 +129,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr uint32_t __substitute_arg_id(basic_format_arg<_C
///
/// They default to false so when a new field is added it needs to be opted in
/// explicitly.
-// TODO FMT Use an ABI tag for this struct.
-struct __fields {
+struct _LIBCPP_HIDE_FROM_ABI __fields {
uint16_t __sign_ : 1 {false};
uint16_t __alternate_form_ : 1 {false};
uint16_t __zero_padding_ : 1 {false};
diff --git a/libcxx/include/__format/width_estimation_table.h b/libcxx/include/__format/width_estimation_table.h
index cfb488975d57..6309483367f1 100644
--- a/libcxx/include/__format/width_estimation_table.h
+++ b/libcxx/include/__format/width_estimation_table.h
@@ -119,7 +119,7 @@ namespace __width_estimation_table {
/// - bits [0, 13] The size of the range, allowing 16384 elements.
/// - bits [14, 31] The lower bound code point of the range. The upper bound of
/// the range is lower bound + size.
-inline constexpr uint32_t __entries[108] = {
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[108] = {
0x0440005f /* 00001100 - 0000115f [ 96] */, //
0x08c68001 /* 0000231a - 0000231b [ 2] */, //
0x08ca4001 /* 00002329 - 0000232a [ 2] */, //
diff --git a/libcxx/include/__fwd/deque.h b/libcxx/include/__fwd/deque.h
new file mode 100644
index 000000000000..fd2fb5bb4b8e
--- /dev/null
+++ b/libcxx/include/__fwd/deque.h
@@ -0,0 +1,26 @@
+//===---------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FWD_DEQUE_H
+#define _LIBCPP___FWD_DEQUE_H
+
+#include <__config>
+#include <__fwd/memory.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp, class _Allocator = allocator<_Tp> >
+class _LIBCPP_TEMPLATE_VIS deque;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___FWD_DEQUE_H
diff --git a/libcxx/include/__fwd/memory.h b/libcxx/include/__fwd/memory.h
new file mode 100644
index 000000000000..b9e151855ad7
--- /dev/null
+++ b/libcxx/include/__fwd/memory.h
@@ -0,0 +1,25 @@
+//===---------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FWD_MEMORY_H
+#define _LIBCPP___FWD_MEMORY_H
+
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp>
+class _LIBCPP_TEMPLATE_VIS allocator;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___FWD_MEMORY_H
diff --git a/libcxx/include/__fwd/queue.h b/libcxx/include/__fwd/queue.h
new file mode 100644
index 000000000000..50d99ad9c29f
--- /dev/null
+++ b/libcxx/include/__fwd/queue.h
@@ -0,0 +1,31 @@
+//===---------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FWD_QUEUE_H
+#define _LIBCPP___FWD_QUEUE_H
+
+#include <__config>
+#include <__functional/operations.h>
+#include <__fwd/deque.h>
+#include <__fwd/vector.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp, class _Container = deque<_Tp> >
+class _LIBCPP_TEMPLATE_VIS queue;
+
+template <class _Tp, class _Container = vector<_Tp>, class _Compare = less<typename _Container::value_type> >
+class _LIBCPP_TEMPLATE_VIS priority_queue;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___FWD_QUEUE_H
diff --git a/libcxx/include/__fwd/sstream.h b/libcxx/include/__fwd/sstream.h
index e2d46fbe1d9b..39a9c3faf1f8 100644
--- a/libcxx/include/__fwd/sstream.h
+++ b/libcxx/include/__fwd/sstream.h
@@ -10,6 +10,7 @@
#define _LIBCPP___FWD_SSTREAM_H
#include <__config>
+#include <__fwd/memory.h>
#include <__fwd/string.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/libcxx/include/__fwd/stack.h b/libcxx/include/__fwd/stack.h
new file mode 100644
index 000000000000..7dab6c1a4f4e
--- /dev/null
+++ b/libcxx/include/__fwd/stack.h
@@ -0,0 +1,26 @@
+//===---------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FWD_STACK_H
+#define _LIBCPP___FWD_STACK_H
+
+#include <__config>
+#include <__fwd/deque.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp, class _Container = deque<_Tp> >
+class _LIBCPP_TEMPLATE_VIS stack;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___FWD_STACK_H
diff --git a/libcxx/include/__fwd/string.h b/libcxx/include/__fwd/string.h
index 032132374de5..320c4e4c8183 100644
--- a/libcxx/include/__fwd/string.h
+++ b/libcxx/include/__fwd/string.h
@@ -11,6 +11,7 @@
#include <__availability>
#include <__config>
+#include <__fwd/memory.h>
#include <__fwd/memory_resource.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -39,9 +40,6 @@ template <>
struct char_traits<wchar_t>;
#endif
-template <class _Tp>
-class _LIBCPP_TEMPLATE_VIS allocator;
-
template <class _CharT, class _Traits = char_traits<_CharT>, class _Allocator = allocator<_CharT> >
class _LIBCPP_TEMPLATE_VIS basic_string;
diff --git a/libcxx/include/__fwd/vector.h b/libcxx/include/__fwd/vector.h
new file mode 100644
index 000000000000..c9cc96137449
--- /dev/null
+++ b/libcxx/include/__fwd/vector.h
@@ -0,0 +1,26 @@
+//===---------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___FWD_VECTOR_H
+#define _LIBCPP___FWD_VECTOR_H
+
+#include <__config>
+#include <__fwd/memory.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp, class _Alloc = allocator<_Tp> >
+class _LIBCPP_TEMPLATE_VIS vector;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___FWD_VECTOR_H
diff --git a/libcxx/include/__memory/allocator.h b/libcxx/include/__memory/allocator.h
index 4e6303914c38..26e5d4978b15 100644
--- a/libcxx/include/__memory/allocator.h
+++ b/libcxx/include/__memory/allocator.h
@@ -31,18 +31,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Tp>
class allocator;
-#if defined(_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS)
-# pragma clang deprecated( \
- _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS, \
- "_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS is deprecated in LLVM 18 and will be removed in LLVM 19")
-#endif
-
-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION)
+#if _LIBCPP_STD_VER <= 17
// These specializations shouldn't be marked _LIBCPP_DEPRECATED_IN_CXX17.
// Specializing allocator<void> is deprecated, but not using it.
template <>
class _LIBCPP_TEMPLATE_VIS allocator<void> {
-# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS)
+# if _LIBCPP_STD_VER <= 17
public:
_LIBCPP_DEPRECATED_IN_CXX17 typedef void* pointer;
@@ -58,7 +52,7 @@ public:
template <>
class _LIBCPP_TEMPLATE_VIS allocator<const void> {
-# if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS)
+# if _LIBCPP_STD_VER <= 17
public:
_LIBCPP_DEPRECATED_IN_CXX17 typedef const void* pointer;
@@ -141,7 +135,7 @@ public:
}
// C++20 Removed members
-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS)
+#if _LIBCPP_STD_VER <= 17
_LIBCPP_DEPRECATED_IN_CXX17 typedef _Tp* pointer;
_LIBCPP_DEPRECATED_IN_CXX17 typedef const _Tp* const_pointer;
_LIBCPP_DEPRECATED_IN_CXX17 typedef _Tp& reference;
@@ -221,7 +215,7 @@ public:
}
// C++20 Removed members
-#if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS)
+#if _LIBCPP_STD_VER <= 17
_LIBCPP_DEPRECATED_IN_CXX17 typedef const _Tp* pointer;
_LIBCPP_DEPRECATED_IN_CXX17 typedef const _Tp* const_pointer;
_LIBCPP_DEPRECATED_IN_CXX17 typedef const _Tp& reference;
diff --git a/libcxx/include/__ranges/as_rvalue_view.h b/libcxx/include/__ranges/as_rvalue_view.h
index 295aa94ed9fe..2fc272e798d6 100644
--- a/libcxx/include/__ranges/as_rvalue_view.h
+++ b/libcxx/include/__ranges/as_rvalue_view.h
@@ -111,18 +111,18 @@ namespace views {
namespace __as_rvalue {
struct __fn : __range_adaptor_closure<__fn> {
template <class _Range>
- _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Range&& __range) const
- noexcept(noexcept(/**/ as_rvalue_view(std::forward<_Range>(__range))))
- -> decltype(/*--*/ as_rvalue_view(std::forward<_Range>(__range))) {
- return /*-------------*/ as_rvalue_view(std::forward<_Range>(__range));
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto
+ operator()(_Range&& __range) noexcept(noexcept(as_rvalue_view(std::forward<_Range>(__range))))
+ -> decltype(/*--------------------------*/ as_rvalue_view(std::forward<_Range>(__range))) {
+ return /*---------------------------------*/ as_rvalue_view(std::forward<_Range>(__range));
}
template <class _Range>
requires same_as<range_rvalue_reference_t<_Range>, range_reference_t<_Range>>
- _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Range&& __range) const
- noexcept(noexcept(/**/ views::all(std::forward<_Range>(__range))))
- -> decltype(/*--*/ views::all(std::forward<_Range>(__range))) {
- return /*-------------*/ views::all(std::forward<_Range>(__range));
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto
+ operator()(_Range&& __range) noexcept(noexcept(views::all(std::forward<_Range>(__range))))
+ -> decltype(/*--------------------------*/ views::all(std::forward<_Range>(__range))) {
+ return /*---------------------------------*/ views::all(std::forward<_Range>(__range));
}
};
} // namespace __as_rvalue
diff --git a/libcxx/include/__ranges/repeat_view.h b/libcxx/include/__ranges/repeat_view.h
index 620a26454972..5caea757a393 100644
--- a/libcxx/include/__ranges/repeat_view.h
+++ b/libcxx/include/__ranges/repeat_view.h
@@ -229,14 +229,13 @@ namespace views {
namespace __repeat {
struct __fn {
template <class _Tp>
- _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Tp&& __value) const
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Tp&& __value)
noexcept(noexcept(ranges::repeat_view(std::forward<_Tp>(__value))))
-> decltype( ranges::repeat_view(std::forward<_Tp>(__value)))
{ return ranges::repeat_view(std::forward<_Tp>(__value)); }
-
template <class _Tp, class _Bound>
- _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Tp&& __value, _Bound&& __bound_sentinel) const
+ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Tp&& __value, _Bound&& __bound_sentinel)
noexcept(noexcept(ranges::repeat_view(std::forward<_Tp>(__value), std::forward<_Bound>(__bound_sentinel))))
-> decltype( ranges::repeat_view(std::forward<_Tp>(__value), std::forward<_Bound>(__bound_sentinel)))
{ return ranges::repeat_view(std::forward<_Tp>(__value), std::forward<_Bound>(__bound_sentinel)); }
diff --git a/libcxx/include/__ranges/to.h b/libcxx/include/__ranges/to.h
index cf162100ee46..67818c521b15 100644
--- a/libcxx/include/__ranges/to.h
+++ b/libcxx/include/__ranges/to.h
@@ -207,7 +207,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto to(_Args&&... __args)
static_assert(
!is_volatile_v<_Container>, "The target container cannot be volatile-qualified, please remove the volatile");
- auto __to_func = []<input_range _Range, class... _Tail>(_Range&& __range, _Tail&&... __tail)
+ auto __to_func = []<input_range _Range, class... _Tail>(_Range&& __range, _Tail&&... __tail) static
requires requires { //
/**/ ranges::to<_Container>(std::forward<_Range>(__range), std::forward<_Tail>(__tail)...);
}
@@ -223,7 +223,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto to(_Args&&... __args)
// clang-format off
auto __to_func = []<input_range _Range, class... _Tail,
class _DeducedExpr = typename _Deducer<_Container, _Range, _Tail...>::type>
- (_Range&& __range, _Tail&& ... __tail)
+ (_Range&& __range, _Tail&& ... __tail) static
requires requires { //
/**/ ranges::to<_DeducedExpr>(std::forward<_Range>(__range), std::forward<_Tail>(__tail)...);
}
diff --git a/libcxx/include/__ranges/zip_view.h b/libcxx/include/__ranges/zip_view.h
index ce00a4e53a48..d3665a149a7c 100644
--- a/libcxx/include/__ranges/zip_view.h
+++ b/libcxx/include/__ranges/zip_view.h
@@ -489,12 +489,12 @@ namespace views {
namespace __zip {
struct __fn {
- _LIBCPP_HIDE_FROM_ABI constexpr auto operator()() const noexcept { return empty_view<tuple<>>{}; }
+ _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()() noexcept { return empty_view<tuple<>>{}; }
template <class... _Ranges>
- _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Ranges&&... __rs) const
- noexcept(noexcept(zip_view<all_t<_Ranges&&>...>(std::forward<_Ranges>(__rs)...)))
- -> decltype(zip_view<all_t<_Ranges&&>...>(std::forward<_Ranges>(__rs)...)) {
+ _LIBCPP_HIDE_FROM_ABI static constexpr auto
+ operator()(_Ranges&&... __rs) noexcept(noexcept(zip_view<all_t<_Ranges&&>...>(std::forward<_Ranges>(__rs)...)))
+ -> decltype(zip_view<all_t<_Ranges&&>...>(std::forward<_Ranges>(__rs)...)) {
return zip_view<all_t<_Ranges>...>(std::forward<_Ranges>(__rs)...);
}
};
diff --git a/libcxx/include/__string/char_traits.h b/libcxx/include/__string/char_traits.h
index 5880d3a22db2..47ed1057caaa 100644
--- a/libcxx/include/__string/char_traits.h
+++ b/libcxx/include/__string/char_traits.h
@@ -9,7 +9,6 @@
#ifndef _LIBCPP___STRING_CHAR_TRAITS_H
#define _LIBCPP___STRING_CHAR_TRAITS_H
-#include <__algorithm/copy_n.h>
#include <__algorithm/fill_n.h>
#include <__algorithm/find_end.h>
#include <__algorithm/find_first_of.h>
@@ -144,7 +143,7 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<char> {
copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT {
_LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(!std::__is_pointer_in_range(__s1, __s1 + __n, __s2),
"char_traits::copy: source and destination ranges overlap");
- std::copy_n(__s2, __n, __s1);
+ std::__constexpr_memmove(__s1, __s2, __element_count(__n));
return __s1;
}
@@ -221,7 +220,7 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<wchar_t> {
copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT {
_LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(!std::__is_pointer_in_range(__s1, __s1 + __n, __s2),
"char_traits::copy: source and destination ranges overlap");
- std::copy_n(__s2, __n, __s1);
+ std::__constexpr_memmove(__s1, __s2, __element_count(__n));
return __s1;
}
@@ -287,7 +286,7 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<char8_t> {
copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT {
_LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(!std::__is_pointer_in_range(__s1, __s1 + __n, __s2),
"char_traits::copy: source and destination ranges overlap");
- std::copy_n(__s2, __n, __s1);
+ std::__constexpr_memmove(__s1, __s2, __element_count(__n));
return __s1;
}
@@ -366,7 +365,7 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<char16_t> {
copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT {
_LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(!std::__is_pointer_in_range(__s1, __s1 + __n, __s2),
"char_traits::copy: source and destination ranges overlap");
- std::copy_n(__s2, __n, __s1);
+ std::__constexpr_memmove(__s1, __s2, __element_count(__n));
return __s1;
}
@@ -454,7 +453,7 @@ struct _LIBCPP_TEMPLATE_VIS char_traits<char32_t> {
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 static char_type*
copy(char_type* __s1, const char_type* __s2, size_t __n) _NOEXCEPT {
- std::copy_n(__s2, __n, __s1);
+ std::__constexpr_memmove(__s1, __s2, __element_count(__n));
return __s1;
}
diff --git a/libcxx/include/__system_error/errc.h b/libcxx/include/__system_error/errc.h
index f87df86a71e1..e9f3656b7b9c 100644
--- a/libcxx/include/__system_error/errc.h
+++ b/libcxx/include/__system_error/errc.h
@@ -58,18 +58,18 @@ enum class errc
no_child_process, // ECHILD
no_link, // ENOLINK
no_lock_available, // ENOLCK
- no_message_available, // ENODATA
+ no_message_available, // ENODATA // deprecated
no_message, // ENOMSG
no_protocol_option, // ENOPROTOOPT
no_space_on_device, // ENOSPC
- no_stream_resources, // ENOSR
+ no_stream_resources, // ENOSR // deprecated
no_such_device_or_address, // ENXIO
no_such_device, // ENODEV
no_such_file_or_directory, // ENOENT
no_such_process, // ESRCH
not_a_directory, // ENOTDIR
not_a_socket, // ENOTSOCK
- not_a_stream, // ENOSTR
+ not_a_stream, // ENOSTR // deprecated
not_connected, // ENOTCONN
not_enough_memory, // ENOMEM
not_supported, // ENOTSUP
@@ -87,7 +87,7 @@ enum class errc
resource_unavailable_try_again, // EAGAIN
result_out_of_range, // ERANGE
state_not_recoverable, // ENOTRECOVERABLE
- stream_timeout, // ETIME
+ stream_timeout, // ETIME // deprecated
text_file_busy, // ETXTBSY
timed_out, // ETIMEDOUT
too_many_files_open_in_system, // ENFILE
@@ -107,12 +107,34 @@ enum class errc
# pragma GCC system_header
#endif
+// The method of pushing and popping the diagnostics fails for GCC. GCC does
+// not recognize the pragmas used to generate deprecated diagnostics for
+// macros. So GCC does not need the pushing and popping.
+//
+// TODO Remove this when the deprecated constants are removed.
+#if defined(_LIBCPP_COMPILER_CLANG_BASED)
+# define _LIBCPP_SUPPRESS_DEPRECATED_ERRC_PUSH _LIBCPP_SUPPRESS_DEPRECATED_PUSH
+# define _LIBCPP_SUPPRESS_DEPRECATED_ERRC_POP _LIBCPP_SUPPRESS_DEPRECATED_POP
+#else
+# define _LIBCPP_SUPPRESS_DEPRECATED_ERRC_PUSH
+# define _LIBCPP_SUPPRESS_DEPRECATED_ERRC_POP
+#endif
+
_LIBCPP_BEGIN_NAMESPACE_STD
// Some error codes are not present on all platforms, so we provide equivalents
// for them:
// enum class errc
+//
+// LWG3869 deprecates the UNIX STREAMS macros and enum values.
+// This makes the code cumbersome:
+// - the enum value is deprecated and should show a diagnostic,
+// - the macro is deprecated and should _not_ show a diagnostic in this
+// context, and
+// - the macro is not always available.
+// This leads to the odd pushing and popping of the deprecated
+// diagnostic.
_LIBCPP_DECLARE_STRONG_ENUM(errc){
address_family_not_supported = EAFNOSUPPORT,
address_in_use = EADDRINUSE,
@@ -154,30 +176,48 @@ _LIBCPP_DECLARE_STRONG_ENUM(errc){
no_child_process = ECHILD,
no_link = ENOLINK,
no_lock_available = ENOLCK,
+ // clang-format off
+ no_message_available _LIBCPP_DEPRECATED =
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_PUSH
#ifdef ENODATA
- no_message_available = ENODATA,
+ ENODATA
#else
- no_message_available = ENOMSG,
+ ENOMSG
#endif
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_POP
+ ,
+ // clang-format on
no_message = ENOMSG,
no_protocol_option = ENOPROTOOPT,
no_space_on_device = ENOSPC,
+ // clang-format off
+ no_stream_resources _LIBCPP_DEPRECATED =
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_PUSH
#ifdef ENOSR
- no_stream_resources = ENOSR,
+ ENOSR
#else
- no_stream_resources = ENOMEM,
+ ENOMEM
#endif
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_POP
+ ,
+ // clang-format on
no_such_device_or_address = ENXIO,
no_such_device = ENODEV,
no_such_file_or_directory = ENOENT,
no_such_process = ESRCH,
not_a_directory = ENOTDIR,
not_a_socket = ENOTSOCK,
+ // clang-format off
+ not_a_stream _LIBCPP_DEPRECATED =
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_PUSH
#ifdef ENOSTR
- not_a_stream = ENOSTR,
+ ENOSTR
#else
- not_a_stream = EINVAL,
+ EINVAL
#endif
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_POP
+ ,
+ // clang-format on
not_connected = ENOTCONN,
not_enough_memory = ENOMEM,
not_supported = ENOTSUP,
@@ -195,11 +235,17 @@ _LIBCPP_DECLARE_STRONG_ENUM(errc){
resource_unavailable_try_again = EAGAIN,
result_out_of_range = ERANGE,
state_not_recoverable = ENOTRECOVERABLE,
+ // clang-format off
+ stream_timeout _LIBCPP_DEPRECATED =
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_PUSH
#ifdef ETIME
- stream_timeout = ETIME,
+ ETIME
#else
- stream_timeout = ETIMEDOUT,
+ ETIMEDOUT
#endif
+ _LIBCPP_SUPPRESS_DEPRECATED_ERRC_POP
+ ,
+ // clang-format on
text_file_busy = ETXTBSY,
timed_out = ETIMEDOUT,
too_many_files_open_in_system = ENFILE,
diff --git a/libcxx/include/__type_traits/apply_cv.h b/libcxx/include/__type_traits/apply_cv.h
index 7c6aabec8344..723af95b8d92 100644
--- a/libcxx/include/__type_traits/apply_cv.h
+++ b/libcxx/include/__type_traits/apply_cv.h
@@ -10,9 +10,7 @@
#define _LIBCPP___TYPE_TRAITS_APPLY_CV_H
#include <__config>
-#include <__type_traits/is_const.h>
-#include <__type_traits/is_volatile.h>
-#include <__type_traits/remove_reference.h>
+#include <__type_traits/copy_cv.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -20,54 +18,16 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Tp,
- bool = is_const<__libcpp_remove_reference_t<_Tp> >::value,
- bool = is_volatile<__libcpp_remove_reference_t<_Tp> >::value>
-struct __apply_cv_impl {
- template <class _Up>
- using __apply _LIBCPP_NODEBUG = _Up;
-};
-
template <class _Tp>
-struct __apply_cv_impl<_Tp, true, false> {
- template <class _Up>
- using __apply _LIBCPP_NODEBUG = const _Up;
-};
-
-template <class _Tp>
-struct __apply_cv_impl<_Tp, false, true> {
- template <class _Up>
- using __apply _LIBCPP_NODEBUG = volatile _Up;
-};
-
-template <class _Tp>
-struct __apply_cv_impl<_Tp, true, true> {
- template <class _Up>
- using __apply _LIBCPP_NODEBUG = const volatile _Up;
-};
-
-template <class _Tp>
-struct __apply_cv_impl<_Tp&, false, false> {
- template <class _Up>
- using __apply _LIBCPP_NODEBUG = _Up&;
-};
-
-template <class _Tp>
-struct __apply_cv_impl<_Tp&, true, false> {
- template <class _Up>
- using __apply _LIBCPP_NODEBUG = const _Up&;
-};
-
-template <class _Tp>
-struct __apply_cv_impl<_Tp&, false, true> {
+struct __apply_cv_impl {
template <class _Up>
- using __apply _LIBCPP_NODEBUG = volatile _Up&;
+ using __apply _LIBCPP_NODEBUG = __copy_cv_t<_Tp, _Up>;
};
template <class _Tp>
-struct __apply_cv_impl<_Tp&, true, true> {
+struct __apply_cv_impl<_Tp&> {
template <class _Up>
- using __apply _LIBCPP_NODEBUG = const volatile _Up&;
+ using __apply _LIBCPP_NODEBUG = __copy_cv_t<_Tp, _Up>&;
};
template <class _Tp, class _Up>
diff --git a/libcxx/include/cerrno b/libcxx/include/cerrno
index d488fa72a54b..6171ae31f184 100644
--- a/libcxx/include/cerrno
+++ b/libcxx/include/cerrno
@@ -38,4 +38,17 @@ Macros:
# pragma GCC system_header
#endif
+#ifdef ENODATA
+# pragma clang deprecated(ENODATA, "ENODATA is deprecated in ISO C++")
+#endif
+#ifdef ENOSR
+# pragma clang deprecated(ENOSR, "ENOSR is deprecated in ISO C++")
+#endif
+#ifdef ENOSTR
+# pragma clang deprecated(ENOSTR, "ENOSTR is deprecated in ISO C++")
+#endif
+#ifdef ETIME
+# pragma clang deprecated(ETIME, "ETIME is deprecated in ISO C++")
+#endif
+
#endif // _LIBCPP_CERRNO
diff --git a/libcxx/include/deque b/libcxx/include/deque
index 85ea9c6f661e..a6472e46d426 100644
--- a/libcxx/include/deque
+++ b/libcxx/include/deque
@@ -192,6 +192,7 @@ template <class T, class Allocator, class Predicate>
#include <__availability>
#include <__config>
#include <__format/enable_insertable.h>
+#include <__fwd/deque.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/next.h>
@@ -244,9 +245,6 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Tp, class _Allocator = allocator<_Tp> >
-class _LIBCPP_TEMPLATE_VIS deque;
-
template <class _ValueType, class _DiffType>
struct __deque_block_size {
static const _DiffType value = sizeof(_ValueType) < 256 ? 4096 / sizeof(_ValueType) : 16;
diff --git a/libcxx/include/format b/libcxx/include/format
index 146613464534..f1e87de0f830 100644
--- a/libcxx/include/format
+++ b/libcxx/include/format
@@ -223,6 +223,8 @@ namespace std {
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
# include <locale>
+# include <queue>
+# include <stack>
#endif
#endif // _LIBCPP_FORMAT
diff --git a/libcxx/include/iosfwd b/libcxx/include/iosfwd
index f1c2cbd96696..9af5e0503185 100644
--- a/libcxx/include/iosfwd
+++ b/libcxx/include/iosfwd
@@ -110,6 +110,7 @@ using wosyncstream = basic_osyncstream<wchar_t>; // C++20
#include <__fwd/fstream.h>
#include <__fwd/ios.h>
#include <__fwd/istream.h>
+#include <__fwd/memory.h>
#include <__fwd/ostream.h>
#include <__fwd/sstream.h>
#include <__fwd/streambuf.h>
@@ -162,10 +163,6 @@ using wosyncstream = basic_osyncstream<wchar_t>;
#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
-// Include other forward declarations here
-template <class _Tp, class _Alloc = allocator<_Tp> >
-class _LIBCPP_TEMPLATE_VIS vector;
-
template <class _CharT, class _Traits>
class __save_flags {
typedef basic_ios<_CharT, _Traits> __stream_type;
diff --git a/libcxx/include/libcxx.imp b/libcxx/include/libcxx.imp
index 77b7befd44f5..fb446da5dc6f 100644
--- a/libcxx/include/libcxx.imp
+++ b/libcxx/include/libcxx.imp
@@ -217,6 +217,7 @@
{ include: [ "<__algorithm/shift_right.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/shuffle.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/sift_down.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/simd_utils.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/sort.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/sort_heap.h>", "private", "<algorithm>", "public" ] },
{ include: [ "<__algorithm/stable_partition.h>", "private", "<algorithm>", "public" ] },
@@ -424,22 +425,27 @@
{ include: [ "<__fwd/bit_reference.h>", "private", "<bitset>", "public" ] },
{ include: [ "<__fwd/bit_reference.h>", "private", "<vector>", "public" ] },
{ include: [ "<__fwd/complex.h>", "private", "<complex>", "public" ] },
+ { include: [ "<__fwd/deque.h>", "private", "<deque>", "public" ] },
{ include: [ "<__fwd/format.h>", "private", "<format>", "public" ] },
{ include: [ "<__fwd/fstream.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/functional.h>", "private", "<functional>", "public" ] },
{ include: [ "<__fwd/ios.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/istream.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/mdspan.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__fwd/memory.h>", "private", "<memory>", "public" ] },
{ include: [ "<__fwd/memory_resource.h>", "private", "<memory_resource>", "public" ] },
{ include: [ "<__fwd/ostream.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/pair.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__fwd/queue.h>", "private", "<queue>", "public" ] },
{ include: [ "<__fwd/span.h>", "private", "<span>", "public" ] },
{ include: [ "<__fwd/sstream.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/stack.h>", "private", "<stack>", "public" ] },
{ include: [ "<__fwd/streambuf.h>", "private", "<iosfwd>", "public" ] },
{ include: [ "<__fwd/string.h>", "private", "<string>", "public" ] },
{ include: [ "<__fwd/string_view.h>", "private", "<string_view>", "public" ] },
{ include: [ "<__fwd/subrange.h>", "private", "<ranges>", "public" ] },
{ include: [ "<__fwd/tuple.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__fwd/vector.h>", "private", "<vector>", "public" ] },
{ include: [ "<__ios/fpos.h>", "private", "<ios>", "public" ] },
{ include: [ "<__iterator/access.h>", "private", "<iterator>", "public" ] },
{ include: [ "<__iterator/advance.h>", "private", "<iterator>", "public" ] },
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index f36a47cef009..079c6234d410 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -697,7 +697,10 @@ module std_private_algorithm_minmax [system
export *
}
module std_private_algorithm_minmax_element [system] { header "__algorithm/minmax_element.h" }
-module std_private_algorithm_mismatch [system] { header "__algorithm/mismatch.h" }
+module std_private_algorithm_mismatch [system] {
+ header "__algorithm/mismatch.h"
+ export std_private_algorithm_simd_utils
+}
module std_private_algorithm_move [system] { header "__algorithm/move.h" }
module std_private_algorithm_move_backward [system] { header "__algorithm/move_backward.h" }
module std_private_algorithm_next_permutation [system] { header "__algorithm/next_permutation.h" }
@@ -1048,6 +1051,7 @@ module std_private_algorithm_sort [system
header "__algorithm/sort.h"
export std_private_debug_utils_strict_weak_ordering_check
}
+module std_private_algorithm_simd_utils [system] { header "__algorithm/simd_utils.h" }
module std_private_algorithm_sort_heap [system] { header "__algorithm/sort_heap.h" }
module std_private_algorithm_stable_partition [system] { header "__algorithm/stable_partition.h" }
module std_private_algorithm_stable_sort [system] { header "__algorithm/stable_sort.h" }
@@ -1251,6 +1255,8 @@ module std_private_debug_utils_strict_weak_ordering_check [system] {
export std_private_type_traits_is_constant_evaluated
}
+module std_private_deque_fwd [system] { header "__fwd/deque.h" }
+
module std_private_exception_exception [system] { header "__exception/exception.h" }
module std_private_exception_exception_ptr [system] {
header "__exception/exception_ptr.h"
@@ -1531,6 +1537,7 @@ module std_private_memory_concepts [system] {
}
module std_private_memory_construct_at [system] { header "__memory/construct_at.h" }
module std_private_memory_destruct_n [system] { header "__memory/destruct_n.h" }
+module std_private_memory_fwd [system] { header "__fwd/memory.h" }
module std_private_memory_pointer_traits [system] { header "__memory/pointer_traits.h" }
module std_private_memory_ranges_construct_at [system] { header "__memory/ranges_construct_at.h" }
module std_private_memory_ranges_uninitialized_algorithms [system] {
@@ -1596,6 +1603,8 @@ module std_private_numeric_transform_exclusive_scan [system] { header "__numeric
module std_private_numeric_transform_inclusive_scan [system] { header "__numeric/transform_inclusive_scan.h" }
module std_private_numeric_transform_reduce [system] { header "__numeric/transform_reduce.h" }
+module std_private_queue_fwd [system] { header "__fwd/queue.h" }
+
module std_private_random_bernoulli_distribution [system] { header "__random/bernoulli_distribution.h" }
module std_private_random_binomial_distribution [system] { header "__random/binomial_distribution.h" }
module std_private_random_cauchy_distribution [system] { header "__random/cauchy_distribution.h" }
@@ -1733,6 +1742,8 @@ module std_private_ranges_zip_view [system] { header "__ranges
module std_private_span_span_fwd [system] { header "__fwd/span.h" }
+module std_private_stack_fwd [system] { header "__fwd/stack.h" }
+
module std_private_stop_token_atomic_unique_lock [system] { header "__stop_token/atomic_unique_lock.h" }
module std_private_stop_token_intrusive_list_view [system] { header "__stop_token/intrusive_list_view.h" }
module std_private_stop_token_intrusive_shared_ptr [system] { header "__stop_token/intrusive_shared_ptr.h" }
@@ -2081,3 +2092,5 @@ module std_private_utility_to_underlying [system] { header "__utility/t
module std_private_utility_unreachable [system] { header "__utility/unreachable.h" }
module std_private_variant_monostate [system] { header "__variant/monostate.h" }
+
+module std_private_vector_fwd [system] { header "__fwd/vector.h" }
diff --git a/libcxx/include/queue b/libcxx/include/queue
index 521a465713cd..f94cd7671863 100644
--- a/libcxx/include/queue
+++ b/libcxx/include/queue
@@ -260,6 +260,8 @@ template <class T, class Container, class Compare>
#include <__algorithm/ranges_copy.h>
#include <__config>
#include <__functional/operations.h>
+#include <__fwd/deque.h>
+#include <__fwd/queue.h>
#include <__iterator/back_insert_iterator.h>
#include <__iterator/iterator_traits.h>
#include <__memory/uses_allocator.h>
@@ -287,9 +289,6 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Tp, class _Container = deque<_Tp> >
-class _LIBCPP_TEMPLATE_VIS queue;
-
template <class _Tp, class _Container>
_LIBCPP_HIDE_FROM_ABI bool operator==(const queue<_Tp, _Container>& __x, const queue<_Tp, _Container>& __y);
@@ -511,7 +510,7 @@ template <class _Tp, class _Container, class _Alloc>
struct _LIBCPP_TEMPLATE_VIS uses_allocator<queue<_Tp, _Container>, _Alloc> : public uses_allocator<_Container, _Alloc> {
};
-template <class _Tp, class _Container = vector<_Tp>, class _Compare = less<typename _Container::value_type> >
+template <class _Tp, class _Container, class _Compare>
class _LIBCPP_TEMPLATE_VIS priority_queue {
public:
typedef _Container container_type;
diff --git a/libcxx/include/stack b/libcxx/include/stack
index 4003792600a0..08a392da6848 100644
--- a/libcxx/include/stack
+++ b/libcxx/include/stack
@@ -115,6 +115,7 @@ template <class T, class Container>
#include <__algorithm/ranges_copy.h>
#include <__config>
+#include <__fwd/stack.h>
#include <__iterator/back_insert_iterator.h>
#include <__iterator/iterator_traits.h>
#include <__memory/uses_allocator.h>
@@ -142,9 +143,6 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Tp, class _Container = deque<_Tp> >
-class _LIBCPP_TEMPLATE_VIS stack;
-
template <class _Tp, class _Container>
_LIBCPP_HIDE_FROM_ABI bool operator==(const stack<_Tp, _Container>& __x, const stack<_Tp, _Container>& __y);
diff --git a/libcxx/include/tuple b/libcxx/include/tuple
index f78db061b844..a9f0d680fe0e 100644
--- a/libcxx/include/tuple
+++ b/libcxx/include/tuple
@@ -1377,15 +1377,41 @@ inline _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) apply(_Fn&& __f, _Tuple&&
std::forward<_Tuple>(__t),
typename __make_tuple_indices<tuple_size_v<remove_reference_t<_Tuple>>>::type{}))
+#if _LIBCPP_STD_VER >= 20
template <class _Tp, class _Tuple, size_t... _Idx>
inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp __make_from_tuple_impl(_Tuple&& __t, __tuple_indices<_Idx...>)
+ noexcept(noexcept(_Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...)))
+ requires is_constructible_v<_Tp, decltype(std::get<_Idx>(std::forward<_Tuple>(__t)))...> {
+ return _Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...);
+}
+#else
+template <class _Tp, class _Tuple, size_t... _Idx>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp __make_from_tuple_impl(_Tuple&& __t, __tuple_indices<_Idx...>,
+ enable_if_t<is_constructible_v<_Tp, decltype(std::get<_Idx>(std::forward<_Tuple>(__t)))...>> * = nullptr)
_LIBCPP_NOEXCEPT_RETURN(_Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...))
+#endif // _LIBCPP_STD_VER >= 20
+
+template <class _Tp, class _Tuple,
+ class _Seq = typename __make_tuple_indices<tuple_size_v<remove_reference_t<_Tuple>>>::type, class = void>
+inline constexpr bool __can_make_from_tuple = false;
+template <class _Tp, class _Tuple, size_t... _Idx>
+inline constexpr bool __can_make_from_tuple<_Tp, _Tuple, __tuple_indices<_Idx...>,
+ enable_if_t<is_constructible_v<_Tp, decltype(std::get<_Idx>(std::declval<_Tuple>()))...>>> = true;
+
+// Based on LWG3528 (https://wg21.link/LWG3528) and http://eel.is/c++draft/description#structure.requirements-9,
+// the standard allows implementations to impose additional requirements, so we constrain std::make_from_tuple
+// to make it SFINAE-friendly and to avoid worse diagnostic messages. We still keep the constraints on
+// std::__make_from_tuple_impl so that std::__make_from_tuple_impl retains the same advantages when used alone.
+#if _LIBCPP_STD_VER >= 20
template <class _Tp, class _Tuple>
+ requires __can_make_from_tuple<_Tp, _Tuple> // strengthen
+#else
+template <class _Tp, class _Tuple, class = enable_if_t<__can_make_from_tuple<_Tp, _Tuple>>> // strengthen
+#endif // _LIBCPP_STD_VER >= 20
inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp make_from_tuple(_Tuple&& __t)
_LIBCPP_NOEXCEPT_RETURN(std::__make_from_tuple_impl<_Tp>(
std::forward<_Tuple>(__t), typename __make_tuple_indices<tuple_size_v<remove_reference_t<_Tuple>>>::type{}))
-
# undef _LIBCPP_NOEXCEPT_RETURN
# endif // _LIBCPP_STD_VER >= 17
diff --git a/libcxx/include/vector b/libcxx/include/vector
index 0908482600c5..1defc43a5247 100644
--- a/libcxx/include/vector
+++ b/libcxx/include/vector
@@ -325,6 +325,7 @@ template<class T, class charT> requires is-vector-bool-reference<T> // Since C++
#include <__format/formatter_bool.h>
#include <__functional/hash.h>
#include <__functional/unary_function.h>
+#include <__fwd/vector.h>
#include <__iterator/advance.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
@@ -357,7 +358,6 @@ template<class T, class charT> requires is-vector-bool-reference<T> // Since C++
#include <__utility/swap.h>
#include <climits>
#include <cstring>
-#include <iosfwd> // for forward declaration of vector
#include <limits>
#include <stdexcept>
#include <version>
@@ -2989,6 +2989,7 @@ _LIBCPP_POP_MACROS
# include <atomic>
# include <concepts>
# include <cstdlib>
+# include <iosfwd>
# include <locale>
# include <tuple>
# include <type_traits>
diff --git a/libcxx/modules/CMakeLists.txt b/libcxx/modules/CMakeLists.txt
index 0dea8cfca94a..d47d19a47553 100644
--- a/libcxx/modules/CMakeLists.txt
+++ b/libcxx/modules/CMakeLists.txt
@@ -206,9 +206,20 @@ add_custom_target(generate-cxx-modules
# Configure the modules manifest.
# Use the relative path between the installation and the module in the json
# file. This allows moving the entire installation to a different location.
+if("${CMAKE_INSTALL_PREFIX}" STREQUAL "")
+ set(BASE_DIRECTORY "/")
+else()
+ set(BASE_DIRECTORY ${CMAKE_INSTALL_PREFIX})
+endif()
+cmake_path(ABSOLUTE_PATH LIBCXX_INSTALL_LIBRARY_DIR
+ BASE_DIRECTORY ${BASE_DIRECTORY}
+ OUTPUT_VARIABLE ABS_LIBRARY_DIR)
+cmake_path(ABSOLUTE_PATH LIBCXX_INSTALL_MODULES_DIR
+ BASE_DIRECTORY ${BASE_DIRECTORY}
+ OUTPUT_VARIABLE ABS_MODULES_DIR)
file(RELATIVE_PATH LIBCXX_MODULE_RELATIVE_PATH
- ${CMAKE_INSTALL_PREFIX}/${LIBCXX_INSTALL_LIBRARY_DIR}
- ${CMAKE_INSTALL_PREFIX}/${LIBCXX_INSTALL_MODULES_DIR})
+ ${ABS_LIBRARY_DIR}
+ ${ABS_MODULES_DIR})
configure_file(
"modules.json.in"
"${LIBCXX_LIBRARY_DIR}/libc++.modules.json"
diff --git a/libcxx/src/include/tzdb/tzdb_list_private.h b/libcxx/src/include/tzdb/tzdb_list_private.h
index f43d7d8ea772..969b2b9f8a9f 100644
--- a/libcxx/src/include/tzdb/tzdb_list_private.h
+++ b/libcxx/src/include/tzdb/tzdb_list_private.h
@@ -54,14 +54,14 @@ public:
using const_iterator = tzdb_list::const_iterator;
- const tzdb& front() const noexcept {
+ const tzdb& __front() const noexcept {
#ifndef _LIBCPP_HAS_NO_THREADS
shared_lock __lock{__mutex_};
#endif
return __tzdb_.front();
}
- const_iterator erase_after(const_iterator __p) {
+ const_iterator __erase_after(const_iterator __p) {
#ifndef _LIBCPP_HAS_NO_THREADS
unique_lock __lock{__mutex_};
#endif
@@ -70,20 +70,17 @@ public:
return __tzdb_.erase_after(__p);
}
- const_iterator begin() const noexcept {
+ const_iterator __begin() const noexcept {
#ifndef _LIBCPP_HAS_NO_THREADS
shared_lock __lock{__mutex_};
#endif
return __tzdb_.begin();
}
- const_iterator end() const noexcept {
+ const_iterator __end() const noexcept {
// forward_list<T>::end does not access the list, so no need to take a lock.
return __tzdb_.end();
}
- const_iterator cbegin() const noexcept { return begin(); }
- const_iterator cend() const noexcept { return end(); }
-
private:
// Loads the tzdbs
// pre: The caller ensures the locking, if needed, is done.
diff --git a/libcxx/src/random.cpp b/libcxx/src/random.cpp
index c7073c54da6b..93590af310e5 100644
--- a/libcxx/src/random.cpp
+++ b/libcxx/src/random.cpp
@@ -79,8 +79,10 @@ unsigned random_device::operator()() {
char* p = reinterpret_cast<char*>(&r);
while (n > 0) {
ssize_t s = read(__f_, p, n);
+ _LIBCPP_SUPPRESS_DEPRECATED_PUSH
if (s == 0)
- __throw_system_error(ENODATA, "random_device got EOF");
+ __throw_system_error(ENODATA, "random_device got EOF"); // TODO ENODATA -> ENOMSG
+ _LIBCPP_SUPPRESS_DEPRECATED_POP
if (s == -1) {
if (errno != EINTR)
__throw_system_error(errno, "random_device got an unexpected error");
diff --git a/libcxx/src/tzdb_list.cpp b/libcxx/src/tzdb_list.cpp
index d3ee8b58f98b..b99c30a9b9e6 100644
--- a/libcxx/src/tzdb_list.cpp
+++ b/libcxx/src/tzdb_list.cpp
@@ -18,26 +18,24 @@ namespace chrono {
_LIBCPP_EXPORTED_FROM_ABI tzdb_list::~tzdb_list() { delete __impl_; }
-_LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const tzdb& tzdb_list::front() const noexcept {
- return __impl_->front();
-}
+[[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const tzdb& tzdb_list::__front() const noexcept { return __impl_->__front(); }
-_LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::erase_after(const_iterator __p) {
- return __impl_->erase_after(__p);
+_LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::__erase_after(const_iterator __p) {
+ return __impl_->__erase_after(__p);
}
-_LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::begin() const noexcept {
- return __impl_->begin();
+[[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::__begin() const noexcept {
+ return __impl_->__begin();
}
-_LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::end() const noexcept {
- return __impl_->end();
+[[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::__end() const noexcept {
+ return __impl_->__end();
}
-_LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::cbegin() const noexcept {
- return __impl_->cbegin();
+[[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::__cbegin() const noexcept {
+ return __impl_->__begin();
}
-_LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::cend() const noexcept {
- return __impl_->cend();
+[[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI tzdb_list::const_iterator tzdb_list::__cend() const noexcept {
+ return __impl_->__end();
}
} // namespace chrono
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx2a.pass.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.pass.cpp
index 59657ca46a14..d9a65eee4c13 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx2a.pass.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.pass.cpp
@@ -12,11 +12,9 @@
// pointer address(reference x) const;
// const_pointer address(const_reference x) const;
-// In C++20, parts of std::allocator<T> have been removed.
-// However, for backwards compatibility, if _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// is defined before including <memory>, then removed members will be restored.
+// Removed in C++20, deprecated in C++17.
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
+// REQUIRES: c++03 || c++11 || c++14 || c++17
// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
#include <memory>
@@ -25,25 +23,22 @@
#include "test_macros.h"
template <class T>
-void test_address()
-{
- T* tp = new T();
- const T* ctp = tp;
- const std::allocator<T> a;
- assert(a.address(*tp) == tp);
- assert(a.address(*ctp) == tp);
- delete tp;
+void test_address() {
+ T* tp = new T();
+ const T* ctp = tp;
+ const std::allocator<T> a;
+ assert(a.address(*tp) == tp);
+ assert(a.address(*ctp) == tp);
+ delete tp;
}
-struct A
-{
- void operator&() const {}
+struct A {
+ void operator&() const {}
};
-int main(int, char**)
-{
- test_address<int>();
- test_address<A>();
+int main(int, char**) {
+ test_address<int>();
+ test_address<A>();
return 0;
}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.verify.cpp
new file mode 100644
index 000000000000..21fd4d23449b
--- /dev/null
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.cxx20.verify.cpp
@@ -0,0 +1,42 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <memory>
+
+// allocator:
+// pointer address(reference x) const;
+// const_pointer address(const_reference x) const;
+
+// In C++20, parts of std::allocator<T> have been removed.
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+#include <memory>
+#include <cassert>
+
+#include "test_macros.h"
+
+template <class T>
+void test_address() {
+ T* tp = new T();
+ const T* ctp = tp;
+ const std::allocator<T> a;
+ assert(a.address(*tp) == tp); // expected-error 2 {{no member}}
+ assert(a.address(*ctp) == tp); // expected-error 2 {{no member}}
+ delete tp;
+}
+
+struct A {
+ void operator&() const {}
+};
+
+int main(int, char**) {
+ test_address<int>();
+ test_address<A>();
+
+ return 0;
+}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.depr_in_cxx17.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.depr_in_cxx17.verify.cpp
index 83d059a838ff..4098bdb2ee92 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.depr_in_cxx17.verify.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/address.depr_in_cxx17.verify.cpp
@@ -14,9 +14,7 @@
// Deprecated in C++17
-// UNSUPPORTED: c++03, c++11, c++14
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS -Wno-deprecated-pragma
+// REQUIRES: c++17
#include <memory>
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.pass.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.pass.cpp
index f2fb606ee6db..8fc6628ebfba 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.pass.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.pass.cpp
@@ -11,17 +11,18 @@
// allocator:
// T* allocate(size_t n, const void* hint);
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
+// Removed in C++20, deprecated in C++17.
+
+// REQUIRES: c++03 || c++11 || c++14 || c++17
// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
#include <memory>
#include <cassert>
-#include <cstddef> // for std::max_align_t
+#include <cstddef> // for std::max_align_t
#include "test_macros.h"
#include "count_new.h"
-
#ifdef TEST_HAS_NO_ALIGNED_ALLOCATION
static const bool UsingAlignedNew = false;
#else
@@ -36,7 +37,6 @@ static const std::size_t MaxAligned = std::alignment_of<std::max_align_t>::value
static const std::size_t OverAligned = MaxAligned * 2;
-
template <std::size_t Align>
struct TEST_ALIGNAS(Align) AlignedType {
char data;
@@ -48,7 +48,6 @@ struct TEST_ALIGNAS(Align) AlignedType {
template <std::size_t Align>
int AlignedType<Align>::constructed = 0;
-
template <std::size_t Align>
void test_aligned() {
typedef AlignedType<Align> T;
@@ -56,11 +55,11 @@ void test_aligned() {
globalMemCounter.reset();
std::allocator<T> a;
const bool IsOverAlignedType = Align > MaxAligned;
- const bool ExpectAligned = IsOverAlignedType && UsingAlignedNew;
+ const bool ExpectAligned = IsOverAlignedType && UsingAlignedNew;
{
- globalMemCounter.last_new_size = 0;
+ globalMemCounter.last_new_size = 0;
globalMemCounter.last_new_align = 0;
- T* ap2 = a.allocate(11, (const void*)5);
+ T* ap2 = a.allocate(11, (const void*)5);
DoNotOptimize(ap2);
assert(globalMemCounter.checkOutstandingNewEq(1));
assert(globalMemCounter.checkNewCalledEq(1));
@@ -80,14 +79,14 @@ void test_aligned() {
}
int main(int, char**) {
- test_aligned<1>();
- test_aligned<2>();
- test_aligned<4>();
- test_aligned<8>();
- test_aligned<16>();
- test_aligned<MaxAligned>();
- test_aligned<OverAligned>();
- test_aligned<OverAligned * 2>();
+ test_aligned<1>();
+ test_aligned<2>();
+ test_aligned<4>();
+ test_aligned<8>();
+ test_aligned<16>();
+ test_aligned<MaxAligned>();
+ test_aligned<OverAligned>();
+ test_aligned<OverAligned * 2>();
return 0;
}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.verify.cpp
new file mode 100644
index 000000000000..bf02c7570d39
--- /dev/null
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx20.verify.cpp
@@ -0,0 +1,23 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <memory>
+
+// allocator:
+// T* allocate(size_t n, const void* hint);
+
+// Removed in C++20.
+
+#include <memory>
+
+void f() {
+ std::allocator<int> a;
+ a.allocate(3, nullptr); // expected-error {{too many arguments to function call}}
+}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.verify.cpp
deleted file mode 100644
index 2289cd6cd404..000000000000
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.cxx2a.verify.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// UNSUPPORTED: c++03, c++11, c++14, c++17
-
-// <memory>
-
-// allocator:
-// T* allocate(size_t n, const void* hint);
-
-// In C++20, parts of std::allocator<T> have been removed.
-// However, for backwards compatibility, if _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// is defined before including <memory>, then removed members will be restored.
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
-
-#include <memory>
-
-void f() {
- std::allocator<int> a;
- a.allocate(3, nullptr); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}}
-}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.depr_in_cxx17.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.depr_in_cxx17.verify.cpp
index 8b2e862e9503..8629df3c4164 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.depr_in_cxx17.verify.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/allocate.depr_in_cxx17.verify.cpp
@@ -13,9 +13,7 @@
// Deprecated in C++17
-// UNSUPPORTED: c++03, c++11, c++14
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS -Wno-deprecated-pragma
+// REQUIRES: c++17
#include <memory>
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx2a.pass.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp
index d3a7dadbbe11..9a37cf8af8e6 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx2a.pass.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.pass.cpp
@@ -11,12 +11,10 @@
// allocator:
// template <class... Args> void construct(pointer p, Args&&... args);
-// In C++20, parts of std::allocator<T> have been removed.
-// However, for backwards compatibility, if _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// is defined before including <memory>, then removed members will be restored.
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
+// In C++20, parts of std::allocator<T> have been removed.
+// In C++17, they were deprecated.
// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
+// REQUIRES: c++03 || c++11 || c++14 || c++17
#include <memory>
#include <cassert>
@@ -26,42 +24,39 @@
int A_constructed = 0;
-struct A
-{
- int data;
- A() {++A_constructed;}
+struct A {
+ int data;
+ A() { ++A_constructed; }
- A(const A&) {++A_constructed;}
+ A(const A&) { ++A_constructed; }
- explicit A(int) {++A_constructed;}
- A(int, int*) {++A_constructed;}
+ explicit A(int) { ++A_constructed; }
+ A(int, int*) { ++A_constructed; }
- ~A() {--A_constructed;}
+ ~A() { --A_constructed; }
};
int move_only_constructed = 0;
#if TEST_STD_VER >= 11
-class move_only
-{
- move_only(const move_only&) = delete;
- move_only& operator=(const move_only&)= delete;
+class move_only {
+ move_only(const move_only&) = delete;
+ move_only& operator=(const move_only&) = delete;
public:
- move_only(move_only&&) {++move_only_constructed;}
- move_only& operator=(move_only&&) {return *this;}
+ move_only(move_only&&) { ++move_only_constructed; }
+ move_only& operator=(move_only&&) { return *this; }
- move_only() {++move_only_constructed;}
- ~move_only() {--move_only_constructed;}
+ move_only() { ++move_only_constructed; }
+ ~move_only() { --move_only_constructed; }
public:
- int data; // unused other than to make sizeof(move_only) == sizeof(int).
- // but public to suppress "-Wunused-private-field"
+ int data; // unused other than to make sizeof(move_only) == sizeof(int).
+ // but public to suppress "-Wunused-private-field"
};
#endif // TEST_STD_VER >= 11
-int main(int, char**)
-{
+int main(int, char**) {
globalMemCounter.reset();
{
std::allocator<A> a;
@@ -69,7 +64,7 @@ int main(int, char**)
assert(A_constructed == 0);
globalMemCounter.last_new_size = 0;
- A* ap = a.allocate(3);
+ A* ap = a.allocate(3);
DoNotOptimize(ap);
assert(globalMemCounter.checkOutstandingNewEq(1));
assert(globalMemCounter.checkLastNewSizeEq(3 * sizeof(int)));
@@ -113,13 +108,13 @@ int main(int, char**)
assert(A_constructed == 0);
}
#if TEST_STD_VER >= 11
- {
+ {
std::allocator<move_only> a;
assert(globalMemCounter.checkOutstandingNewEq(0));
assert(move_only_constructed == 0);
globalMemCounter.last_new_size = 0;
- move_only* ap = a.allocate(3);
+ move_only* ap = a.allocate(3);
DoNotOptimize(ap);
assert(globalMemCounter.checkOutstandingNewEq(1));
assert(globalMemCounter.checkLastNewSizeEq(3 * sizeof(int)));
@@ -145,7 +140,7 @@ int main(int, char**)
DoNotOptimize(ap);
assert(globalMemCounter.checkOutstandingNewEq(0));
assert(move_only_constructed == 0);
- }
+ }
#endif
return 0;
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.verify.cpp
new file mode 100644
index 000000000000..b39f9d918c95
--- /dev/null
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/construct.cxx20.verify.cpp
@@ -0,0 +1,77 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <memory>
+
+// allocator:
+// template <class... Args> void construct(pointer p, Args&&... args);
+
+// Removed in C++20.
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+#include <memory>
+#include <cassert>
+
+int A_constructed = 0;
+
+struct A {
+ int data;
+ A() { ++A_constructed; }
+
+ A(const A&) { ++A_constructed; }
+
+ explicit A(int) { ++A_constructed; }
+ A(int, int*) { ++A_constructed; }
+
+ ~A() { --A_constructed; }
+};
+
+int move_only_constructed = 0;
+
+class move_only {
+ move_only(const move_only&) = delete;
+ move_only& operator=(const move_only&) = delete;
+
+public:
+ move_only(move_only&&) { ++move_only_constructed; }
+ move_only& operator=(move_only&&) { return *this; }
+
+ move_only() { ++move_only_constructed; }
+ ~move_only() { --move_only_constructed; }
+
+public:
+ int data; // unused other than to make sizeof(move_only) == sizeof(int).
+ // but public to suppress "-Wunused-private-field"
+};
+
+int main(int, char**) {
+ {
+ std::allocator<A> a;
+ A* ap = a.allocate(3);
+ a.construct(ap); // expected-error {{no member}}
+ a.destroy(ap); // expected-error {{no member}}
+ a.construct(ap, A()); // expected-error {{no member}}
+ a.destroy(ap); // expected-error {{no member}}
+ a.construct(ap, 5); // expected-error {{no member}}
+ a.destroy(ap); // expected-error {{no member}}
+ a.construct(ap, 5, (int*)0); // expected-error {{no member}}
+ a.destroy(ap); // expected-error {{no member}}
+ a.deallocate(ap, 3);
+ }
+ {
+ std::allocator<move_only> a;
+ move_only* ap = a.allocate(3);
+ a.construct(ap); // expected-error {{no member}}
+ a.destroy(ap); // expected-error {{no member}}
+ a.construct(ap, move_only()); // expected-error {{no member}}
+ a.destroy(ap); // expected-error {{no member}}
+ a.deallocate(ap, 3);
+ }
+ return 0;
+}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx2a.pass.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.pass.cpp
index b07568355fee..92e3b919b0f7 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx2a.pass.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.pass.cpp
@@ -11,11 +11,9 @@
// allocator:
// size_type max_size() const throw();
-// In C++20, parts of std::allocator<T> have been removed.
-// However, for backwards compatibility, if _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// is defined before including <memory>, then removed members will be restored.
+// Removed in C++20, deprecated in C++17.
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
+// REQUIRES: c++03 || c++11 || c++14 || c++17
// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
#include <memory>
@@ -27,11 +25,10 @@
int new_called = 0;
-int main(int, char**)
-{
- const std::allocator<int> a;
- std::size_t M = a.max_size();
- assert(M > 0xFFFF && M <= (std::numeric_limits<std::size_t>::max() / sizeof(int)));
+int main(int, char**) {
+ const std::allocator<int> a;
+ std::size_t M = a.max_size();
+ assert(M > 0xFFFF && M <= (std::numeric_limits<std::size_t>::max() / sizeof(int)));
return 0;
}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.verify.cpp
new file mode 100644
index 000000000000..0e0f3c3f4aa4
--- /dev/null
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator.members/max_size.cxx20.verify.cpp
@@ -0,0 +1,32 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <memory>
+
+// allocator:
+// size_type max_size() const throw();
+
+// In C++20, parts of std::allocator<T> have been removed.
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+#include <memory>
+#include <limits>
+#include <cstddef>
+#include <cassert>
+
+#include "test_macros.h"
+
+int new_called = 0;
+
+int main(int, char**) {
+ const std::allocator<int> a;
+ std::size_t M = a.max_size(); // expected-error {{no member}}
+ assert(M > 0xFFFF && M <= (std::numeric_limits<std::size_t>::max() / sizeof(int)));
+
+ return 0;
+}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/allocator_types.cxx2a.pass.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/allocator_types.cxx20.pass.cpp
index a6134b04a8f5..e462e07d896c 100644
--- a/libcxx/test/libcxx/depr/depr.default.allocator/allocator_types.cxx2a.pass.cpp
+++ b/libcxx/test/libcxx/depr/depr.default.allocator/allocator_types.cxx20.pass.cpp
@@ -26,7 +26,9 @@
// ...
// };
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
+// Removed in C++20, deprecated in C++17.
+
+// REQUIRES: c++03 || c++11 || c++14 || c++17
// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
#include <memory>
@@ -35,17 +37,17 @@
template <class T>
void test() {
- static_assert((std::is_same<typename std::allocator<T>::size_type, std::size_t>::value), "");
- static_assert((std::is_same<typename std::allocator<T>::difference_type, std::ptrdiff_t>::value), "");
- static_assert((std::is_same<typename std::allocator<T>::pointer, T*>::value), "");
- static_assert((std::is_same<typename std::allocator<T>::const_pointer, const T*>::value), "");
- static_assert((std::is_same<typename std::allocator<T>::reference, T&>::value), "");
- static_assert((std::is_same<typename std::allocator<T>::const_reference, const T&>::value), "");
- static_assert((std::is_same<typename std::allocator<T>::template rebind<int>::other,
- std::allocator<int> >::value), "");
+ static_assert((std::is_same<typename std::allocator<T>::size_type, std::size_t>::value), "");
+ static_assert((std::is_same<typename std::allocator<T>::difference_type, std::ptrdiff_t>::value), "");
+ static_assert((std::is_same<typename std::allocator<T>::pointer, T*>::value), "");
+ static_assert((std::is_same<typename std::allocator<T>::const_pointer, const T*>::value), "");
+ static_assert((std::is_same<typename std::allocator<T>::reference, T&>::value), "");
+ static_assert((std::is_same<typename std::allocator<T>::const_reference, const T&>::value), "");
+ static_assert(
+ (std::is_same<typename std::allocator<T>::template rebind<int>::other, std::allocator<int> >::value), "");
}
int main(int, char**) {
- test<char>();
- return 0;
+ test<char>();
+ return 0;
}
diff --git a/libcxx/test/libcxx/depr/depr.default.allocator/enable_removed_allocator_members.deprecated.verify.cpp b/libcxx/test/libcxx/depr/depr.default.allocator/enable_removed_allocator_members.deprecated.verify.cpp
deleted file mode 100644
index ab6495ea9db4..000000000000
--- a/libcxx/test/libcxx/depr/depr.default.allocator/enable_removed_allocator_members.deprecated.verify.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <memory>
-
-// Ensure that defining _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS yields a
-// deprecation warning. We intend to issue a deprecation warning in LLVM 18
-// and remove the macro entirely in LLVM 19. As such, this test will be quite
-// short lived.
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-
-// UNSUPPORTED: clang-modules-build
-
-#include <memory> // expected-warning@* 1+ {{macro '_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS' has been marked as deprecated}}
diff --git a/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp b/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp
index 26703f748d87..1acbed55d2b5 100644
--- a/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp
+++ b/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp
@@ -9,7 +9,7 @@
// UNSUPPORTED: c++03, c++11, c++14
// UNSUPPORTED: availability-filesystem-missing
// UNSUPPORTED: no-filesystem
-// ADDITIONAL_COMPILE_FLAGS: -I %S/../../../../../../src
+// ADDITIONAL_COMPILE_FLAGS: -I %{libcxx-dir}/src
// This test relies on calling functions from the libcxx internal headers
// of <filesystem>; the Windows implementation uses different
diff --git a/libcxx/test/libcxx/input.output/filesystems/convert_file_time.pass.cpp b/libcxx/test/libcxx/input.output/filesystems/convert_file_time.pass.cpp
index 3c901c4e9f2e..699bbfb9e6dc 100644
--- a/libcxx/test/libcxx/input.output/filesystems/convert_file_time.pass.cpp
+++ b/libcxx/test/libcxx/input.output/filesystems/convert_file_time.pass.cpp
@@ -13,7 +13,7 @@
// typedef TrivialClock file_time_type;
-// ADDITIONAL_COMPILE_FLAGS: -I %S/../../../../src -Wno-macro-redefined
+// ADDITIONAL_COMPILE_FLAGS: -I %{libcxx-dir}/src -Wno-macro-redefined
#include <cassert>
#include <chrono>
diff --git a/libcxx/test/libcxx/time/time.zone/time.zone.db/rules.pass.cpp b/libcxx/test/libcxx/time/time.zone/time.zone.db/rules.pass.cpp
index 4814f4aad87f..fcfc34625fbe 100644
--- a/libcxx/test/libcxx/time/time.zone/time.zone.db/rules.pass.cpp
+++ b/libcxx/test/libcxx/time/time.zone/time.zone.db/rules.pass.cpp
@@ -17,7 +17,7 @@
// Tests the IANA database rules parsing and operations.
// This is not part of the public tzdb interface.
// The test uses private implementation headers.
-// ADDITIONAL_COMPILE_FLAGS: -I %S/../../../../../src/include
+// ADDITIONAL_COMPILE_FLAGS: -I %{libcxx-dir}/src/include
#include <chrono>
#include <fstream>
diff --git a/libcxx/test/libcxx/time/time.zone/time.zone.db/zones.pass.cpp b/libcxx/test/libcxx/time/time.zone/time.zone.db/zones.pass.cpp
index 8571e0d05ebb..e97b36fca2bb 100644
--- a/libcxx/test/libcxx/time/time.zone/time.zone.db/zones.pass.cpp
+++ b/libcxx/test/libcxx/time/time.zone/time.zone.db/zones.pass.cpp
@@ -17,7 +17,7 @@
// Tests the IANA database zones parsing and operations.
// This is not part of the public tzdb interface.
// The test uses private implementation headers.
-// ADDITIONAL_COMPILE_FLAGS: -I %S/../../../../../src/include
+// ADDITIONAL_COMPILE_FLAGS: -I %{libcxx-dir}/src/include
#include <cassert>
#include <chrono>
diff --git a/libcxx/test/libcxx/transitive_includes/cxx03.csv b/libcxx/test/libcxx/transitive_includes/cxx03.csv
index c65b9b9d705e..2e246644f626 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx03.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx03.csv
@@ -267,6 +267,7 @@ filesystem type_traits
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
diff --git a/libcxx/test/libcxx/transitive_includes/cxx11.csv b/libcxx/test/libcxx/transitive_includes/cxx11.csv
index b3d9e327fc7a..e074bf1f7dcc 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx11.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx11.csv
@@ -268,6 +268,7 @@ filesystem type_traits
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
diff --git a/libcxx/test/libcxx/transitive_includes/cxx14.csv b/libcxx/test/libcxx/transitive_includes/cxx14.csv
index d723409422a3..88f9c24f0864 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx14.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx14.csv
@@ -270,6 +270,7 @@ filesystem type_traits
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
diff --git a/libcxx/test/libcxx/transitive_includes/cxx17.csv b/libcxx/test/libcxx/transitive_includes/cxx17.csv
index d723409422a3..88f9c24f0864 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx17.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx17.csv
@@ -270,6 +270,7 @@ filesystem type_traits
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
diff --git a/libcxx/test/libcxx/transitive_includes/cxx20.csv b/libcxx/test/libcxx/transitive_includes/cxx20.csv
index 03b4eda8b4d8..27f59660fb98 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx20.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx20.csv
@@ -281,6 +281,7 @@ filesystem type_traits
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
diff --git a/libcxx/test/libcxx/transitive_includes/cxx23.csv b/libcxx/test/libcxx/transitive_includes/cxx23.csv
index 062127364adf..79c67dc00cfb 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx23.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx23.csv
@@ -190,6 +190,7 @@ filesystem string_view
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
@@ -201,8 +202,6 @@ format initializer_list
format limits
format new
format optional
-format queue
-format stack
format stdexcept
format string
format string_view
@@ -680,7 +679,6 @@ vector cstdlib
vector cstring
vector cwchar
vector initializer_list
-vector iosfwd
vector limits
vector new
vector stdexcept
diff --git a/libcxx/test/libcxx/transitive_includes/cxx26.csv b/libcxx/test/libcxx/transitive_includes/cxx26.csv
index 062127364adf..79c67dc00cfb 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx26.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx26.csv
@@ -190,6 +190,7 @@ filesystem string_view
filesystem version
format array
format cctype
+format cerrno
format clocale
format cmath
format cstddef
@@ -201,8 +202,6 @@ format initializer_list
format limits
format new
format optional
-format queue
-format stack
format stdexcept
format string
format string_view
@@ -680,7 +679,6 @@ vector cstdlib
vector cstring
vector cwchar
vector initializer_list
-vector iosfwd
vector limits
vector new
vector stdexcept
diff --git a/libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_allocator_void_no_members.verify.cpp b/libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_allocator_void_no_members.verify.cpp
deleted file mode 100644
index 8888683a044f..000000000000
--- a/libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_allocator_void_no_members.verify.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// Check that members of std::allocator<void> are not provided in C++20
-// with _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION but without
-// _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS.
-
-// UNSUPPORTED: c++03, c++11, c++14, c++17
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION
-//
-// Ignore any extra errors arising from typo correction.
-// ADDITIONAL_COMPILE_FLAGS: -Xclang -verify-ignore-unexpected=error
-
-#include <memory>
-
-std::allocator<void>::pointer x; // expected-error-re {{no {{(type|template)}} named 'pointer'}}
-std::allocator<void>::const_pointer y; // expected-error-re {{no {{(type|template)}} named 'const_pointer'}}
-std::allocator<void>::value_type z; // expected-error-re {{no {{(type|template)}} named 'value_type'}}
-std::allocator<void>::rebind<int>::other t; // expected-error-re {{no {{(type|template)}} named 'rebind'}}
diff --git a/libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_with_removed_members.compile.pass.cpp b/libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_with_removed_members.compile.pass.cpp
deleted file mode 100644
index 3f151edefe1c..000000000000
--- a/libcxx/test/libcxx/utilities/memory/default.allocator/allocator_types.void.cxx20_with_removed_members.compile.pass.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// Check that the nested types of std::allocator<void> are provided in C++20
-// with a flag that keeps the removed members.
-
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION
-
-#include <memory>
-#include <type_traits>
-
-static_assert((std::is_same<std::allocator<void>::pointer, void*>::value), "");
-static_assert((std::is_same<std::allocator<void>::const_pointer, const void*>::value), "");
-static_assert((std::is_same<std::allocator<void>::value_type, void>::value), "");
-static_assert((std::is_same<std::allocator<void>::rebind<int>::other, std::allocator<int> >::value), "");
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch.pass.cpp
index cc588c095ccf..55c9eea863c3 100644
--- a/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch.pass.cpp
+++ b/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch.pass.cpp
@@ -16,79 +16,201 @@
// template<InputIterator Iter1, InputIterator Iter2Pred>
// constexpr pair<Iter1, Iter2> // constexpr after c++17
// mismatch(Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2); // C++14
+//
+// template<InputIterator Iter1, InputIterator Iter2,
+// Predicate<auto, Iter1::value_type, Iter2::value_type> Pred>
+// requires CopyConstructible<Pred>
+// constexpr pair<Iter1, Iter2> // constexpr after c++17
+// mismatch(Iter1 first1, Iter1 last1, Iter2 first2, Pred pred);
+//
+// template<InputIterator Iter1, InputIterator Iter2, Predicate Pred>
+// constexpr pair<Iter1, Iter2> // constexpr after c++17
+// mismatch(Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, Pred pred); // C++14
+
+// ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-steps): -fconstexpr-steps=50000000
+// ADDITIONAL_COMPILE_FLAGS(has-fconstexpr-ops-limit): -fconstexpr-ops-limit=100000000
#include <algorithm>
+#include <array>
#include <cassert>
+#include <vector>
#include "test_macros.h"
#include "test_iterators.h"
-
-#if TEST_STD_VER > 17
-TEST_CONSTEXPR bool test_constexpr() {
- int ia[] = {1, 3, 6, 7};
- int ib[] = {1, 3};
- int ic[] = {1, 3, 5, 7};
- typedef cpp17_input_iterator<int*> II;
- typedef bidirectional_iterator<int*> BI;
-
- auto p1 = std::mismatch(std::begin(ia), std::end(ia), std::begin(ic));
- if (p1.first != ia+2 || p1.second != ic+2)
- return false;
-
- auto p2 = std::mismatch(std::begin(ia), std::end(ia), std::begin(ic), std::end(ic));
- if (p2.first != ia+2 || p2.second != ic+2)
- return false;
-
- auto p3 = std::mismatch(std::begin(ib), std::end(ib), std::begin(ic));
- if (p3.first != ib+2 || p3.second != ic+2)
- return false;
-
- auto p4 = std::mismatch(std::begin(ib), std::end(ib), std::begin(ic), std::end(ic));
- if (p4.first != ib+2 || p4.second != ic+2)
- return false;
-
- auto p5 = std::mismatch(II(std::begin(ib)), II(std::end(ib)), II(std::begin(ic)));
- if (p5.first != II(ib+2) || p5.second != II(ic+2))
- return false;
- auto p6 = std::mismatch(BI(std::begin(ib)), BI(std::end(ib)), BI(std::begin(ic)), BI(std::end(ic)));
- if (p6.first != BI(ib+2) || p6.second != BI(ic+2))
- return false;
-
- return true;
- }
+#include "type_algorithms.h"
+
+template <class Iter, class Container1, class Container2>
+TEST_CONSTEXPR_CXX20 void check(Container1 lhs, Container2 rhs, size_t offset) {
+ if (lhs.size() == rhs.size()) {
+ assert(std::mismatch(Iter(lhs.data()), Iter(lhs.data() + lhs.size()), Iter(rhs.data())) ==
+ std::make_pair(Iter(lhs.data() + offset), Iter(rhs.data() + offset)));
+
+ assert(std::mismatch(Iter(lhs.data()),
+ Iter(lhs.data() + lhs.size()),
+ Iter(rhs.data()),
+ std::equal_to<typename Container1::value_type>()) ==
+ std::make_pair(Iter(lhs.data() + offset), Iter(rhs.data() + offset)));
+ }
+
+#if TEST_STD_VER >= 14
+ assert(
+ std::mismatch(Iter(lhs.data()), Iter(lhs.data() + lhs.size()), Iter(rhs.data()), Iter(rhs.data() + rhs.size())) ==
+ std::make_pair(Iter(lhs.data() + offset), Iter(rhs.data() + offset)));
+
+ assert(std::mismatch(Iter(lhs.data()),
+ Iter(lhs.data() + lhs.size()),
+ Iter(rhs.data()),
+ Iter(rhs.data() + rhs.size()),
+ std::equal_to<typename Container1::value_type>()) ==
+ std::make_pair(Iter(lhs.data() + offset), Iter(rhs.data() + offset)));
#endif
+}
-int main(int, char**)
-{
- int ia[] = {0, 1, 2, 2, 0, 1, 2, 3};
- const unsigned sa = sizeof(ia)/sizeof(ia[0]);
- int ib[] = {0, 1, 2, 3, 0, 1, 2, 3};
- const unsigned sb = sizeof(ib)/sizeof(ib[0]); ((void)sb); // unused in C++11
-
- typedef cpp17_input_iterator<const int*> II;
- typedef random_access_iterator<const int*> RAI;
-
- assert(std::mismatch(II(ia), II(ia + sa), II(ib))
- == (std::pair<II, II>(II(ia+3), II(ib+3))));
-
- assert(std::mismatch(RAI(ia), RAI(ia + sa), RAI(ib))
- == (std::pair<RAI, RAI>(RAI(ia+3), RAI(ib+3))));
-
-#if TEST_STD_VER > 11 // We have the four iteration version
- assert(std::mismatch(II(ia), II(ia + sa), II(ib), II(ib+sb))
- == (std::pair<II, II>(II(ia+3), II(ib+3))));
+struct NonTrivial {
+ int i_;
+
+ TEST_CONSTEXPR_CXX20 NonTrivial(int i) : i_(i) {}
+ TEST_CONSTEXPR_CXX20 NonTrivial(NonTrivial&& other) : i_(other.i_) { other.i_ = 0; }
+
+ TEST_CONSTEXPR_CXX20 friend bool operator==(const NonTrivial& lhs, const NonTrivial& rhs) { return lhs.i_ == rhs.i_; }
+};
+
+struct ModTwoComp {
+ TEST_CONSTEXPR_CXX20 bool operator()(int lhs, int rhs) { return lhs % 2 == rhs % 2; }
+};
+
+template <class Iter>
+TEST_CONSTEXPR_CXX20 bool test() {
+ { // empty ranges
+ std::array<int, 0> lhs = {};
+ std::array<int, 0> rhs = {};
+ check<Iter>(lhs, rhs, 0);
+ }
+
+ { // same range without mismatch
+ std::array<int, 8> lhs = {0, 1, 2, 3, 0, 1, 2, 3};
+ std::array<int, 8> rhs = {0, 1, 2, 3, 0, 1, 2, 3};
+ check<Iter>(lhs, rhs, 8);
+ }
+
+ { // same range with mismatch
+ std::array<int, 8> lhs = {0, 1, 2, 2, 0, 1, 2, 3};
+ std::array<int, 8> rhs = {0, 1, 2, 3, 0, 1, 2, 3};
+ check<Iter>(lhs, rhs, 3);
+ }
+
+ { // second range is smaller
+ std::array<int, 8> lhs = {0, 1, 2, 2, 0, 1, 2, 3};
+ std::array<int, 2> rhs = {0, 1};
+ check<Iter>(lhs, rhs, 2);
+ }
+
+ { // first range is smaller
+ std::array<int, 2> lhs = {0, 1};
+ std::array<int, 8> rhs = {0, 1, 2, 2, 0, 1, 2, 3};
+ check<Iter>(lhs, rhs, 2);
+ }
+
+ { // use a custom comparator
+ std::array<int, 4> lhs = {0, 2, 3, 4};
+ std::array<int, 4> rhs = {0, 0, 4, 4};
+ assert(std::mismatch(lhs.data(), lhs.data() + lhs.size(), rhs.data(), ModTwoComp()) ==
+ std::make_pair(lhs.data() + 2, rhs.data() + 2));
+#if TEST_STD_VER >= 14
+ assert(std::mismatch(lhs.data(), lhs.data() + lhs.size(), rhs.data(), rhs.data() + rhs.size(), ModTwoComp()) ==
+ std::make_pair(lhs.data() + 2, rhs.data() + 2));
+#endif
+ }
- assert(std::mismatch(RAI(ia), RAI(ia + sa), RAI(ib), RAI(ib+sb))
- == (std::pair<RAI, RAI>(RAI(ia+3), RAI(ib+3))));
+ return true;
+}
+struct Test {
+ template <class Iter>
+ TEST_CONSTEXPR_CXX20 void operator()() {
+ test<Iter>();
+ }
+};
+
+TEST_CONSTEXPR_CXX20 bool test() {
+ types::for_each(types::cpp17_input_iterator_list<int*>(), Test());
+
+ { // use a non-integer type to also test the general case - all elements match
+ std::array<NonTrivial, 8> lhs = {1, 2, 3, 4, 5, 6, 7, 8};
+ std::array<NonTrivial, 8> rhs = {1, 2, 3, 4, 5, 6, 7, 8};
+ check<NonTrivial*>(std::move(lhs), std::move(rhs), 8);
+ }
+
+ { // use a non-integer type to also test the general case - not all elements match
+ std::array<NonTrivial, 8> lhs = {1, 2, 3, 4, 7, 6, 7, 8};
+ std::array<NonTrivial, 8> rhs = {1, 2, 3, 4, 5, 6, 7, 8};
+ check<NonTrivial*>(std::move(lhs), std::move(rhs), 4);
+ }
+
+ return true;
+}
- assert(std::mismatch(II(ia), II(ia + sa), II(ib), II(ib+2))
- == (std::pair<II, II>(II(ia+2), II(ib+2))));
+int main(int, char**) {
+ test();
+#if TEST_STD_VER >= 20
+ static_assert(test());
#endif
-#if TEST_STD_VER > 17
- static_assert(test_constexpr());
-#endif
+ { // check with a lot of elements to test the vectorization optimization
+ {
+ std::vector<char> lhs(256);
+ std::vector<char> rhs(256);
+ for (size_t i = 0; i != lhs.size(); ++i) {
+ lhs[i] = 1;
+ check<char*>(lhs, rhs, i);
+ lhs[i] = 0;
+ rhs[i] = 1;
+ check<char*>(lhs, rhs, i);
+ rhs[i] = 0;
+ }
+ }
+ {
+ std::vector<int> lhs(256);
+ std::vector<int> rhs(256);
+ for (size_t i = 0; i != lhs.size(); ++i) {
+ lhs[i] = 1;
+ check<int*>(lhs, rhs, i);
+ lhs[i] = 0;
+ rhs[i] = 1;
+ check<int*>(lhs, rhs, i);
+ rhs[i] = 0;
+ }
+ }
+ }
+
+ { // check the tail of the vectorized loop
+ for (size_t vec_size = 1; vec_size != 256; ++vec_size) {
+ {
+ std::vector<char> lhs(256);
+ std::vector<char> rhs(256);
+
+ check<char*>(lhs, rhs, lhs.size());
+ lhs.back() = 1;
+ check<char*>(lhs, rhs, lhs.size() - 1);
+ lhs.back() = 0;
+ rhs.back() = 1;
+ check<char*>(lhs, rhs, lhs.size() - 1);
+ rhs.back() = 0;
+ }
+ {
+ std::vector<int> lhs(256);
+ std::vector<int> rhs(256);
+
+ check<int*>(lhs, rhs, lhs.size());
+ lhs.back() = 1;
+ check<int*>(lhs, rhs, lhs.size() - 1);
+ lhs.back() = 0;
+ rhs.back() = 1;
+ check<int*>(lhs, rhs, lhs.size() - 1);
+ rhs.back() = 0;
+ }
+ }
+ }
return 0;
}
diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch_pred.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch_pred.pass.cpp
deleted file mode 100644
index bda4ec7ba5ed..000000000000
--- a/libcxx/test/std/algorithms/alg.nonmodifying/mismatch/mismatch_pred.pass.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-// <algorithm>
-
-// template<InputIterator Iter1, InputIterator Iter2,
-// Predicate<auto, Iter1::value_type, Iter2::value_type> Pred>
-// requires CopyConstructible<Pred>
-// constexpr pair<Iter1, Iter2> // constexpr after c++17
-// mismatch(Iter1 first1, Iter1 last1, Iter2 first2, Pred pred);
-//
-// template<InputIterator Iter1, InputIterator Iter2, Predicate Pred>
-// constexpr pair<Iter1, Iter2> // constexpr after c++17
-// mismatch(Iter1 first1, Iter1 last1, Iter2 first2, Iter2 last2, Pred pred); // C++14
-
-#include <algorithm>
-#include <functional>
-#include <cassert>
-
-#include "test_macros.h"
-#include "test_iterators.h"
-#include "counting_predicates.h"
-
-#if TEST_STD_VER > 17
-TEST_CONSTEXPR bool eq(int a, int b) { return a == b; }
-
-TEST_CONSTEXPR bool test_constexpr() {
- int ia[] = {1, 3, 6, 7};
- int ib[] = {1, 3};
- int ic[] = {1, 3, 5, 7};
- typedef cpp17_input_iterator<int*> II;
- typedef bidirectional_iterator<int*> BI;
-
- auto p1 = std::mismatch(std::begin(ia), std::end(ia), std::begin(ic), eq);
- if (p1.first != ia+2 || p1.second != ic+2)
- return false;
-
- auto p2 = std::mismatch(std::begin(ia), std::end(ia), std::begin(ic), std::end(ic), eq);
- if (p2.first != ia+2 || p2.second != ic+2)
- return false;
-
- auto p3 = std::mismatch(std::begin(ib), std::end(ib), std::begin(ic), eq);
- if (p3.first != ib+2 || p3.second != ic+2)
- return false;
-
- auto p4 = std::mismatch(std::begin(ib), std::end(ib), std::begin(ic), std::end(ic), eq);
- if (p4.first != ib+2 || p4.second != ic+2)
- return false;
-
- auto p5 = std::mismatch(II(std::begin(ib)), II(std::end(ib)), II(std::begin(ic)), eq);
- if (p5.first != II(ib+2) || p5.second != II(ic+2))
- return false;
- auto p6 = std::mismatch(BI(std::begin(ib)), BI(std::end(ib)), BI(std::begin(ic)), BI(std::end(ic)), eq);
- if (p6.first != BI(ib+2) || p6.second != BI(ic+2))
- return false;
-
- return true;
- }
-#endif
-
-
-#if TEST_STD_VER > 11
-#define HAS_FOUR_ITERATOR_VERSION
-#endif
-
-int main(int, char**)
-{
- int ia[] = {0, 1, 2, 2, 0, 1, 2, 3};
- const unsigned sa = sizeof(ia)/sizeof(ia[0]);
- int ib[] = {0, 1, 2, 3, 0, 1, 2, 3};
- const unsigned sb = sizeof(ib)/sizeof(ib[0]); ((void)sb); // unused in C++11
-
- typedef cpp17_input_iterator<const int*> II;
- typedef random_access_iterator<const int*> RAI;
- typedef std::equal_to<int> EQ;
-
- assert(std::mismatch(II(ia), II(ia + sa), II(ib), EQ())
- == (std::pair<II, II>(II(ia+3), II(ib+3))));
- assert(std::mismatch(RAI(ia), RAI(ia + sa), RAI(ib), EQ())
- == (std::pair<RAI, RAI>(RAI(ia+3), RAI(ib+3))));
-
- binary_counting_predicate<EQ, int> bcp((EQ()));
- assert(std::mismatch(RAI(ia), RAI(ia + sa), RAI(ib), std::ref(bcp))
- == (std::pair<RAI, RAI>(RAI(ia+3), RAI(ib+3))));
- assert(bcp.count() > 0 && bcp.count() < sa);
- bcp.reset();
-
-#if TEST_STD_VER >= 14
- assert(std::mismatch(II(ia), II(ia + sa), II(ib), II(ib + sb), EQ())
- == (std::pair<II, II>(II(ia+3), II(ib+3))));
- assert(std::mismatch(RAI(ia), RAI(ia + sa), RAI(ib), RAI(ib + sb), EQ())
- == (std::pair<RAI, RAI>(RAI(ia+3), RAI(ib+3))));
-
- assert(std::mismatch(II(ia), II(ia + sa), II(ib), II(ib + sb), std::ref(bcp))
- == (std::pair<II, II>(II(ia+3), II(ib+3))));
- assert(bcp.count() > 0 && bcp.count() < std::min(sa, sb));
-#endif
-
- assert(std::mismatch(ia, ia + sa, ib, EQ()) ==
- (std::pair<int*,int*>(ia+3,ib+3)));
-
-#if TEST_STD_VER >= 14
- assert(std::mismatch(ia, ia + sa, ib, ib + sb, EQ()) ==
- (std::pair<int*,int*>(ia+3,ib+3)));
- assert(std::mismatch(ia, ia + sa, ib, ib + 2, EQ()) ==
- (std::pair<int*,int*>(ia+2,ib+2)));
-#endif
-
-#if TEST_STD_VER > 17
- static_assert(test_constexpr());
-#endif
-
- return 0;
-}
diff --git a/libcxx/test/std/containers/sequences/deque/types.pass.cpp b/libcxx/test/std/containers/sequences/deque/types.pass.cpp
index bfe4808f863d..8c14de0c7744 100644
--- a/libcxx/test/std/containers/sequences/deque/types.pass.cpp
+++ b/libcxx/test/std/containers/sequences/deque/types.pass.cpp
@@ -28,9 +28,6 @@
// typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
// };
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
-
#include <deque>
#include <iterator>
#include <type_traits>
@@ -47,14 +44,22 @@ test()
typedef std::deque<T, Allocator> C;
static_assert((std::is_same<typename C::value_type, T>::value), "");
- static_assert((std::is_same<typename C::value_type, typename Allocator::value_type>::value), "");
+ static_assert(
+ (std::is_same<typename C::value_type, typename std::allocator_traits<Allocator>::value_type>::value), "");
static_assert((std::is_same<typename C::allocator_type, Allocator>::value), "");
- static_assert((std::is_same<typename C::size_type, typename Allocator::size_type>::value), "");
- static_assert((std::is_same<typename C::difference_type, typename Allocator::difference_type>::value), "");
- static_assert((std::is_same<typename C::reference, typename Allocator::reference>::value), "");
- static_assert((std::is_same<typename C::const_reference, typename Allocator::const_reference>::value), "");
- static_assert((std::is_same<typename C::pointer, typename Allocator::pointer>::value), "");
- static_assert((std::is_same<typename C::const_pointer, typename Allocator::const_pointer>::value), "");
+ static_assert(
+ (std::is_same<typename C::size_type, typename std::allocator_traits<Allocator>::size_type>::value), "");
+ static_assert(
+ (std::is_same<typename C::difference_type, typename std::allocator_traits<Allocator>::difference_type>::value),
+ "");
+ static_assert(
+ (std::is_same<typename C::reference, typename std::allocator_traits<Allocator>::value_type&>::value), "");
+ static_assert((std::is_same<typename C::const_reference,
+ const typename std::allocator_traits<Allocator>::value_type&>::value),
+ "");
+ static_assert((std::is_same<typename C::pointer, typename std::allocator_traits<Allocator>::pointer>::value), "");
+ static_assert(
+ (std::is_same<typename C::const_pointer, typename std::allocator_traits<Allocator>::const_pointer>::value), "");
static_assert((std::is_same<
typename std::iterator_traits<typename C::iterator>::iterator_category,
std::random_access_iterator_tag>::value), "");
diff --git a/libcxx/test/std/containers/sequences/list/types.pass.cpp b/libcxx/test/std/containers/sequences/list/types.pass.cpp
index 0c1ca27745ce..8fe31e3949de 100644
--- a/libcxx/test/std/containers/sequences/list/types.pass.cpp
+++ b/libcxx/test/std/containers/sequences/list/types.pass.cpp
@@ -21,9 +21,6 @@
// typedef typename allocator_type::pointer pointer;
// typedef typename allocator_type::const_pointer const_pointer;
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
-
#include <list>
#include <type_traits>
@@ -38,10 +35,12 @@ int main(int, char**)
typedef std::list<int> C;
static_assert((std::is_same<C::value_type, int>::value), "");
static_assert((std::is_same<C::allocator_type, std::allocator<int> >::value), "");
- static_assert((std::is_same<C::reference, std::allocator<int>::reference>::value), "");
- static_assert((std::is_same<C::const_reference, std::allocator<int>::const_reference>::value), "");
- static_assert((std::is_same<C::pointer, std::allocator<int>::pointer>::value), "");
- static_assert((std::is_same<C::const_pointer, std::allocator<int>::const_pointer>::value), "");
+ static_assert((std::is_same<C::reference, std::allocator_traits<std::allocator<int> >::value_type&>::value), "");
+ static_assert(
+ (std::is_same<C::const_reference, const std::allocator_traits<std::allocator<int> >::value_type&>::value), "");
+ static_assert((std::is_same<C::pointer, std::allocator_traits<std::allocator<int> >::pointer>::value), "");
+ static_assert(
+ (std::is_same<C::const_pointer, std::allocator_traits<std::allocator<int> >::const_pointer>::value), "");
static_assert((std::is_signed<typename C::difference_type>::value), "");
static_assert((std::is_unsigned<typename C::size_type>::value), "");
diff --git a/libcxx/test/std/containers/sequences/vector/types.pass.cpp b/libcxx/test/std/containers/sequences/vector/types.pass.cpp
index 4bcfbe7c3ea4..f4d7fa088842 100644
--- a/libcxx/test/std/containers/sequences/vector/types.pass.cpp
+++ b/libcxx/test/std/containers/sequences/vector/types.pass.cpp
@@ -28,9 +28,6 @@
// typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
// };
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS
-// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
-
#include <vector>
#include <iterator>
#include <type_traits>
@@ -52,14 +49,22 @@ test()
// blindly pulling typedefs out of the allocator. This is why we can't call
// test<int, min_allocator<int>>() below.
static_assert((std::is_same<typename C::value_type, T>::value), "");
- static_assert((std::is_same<typename C::value_type, typename Allocator::value_type>::value), "");
+ static_assert(
+ (std::is_same<typename C::value_type, typename std::allocator_traits<Allocator>::value_type>::value), "");
static_assert((std::is_same<typename C::allocator_type, Allocator>::value), "");
- static_assert((std::is_same<typename C::size_type, typename Allocator::size_type>::value), "");
- static_assert((std::is_same<typename C::difference_type, typename Allocator::difference_type>::value), "");
- static_assert((std::is_same<typename C::reference, typename Allocator::reference>::value), "");
- static_assert((std::is_same<typename C::const_reference, typename Allocator::const_reference>::value), "");
- static_assert((std::is_same<typename C::pointer, typename Allocator::pointer>::value), "");
- static_assert((std::is_same<typename C::const_pointer, typename Allocator::const_pointer>::value), "");
+ static_assert(
+ (std::is_same<typename C::size_type, typename std::allocator_traits<Allocator>::size_type>::value), "");
+ static_assert(
+ (std::is_same<typename C::difference_type, typename std::allocator_traits<Allocator>::difference_type>::value),
+ "");
+ static_assert(
+ (std::is_same<typename C::reference, typename std::allocator_traits<Allocator>::value_type&>::value), "");
+ static_assert((std::is_same<typename C::const_reference,
+ const typename std::allocator_traits<Allocator>::value_type&>::value),
+ "");
+ static_assert((std::is_same<typename C::pointer, typename std::allocator_traits<Allocator>::pointer>::value), "");
+ static_assert(
+ (std::is_same<typename C::const_pointer, typename std::allocator_traits<Allocator>::const_pointer>::value), "");
static_assert((std::is_signed<typename C::difference_type>::value), "");
static_assert((std::is_unsigned<typename C::size_type>::value), "");
diff --git a/libcxx/test/std/containers/sequences/vector/vector.cons/deduct.verify.cpp b/libcxx/test/std/containers/sequences/vector/vector.cons/deduct.verify.cpp
index 7ce00d70f844..2b2242e240a2 100644
--- a/libcxx/test/std/containers/sequences/vector/vector.cons/deduct.verify.cpp
+++ b/libcxx/test/std/containers/sequences/vector/vector.cons/deduct.verify.cpp
@@ -14,25 +14,20 @@
// -> vector<typename iterator_traits<InputIterator>::value_type, Allocator>;
//
-#include <deque>
-#include <iterator>
#include <cassert>
#include <cstddef>
-
-
-int main(int, char**)
-{
-// Test the explicit deduction guides
-
-// Test the implicit deduction guides
- {
-// vector (allocator &)
- std::vector vec((std::allocator<int>())); // expected-error {{no viable constructor or deduction guide for deduction of template arguments of 'vector'}}
-// Note: The extra parens are necessary, since otherwise clang decides it is a function declaration.
-// Also, we can't use {} instead of parens, because that constructs a
-// deque<allocator<int>, allocator<allocator<int>>>
- }
-
+#include <vector>
+
+int main(int, char**) {
+ // Test the explicit deduction guides
+ // TODO: Should there be tests for explicit deduction guides?
+
+ // Test the implicit deduction guides
+ {
+ // vector (allocator &)
+ // expected-error@+1 {{no viable constructor or deduction guide for deduction of template arguments of 'vector'}}
+ std::vector vec(std::allocator< int>{});
+ }
return 0;
}
diff --git a/libcxx/test/std/depr.cerro/cerrno.syn.verify.cpp b/libcxx/test/std/depr.cerro/cerrno.syn.verify.cpp
new file mode 100644
index 000000000000..3a38605570da
--- /dev/null
+++ b/libcxx/test/std/depr.cerro/cerrno.syn.verify.cpp
@@ -0,0 +1,37 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: clang-modules-build
+// UNSUPPORTED: apple-clang && c++03
+
+// <cerrno>
+
+// tests LWG 3869 deprecated macros.
+//
+// Note the macros may not be defined. When they are not defined the
+// ifdef XXX does not trigger a deprecated message. So use them in the
+// ifdef and test for 2 deprecated messages.
+
+#include <cerrno>
+
+#ifdef ENODATA
+[[maybe_unused]] int nodata =
+ ENODATA; // expected-warning@cerrno.syn.verify.cpp:* 2 {{macro 'ENODATA' has been marked as deprecated}}
+#endif
+#ifdef ENOSR
+[[maybe_unused]] int nosr =
+ ENOSR; // expected-warning@cerrno.syn.verify.cpp:* 2 {{macro 'ENOSR' has been marked as deprecated}}
+#endif
+#ifdef ENOSTR
+[[maybe_unused]] int nostr =
+ ENOSTR; // expected-warning@cerrno.syn.verify.cpp:* 2 {{macro 'ENOSTR' has been marked as deprecated}}
+#endif
+#ifdef ETIME
+[[maybe_unused]] int timeout =
+ ETIME; // expected-warning@cerrno.syn.verify.cpp:* 2 {{macro 'ETIME' has been marked as deprecated}}
+#endif
diff --git a/libcxx/test/std/depr.cerro/system.error.syn.verify.cpp b/libcxx/test/std/depr.cerro/system.error.syn.verify.cpp
new file mode 100644
index 000000000000..fab5dd5b5593
--- /dev/null
+++ b/libcxx/test/std/depr.cerro/system.error.syn.verify.cpp
@@ -0,0 +1,28 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// These macros do not seem to behave as expected on all Apple platforms.
+// Since the macros are not provided newer POSIX versions it is expected the
+// macros will be retroactively removed from C++. (The deprecation was
+// retroactively.)
+// UNSUPPORTED: apple-clang && (c++03 || clang-modules-build)
+
+// <system_error>
+
+// enum errc {...}
+
+// tests LWG 3869 deprecated enum members.
+
+#include <system_error>
+
+[[maybe_unused]] std::errc nodata =
+ std::errc::no_message_available; // expected-warning {{'no_message_available' is deprecated}}
+[[maybe_unused]] std::errc nosr =
+ std::errc::no_stream_resources; // expected-warning {{'no_stream_resources' is deprecated}}
+[[maybe_unused]] std::errc nostr = std::errc::not_a_stream; // expected-warning {{'not_a_stream' is deprecated}}
+[[maybe_unused]] std::errc timeout = std::errc::stream_timeout; // expected-warning {{'stream_timeout' is deprecated}}
diff --git a/libcxx/test/std/diagnostics/syserr/errc.pass.cpp b/libcxx/test/std/diagnostics/syserr/errc.pass.cpp
index e44cb50102e3..4abee08ddc66 100644
--- a/libcxx/test/std/diagnostics/syserr/errc.pass.cpp
+++ b/libcxx/test/std/diagnostics/syserr/errc.pass.cpp
@@ -6,6 +6,8 @@
//
//===----------------------------------------------------------------------===//
+// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
+
// <system_error>
// enum errc {...}
diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_ctor_broadcast.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_ctor_broadcast.pass.cpp
index 679cb1aa167c..8a291632a8ab 100644
--- a/libcxx/test/std/experimental/simd/simd.class/simd_ctor_broadcast.pass.cpp
+++ b/libcxx/test/std/experimental/simd/simd.class/simd_ctor_broadcast.pass.cpp
@@ -41,7 +41,7 @@ struct CheckSimdBroadcastCtorFromVectorizedType {
std::array<T, array_size> expected_value;
std::fill(expected_value.begin(), expected_value.end(), 3);
- types::for_each(arithmetic_no_bool_types(), BroadCastHelper<T, SimdAbi, array_size>(expected_value));
+ types::for_each(simd_test_types(), BroadCastHelper<T, SimdAbi, array_size>(expected_value));
}
};
@@ -110,7 +110,7 @@ template <class T, std::size_t>
struct CheckBroadcastCtorTraits {
template <class SimdAbi>
void operator()() {
- types::for_each(arithmetic_no_bool_types(), CheckBroadcastCtorTraitsHelper<T, SimdAbi>());
+ types::for_each(simd_test_types(), CheckBroadcastCtorTraitsHelper<T, SimdAbi>());
static_assert(!has_broadcast_ctor<no_implicit_type<T>, T, SimdAbi>::value);
static_assert(has_broadcast_ctor<implicit_type<T>, T, SimdAbi>::value);
diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_ctor_conversion.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_ctor_conversion.pass.cpp
index 7ce4bed9c7db..653704cd245a 100644
--- a/libcxx/test/std/experimental/simd/simd.class/simd_ctor_conversion.pass.cpp
+++ b/libcxx/test/std/experimental/simd/simd.class/simd_ctor_conversion.pass.cpp
@@ -46,7 +46,7 @@ struct CheckConversionSimdCtor {
for (size_t i = 0; i < array_size; ++i)
expected_value[i] = static_cast<T>(i);
- types::for_each(arithmetic_no_bool_types(), ConversionHelper<T, SimdAbi, array_size>(expected_value));
+ types::for_each(simd_test_types(), ConversionHelper<T, SimdAbi, array_size>(expected_value));
}
};
@@ -70,7 +70,7 @@ struct CheckConversionSimdCtorTraits {
void operator()() {
constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
- types::for_each(arithmetic_no_bool_types(), CheckConversionSimdCtorTraitsHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), CheckConversionSimdCtorTraitsHelper<T, SimdAbi, array_size>());
}
};
diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_ctor_load.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_ctor_load.pass.cpp
index 3992f3e450cb..3f663d9c9735 100644
--- a/libcxx/test/std/experimental/simd/simd.class/simd_ctor_load.pass.cpp
+++ b/libcxx/test/std/experimental/simd/simd.class/simd_ctor_load.pass.cpp
@@ -59,9 +59,9 @@ struct CheckSimdLoadCtor {
void operator()() {
constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
- types::for_each(arithmetic_no_bool_types(), ElementAlignedLoadCtorHelper<T, SimdAbi, array_size>());
- types::for_each(arithmetic_no_bool_types(), VectorAlignedLoadCtorHelper<T, SimdAbi, array_size>());
- types::for_each(arithmetic_no_bool_types(), OveralignedLoadCtorHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), ElementAlignedLoadCtorHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), VectorAlignedLoadCtorHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), OveralignedLoadCtorHelper<T, SimdAbi, array_size>());
}
};
diff --git a/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_ctor_conversion.pass.cpp b/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_ctor_conversion.pass.cpp
index dc0764f937fc..7910b2cc2522 100644
--- a/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_ctor_conversion.pass.cpp
+++ b/libcxx/test/std/experimental/simd/simd.mask.class/simd_mask_ctor_conversion.pass.cpp
@@ -42,7 +42,7 @@ struct CheckConversionMaskCtor {
constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
std::array<bool, array_size> expected_value{};
- types::for_each(arithmetic_no_bool_types(), ConversionHelper<T, SimdAbi, array_size>(expected_value));
+ types::for_each(simd_test_types(), ConversionHelper<T, SimdAbi, array_size>(expected_value));
}
};
@@ -65,7 +65,7 @@ struct CheckConversionMaskCtorTraits {
void operator()() {
constexpr std::size_t array_size = ex::simd_size_v<T, SimdAbi>;
- types::for_each(arithmetic_no_bool_types(), CheckConversionMaskCtorTraitsHelper<T, SimdAbi, array_size>());
+ types::for_each(simd_test_types(), CheckConversionMaskCtorTraitsHelper<T, SimdAbi, array_size>());
}
};
diff --git a/libcxx/test/std/experimental/simd/simd.reference/reference_assignment.pass.cpp b/libcxx/test/std/experimental/simd/simd.reference/reference_assignment.pass.cpp
index cfef55a868ba..013adb368593 100644
--- a/libcxx/test/std/experimental/simd/simd.reference/reference_assignment.pass.cpp
+++ b/libcxx/test/std/experimental/simd/simd.reference/reference_assignment.pass.cpp
@@ -72,10 +72,10 @@ template <class T, std::size_t>
struct CheckReferenceAssignment {
template <class SimdAbi>
void operator()() {
- types::for_each(arithmetic_no_bool_types(), CheckSimdReferenceAssignmentHelper<T, SimdAbi>());
- types::for_each(arithmetic_no_bool_types(), CheckMaskReferenceAssignmentHelper<T, SimdAbi>());
+ types::for_each(simd_test_types(), CheckSimdReferenceAssignmentHelper<T, SimdAbi>());
+ types::for_each(simd_test_types(), CheckMaskReferenceAssignmentHelper<T, SimdAbi>());
- types::for_each(arithmetic_no_bool_types(), CheckReferenceAssignmentTraitsHelper<T, SimdAbi>());
+ types::for_each(simd_test_types(), CheckReferenceAssignmentTraitsHelper<T, SimdAbi>());
}
};
diff --git a/libcxx/test/std/experimental/simd/test_utils.h b/libcxx/test/std/experimental/simd/test_utils.h
index b3679b51e50b..9b4416085751 100644
--- a/libcxx/test/std/experimental/simd/test_utils.h
+++ b/libcxx/test/std/experimental/simd/test_utils.h
@@ -48,6 +48,18 @@ using arithmetic_no_bool_types = types::concatenate_t<types::integer_types, type
using arithmetic_no_bool_types = types::concatenate_t<types::integer_types, types::floating_point_types>;
#endif
+// For interfaces with vectorizable type template parameters, we only use some common or boundary types
+// as template parameters for testing, to ensure that the compilation time of a single test stays bounded.
+using simd_test_types =
+ types::type_list<char,
+ unsigned,
+ int,
+#ifndef TEST_HAS_NO_INT128
+ __int128_t,
+#endif
+ float,
+ double>;
+
template <template <class T, std::size_t N> class Func>
void test_all_simd_abi() {
types::for_each(arithmetic_no_bool_types(), TestAllSimdAbiFunctor<Func>());
diff --git a/libcxx/test/std/numerics/numeric.ops/numeric.ops.sat/saturate_cast.pass.cpp b/libcxx/test/std/numerics/numeric.ops/numeric.ops.sat/saturate_cast.pass.cpp
index c06a9ed2d5cb..cbca37e3a661 100644
--- a/libcxx/test/std/numerics/numeric.ops/numeric.ops.sat/saturate_cast.pass.cpp
+++ b/libcxx/test/std/numerics/numeric.ops/numeric.ops.sat/saturate_cast.pass.cpp
@@ -329,7 +329,7 @@ constexpr bool test() {
{ [[maybe_unused]] std::same_as<unsigned long int> decltype(auto) _ = std::saturate_cast<unsigned long int>(sBigMax); }
assert(std::saturate_cast<unsigned long int>( sBigMin) == 0UL); // saturated
assert(std::saturate_cast<unsigned long int>( sZero) == 0UL);
- assert(std::saturate_cast<unsigned long int>( sBigMax) == ULONG_MAX); // saturated
+ assert(std::saturate_cast<unsigned long int>( sBigMax) == (sizeof(UIntT) > sizeof(unsigned long int) ? ULONG_MAX : LONG_MAX)); // saturated depending on underlying types
{ [[maybe_unused]] std::same_as<unsigned long int> decltype(auto) _ = std::saturate_cast<unsigned long int>(uBigMax); }
assert(std::saturate_cast<unsigned long int>( uZero) == 0UL);
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_token_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_token_pred.pass.cpp
index 4ea60557d9f8..7a39d1253a33 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_token_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_for_token_pred.pass.cpp
@@ -119,7 +119,7 @@ void test() {
bool flag = false;
auto thread = support::make_test_thread([&]() {
std::this_thread::sleep_for(2ms);
- Lock lock2{mutex};
+ std::unique_lock<Mutex> lock2{mutex};
flag = true;
cv.notify_all();
});
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_token_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_token_pred.pass.cpp
index e96a3e8bd1bc..f322d8cfdc68 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_token_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_token_pred.pass.cpp
@@ -63,7 +63,7 @@ void test() {
bool flag = false;
auto thread = support::make_test_thread([&]() {
std::this_thread::sleep_for(std::chrono::milliseconds(2));
- Lock lock2{mutex};
+ std::unique_lock<Mutex> lock2{mutex};
flag = true;
cv.notify_all();
});
diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_token_pred.pass.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_token_pred.pass.cpp
index d649db025d75..e7388b9ce0e1 100644
--- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_token_pred.pass.cpp
+++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_until_token_pred.pass.cpp
@@ -119,7 +119,7 @@ void test() {
bool flag = false;
auto thread = support::make_test_thread([&]() {
std::this_thread::sleep_for(std::chrono::milliseconds(2));
- Lock lock2{mutex};
+ std::unique_lock<Mutex> lock2{mutex};
flag = true;
cv.notify_all();
});
diff --git a/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.apply/make_from_tuple.pass.cpp b/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.apply/make_from_tuple.pass.cpp
index e3a21149c21e..d7374351afa8 100644
--- a/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.apply/make_from_tuple.pass.cpp
+++ b/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.apply/make_from_tuple.pass.cpp
@@ -195,6 +195,94 @@ void test_noexcept() {
}
}
+namespace LWG3528 {
+template <class T, class Tuple>
+auto test_make_from_tuple(T&&, Tuple&& t) -> decltype(std::make_from_tuple<T>(t), uint8_t()) {
+ return 0;
+}
+template <class T, class Tuple>
+uint32_t test_make_from_tuple(...) {
+ return 0;
+}
+
+template <class T, class Tuple>
+static constexpr bool can_make_from_tuple =
+ std::is_same_v<decltype(test_make_from_tuple<T, Tuple>(T{}, Tuple{})), uint8_t>;
+
+template <class T, class Tuple>
+auto test_make_from_tuple_impl(T&&, Tuple&& t)
+ -> decltype(std::__make_from_tuple_impl<T>(
+ t, typename std::__make_tuple_indices< std::tuple_size_v<std::remove_reference_t<Tuple>>>::type{}),
+ uint8_t()) {
+ return 0;
+}
+template <class T, class Tuple>
+uint32_t test_make_from_tuple_impl(...) {
+ return 0;
+}
+
+template <class T, class Tuple>
+static constexpr bool can_make_from_tuple_impl =
+ std::is_same_v<decltype(test_make_from_tuple_impl<T, Tuple>(T{}, Tuple{})), uint8_t>;
+
+struct A {
+ int a;
+};
+struct B : public A {};
+
+struct C {
+ C(const B&) {}
+};
+
+enum class D {
+ ONE,
+ TWO,
+};
+
+// Test std::make_from_tuple constraints.
+
+// reinterpret_cast
+static_assert(!can_make_from_tuple<int*, std::tuple<A*>>);
+static_assert(can_make_from_tuple<A*, std::tuple<A*>>);
+
+// const_cast
+static_assert(!can_make_from_tuple<char*, std::tuple<const char*>>);
+static_assert(!can_make_from_tuple<volatile char*, std::tuple<const volatile char*>>);
+static_assert(can_make_from_tuple<volatile char*, std::tuple<volatile char*>>);
+static_assert(can_make_from_tuple<char*, std::tuple<char*>>);
+static_assert(can_make_from_tuple<const char*, std::tuple<char*>>);
+static_assert(can_make_from_tuple<const volatile char*, std::tuple<volatile char*>>);
+
+// static_cast
+static_assert(!can_make_from_tuple<int, std::tuple<D>>);
+static_assert(!can_make_from_tuple<D, std::tuple<int>>);
+static_assert(can_make_from_tuple<long, std::tuple<int>>);
+static_assert(can_make_from_tuple<double, std::tuple<float>>);
+static_assert(can_make_from_tuple<float, std::tuple<double>>);
+
+// Test std::__make_from_tuple_impl constraints.
+
+// reinterpret_cast
+static_assert(!can_make_from_tuple_impl<int*, std::tuple<A*>>);
+static_assert(can_make_from_tuple_impl<A*, std::tuple<A*>>);
+
+// const_cast
+static_assert(!can_make_from_tuple_impl<char*, std::tuple<const char*>>);
+static_assert(!can_make_from_tuple_impl<volatile char*, std::tuple<const volatile char*>>);
+static_assert(can_make_from_tuple_impl<volatile char*, std::tuple<volatile char*>>);
+static_assert(can_make_from_tuple_impl<char*, std::tuple<char*>>);
+static_assert(can_make_from_tuple_impl<const char*, std::tuple<char*>>);
+static_assert(can_make_from_tuple_impl<const volatile char*, std::tuple<volatile char*>>);
+
+// static_cast
+static_assert(!can_make_from_tuple_impl<int, std::tuple<D>>);
+static_assert(!can_make_from_tuple_impl<D, std::tuple<int>>);
+static_assert(can_make_from_tuple_impl<long, std::tuple<int>>);
+static_assert(can_make_from_tuple_impl<double, std::tuple<float>>);
+static_assert(can_make_from_tuple_impl<float, std::tuple<double>>);
+
+} // namespace LWG3528
+
int main(int, char**)
{
test_constexpr_construction();
diff --git a/libcxx/test/std/utilities/variant/variant.get/get_if_index.pass.cpp b/libcxx/test/std/utilities/variant/variant.get/get_if_index.pass.cpp
index 3333d2a993ec..09e2e85abe66 100644
--- a/libcxx/test/std/utilities/variant/variant.get/get_if_index.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.get/get_if_index.pass.cpp
@@ -44,30 +44,6 @@ void test_const_get_if() {
static_assert(*std::get_if<1>(&v) == 42, "");
static_assert(std::get_if<0>(&v) == nullptr, "");
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), int *);
- assert(std::get_if<0>(&v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), int *);
- assert(std::get_if<0>(&v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), const int *);
- assert(std::get_if<0>(&v) == &x);
- }
-#endif
}
void test_get_if() {
@@ -91,37 +67,6 @@ void test_get_if() {
assert(*std::get_if<1>(&v) == 42);
assert(std::get_if<0>(&v) == nullptr);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), int *);
- assert(std::get_if<0>(&v) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), const int *);
- assert(std::get_if<0>(&v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), int *);
- assert(std::get_if<0>(&v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<0>(&v)), const int *);
- assert(std::get_if<0>(&v) == &x);
- }
-#endif
}
int main(int, char**) {
diff --git a/libcxx/test/std/utilities/variant/variant.get/get_if_type.pass.cpp b/libcxx/test/std/utilities/variant/variant.get/get_if_type.pass.cpp
index b81e000f5bf6..c4fefc74e62a 100644
--- a/libcxx/test/std/utilities/variant/variant.get/get_if_type.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.get/get_if_type.pass.cpp
@@ -42,30 +42,6 @@ void test_const_get_if() {
static_assert(*std::get_if<const long>(&v) == 42, "");
static_assert(std::get_if<int>(&v) == nullptr, "");
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get_if<int &>(&v)), int *);
- assert(std::get_if<int &>(&v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<int &&>(&v)), int *);
- assert(std::get_if<int &&>(&v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<const int &&>(&v)), const int *);
- assert(std::get_if<const int &&>(&v) == &x);
- }
-#endif
}
void test_get_if() {
@@ -89,37 +65,6 @@ void test_get_if() {
assert(*std::get_if<const long>(&v) == 42);
assert(std::get_if<int>(&v) == nullptr);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get_if<int &>(&v)), int *);
- assert(std::get_if<int &>(&v) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get_if<const int &>(&v)), const int *);
- assert(std::get_if<const int &>(&v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<int &&>(&v)), int *);
- assert(std::get_if<int &&>(&v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get_if<const int &&>(&v)), const int *);
- assert(std::get_if<const int &&>(&v) == &x);
- }
-#endif
}
int main(int, char**) {
diff --git a/libcxx/test/std/utilities/variant/variant.get/get_index.pass.cpp b/libcxx/test/std/utilities/variant/variant.get/get_index.pass.cpp
index 97c7ff0ed095..7ec9d8827f6b 100644
--- a/libcxx/test/std/utilities/variant/variant.get/get_index.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.get/get_index.pass.cpp
@@ -60,30 +60,6 @@ void test_const_lvalue_get() {
ASSERT_SAME_TYPE(decltype(std::get<1>(v)), const long &);
assert(std::get<1>(v) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), int &);
- assert(&std::get<0>(v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), int &);
- assert(&std::get<0>(v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), const int &);
- assert(&std::get<0>(v) == &x);
- }
-#endif
}
void test_lvalue_get() {
@@ -100,37 +76,6 @@ void test_lvalue_get() {
ASSERT_SAME_TYPE(decltype(std::get<1>(v)), const long &);
assert(std::get<1>(v) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), int &);
- assert(&std::get<0>(v) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), const int &);
- assert(&std::get<0>(v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), int &);
- assert(&std::get<0>(v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(v)), const int &);
- assert(&std::get<0>(v) == &x);
- }
-#endif
}
void test_rvalue_get() {
@@ -147,39 +92,6 @@ void test_rvalue_get() {
ASSERT_SAME_TYPE(decltype(std::get<1>(std::move(v))), const long &&);
assert(std::get<1>(std::move(v)) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), int &);
- assert(&std::get<0>(std::move(v)) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), const int &);
- assert(&std::get<0>(std::move(v)) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), int &&);
- int &&xref = std::get<0>(std::move(v));
- assert(&xref == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), const int &&);
- const int &&xref = std::get<0>(std::move(v));
- assert(&xref == &x);
- }
-#endif
}
void test_const_rvalue_get() {
@@ -196,39 +108,6 @@ void test_const_rvalue_get() {
ASSERT_SAME_TYPE(decltype(std::get<1>(std::move(v))), const long &&);
assert(std::get<1>(std::move(v)) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), int &);
- assert(&std::get<0>(std::move(v)) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), const int &);
- assert(&std::get<0>(std::move(v)) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), int &&);
- int &&xref = std::get<0>(std::move(v));
- assert(&xref == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<0>(std::move(v))), const int &&);
- const int &&xref = std::get<0>(std::move(v));
- assert(&xref == &x);
- }
-#endif
}
template <std::size_t I> using Idx = std::integral_constant<std::size_t, I>;
diff --git a/libcxx/test/std/utilities/variant/variant.get/get_type.pass.cpp b/libcxx/test/std/utilities/variant/variant.get/get_type.pass.cpp
index d5e54d41e2f7..3485122c98f0 100644
--- a/libcxx/test/std/utilities/variant/variant.get/get_type.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.get/get_type.pass.cpp
@@ -54,30 +54,6 @@ void test_const_lvalue_get() {
ASSERT_SAME_TYPE(decltype(std::get<const long>(v)), const long &);
assert(std::get<const long>(v) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<int &>(v)), int &);
- assert(&std::get<int &>(v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<int &&>(v)), int &);
- assert(&std::get<int &&>(v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<const int &&>(v)), const int &);
- assert(&std::get<const int &&>(v) == &x);
- }
-#endif
}
void test_lvalue_get() {
@@ -94,37 +70,6 @@ void test_lvalue_get() {
ASSERT_SAME_TYPE(decltype(std::get<const long>(v)), const long &);
assert(std::get<const long>(v) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<int &>(v)), int &);
- assert(&std::get<int &>(v) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<const int &>(v)), const int &);
- assert(&std::get<const int &>(v) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<int &&>(v)), int &);
- assert(&std::get<int &&>(v) == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<const int &&>(v)), const int &);
- assert(&std::get<const int &&>(v) == &x);
- }
-#endif
}
void test_rvalue_get() {
@@ -142,41 +87,6 @@ void test_rvalue_get() {
const long &&);
assert(std::get<const long>(std::move(v)) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<int &>(std::move(v))), int &);
- assert(&std::get<int &>(std::move(v)) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<const int &>(std::move(v))),
- const int &);
- assert(&std::get<const int &>(std::move(v)) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<int &&>(std::move(v))), int &&);
- int &&xref = std::get<int &&>(std::move(v));
- assert(&xref == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<const int &&>(std::move(v))),
- const int &&);
- const int &&xref = std::get<const int &&>(std::move(v));
- assert(&xref == &x);
- }
-#endif
}
void test_const_rvalue_get() {
@@ -194,41 +104,6 @@ void test_const_rvalue_get() {
const long &&);
assert(std::get<const long>(std::move(v)) == 42);
}
-// FIXME: Remove these once reference support is reinstated
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<int &>(std::move(v))), int &);
- assert(&std::get<int &>(std::move(v)) == &x);
- }
- {
- using V = std::variant<const int &>;
- int x = 42;
- const V v(x);
- ASSERT_SAME_TYPE(decltype(std::get<const int &>(std::move(v))),
- const int &);
- assert(&std::get<const int &>(std::move(v)) == &x);
- }
- {
- using V = std::variant<int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<int &&>(std::move(v))), int &&);
- int &&xref = std::get<int &&>(std::move(v));
- assert(&xref == &x);
- }
- {
- using V = std::variant<const int &&>;
- int x = 42;
- const V v(std::move(x));
- ASSERT_SAME_TYPE(decltype(std::get<const int &&>(std::move(v))),
- const int &&);
- const int &&xref = std::get<const int &&>(std::move(v));
- assert(&xref == &x);
- }
-#endif
}
template <class Tp> struct identity { using type = Tp; };
diff --git a/libcxx/test/std/utilities/variant/variant.helpers/variant_alternative.pass.cpp b/libcxx/test/std/utilities/variant/variant.helpers/variant_alternative.pass.cpp
index be1a0c960d1c..31b9b76213c4 100644
--- a/libcxx/test/std/utilities/variant/variant.helpers/variant_alternative.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.helpers/variant_alternative.pass.cpp
@@ -62,16 +62,6 @@ int main(int, char**) {
test<V, 2, const void *>();
test<V, 3, long double>();
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int, int &, const int &, int &&, long double>;
- test<V, 0, int>();
- test<V, 1, int &>();
- test<V, 2, const int &>();
- test<V, 3, int &&>();
- test<V, 4, long double>();
- }
-#endif
return 0;
}
diff --git a/libcxx/test/std/utilities/variant/variant.variant/variant.assign/T.pass.cpp b/libcxx/test/std/utilities/variant/variant.variant/variant.assign/T.pass.cpp
index b38b10d89dfd..4b9eaba2d2ba 100644
--- a/libcxx/test/std/utilities/variant/variant.variant/variant.assign/T.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.variant/variant.assign/T.pass.cpp
@@ -155,16 +155,6 @@ void test_T_assignment_sfinae() {
static_assert(std::is_assignable<V, Y>::value,
"regression on user-defined conversions in operator=");
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int, int &&>;
- static_assert(!std::is_assignable<V, int>::value, "ambiguous");
- }
- {
- using V = std::variant<int, const int &>;
- static_assert(!std::is_assignable<V, int>::value, "ambiguous");
- }
-#endif // TEST_VARIANT_HAS_NO_REFERENCES
}
void test_T_assignment_basic() {
@@ -204,25 +194,6 @@ void test_T_assignment_basic() {
assert(v.index() == 1);
assert(std::get<1>(v) == nullptr);
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &, int &&, long>;
- int x = 42;
- V v(43l);
- v = x;
- assert(v.index() == 0);
- assert(&std::get<0>(v) == &x);
- v = std::move(x);
- assert(v.index() == 1);
- assert(&std::get<1>(v) == &x);
- // 'long' is selected by FUN(const int &) since 'const int &' cannot bind
- // to 'int&'.
- const int &cx = x;
- v = cx;
- assert(v.index() == 2);
- assert(std::get<2>(v) == 42);
- }
-#endif // TEST_VARIANT_HAS_NO_REFERENCES
}
void test_T_assignment_performs_construction() {
diff --git a/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/T.pass.cpp b/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/T.pass.cpp
index 6b7de8888849..142da1d820d9 100644
--- a/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/T.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/T.pass.cpp
@@ -36,10 +36,17 @@ struct NoThrowT {
NoThrowT(int) noexcept(true) {}
};
-struct AnyConstructible { template <typename T> AnyConstructible(T&&) {} };
-struct NoConstructible { NoConstructible() = delete; };
+struct AnyConstructible {
+ template <typename T>
+ AnyConstructible(T&&) {}
+};
+struct NoConstructible {
+ NoConstructible() = delete;
+};
template <class T>
-struct RValueConvertibleFrom { RValueConvertibleFrom(T&&) {} };
+struct RValueConvertibleFrom {
+ RValueConvertibleFrom(T&&) {}
+};
void test_T_ctor_noexcept() {
{
@@ -59,12 +66,11 @@ void test_T_ctor_sfinae() {
}
{
using V = std::variant<std::string, std::string>;
- static_assert(!std::is_constructible<V, const char *>::value, "ambiguous");
+ static_assert(!std::is_constructible<V, const char*>::value, "ambiguous");
}
{
- using V = std::variant<std::string, void *>;
- static_assert(!std::is_constructible<V, int>::value,
- "no matching constructor");
+ using V = std::variant<std::string, void*>;
+ static_assert(!std::is_constructible<V, int>::value, "no matching constructor");
}
{
using V = std::variant<std::string, float>;
@@ -72,8 +78,7 @@ void test_T_ctor_sfinae() {
}
{
using V = std::variant<std::unique_ptr<int>, bool>;
- static_assert(!std::is_constructible<V, std::unique_ptr<char>>::value,
- "no explicit bool in constructor");
+ static_assert(!std::is_constructible<V, std::unique_ptr<char>>::value, "no explicit bool in constructor");
struct X {
operator void*();
};
@@ -86,30 +91,13 @@ void test_T_ctor_sfinae() {
operator X();
};
using V = std::variant<X>;
- static_assert(std::is_constructible<V, Y>::value,
- "regression on user-defined conversions in constructor");
+ static_assert(std::is_constructible<V, Y>::value, "regression on user-defined conversions in constructor");
}
{
using V = std::variant<AnyConstructible, NoConstructible>;
- static_assert(
- !std::is_constructible<V, std::in_place_type_t<NoConstructible>>::value,
- "no matching constructor");
- static_assert(!std::is_constructible<V, std::in_place_index_t<1>>::value,
- "no matching constructor");
- }
-
-
-
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int, int &&>;
- static_assert(!std::is_constructible<V, int>::value, "ambiguous");
+ static_assert(!std::is_constructible<V, std::in_place_type_t<NoConstructible>>::value, "no matching constructor");
+ static_assert(!std::is_constructible<V, std::in_place_index_t<1>>::value, "no matching constructor");
}
- {
- using V = std::variant<int, const int &>;
- static_assert(!std::is_constructible<V, int>::value, "ambiguous");
- }
-#endif
}
void test_T_ctor_basic() {
@@ -147,33 +135,17 @@ void test_T_ctor_basic() {
std::variant<RValueConvertibleFrom<int>> v1 = 42;
assert(v1.index() == 0);
- int x = 42;
+ int x = 42;
std::variant<RValueConvertibleFrom<int>, AnyConstructible> v2 = x;
assert(v2.index() == 1);
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<const int &, int &&, long>;
- static_assert(std::is_convertible<int &, V>::value, "must be implicit");
- int x = 42;
- V v(x);
- assert(v.index() == 0);
- assert(&std::get<0>(v) == &x);
- }
- {
- using V = std::variant<const int &, int &&, long>;
- static_assert(std::is_convertible<int, V>::value, "must be implicit");
- int x = 42;
- V v(std::move(x));
- assert(v.index() == 1);
- assert(&std::get<1>(v) == &x);
- }
-#endif
}
struct BoomOnAnything {
template <class T>
- constexpr BoomOnAnything(T) { static_assert(!std::is_same<T, T>::value, ""); }
+ constexpr BoomOnAnything(T) {
+ static_assert(!std::is_same<T, T>::value, "");
+ }
};
void test_no_narrowing_check_for_class_types() {
diff --git a/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/default.pass.cpp b/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/default.pass.cpp
index cc1a3fe8ff78..40db038a0033 100644
--- a/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/default.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.variant/variant.ctor/default.pass.cpp
@@ -44,12 +44,6 @@ void test_default_ctor_sfinae() {
using V = std::variant<NonDefaultConstructible, int>;
static_assert(!std::is_default_constructible<V>::value, "");
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int &, int>;
- static_assert(!std::is_default_constructible<V>::value, "");
- }
-#endif
}
void test_default_ctor_noexcept() {
diff --git a/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_index_args.pass.cpp b/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_index_args.pass.cpp
index 96fcd7e7bee4..2fe9033dd816 100644
--- a/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_index_args.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_index_args.pass.cpp
@@ -55,29 +55,6 @@ void test_emplace_sfinae() {
static_assert(emplace_exists<V, 2, int *>(), "");
static_assert(!emplace_exists<V, 3>(), "cannot construct");
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int, int &, const int &, int &&, TestTypes::NoCtors>;
- static_assert(emplace_exists<V, 0>(), "");
- static_assert(emplace_exists<V, 0, int>(), "");
- static_assert(emplace_exists<V, 0, long long>(), "");
- static_assert(!emplace_exists<V, 0, int, int>(), "too many args");
- static_assert(emplace_exists<V, 1, int &>(), "");
- static_assert(!emplace_exists<V, 1>(), "cannot default construct ref");
- static_assert(!emplace_exists<V, 1, const int &>(), "cannot bind ref");
- static_assert(!emplace_exists<V, 1, int &&>(), "cannot bind ref");
- static_assert(emplace_exists<V, 2, int &>(), "");
- static_assert(emplace_exists<V, 2, const int &>(), "");
- static_assert(emplace_exists<V, 2, int &&>(), "");
- static_assert(!emplace_exists<V, 2, void *>(),
- "not constructible from void*");
- static_assert(emplace_exists<V, 3, int>(), "");
- static_assert(!emplace_exists<V, 3, int &>(), "cannot bind ref");
- static_assert(!emplace_exists<V, 3, const int &>(), "cannot bind ref");
- static_assert(!emplace_exists<V, 3, const int &&>(), "cannot bind ref");
- static_assert(!emplace_exists<V, 4>(), "no ctors");
- }
-#endif
}
void test_basic() {
@@ -113,41 +90,6 @@ void test_basic() {
assert(std::get<4>(v) == "aaa");
assert(&ref3 == &std::get<4>(v));
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int, long, const int &, int &&, TestTypes::NoCtors,
- std::string>;
- const int x = 100;
- int y = 42;
- int z = 43;
- V v(std::in_place_index<0>, -1);
- // default emplace a value
- auto& ref1 = v.emplace<1>();
- static_assert(std::is_same_v<long&, decltype(ref1)>, "");
- assert(std::get<1>(v) == 0);
- assert(&ref1 == &std::get<1>(v));
- // emplace a reference
- auto& ref2 = v.emplace<2>(x);
- static_assert(std::is_same_v<const int&, decltype(ref2)>, "");
- assert(&std::get<2>(v) == &x);
- assert(&ref2 == &std::get<2>(v));
- // emplace an rvalue reference
- auto& ref3 = v.emplace<3>(std::move(y));
- static_assert(std::is_same_v<int &&, decltype(ref3)>, "");
- assert(&std::get<3>(v) == &y);
- assert(&ref3 == &std::get<3>(v));
- // re-emplace a new reference over the active member
- auto& ref4 = v.emplace<3>(std::move(z));
- static_assert(std::is_same_v<int &, decltype(ref4)>, "");
- assert(&std::get<3>(v) == &z);
- assert(&ref4 == &std::get<3>(v));
- // emplace with multiple args
- auto& ref5 = v.emplace<5>(3u, 'a');
- static_assert(std::is_same_v<std::string&, decltype(ref5)>, "");
- assert(std::get<5>(v) == "aaa");
- assert(&ref5 == &std::get<5>(v));
- }
-#endif
}
int main(int, char**) {
diff --git a/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_type_args.pass.cpp b/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_type_args.pass.cpp
index 24305aa0a35d..4e9f67775d10 100644
--- a/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_type_args.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.variant/variant.mod/emplace_type_args.pass.cpp
@@ -54,30 +54,6 @@ void test_emplace_sfinae() {
static_assert(emplace_exists<V, const void *, int *>(), "");
static_assert(!emplace_exists<V, TestTypes::NoCtors>(), "cannot construct");
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- using V = std::variant<int, int &, const int &, int &&, long, long,
- TestTypes::NoCtors>;
- static_assert(emplace_exists<V, int>(), "");
- static_assert(emplace_exists<V, int, int>(), "");
- static_assert(emplace_exists<V, int, long long>(), "");
- static_assert(!emplace_exists<V, int, int, int>(), "too many args");
- static_assert(emplace_exists<V, int &, int &>(), "");
- static_assert(!emplace_exists<V, int &>(), "cannot default construct ref");
- static_assert(!emplace_exists<V, int &, const int &>(), "cannot bind ref");
- static_assert(!emplace_exists<V, int &, int &&>(), "cannot bind ref");
- static_assert(emplace_exists<V, const int &, int &>(), "");
- static_assert(emplace_exists<V, const int &, const int &>(), "");
- static_assert(emplace_exists<V, const int &, int &&>(), "");
- static_assert(!emplace_exists<V, const int &, void *>(),
- "not constructible from void*");
- static_assert(emplace_exists<V, int &&, int>(), "");
- static_assert(!emplace_exists<V, int &&, int &>(), "cannot bind ref");
- static_assert(!emplace_exists<V, int &&, const int &>(), "cannot bind ref");
- static_assert(!emplace_exists<V, int &&, const int &&>(), "cannot bind ref");
- static_assert(!emplace_exists<V, long, long>(), "ambiguous");
- static_assert(!emplace_exists<V, TestTypes::NoCtors>(),
- "cannot construct void");
-#endif
}
void test_basic() {
@@ -113,41 +89,6 @@ void test_basic() {
assert(std::get<4>(v) == "aaa");
assert(&ref3 == &std::get<4>(v));
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- {
- using V = std::variant<int, long, const int &, int &&, TestTypes::NoCtors,
- std::string>;
- const int x = 100;
- int y = 42;
- int z = 43;
- V v(std::in_place_index<0>, -1);
- // default emplace a value
- auto& ref1 = v.emplace<long>();
- static_assert(std::is_same_v<long&, decltype(ref1)>, "");
- assert(std::get<long>(v) == 0);
- assert(&ref1 == &std::get<long>(v));
- // emplace a reference
- auto& ref2 = v.emplace<const int &>(x);
- static_assert(std::is_same_v<const int&, decltype(ref2)>, "");
- assert(&std::get<const int &>(v) == &x);
- assert(&ref2 == &std::get<const int &>(v));
- // emplace an rvalue reference
- auto& ref3 = v.emplace<int &&>(std::move(y));
- static_assert(std::is_same_v<int &&, decltype(ref3)>, "");
- assert(&std::get<int &&>(v) == &y);
- assert(&ref3 == &std::get<int &&>(v));
- // re-emplace a new reference over the active member
- auto& ref4 = v.emplace<int &&>(std::move(z));
- static_assert(std::is_same_v<int &, decltype(ref4)>, "");
- assert(&std::get<int &&>(v) == &z);
- assert(&ref4 == &std::get<int &&>(v));
- // emplace with multiple args
- auto& ref5 = v.emplace<std::string>(3u, 'a');
- static_assert(std::is_same_v<std::string&, decltype(ref5)>, "");
- assert(std::get<std::string>(v) == "aaa");
- assert(&ref5 == &std::get<std::string>(v));
- }
-#endif
}
int main(int, char**) {
diff --git a/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp
index 50e7fc81387a..d0c909985bbb 100644
--- a/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp
@@ -82,39 +82,6 @@ void test_argument_forwarding() {
std::move(cv).visit(obj);
assert(Fn::check_call<const int&&>(val));
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- { // single argument - lvalue reference
- using V = std::variant<int&>;
- int x = 42;
- V v(x);
- const V& cv = v;
-
- v.visit(obj);
- assert(Fn::check_call<int&>(val));
- cv.visit(obj);
- assert(Fn::check_call<int&>(val));
- std::move(v).visit(obj);
- assert(Fn::check_call<int&>(val));
- std::move(cv).visit(obj);
- assert(Fn::check_call<int&>(val));
- }
- { // single argument - rvalue reference
- using V = std::variant<int&&>;
- int x = 42;
- V v(std::move(x));
- const V& cv = v;
-
- v.visit(obj);
- assert(Fn::check_call<int&>(val));
- cv.visit(obj);
- assert(Fn::check_call<int&>(val));
- std::move(v).visit(obj);
- assert(Fn::check_call<int&&>(val));
- std::move(cv).visit(obj);
- assert(Fn::check_call<int&&>(val));
- }
-#endif
}
void test_return_type() {
diff --git a/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
index b005f303bc4b..3312197d8df9 100644
--- a/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
@@ -110,38 +110,6 @@ void test_argument_forwarding() {
std::move(cv).visit<ReturnType>(obj);
assert(Fn::check_call<const int&&>(val));
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- { // single argument - lvalue reference
- using V = std::variant<int&>;
- int x = 42;
- V v(x);
- const V& cv = v;
-
- v.visit<ReturnType>(obj);
- assert(Fn::check_call<int&>(val));
- cv.visit<ReturnType>(obj);
- assert(Fn::check_call<int&>(val));
- std::move(v).visit<ReturnType>(obj);
- assert(Fn::check_call<int&>(val));
- std::move(cv).visit<ReturnType>(obj);
- assert(Fn::check_call<int&>(val));
- }
- { // single argument - rvalue reference
- using V = std::variant<int&&>;
- int x = 42;
- V v(std::move(x));
- const V& cv = v;
-
- v.visit<ReturnType>(obj);
- assert(Fn::check_call<int&>(val));
- cv.visit<ReturnType>(obj);
- assert(Fn::check_call<int&>(val));
- std::move(v).visit<ReturnType>(obj);
- assert(Fn::check_call<int&&>(val));
- std::move(cv).visit<ReturnType>(obj);
- assert(Fn::check_call<int&&>(val));
- }
-#endif
}
template <typename ReturnType>
diff --git a/libcxx/test/std/utilities/variant/variant.visit/visit.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit/visit.pass.cpp
index 798ce7ded72a..0caecbe875d5 100644
--- a/libcxx/test/std/utilities/variant/variant.visit/visit.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit/visit.pass.cpp
@@ -118,36 +118,6 @@ void test_argument_forwarding() {
std::visit(obj, std::move(cv));
assert(Fn::check_call<const int &&>(Val));
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- { // single argument - lvalue reference
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- const V &cv = v;
- std::visit(obj, v);
- assert(Fn::check_call<int &>(Val));
- std::visit(obj, cv);
- assert(Fn::check_call<int &>(Val));
- std::visit(obj, std::move(v));
- assert(Fn::check_call<int &>(Val));
- std::visit(obj, std::move(cv));
- assert(Fn::check_call<int &>(Val));
- }
- { // single argument - rvalue reference
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- const V &cv = v;
- std::visit(obj, v);
- assert(Fn::check_call<int &>(Val));
- std::visit(obj, cv);
- assert(Fn::check_call<int &>(Val));
- std::visit(obj, std::move(v));
- assert(Fn::check_call<int &&>(Val));
- std::visit(obj, std::move(cv));
- assert(Fn::check_call<int &&>(Val));
- }
-#endif
{ // multi argument - multi variant
using V = std::variant<int, std::string, long>;
V v1(42), v2("hello"), v3(43l);
diff --git a/libcxx/test/std/utilities/variant/variant.visit/visit_return_type.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit/visit_return_type.pass.cpp
index b1189dff656d..d26c785c0374 100644
--- a/libcxx/test/std/utilities/variant/variant.visit/visit_return_type.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit/visit_return_type.pass.cpp
@@ -120,36 +120,6 @@ void test_argument_forwarding() {
std::visit<ReturnType>(obj, std::move(cv));
assert(Fn::check_call<const int &&>(Val));
}
-#if !defined(TEST_VARIANT_HAS_NO_REFERENCES)
- { // single argument - lvalue reference
- using V = std::variant<int &>;
- int x = 42;
- V v(x);
- const V &cv = v;
- std::visit<ReturnType>(obj, v);
- assert(Fn::check_call<int &>(Val));
- std::visit<ReturnType>(obj, cv);
- assert(Fn::check_call<int &>(Val));
- std::visit<ReturnType>(obj, std::move(v));
- assert(Fn::check_call<int &>(Val));
- std::visit<ReturnType>(obj, std::move(cv));
- assert(Fn::check_call<int &>(Val));
- }
- { // single argument - rvalue reference
- using V = std::variant<int &&>;
- int x = 42;
- V v(std::move(x));
- const V &cv = v;
- std::visit<ReturnType>(obj, v);
- assert(Fn::check_call<int &>(Val));
- std::visit<ReturnType>(obj, cv);
- assert(Fn::check_call<int &>(Val));
- std::visit<ReturnType>(obj, std::move(v));
- assert(Fn::check_call<int &&>(Val));
- std::visit<ReturnType>(obj, std::move(cv));
- assert(Fn::check_call<int &&>(Val));
- }
-#endif
{ // multi argument - multi variant
using V = std::variant<int, std::string, long>;
V v1(42), v2("hello"), v3(43l);
diff --git a/libcxx/test/support/deduction_guides_sfinae_checks.h b/libcxx/test/support/deduction_guides_sfinae_checks.h
index 8b715da5a34e..0c32b3732413 100644
--- a/libcxx/test/support/deduction_guides_sfinae_checks.h
+++ b/libcxx/test/support/deduction_guides_sfinae_checks.h
@@ -16,6 +16,7 @@
#include <memory>
#include <type_traits>
#include <utility>
+#include <vector>
#include "test_macros.h"
#if TEST_STD_VER >= 23
diff --git a/libcxx/test/support/variant_test_helpers.h b/libcxx/test/support/variant_test_helpers.h
index 345e32170e58..d1bc36dea671 100644
--- a/libcxx/test/support/variant_test_helpers.h
+++ b/libcxx/test/support/variant_test_helpers.h
@@ -21,9 +21,6 @@
#error This file requires C++17
#endif
-// FIXME: Currently the variant<T&> tests are disabled using this macro.
-#define TEST_VARIANT_HAS_NO_REFERENCES
-
#ifndef TEST_HAS_NO_EXCEPTIONS
struct CopyThrows {
CopyThrows() = default;
diff --git a/libcxx/utils/ci/Dockerfile b/libcxx/utils/ci/Dockerfile
index 178cba415933..db88da20b977 100644
--- a/libcxx/utils/ci/Dockerfile
+++ b/libcxx/utils/ci/Dockerfile
@@ -7,15 +7,13 @@
#===----------------------------------------------------------------------===##
#
# This file defines the buildkite and github actions builder images.
-# You can build & push both images using:
+# You can build both images using:
#
# docker compose build
-# docker compose push
#
-# Or you can select a single image to build & push using:
+# Or you can select a single image to build
#
# docker compose build buildkite-builder
-# docker compose push buildkite-builder
#
# The final images can be found at
#
@@ -23,7 +21,8 @@
# ghcr.io/libcxx/actions-builder
# ghcr.io/libcxx/android-buildkite-builder
#
-# Members of the github.com/libcxx/ organizations have permissions required to push new images.
+# Members of the github.com/libcxx/ organizations can push new images to the CI.
+# This is done by GitHub actions in the https://github.com/libcxx/builders repo.
#
# ===----------------------------------------------------------------------===##
# Running the buildkite image
diff --git a/libcxx/utils/ci/buildkite-pipeline.yml b/libcxx/utils/ci/buildkite-pipeline.yml
index e42262620d5f..c43e41441872 100644
--- a/libcxx/utils/ci/buildkite-pipeline.yml
+++ b/libcxx/utils/ci/buildkite-pipeline.yml
@@ -207,10 +207,10 @@ steps:
- group: ':freebsd: FreeBSD'
steps:
- label: FreeBSD 13 amd64
- command: libcxx/utils/ci/run-buildbot generic-cxx23
+ command: libcxx/utils/ci/run-buildbot generic-cxx26
env:
- CC: clang16
- CXX: clang++16
+ CC: clang17
+ CXX: clang++17
agents:
queue: libcxx-builders
os: freebsd
diff --git a/libcxx/utils/ci/oss-fuzz.sh b/libcxx/utils/ci/oss-fuzz.sh
index d5e7fcea6012..e5723406a9ff 100755
--- a/libcxx/utils/ci/oss-fuzz.sh
+++ b/libcxx/utils/ci/oss-fuzz.sh
@@ -14,7 +14,7 @@ MONOREPO_ROOT=${PWD}
mkdir ${BUILD}
cmake -S ${MONOREPO_ROOT}/runtimes -B ${BUILD} \
- -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi" \
+ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_INSTALL_PREFIX="${INSTALL}"
cmake --build ${BUILD} --target install-cxx-headers
diff --git a/libcxx/utils/generate_escaped_output_table.py b/libcxx/utils/generate_escaped_output_table.py
index 5816435c96ca..c6bde8f2411c 100755
--- a/libcxx/utils/generate_escaped_output_table.py
+++ b/libcxx/utils/generate_escaped_output_table.py
@@ -124,7 +124,7 @@ DATA_ARRAY_TEMPLATE = """
/// - bits [0, 10] The size of the range, allowing 2048 elements.
/// - bits [11, 31] The lower bound code point of the range. The upper bound of
/// the range is lower bound + size.
-inline constexpr uint32_t __entries[{size}] = {{
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[{size}] = {{
{entries}}};
/// At the end of the valid Unicode code points space a lot of code points are
diff --git a/libcxx/utils/generate_extended_grapheme_cluster_table.py b/libcxx/utils/generate_extended_grapheme_cluster_table.py
index b0074b58cd20..6a598399ce47 100755
--- a/libcxx/utils/generate_extended_grapheme_cluster_table.py
+++ b/libcxx/utils/generate_extended_grapheme_cluster_table.py
@@ -113,7 +113,7 @@ DATA_ARRAY_TEMPLATE = """
/// following benchmark.
/// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp
// clang-format off
-inline constexpr uint32_t __entries[{size}] = {{
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[{size}] = {{
{entries}}};
// clang-format on
diff --git a/libcxx/utils/generate_width_estimation_table.py b/libcxx/utils/generate_width_estimation_table.py
index 76b70f121eab..918dae25fe49 100644
--- a/libcxx/utils/generate_width_estimation_table.py
+++ b/libcxx/utils/generate_width_estimation_table.py
@@ -143,7 +143,7 @@ DATA_ARRAY_TEMPLATE = """
/// - bits [0, 13] The size of the range, allowing 16384 elements.
/// - bits [14, 31] The lower bound code point of the range. The upper bound of
/// the range is lower bound + size.
-inline constexpr uint32_t __entries[{size}] = {{
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[{size}] = {{
{entries}}};
/// The upper bound entry of EastAsianWidth.txt.
diff --git a/libcxx/utils/libcxx/test/features.py b/libcxx/utils/libcxx/test/features.py
index 307d35349f3a..0793c34fd7f0 100644
--- a/libcxx/utils/libcxx/test/features.py
+++ b/libcxx/utils/libcxx/test/features.py
@@ -280,6 +280,7 @@ DEFAULT_FEATURES = [
# This is not allowed per C11 7.1.2 Standard headers/6
# Any declaration of a library function shall have external linkage.
when=lambda cfg: "__ANDROID__" in compilerMacros(cfg)
+ or "__FreeBSD__" in compilerMacros(cfg)
or "_WIN32" in compilerMacros(cfg)
or platform.system().lower().startswith("aix")
# Avoid building on platforms that don't support modules properly.
diff --git a/libcxx/utils/libcxx/test/params.py b/libcxx/utils/libcxx/test/params.py
index 695e01115aa4..5e42562ed5db 100644
--- a/libcxx/utils/libcxx/test/params.py
+++ b/libcxx/utils/libcxx/test/params.py
@@ -407,6 +407,6 @@ DEFAULT_PARAMETERS = [
AddFeature('has-clang-tidy'),
AddSubstitution('%{clang-tidy}', exe),
]
- ),
+ ),
]
# fmt: on
diff --git a/libcxxabi/test/forced_unwind4.pass.cpp b/libcxxabi/test/forced_unwind4.pass.cpp
index 2864426ca16d..15efca8d9316 100644
--- a/libcxxabi/test/forced_unwind4.pass.cpp
+++ b/libcxxabi/test/forced_unwind4.pass.cpp
@@ -7,10 +7,10 @@
//
//===----------------------------------------------------------------------===//
-// REQUIRES: linux && target={{aarch64-.+}}
+// REQUIRES: linux && target=aarch64-{{.+}}-gnu
// pthread_cancel in case of glibc calls _Unwind_ForcedUnwind from a signal on
-// the child_thread. This test ensures sigretrun is handled correctly (see:
+// the child_thread. This test ensures sigreturn is handled correctly (see:
// UnwindCursor<A, R>::setInfoForSigReturn).
#include <cstdlib> // defines __BIONIC__
diff --git a/lld/COFF/Config.h b/lld/COFF/Config.h
index 8f85929f1bea..917f88fc2828 100644
--- a/lld/COFF/Config.h
+++ b/lld/COFF/Config.h
@@ -54,6 +54,7 @@ enum class EmitKind { Obj, LLVM, ASM };
struct Export {
StringRef name; // N in /export:N or /export:E=N
StringRef extName; // E in /export:E=N
+ StringRef exportAs; // E in /export:N,EXPORTAS,E
StringRef aliasTarget; // GNU specific: N in "alias == N"
Symbol *sym = nullptr;
uint16_t ordinal = 0;
@@ -73,10 +74,9 @@ struct Export {
StringRef exportName; // Name in DLL
bool operator==(const Export &e) const {
- return (name == e.name && extName == e.extName &&
- aliasTarget == e.aliasTarget &&
- ordinal == e.ordinal && noname == e.noname &&
- data == e.data && isPrivate == e.isPrivate);
+ return (name == e.name && extName == e.extName && exportAs == e.exportAs &&
+ aliasTarget == e.aliasTarget && ordinal == e.ordinal &&
+ noname == e.noname && data == e.data && isPrivate == e.isPrivate);
}
};
diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp
index 1b075389325a..2b1d4abb6ed0 100644
--- a/lld/COFF/Driver.cpp
+++ b/lld/COFF/Driver.cpp
@@ -945,6 +945,7 @@ void LinkerDriver::createImportLibrary(bool asLib) {
e2.Name = std::string(e1.name);
e2.SymbolName = std::string(e1.symbolName);
e2.ExtName = std::string(e1.extName);
+ e2.ExportAs = std::string(e1.exportAs);
e2.AliasTarget = std::string(e1.aliasTarget);
e2.Ordinal = e1.ordinal;
e2.Noname = e1.noname;
@@ -1032,19 +1033,19 @@ void LinkerDriver::parseModuleDefs(StringRef path) {
for (COFFShortExport e1 : m.Exports) {
Export e2;
- // In simple cases, only Name is set. Renamed exports are parsed
- // and set as "ExtName = Name". If Name has the form "OtherDll.Func",
- // it shouldn't be a normal exported function but a forward to another
- // DLL instead. This is supported by both MS and GNU linkers.
+ // Renamed exports are parsed and set as "ExtName = Name". If Name has
+ // the form "OtherDll.Func", it shouldn't be a normal exported
+ // function but a forward to another DLL instead. This is supported
+ // by both MS and GNU linkers.
if (!e1.ExtName.empty() && e1.ExtName != e1.Name &&
StringRef(e1.Name).contains('.')) {
e2.name = saver().save(e1.ExtName);
e2.forwardTo = saver().save(e1.Name);
- ctx.config.exports.push_back(e2);
- continue;
+ } else {
+ e2.name = saver().save(e1.Name);
+ e2.extName = saver().save(e1.ExtName);
}
- e2.name = saver().save(e1.Name);
- e2.extName = saver().save(e1.ExtName);
+ e2.exportAs = saver().save(e1.ExportAs);
e2.aliasTarget = saver().save(e1.AliasTarget);
e2.ordinal = e1.Ordinal;
e2.noname = e1.Noname;
diff --git a/lld/COFF/DriverUtils.cpp b/lld/COFF/DriverUtils.cpp
index fc8eb327be49..b4ff31a606da 100644
--- a/lld/COFF/DriverUtils.cpp
+++ b/lld/COFF/DriverUtils.cpp
@@ -577,16 +577,16 @@ Export LinkerDriver::parseExport(StringRef arg) {
if (y.contains(".")) {
e.name = x;
e.forwardTo = y;
- return e;
+ } else {
+ e.extName = x;
+ e.name = y;
+ if (e.name.empty())
+ goto err;
}
-
- e.extName = x;
- e.name = y;
- if (e.name.empty())
- goto err;
}
- // If "<name>=<internalname>[,@ordinal[,NONAME]][,DATA][,PRIVATE]"
+ // Optional parameters
+ // "[,@ordinal[,NONAME]][,DATA][,PRIVATE][,EXPORTAS,exportname]"
while (!rest.empty()) {
StringRef tok;
std::tie(tok, rest) = rest.split(",");
@@ -608,6 +608,13 @@ Export LinkerDriver::parseExport(StringRef arg) {
e.isPrivate = true;
continue;
}
+ if (tok.equals_insensitive("exportas")) {
+ if (!rest.empty() && !rest.contains(','))
+ e.exportAs = rest;
+ else
+ error("invalid EXPORTAS value: " + rest);
+ break;
+ }
if (tok.starts_with("@")) {
int32_t ord;
if (tok.substr(1).getAsInteger(0, ord))
@@ -684,7 +691,9 @@ void LinkerDriver::fixupExports() {
}
for (Export &e : ctx.config.exports) {
- if (!e.forwardTo.empty()) {
+ if (!e.exportAs.empty()) {
+ e.exportName = e.exportAs;
+ } else if (!e.forwardTo.empty()) {
e.exportName = undecorate(ctx, e.name);
} else {
e.exportName = undecorate(ctx, e.extName.empty() ? e.name : e.extName);
diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index b02ad10649d9..e36e9d59a740 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -380,7 +380,7 @@ bool MIPS<ELFT>::needsThunk(RelExpr expr, RelType type, const InputFile *file,
template <class ELFT>
int64_t MIPS<ELFT>::getImplicitAddend(const uint8_t *buf, RelType type) const {
- const endianness e = ELFT::TargetEndianness;
+ const endianness e = ELFT::Endianness;
switch (type) {
case R_MIPS_32:
case R_MIPS_REL32:
@@ -521,7 +521,7 @@ static uint64_t fixupCrossModeJump(uint8_t *loc, RelType type, uint64_t val) {
// to a microMIPS target and vice versa. In that cases jump
// instructions need to be replaced by their "cross-mode"
// equivalents.
- const endianness e = ELFT::TargetEndianness;
+ const endianness e = ELFT::Endianness;
bool isMicroTgt = val & 0x1;
bool isCrossJump = (isMicroTgt && isBranchReloc(type)) ||
(!isMicroTgt && isMicroBranchReloc(type));
@@ -567,7 +567,7 @@ static uint64_t fixupCrossModeJump(uint8_t *loc, RelType type, uint64_t val) {
template <class ELFT>
void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
uint64_t val) const {
- const endianness e = ELFT::TargetEndianness;
+ const endianness e = ELFT::Endianness;
RelType type = rel.type;
if (ELFT::Is64Bits || config->mipsN32Abi)
diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index de459013595f..a85bf3aa0c09 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -328,9 +328,10 @@ bool X86_64::relaxOnce(int pass) const {
if (rel.expr != R_RELAX_GOT_PC)
continue;
- uint64_t v = sec->getRelocTargetVA(
- sec->file, rel.type, rel.addend,
- sec->getOutputSection()->addr + rel.offset, *rel.sym, rel.expr);
+ uint64_t v = sec->getRelocTargetVA(sec->file, rel.type, rel.addend,
+ sec->getOutputSection()->addr +
+ sec->outSecOff + rel.offset,
+ *rel.sym, rel.expr);
if (isInt<32>(v))
continue;
if (rel.sym->auxIdx == 0) {
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index bb3608da80b2..27274d69b7f1 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -138,7 +138,7 @@ private:
std::unique_ptr<BitcodeCompiler> lto;
std::vector<InputFile *> files;
- std::optional<InputFile *> armCmseImpLib;
+ InputFile *armCmseImpLib = nullptr;
public:
SmallVector<std::pair<StringRef, unsigned>, 0> archiveFiles;
diff --git a/lld/ELF/DWARF.h b/lld/ELF/DWARF.h
index 1b9a3e3f7794..d56895277bcc 100644
--- a/lld/ELF/DWARF.h
+++ b/lld/ELF/DWARF.h
@@ -74,7 +74,7 @@ public:
StringRef getLineStrSection() const override { return lineStrSection; }
bool isLittleEndian() const override {
- return ELFT::TargetEndianness == llvm::endianness::little;
+ return ELFT::Endianness == llvm::endianness::little;
}
std::optional<llvm::RelocAddrEntry> find(const llvm::DWARFSection &sec,
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 7257ebd0fac9..b43da7727e22 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -2114,8 +2114,11 @@ static void handleUndefinedGlob(StringRef arg) {
static void handleLibcall(StringRef name) {
Symbol *sym = symtab.find(name);
- if (sym && sym->isLazy() && isa<BitcodeFile>(sym->file))
+ if (sym && sym->isLazy() && isa<BitcodeFile>(sym->file)) {
+ if (!config->whyExtract.empty())
+ ctx.whyExtractRecords.emplace_back("<libcall>", sym->file, *sym);
sym->extract();
+ }
}
static void writeArchiveStats() {
@@ -2393,12 +2396,6 @@ static void readSymbolPartitionSection(InputSectionBase *s) {
sym->partition = newPart.getNumber();
}
-static Symbol *addUnusedUndefined(StringRef name,
- uint8_t binding = STB_GLOBAL) {
- return symtab.addSymbol(
- Undefined{ctx.internalFile, name, binding, STV_DEFAULT, 0});
-}
-
static void markBuffersAsDontNeed(bool skipLinkedOutput) {
// With --thinlto-index-only, all buffers are nearly unused from now on
// (except symbol/section names used by infrequent passes). Mark input file
@@ -2485,15 +2482,15 @@ static std::vector<WrappedSymbol> addWrappedSymbols(opt::InputArgList &args) {
continue;
Symbol *wrap =
- addUnusedUndefined(saver().save("__wrap_" + name), sym->binding);
+ symtab.addUnusedUndefined(saver().save("__wrap_" + name), sym->binding);
// If __real_ is referenced, pull in the symbol if it is lazy. Do this after
// processing __wrap_ as that may have referenced __real_.
StringRef realName = saver().save("__real_" + name);
if (symtab.find(realName))
- addUnusedUndefined(name, sym->binding);
+ symtab.addUnusedUndefined(name, sym->binding);
- Symbol *real = addUnusedUndefined(realName);
+ Symbol *real = symtab.addUnusedUndefined(realName);
v.push_back({sym, real, wrap});
// We want to tell LTO not to inline symbols to be overwritten
@@ -2723,38 +2720,14 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// Handle -u/--undefined before input files. If both a.a and b.so define foo,
// -u foo a.a b.so will extract a.a.
for (StringRef name : config->undefined)
- addUnusedUndefined(name)->referenced = true;
+ symtab.addUnusedUndefined(name)->referenced = true;
- // Add all files to the symbol table. This will add almost all
- // symbols that we need to the symbol table. This process might
- // add files to the link, via autolinking, these files are always
- // appended to the Files vector.
- {
- llvm::TimeTraceScope timeScope("Parse input files");
- for (size_t i = 0; i < files.size(); ++i) {
- llvm::TimeTraceScope timeScope("Parse input files", files[i]->getName());
- doParseFile<ELFT>(files[i]);
- }
- if (armCmseImpLib)
- parseArmCMSEImportLib(*armCmseImpLib);
- }
-
- // Now that we have every file, we can decide if we will need a
- // dynamic symbol table.
- // We need one if we were asked to export dynamic symbols or if we are
- // producing a shared library.
- // We also need one if any shared libraries are used and for pie executables
- // (probably because the dynamic linker needs it).
- config->hasDynSymTab =
- !ctx.sharedFiles.empty() || config->isPic || config->exportDynamic;
-
- // Some symbols (such as __ehdr_start) are defined lazily only when there
- // are undefined symbols for them, so we add these to trigger that logic.
- for (StringRef name : script->referencedSymbols) {
- Symbol *sym = addUnusedUndefined(name);
- sym->isUsedInRegularObj = true;
- sym->referenced = true;
- }
+ parseFiles(files, armCmseImpLib);
+
+ // Create dynamic sections for dynamic linking and static PIE.
+ config->hasDynSymTab = !ctx.sharedFiles.empty() || config->isPic;
+
+ script->addScriptReferencedSymbolsToSymTable();
// Prevent LTO from removing any definition referenced by -u.
for (StringRef name : config->undefined)
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 4c614c865d24..6529ea072fae 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -288,7 +288,7 @@ static bool isCompatible(InputFile *file) {
return false;
}
-template <class ELFT> void elf::doParseFile(InputFile *file) {
+template <class ELFT> static void doParseFile(InputFile *file) {
if (!isCompatible(file))
return;
@@ -330,12 +330,24 @@ extern template void ObjFile<ELF32BE>::importCmseSymbols();
extern template void ObjFile<ELF64LE>::importCmseSymbols();
extern template void ObjFile<ELF64BE>::importCmseSymbols();
-template <class ELFT> static void doParseArmCMSEImportLib(InputFile *file) {
- cast<ObjFile<ELFT>>(file)->importCmseSymbols();
+template <class ELFT>
+static void doParseFiles(const std::vector<InputFile *> &files,
+ InputFile *armCmseImpLib) {
+ // Add all files to the symbol table. This will add almost all symbols that we
+ // need to the symbol table. This process might add files to the link due to
+ // addDependentLibrary.
+ for (size_t i = 0; i < files.size(); ++i) {
+ llvm::TimeTraceScope timeScope("Parse input files", files[i]->getName());
+ doParseFile<ELFT>(files[i]);
+ }
+ if (armCmseImpLib)
+ cast<ObjFile<ELFT>>(*armCmseImpLib).importCmseSymbols();
}
-void elf::parseArmCMSEImportLib(InputFile *file) {
- invokeELFT(doParseArmCMSEImportLib, file);
+void elf::parseFiles(const std::vector<InputFile *> &files,
+ InputFile *armCmseImpLib) {
+ llvm::TimeTraceScope timeScope("Parse input files");
+ invokeELFT(doParseFiles, files, armCmseImpLib);
}
// Concatenates arguments to construct a string representing an error location.
@@ -959,8 +971,8 @@ template <class ELFT> static uint32_t readAndFeatures(const InputSection &sec) {
const uint8_t *place = desc.data();
if (desc.size() < 8)
reportFatal(place, "program property is too short");
- uint32_t type = read32<ELFT::TargetEndianness>(desc.data());
- uint32_t size = read32<ELFT::TargetEndianness>(desc.data() + 4);
+ uint32_t type = read32<ELFT::Endianness>(desc.data());
+ uint32_t size = read32<ELFT::Endianness>(desc.data() + 4);
desc = desc.slice(8);
if (desc.size() < size)
reportFatal(place, "program property is too short");
@@ -971,7 +983,7 @@ template <class ELFT> static uint32_t readAndFeatures(const InputSection &sec) {
// accumulate the bits set.
if (size < 4)
reportFatal(place, "FEATURE_1_AND entry is too short");
- featuresSet |= read32<ELFT::TargetEndianness>(desc.data());
+ featuresSet |= read32<ELFT::Endianness>(desc.data());
}
// Padding is present in the note descriptor, if necessary.
@@ -1545,7 +1557,7 @@ template <class ELFT> void SharedFile::parse() {
Symbol *s = symtab.addSymbol(
Undefined{this, name, sym.getBinding(), sym.st_other, sym.getType()});
s->exportDynamic = true;
- if (s->isUndefined() && sym.getBinding() != STB_WEAK &&
+ if (sym.getBinding() != STB_WEAK &&
config->unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore)
requiredSymbols.push_back(s);
continue;
diff --git a/lld/ELF/InputFiles.h b/lld/ELF/InputFiles.h
index 3beb5a3cb9a8..95197599a2e1 100644
--- a/lld/ELF/InputFiles.h
+++ b/lld/ELF/InputFiles.h
@@ -46,10 +46,9 @@ extern std::unique_ptr<llvm::TarWriter> tar;
std::optional<MemoryBufferRef> readFile(StringRef path);
// Add symbols in File to the symbol table.
-template <class ELFT> void doParseFile(InputFile *file);
void parseFile(InputFile *file);
-
-void parseArmCMSEImportLib(InputFile *file);
+void parseFiles(const std::vector<InputFile *> &files,
+ InputFile *armCmseImpLib);
// The root class of input files.
class InputFile {
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index c34bf08757b1..4f88313b868b 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -1258,10 +1258,10 @@ void EhInputSection::split(ArrayRef<RelTy> rels) {
msg = "CIE/FDE too small";
break;
}
- uint64_t size = endian::read32<ELFT::TargetEndianness>(d.data());
+ uint64_t size = endian::read32<ELFT::Endianness>(d.data());
if (size == 0) // ZERO terminator
break;
- uint32_t id = endian::read32<ELFT::TargetEndianness>(d.data() + 4);
+ uint32_t id = endian::read32<ELFT::Endianness>(d.data() + 4);
size += 4;
if (LLVM_UNLIKELY(size > d.size())) {
// If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp
index 3af09a32b651..f815b3ac6fee 100644
--- a/lld/ELF/LinkerScript.cpp
+++ b/lld/ELF/LinkerScript.cpp
@@ -198,15 +198,7 @@ static bool shouldDefineSym(SymbolAssignment *cmd) {
if (cmd->name == ".")
return false;
- if (!cmd->provide)
- return true;
-
- // If a symbol was in PROVIDE(), we need to define it only
- // when it is a referenced undefined symbol.
- Symbol *b = symtab.find(cmd->name);
- if (b && !b->isDefined() && !b->isCommon())
- return true;
- return false;
+ return !cmd->provide || LinkerScript::shouldAddProvideSym(cmd->name);
}
// Called by processSymbolAssignments() to assign definitions to
@@ -1517,3 +1509,41 @@ void LinkerScript::checkFinalScriptConditions() const {
checkMemoryRegion(lmaRegion, sec, sec->getLMA());
}
}
+
+void LinkerScript::addScriptReferencedSymbolsToSymTable() {
+ // Some symbols (such as __ehdr_start) are defined lazily only when there
+ // are undefined symbols for them, so we add these to trigger that logic.
+ auto reference = [](StringRef name) {
+ Symbol *sym = symtab.addUnusedUndefined(name);
+ sym->isUsedInRegularObj = true;
+ sym->referenced = true;
+ };
+ for (StringRef name : referencedSymbols)
+ reference(name);
+
+ // Keeps track of references from which PROVIDE symbols have been added to the
+ // symbol table.
+ DenseSet<StringRef> added;
+ SmallVector<const SmallVector<StringRef, 0> *, 0> symRefsVec;
+ for (const auto &[name, symRefs] : provideMap)
+ if (LinkerScript::shouldAddProvideSym(name) && added.insert(name).second)
+ symRefsVec.push_back(&symRefs);
+ while (symRefsVec.size()) {
+ for (StringRef name : *symRefsVec.pop_back_val()) {
+ reference(name);
+ // Prevent the symbol from being discarded by --gc-sections.
+ script->referencedSymbols.push_back(name);
+ auto it = script->provideMap.find(name);
+ if (it != script->provideMap.end() &&
+ LinkerScript::shouldAddProvideSym(name) &&
+ added.insert(name).second) {
+ symRefsVec.push_back(&it->second);
+ }
+ }
+ }
+}
+
+bool LinkerScript::shouldAddProvideSym(StringRef symName) {
+ Symbol *sym = symtab.find(symName);
+ return sym && !sym->isDefined() && !sym->isCommon();
+}
diff --git a/lld/ELF/LinkerScript.h b/lld/ELF/LinkerScript.h
index 18eaf58b785e..fa7c6eb9c0d8 100644
--- a/lld/ELF/LinkerScript.h
+++ b/lld/ELF/LinkerScript.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include <cstddef>
@@ -348,6 +349,18 @@ public:
// Check backward location counter assignment and memory region/LMA overflows.
void checkFinalScriptConditions() const;
+ // Add symbols that are referenced in the linker script to the symbol table.
+ // Symbols referenced in a PROVIDE command are only added to the symbol table
+ // if the PROVIDE command actually provides the symbol.
+ // It also adds the symbols referenced by the used PROVIDE symbols to the
+ // linker script referenced symbols list.
+ void addScriptReferencedSymbolsToSymTable();
+
+ // Returns true if the PROVIDE symbol should be added to the link.
+ // A PROVIDE symbol is added to the link only if it satisfies an
+ // undefined reference.
+ static bool shouldAddProvideSym(StringRef symName);
+
// SECTIONS command list.
SmallVector<SectionCommand *, 0> sectionCommands;
@@ -379,6 +392,14 @@ public:
// Sections that will be warned/errored by --orphan-handling.
SmallVector<const InputSectionBase *, 0> orphanSections;
+
+ // Stores the mapping: PROVIDE symbol -> symbols referred in the PROVIDE
+ // expression. For example, if the PROVIDE command is:
+ //
+ // PROVIDE(v = a + b + c);
+ //
+ // then provideMap should contain the mapping: 'v' -> ['a', 'b', 'c']
+ llvm::MapVector<StringRef, SmallVector<StringRef, 0>> provideMap;
};
LLVM_LIBRARY_VISIBILITY extern std::unique_ptr<LinkerScript> script;
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index 619fbaf5dc54..92f2e200db11 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -1480,7 +1480,10 @@ template <class ELFT, class RelTy> void RelocationScanner::scanOne(RelTy *&i) {
// Process TLS relocations, including TLS optimizations. Note that
// R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
- if (sym.isTls()) {
+ //
+ // Some RISCV TLSDESC relocations reference a local NOTYPE symbol,
+ // but we need to process them in handleTlsRelocation.
+ if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) {
if (unsigned processed =
handleTlsRelocation(type, sym, *sec, offset, addend, expr)) {
i += processed - 1;
@@ -1617,12 +1620,8 @@ static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
// relatively straightforward. We create a PLT entry in Iplt, which is
// usually at the end of .plt, which makes an indirect call using a
// matching GOT entry in igotPlt, which is usually at the end of .got.plt.
- // The GOT entry is relocated using an IRELATIVE relocation in relaIplt,
- // which is usually at the end of .rela.plt. Unlike most relocations in
- // .rela.plt, which may be evaluated lazily without -z now, dynamic
- // loaders evaluate IRELATIVE relocs eagerly, which means that for
- // IRELATIVE relocs only, GOT-generating relocations can point directly to
- // .got.plt without requiring a separate GOT entry.
+ // The GOT entry is relocated using an IRELATIVE relocation in relaDyn,
+ // which is usually at the end of .rela.dyn.
//
// - Despite the fact that an ifunc does not have a fixed value, compilers
// that are not passed -fPIC will assume that they do, and will emit
@@ -1660,10 +1659,17 @@ static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
// original section/value pairs. For non-GOT non-PLT relocation case below, we
// may alter section/value, so create a copy of the symbol to make
// section/value fixed.
+ //
+ // Prior to Android V, there was a bug that caused RELR relocations to be
+ // applied after packed relocations. This meant that resolvers referenced by
+ // IRELATIVE relocations in the packed relocation section would read
+ // unrelocated globals with RELR relocations when
+ // --pack-relative-relocs=android+relr is enabled. Work around this by placing
+ // IRELATIVE in .rela.plt.
auto *directSym = makeDefined(cast<Defined>(sym));
directSym->allocateAux();
- addPltEntry(*in.iplt, *in.igotPlt, *in.relaIplt, target->iRelativeRel,
- *directSym);
+ auto &dyn = config->androidPackDynRelocs ? *in.relaPlt : *mainPart->relaDyn;
+ addPltEntry(*in.iplt, *in.igotPlt, dyn, target->iRelativeRel, *directSym);
sym.allocateAux();
symAux.back().pltIdx = symAux[directSym->auxIdx].pltIdx;
diff --git a/lld/ELF/ScriptParser.cpp b/lld/ELF/ScriptParser.cpp
index 3bb1de99480f..f90ce6fa7407 100644
--- a/lld/ELF/ScriptParser.cpp
+++ b/lld/ELF/ScriptParser.cpp
@@ -36,6 +36,7 @@
#include "llvm/Support/TimeProfiler.h"
#include <cassert>
#include <limits>
+#include <optional>
#include <vector>
using namespace llvm;
@@ -138,6 +139,10 @@ private:
// A set to detect an INCLUDE() cycle.
StringSet<> seen;
+
+ // If we are currently parsing a PROVIDE|PROVIDE_HIDDEN command,
+ // then this member is set to the PROVIDE symbol name.
+ std::optional<llvm::StringRef> activeProvideSym;
};
} // namespace
@@ -1055,6 +1060,9 @@ SymbolAssignment *ScriptParser::readProvideHidden(bool provide, bool hidden) {
;
return nullptr;
}
+ llvm::SaveAndRestore saveActiveProvideSym(activeProvideSym);
+ if (provide)
+ activeProvideSym = name;
SymbolAssignment *cmd = readSymbolAssignment(name);
cmd->provide = provide;
cmd->hidden = hidden;
@@ -1570,7 +1578,10 @@ Expr ScriptParser::readPrimary() {
tok = unquote(tok);
else if (!isValidSymbolName(tok))
setError("malformed number: " + tok);
- script->referencedSymbols.push_back(tok);
+ if (activeProvideSym)
+ script->provideMap[*activeProvideSym].push_back(tok);
+ else
+ script->referencedSymbols.push_back(tok);
return [=] { return script->getSymbolValue(tok, location); };
}
diff --git a/lld/ELF/SymbolTable.cpp b/lld/ELF/SymbolTable.cpp
index b3d97e4de779..258a78ab40bb 100644
--- a/lld/ELF/SymbolTable.cpp
+++ b/lld/ELF/SymbolTable.cpp
@@ -333,3 +333,7 @@ void SymbolTable::scanVersionScript() {
// --dynamic-list.
handleDynamicList();
}
+
+Symbol *SymbolTable::addUnusedUndefined(StringRef name, uint8_t binding) {
+ return addSymbol(Undefined{ctx.internalFile, name, binding, STV_DEFAULT, 0});
+}
diff --git a/lld/ELF/SymbolTable.h b/lld/ELF/SymbolTable.h
index 37e31d237296..269f7f284bc7 100644
--- a/lld/ELF/SymbolTable.h
+++ b/lld/ELF/SymbolTable.h
@@ -57,6 +57,9 @@ public:
void handleDynamicList();
+ Symbol *addUnusedUndefined(StringRef name,
+ uint8_t binding = llvm::ELF::STB_GLOBAL);
+
// Set of .so files to not link the same shared object file more than once.
llvm::DenseMap<llvm::CachedHashStringRef, SharedFile *> soNames;
diff --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp
index cd2b9e22ab32..93653def328f 100644
--- a/lld/ELF/Symbols.cpp
+++ b/lld/ELF/Symbols.cpp
@@ -539,8 +539,8 @@ void elf::reportDuplicate(const Symbol &sym, const InputFile *newFile,
if (!d->section && !errSec && errOffset && d->value == errOffset)
return;
if (!d->section || !errSec) {
- error("duplicate symbol: " + toString(sym) + "\n>>> defined in " +
- toString(sym.file) + "\n>>> defined in " + toString(newFile));
+ errorOrWarn("duplicate symbol: " + toString(sym) + "\n>>> defined in " +
+ toString(sym.file) + "\n>>> defined in " + toString(newFile));
return;
}
@@ -564,7 +564,7 @@ void elf::reportDuplicate(const Symbol &sym, const InputFile *newFile,
if (!src2.empty())
msg += src2 + "\n>>> ";
msg += obj2;
- error(msg);
+ errorOrWarn(msg);
}
void Symbol::checkDuplicate(const Defined &other) const {
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 248ff6b4a865..8708bfeef8fa 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -415,7 +415,7 @@ void EhFrameSection::addRecords(EhInputSection *sec, ArrayRef<RelTy> rels) {
for (EhSectionPiece &cie : sec->cies)
offsetToCie[cie.inputOff] = addCie<ELFT>(cie, rels);
for (EhSectionPiece &fde : sec->fdes) {
- uint32_t id = endian::read32<ELFT::TargetEndianness>(fde.data().data() + 4);
+ uint32_t id = endian::read32<ELFT::Endianness>(fde.data().data() + 4);
CieRecord *rec = offsetToCie[fde.inputOff + 4 - id];
if (!rec)
fatal(toString(sec) + ": invalid CIE reference");
@@ -448,7 +448,7 @@ void EhFrameSection::iterateFDEWithLSDAAux(
if (hasLSDA(cie))
ciesWithLSDA.insert(cie.inputOff);
for (EhSectionPiece &fde : sec.fdes) {
- uint32_t id = endian::read32<ELFT::TargetEndianness>(fde.data().data() + 4);
+ uint32_t id = endian::read32<ELFT::Endianness>(fde.data().data() + 4);
if (!ciesWithLSDA.contains(fde.inputOff + 4 - id))
continue;
@@ -628,19 +628,19 @@ GotSection::GotSection()
}
void GotSection::addConstant(const Relocation &r) { relocations.push_back(r); }
-void GotSection::addEntry(Symbol &sym) {
+void GotSection::addEntry(const Symbol &sym) {
assert(sym.auxIdx == symAux.size() - 1);
symAux.back().gotIdx = numEntries++;
}
-bool GotSection::addTlsDescEntry(Symbol &sym) {
+bool GotSection::addTlsDescEntry(const Symbol &sym) {
assert(sym.auxIdx == symAux.size() - 1);
symAux.back().tlsDescIdx = numEntries;
numEntries += 2;
return true;
}
-bool GotSection::addDynTlsEntry(Symbol &sym) {
+bool GotSection::addDynTlsEntry(const Symbol &sym) {
assert(sym.auxIdx == symAux.size() - 1);
symAux.back().tlsGdIdx = numEntries;
// Global Dynamic TLS entries take two GOT slots.
@@ -1267,15 +1267,12 @@ DynamicSection<ELFT>::DynamicSection()
// The output section .rela.dyn may include these synthetic sections:
//
// - part.relaDyn
-// - in.relaIplt: this is included if in.relaIplt is named .rela.dyn
// - in.relaPlt: this is included if a linker script places .rela.plt inside
// .rela.dyn
//
// DT_RELASZ is the total size of the included sections.
static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {
size_t size = relaDyn.getSize();
- if (in.relaIplt->getParent() == relaDyn.getParent())
- size += in.relaIplt->getSize();
if (in.relaPlt->getParent() == relaDyn.getParent())
size += in.relaPlt->getSize();
return size;
@@ -1285,13 +1282,7 @@ static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {
// output section. When this occurs we cannot just use the OutputSection
// Size. Moreover the [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted to
// overlap with the [DT_RELA, DT_RELA + DT_RELASZ).
-static uint64_t addPltRelSz() {
- size_t size = in.relaPlt->getSize();
- if (in.relaIplt->getParent() == in.relaPlt->getParent() &&
- in.relaIplt->name == in.relaPlt->name)
- size += in.relaIplt->getSize();
- return size;
-}
+static uint64_t addPltRelSz() { return in.relaPlt->getSize(); }
// Add remaining entries to complete .dynamic contents.
template <class ELFT>
@@ -1378,9 +1369,7 @@ DynamicSection<ELFT>::computeContents() {
if (!config->shared && !config->relocatable && !config->zRodynamic)
addInt(DT_DEBUG, 0);
- if (part.relaDyn->isNeeded() ||
- (in.relaIplt->isNeeded() &&
- part.relaDyn->getParent() == in.relaIplt->getParent())) {
+ if (part.relaDyn->isNeeded()) {
addInSec(part.relaDyn->dynamicTag, *part.relaDyn);
entries.emplace_back(part.relaDyn->sizeDynamicTag,
addRelaSz(*part.relaDyn));
@@ -1407,13 +1396,7 @@ DynamicSection<ELFT>::computeContents() {
addInt(config->useAndroidRelrTags ? DT_ANDROID_RELRENT : DT_RELRENT,
sizeof(Elf_Relr));
}
- // .rel[a].plt section usually consists of two parts, containing plt and
- // iplt relocations. It is possible to have only iplt relocations in the
- // output. In that case relaPlt is empty and have zero offset, the same offset
- // as relaIplt has. And we still want to emit proper dynamic tags for that
- // case, so here we always use relaPlt as marker for the beginning of
- // .rel[a].plt section.
- if (isMain && (in.relaPlt->isNeeded() || in.relaIplt->isNeeded())) {
+ if (isMain && in.relaPlt->isNeeded()) {
addInSec(DT_JMPREL, *in.relaPlt);
entries.emplace_back(DT_PLTRELSZ, addPltRelSz());
switch (config->emachine) {
@@ -1669,10 +1652,6 @@ void RelocationBaseSection::finalizeContents() {
getParent()->flags |= ELF::SHF_INFO_LINK;
getParent()->info = in.gotPlt->getParent()->sectionIndex;
}
- if (in.relaIplt.get() == this && in.igotPlt->getParent()) {
- getParent()->flags |= ELF::SHF_INFO_LINK;
- getParent()->info = in.igotPlt->getParent()->sectionIndex;
- }
}
void DynamicReloc::computeRaw(SymbolTableBaseSection *symtab) {
@@ -1686,6 +1665,11 @@ void RelocationBaseSection::computeRels() {
SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
parallelForEach(relocs,
[symTab](DynamicReloc &rel) { rel.computeRaw(symTab); });
+
+ auto irelative = std::partition(
+ relocs.begin() + numRelativeRelocs, relocs.end(),
+ [t = target->iRelativeRel](auto &r) { return r.type != t; });
+
// Sort by (!IsRelative,SymIndex,r_offset). DT_REL[A]COUNT requires us to
// place R_*_RELATIVE first. SymIndex is to improve locality, while r_offset
// is to make results easier to read.
@@ -1694,7 +1678,7 @@ void RelocationBaseSection::computeRels() {
parallelSort(relocs.begin(), nonRelative,
[&](auto &a, auto &b) { return a.r_offset < b.r_offset; });
// Non-relative relocations are few, so don't bother with parallelSort.
- llvm::sort(nonRelative, relocs.end(), [&](auto &a, auto &b) {
+ llvm::sort(nonRelative, irelative, [&](auto &a, auto &b) {
return std::tie(a.r_sym, a.r_offset) < std::tie(b.r_sym, b.r_offset);
});
}
@@ -3855,7 +3839,6 @@ void InStruct::reset() {
ppc32Got2.reset();
ibtPlt.reset();
relaPlt.reset();
- relaIplt.reset();
shStrTab.reset();
strTab.reset();
symTab.reset();
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index b41e69410054..02a669b01d15 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -106,9 +106,9 @@ public:
void writeTo(uint8_t *buf) override;
void addConstant(const Relocation &r);
- void addEntry(Symbol &sym);
- bool addTlsDescEntry(Symbol &sym);
- bool addDynTlsEntry(Symbol &sym);
+ void addEntry(const Symbol &sym);
+ bool addTlsDescEntry(const Symbol &sym);
+ bool addDynTlsEntry(const Symbol &sym);
bool addTlsIndex();
uint32_t getTlsDescOffset(const Symbol &sym) const;
uint64_t getTlsDescAddr(const Symbol &sym) const;
@@ -1358,7 +1358,6 @@ struct InStruct {
std::unique_ptr<PPC32Got2Section> ppc32Got2;
std::unique_ptr<IBTPltSection> ibtPlt;
std::unique_ptr<RelocationBaseSection> relaPlt;
- std::unique_ptr<RelocationBaseSection> relaIplt;
std::unique_ptr<StringTableSection> shStrTab;
std::unique_ptr<StringTableSection> strTab;
std::unique_ptr<SymbolTableBaseSection> symTab;
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 4eca7b22e90b..40d617b7fdf3 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -449,8 +449,8 @@ template <class ELFT> void elf::createSyntheticSections() {
add(*part.dynamic);
add(*part.dynStrTab);
- add(*part.relaDyn);
}
+ add(*part.relaDyn);
if (config->relrPackDynRelocs) {
part.relrDyn = std::make_unique<RelrSection<ELFT>>(threadCount);
@@ -550,17 +550,6 @@ template <class ELFT> void elf::createSyntheticSections() {
/*threadCount=*/1);
add(*in.relaPlt);
- // The relaIplt immediately follows .rel[a].dyn to ensure that the IRelative
- // relocations are processed last by the dynamic loader. We cannot place the
- // iplt section in .rel.dyn when Android relocation packing is enabled because
- // that would cause a section type mismatch. However, because the Android
- // dynamic loader reads .rel.plt after .rel.dyn, we can get the desired
- // behaviour by placing the iplt section in .rel.plt.
- in.relaIplt = std::make_unique<RelocationSection<ELFT>>(
- config->androidPackDynRelocs ? in.relaPlt->name : relaDynName,
- /*sort=*/false, /*threadCount=*/1);
- add(*in.relaIplt);
-
if ((config->emachine == EM_386 || config->emachine == EM_X86_64) &&
(config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT)) {
in.ibtPlt = std::make_unique<IBTPltSection>();
@@ -1071,20 +1060,18 @@ void PhdrEntry::add(OutputSection *sec) {
sec->ptLoad = this;
}
-// The beginning and the ending of .rel[a].plt section are marked
-// with __rel[a]_iplt_{start,end} symbols if it is a statically linked
-// executable. The runtime needs these symbols in order to resolve
-// all IRELATIVE relocs on startup. For dynamic executables, we don't
-// need these symbols, since IRELATIVE relocs are resolved through GOT
-// and PLT. For details, see http://www.airs.com/blog/archives/403.
+// A statically linked position-dependent executable should only contain
+// IRELATIVE relocations and no other dynamic relocations. Encapsulation symbols
+// __rel[a]_iplt_{start,end} will be defined for .rel[a].dyn, to be
+// processed by the libc runtime. Other executables or DSOs use dynamic tags
+// instead.
template <class ELFT> void Writer<ELFT>::addRelIpltSymbols() {
if (config->isPic)
return;
- // By default, __rela_iplt_{start,end} belong to a dummy section 0
- // because .rela.plt might be empty and thus removed from output.
- // We'll override Out::elfHeader with In.relaIplt later when we are
- // sure that .rela.plt exists in output.
+ // __rela_iplt_{start,end} are initially defined relative to dummy section 0.
+ // We'll override Out::elfHeader with relaDyn later when we are sure that
+ // .rela.dyn will be present in the output.
ElfSym::relaIpltStart = addOptionalRegular(
config->isRela ? "__rela_iplt_start" : "__rel_iplt_start",
Out::elfHeader, 0, STV_HIDDEN);
@@ -1110,11 +1097,11 @@ template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
ElfSym::globalOffsetTable->section = sec;
}
- // .rela_iplt_{start,end} mark the start and the end of in.relaIplt.
- if (ElfSym::relaIpltStart && in.relaIplt->isNeeded()) {
- ElfSym::relaIpltStart->section = in.relaIplt.get();
- ElfSym::relaIpltEnd->section = in.relaIplt.get();
- ElfSym::relaIpltEnd->value = in.relaIplt->getSize();
+ // .rela_iplt_{start,end} mark the start and the end of .rel[a].dyn.
+ if (ElfSym::relaIpltStart && mainPart->relaDyn->isNeeded()) {
+ ElfSym::relaIpltStart->section = mainPart->relaDyn.get();
+ ElfSym::relaIpltEnd->section = mainPart->relaDyn.get();
+ ElfSym::relaIpltEnd->value = mainPart->relaDyn->getSize();
}
PhdrEntry *last = nullptr;
@@ -1470,14 +1457,6 @@ static void sortSection(OutputSection &osec,
if (name == ".init" || name == ".fini")
return;
- // IRelative relocations that usually live in the .rel[a].dyn section should
- // be processed last by the dynamic loader. To achieve that we add synthetic
- // sections in the required order from the beginning so that the in.relaIplt
- // section is placed last in an output section. Here we just do not apply
- // sorting for an output section which holds the in.relaIplt section.
- if (in.relaIplt->getParent() == &osec)
- return;
-
// Sort input sections by priority using the list provided by
// --symbol-ordering-file or --shuffle-sections=. This is a least significant
// digit radix sort. The sections may be sorted stably again by a more
@@ -2196,7 +2175,6 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
finalizeSynthetic(in.mipsGot.get());
finalizeSynthetic(in.igotPlt.get());
finalizeSynthetic(in.gotPlt.get());
- finalizeSynthetic(in.relaIplt.get());
finalizeSynthetic(in.relaPlt.get());
finalizeSynthetic(in.plt.get());
finalizeSynthetic(in.iplt.get());
diff --git a/lld/MachO/ConcatOutputSection.cpp b/lld/MachO/ConcatOutputSection.cpp
index c5c0c8a89e28..279423720be9 100644
--- a/lld/MachO/ConcatOutputSection.cpp
+++ b/lld/MachO/ConcatOutputSection.cpp
@@ -323,11 +323,7 @@ void TextOutputSection::finalize() {
thunkInfo.isec =
makeSyntheticInputSection(isec->getSegName(), isec->getName());
thunkInfo.isec->parent = this;
-
- // This code runs after dead code removal. Need to set the `live` bit
- // on the thunk isec so that asserts that check that only live sections
- // get written are happy.
- thunkInfo.isec->live = true;
+ assert(thunkInfo.isec->live);
StringRef thunkName = saver().save(funcSym->getName() + ".thunk." +
std::to_string(thunkInfo.sequence++));
diff --git a/lld/MachO/Config.h b/lld/MachO/Config.h
index f820513a111e..7b45f7f4c39a 100644
--- a/lld/MachO/Config.h
+++ b/lld/MachO/Config.h
@@ -135,6 +135,7 @@ struct Configuration {
bool emitEncryptionInfo = false;
bool emitInitOffsets = false;
bool emitChainedFixups = false;
+ bool emitRelativeMethodLists = false;
bool thinLTOEmitImportsFiles;
bool thinLTOEmitIndexFiles;
bool thinLTOIndexOnly;
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index 36248925d65a..65de531db04b 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -612,7 +612,7 @@ static void replaceCommonSymbols() {
if (!osec)
osec = ConcatOutputSection::getOrCreateForInput(isec);
isec->parent = osec;
- inputSections.push_back(isec);
+ addInputSection(isec);
// FIXME: CommonSymbol should store isReferencedDynamically, noDeadStrip
// and pass them on here.
@@ -1086,6 +1086,22 @@ static bool shouldEmitChainedFixups(const InputArgList &args) {
return isRequested;
}
+static bool shouldEmitRelativeMethodLists(const InputArgList &args) {
+ const Arg *arg = args.getLastArg(OPT_objc_relative_method_lists,
+ OPT_no_objc_relative_method_lists);
+ if (arg && arg->getOption().getID() == OPT_objc_relative_method_lists)
+ return true;
+ if (arg && arg->getOption().getID() == OPT_no_objc_relative_method_lists)
+ return false;
+
+ // TODO: If no flag is specified, don't default to false, but instead:
+ // - default false on < ios14
+ // - default true on >= ios14
+ // For now, until this feature is confirmed stable, default to false if no
+ // flag is explicitly specified
+ return false;
+}
+
void SymbolPatterns::clear() {
literals.clear();
globs.clear();
@@ -1220,53 +1236,18 @@ static void createFiles(const InputArgList &args) {
static void gatherInputSections() {
TimeTraceScope timeScope("Gathering input sections");
- int inputOrder = 0;
for (const InputFile *file : inputFiles) {
for (const Section *section : file->sections) {
// Compact unwind entries require special handling elsewhere. (In
// contrast, EH frames are handled like regular ConcatInputSections.)
if (section->name == section_names::compactUnwind)
continue;
- ConcatOutputSection *osec = nullptr;
- for (const Subsection &subsection : section->subsections) {
- if (auto *isec = dyn_cast<ConcatInputSection>(subsection.isec)) {
- if (isec->isCoalescedWeak())
- continue;
- if (config->emitInitOffsets &&
- sectionType(isec->getFlags()) == S_MOD_INIT_FUNC_POINTERS) {
- in.initOffsets->addInput(isec);
- continue;
- }
- isec->outSecOff = inputOrder++;
- if (!osec)
- osec = ConcatOutputSection::getOrCreateForInput(isec);
- isec->parent = osec;
- inputSections.push_back(isec);
- } else if (auto *isec =
- dyn_cast<CStringInputSection>(subsection.isec)) {
- if (isec->getName() == section_names::objcMethname) {
- if (in.objcMethnameSection->inputOrder == UnspecifiedInputOrder)
- in.objcMethnameSection->inputOrder = inputOrder++;
- in.objcMethnameSection->addInput(isec);
- } else {
- if (in.cStringSection->inputOrder == UnspecifiedInputOrder)
- in.cStringSection->inputOrder = inputOrder++;
- in.cStringSection->addInput(isec);
- }
- } else if (auto *isec =
- dyn_cast<WordLiteralInputSection>(subsection.isec)) {
- if (in.wordLiteralSection->inputOrder == UnspecifiedInputOrder)
- in.wordLiteralSection->inputOrder = inputOrder++;
- in.wordLiteralSection->addInput(isec);
- } else {
- llvm_unreachable("unexpected input section kind");
- }
- }
+ for (const Subsection &subsection : section->subsections)
+ addInputSection(subsection.isec);
}
if (!file->objCImageInfo.empty())
in.objCImageInfo->addFile(file);
}
- assert(inputOrder <= UnspecifiedInputOrder);
}
static void foldIdenticalLiterals() {
@@ -1422,12 +1403,14 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
concatOutputSections.clear();
inputFiles.clear();
inputSections.clear();
+ inputSectionsOrder = 0;
loadedArchives.clear();
loadedObjectFrameworks.clear();
missingAutolinkWarnings.clear();
syntheticSections.clear();
thunkMap.clear();
unprocessedLCLinkerOptions.clear();
+ ObjCSelRefsHelper::cleanup();
firstTLVDataSection = nullptr;
tar = nullptr;
@@ -1663,6 +1646,7 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
config->emitChainedFixups = shouldEmitChainedFixups(args);
config->emitInitOffsets =
config->emitChainedFixups || args.hasArg(OPT_init_offsets);
+ config->emitRelativeMethodLists = shouldEmitRelativeMethodLists(args);
config->icfLevel = getICFLevel(args);
config->dedupStrings =
args.hasFlag(OPT_deduplicate_strings, OPT_no_deduplicate_strings, true);
diff --git a/lld/MachO/InputSection.cpp b/lld/MachO/InputSection.cpp
index 8f5affb1dc21..5c1e07cd21b1 100644
--- a/lld/MachO/InputSection.cpp
+++ b/lld/MachO/InputSection.cpp
@@ -37,6 +37,52 @@ static_assert(sizeof(void *) != 8 ||
"instances of it");
std::vector<ConcatInputSection *> macho::inputSections;
+int macho::inputSectionsOrder = 0;
+
+// Call this function to add a new InputSection and have it routed to the
+// appropriate container. Depending on its type and current config, it will
+// either be added to 'inputSections' vector or to a synthetic section.
+void lld::macho::addInputSection(InputSection *inputSection) {
+ if (auto *isec = dyn_cast<ConcatInputSection>(inputSection)) {
+ if (isec->isCoalescedWeak())
+ return;
+ if (config->emitRelativeMethodLists &&
+ ObjCMethListSection::isMethodList(isec)) {
+ if (in.objcMethList->inputOrder == UnspecifiedInputOrder)
+ in.objcMethList->inputOrder = inputSectionsOrder++;
+ in.objcMethList->addInput(isec);
+ isec->parent = in.objcMethList;
+ return;
+ }
+ if (config->emitInitOffsets &&
+ sectionType(isec->getFlags()) == S_MOD_INIT_FUNC_POINTERS) {
+ in.initOffsets->addInput(isec);
+ return;
+ }
+ isec->outSecOff = inputSectionsOrder++;
+ auto *osec = ConcatOutputSection::getOrCreateForInput(isec);
+ isec->parent = osec;
+ inputSections.push_back(isec);
+ } else if (auto *isec = dyn_cast<CStringInputSection>(inputSection)) {
+ if (isec->getName() == section_names::objcMethname) {
+ if (in.objcMethnameSection->inputOrder == UnspecifiedInputOrder)
+ in.objcMethnameSection->inputOrder = inputSectionsOrder++;
+ in.objcMethnameSection->addInput(isec);
+ } else {
+ if (in.cStringSection->inputOrder == UnspecifiedInputOrder)
+ in.cStringSection->inputOrder = inputSectionsOrder++;
+ in.cStringSection->addInput(isec);
+ }
+ } else if (auto *isec = dyn_cast<WordLiteralInputSection>(inputSection)) {
+ if (in.wordLiteralSection->inputOrder == UnspecifiedInputOrder)
+ in.wordLiteralSection->inputOrder = inputSectionsOrder++;
+ in.wordLiteralSection->addInput(isec);
+ } else {
+ llvm_unreachable("unexpected input section kind");
+ }
+
+ assert(inputSectionsOrder <= UnspecifiedInputOrder);
+}
uint64_t InputSection::getFileSize() const {
return isZeroFill(getFlags()) ? 0 : getSize();
@@ -235,6 +281,9 @@ ConcatInputSection *macho::makeSyntheticInputSection(StringRef segName,
Section &section =
*make<Section>(/*file=*/nullptr, segName, sectName, flags, /*addr=*/0);
auto isec = make<ConcatInputSection>(section, data, align);
+ // Since this is an explicitly created 'fake' input section,
+ // it should not be dead stripped.
+ isec->live = true;
section.subsections.push_back({0, isec});
return isec;
}
diff --git a/lld/MachO/InputSection.h b/lld/MachO/InputSection.h
index b25f0638f4c6..0f389e50425a 100644
--- a/lld/MachO/InputSection.h
+++ b/lld/MachO/InputSection.h
@@ -149,6 +149,7 @@ public:
};
// Initialize a fake InputSection that does not belong to any InputFile.
+// The created ConcatInputSection will always have 'live=true'
ConcatInputSection *makeSyntheticInputSection(StringRef segName,
StringRef sectName,
uint32_t flags = 0,
@@ -302,6 +303,8 @@ bool isEhFrameSection(const InputSection *);
bool isGccExceptTabSection(const InputSection *);
extern std::vector<ConcatInputSection *> inputSections;
+// This is used as a counter for specifying input order for input sections
+extern int inputSectionsOrder;
namespace section_names {
@@ -340,6 +343,7 @@ constexpr const char moduleTermFunc[] = "__mod_term_func";
constexpr const char nonLazySymbolPtr[] = "__nl_symbol_ptr";
constexpr const char objcCatList[] = "__objc_catlist";
constexpr const char objcClassList[] = "__objc_classlist";
+constexpr const char objcMethList[] = "__objc_methlist";
constexpr const char objcClassRefs[] = "__objc_classrefs";
constexpr const char objcConst[] = "__objc_const";
constexpr const char objCImageInfo[] = "__objc_imageinfo";
@@ -369,6 +373,7 @@ constexpr const char addrSig[] = "__llvm_addrsig";
} // namespace section_names
+void addInputSection(InputSection *inputSection);
} // namespace macho
std::string toString(const macho::InputSection *);
diff --git a/lld/MachO/MapFile.cpp b/lld/MachO/MapFile.cpp
index f736360624eb..2a31a5c09cdd 100644
--- a/lld/MachO/MapFile.cpp
+++ b/lld/MachO/MapFile.cpp
@@ -197,18 +197,24 @@ void macho::writeMapFile() {
seg->name.str().c_str(), osec->name.str().c_str());
}
+ // Shared function to print an array of symbols.
+ auto printIsecArrSyms = [&](const std::vector<ConcatInputSection *> &arr) {
+ for (const ConcatInputSection *isec : arr) {
+ for (Defined *sym : isec->symbols) {
+ if (!(isPrivateLabel(sym->getName()) && sym->size == 0))
+ os << format("0x%08llX\t0x%08llX\t[%3u] %s\n", sym->getVA(),
+ sym->size, readerToFileOrdinal[sym->getFile()],
+ sym->getName().str().data());
+ }
+ }
+ };
+
os << "# Symbols:\n";
os << "# Address\tSize \tFile Name\n";
for (const OutputSegment *seg : outputSegments) {
for (const OutputSection *osec : seg->getSections()) {
if (auto *concatOsec = dyn_cast<ConcatOutputSection>(osec)) {
- for (const InputSection *isec : concatOsec->inputs) {
- for (Defined *sym : isec->symbols)
- if (!(isPrivateLabel(sym->getName()) && sym->size == 0))
- os << format("0x%08llX\t0x%08llX\t[%3u] %s\n", sym->getVA(),
- sym->size, readerToFileOrdinal[sym->getFile()],
- sym->getName().str().data());
- }
+ printIsecArrSyms(concatOsec->inputs);
} else if (osec == in.cStringSection || osec == in.objcMethnameSection) {
const auto &liveCStrings = info.liveCStringsForSection.lookup(osec);
uint64_t lastAddr = 0; // strings will never start at address 0, so this
@@ -237,6 +243,8 @@ void macho::writeMapFile() {
printNonLazyPointerSection(os, in.got);
} else if (osec == in.tlvPointers) {
printNonLazyPointerSection(os, in.tlvPointers);
+ } else if (osec == in.objcMethList) {
+ printIsecArrSyms(in.objcMethList->getInputs());
}
// TODO print other synthetic sections
}
diff --git a/lld/MachO/ObjC.cpp b/lld/MachO/ObjC.cpp
index 40df2243b26f..5902b82d30f5 100644
--- a/lld/MachO/ObjC.cpp
+++ b/lld/MachO/ObjC.cpp
@@ -790,7 +790,7 @@ void ObjcCategoryMerger::emitAndLinkProtocolList(
infoCategoryWriter.catPtrListInfo.align);
listSec->parent = infoCategoryWriter.catPtrListInfo.outputSection;
listSec->live = true;
- allInputSections.push_back(listSec);
+ addInputSection(listSec);
listSec->parent = infoCategoryWriter.catPtrListInfo.outputSection;
@@ -848,7 +848,7 @@ void ObjcCategoryMerger::emitAndLinkPointerList(
infoCategoryWriter.catPtrListInfo.align);
listSec->parent = infoCategoryWriter.catPtrListInfo.outputSection;
listSec->live = true;
- allInputSections.push_back(listSec);
+ addInputSection(listSec);
listSec->parent = infoCategoryWriter.catPtrListInfo.outputSection;
@@ -889,7 +889,7 @@ ObjcCategoryMerger::emitCatListEntrySec(const std::string &forCateogryName,
bodyData, infoCategoryWriter.catListInfo.align);
newCatList->parent = infoCategoryWriter.catListInfo.outputSection;
newCatList->live = true;
- allInputSections.push_back(newCatList);
+ addInputSection(newCatList);
newCatList->parent = infoCategoryWriter.catListInfo.outputSection;
@@ -927,7 +927,7 @@ Defined *ObjcCategoryMerger::emitCategoryBody(const std::string &name,
bodyData, infoCategoryWriter.catBodyInfo.align);
newBodySec->parent = infoCategoryWriter.catBodyInfo.outputSection;
newBodySec->live = true;
- allInputSections.push_back(newBodySec);
+ addInputSection(newBodySec);
std::string symName =
objc::symbol_names::category + baseClassName + "_$_(" + name + ")";
@@ -1132,7 +1132,7 @@ void ObjcCategoryMerger::generateCatListForNonErasedCategories(
infoCategoryWriter.catListInfo.align);
listSec->parent = infoCategoryWriter.catListInfo.outputSection;
listSec->live = true;
- allInputSections.push_back(listSec);
+ addInputSection(listSec);
std::string slotSymName = "<__objc_catlist slot for category ";
slotSymName += nonErasedCatBody->getName();
@@ -1221,9 +1221,11 @@ void ObjcCategoryMerger::doCleanup() { generatedSectionData.clear(); }
StringRef ObjcCategoryMerger::newStringData(const char *str) {
uint32_t len = strlen(str);
- auto &data = newSectionData(len + 1);
+ uint32_t bufSize = len + 1;
+ auto &data = newSectionData(bufSize);
char *strData = reinterpret_cast<char *>(data.data());
- strncpy(strData, str, len);
+ // Copy the string chars and null-terminator
+ memcpy(strData, str, bufSize);
return StringRef(strData, len);
}
diff --git a/lld/MachO/ObjC.h b/lld/MachO/ObjC.h
index 9fbe984e6223..8081605670c5 100644
--- a/lld/MachO/ObjC.h
+++ b/lld/MachO/ObjC.h
@@ -22,6 +22,8 @@ constexpr const char klassPropList[] = "__OBJC_$_CLASS_PROP_LIST_";
constexpr const char metaclass[] = "_OBJC_METACLASS_$_";
constexpr const char ehtype[] = "_OBJC_EHTYPE_$_";
constexpr const char ivar[] = "_OBJC_IVAR_$_";
+constexpr const char instanceMethods[] = "__OBJC_$_INSTANCE_METHODS_";
+constexpr const char classMethods[] = "__OBJC_$_CLASS_METHODS_";
constexpr const char listProprieties[] = "__OBJC_$_PROP_LIST_";
constexpr const char category[] = "__OBJC_$_CATEGORY_";
diff --git a/lld/MachO/Options.td b/lld/MachO/Options.td
index 0d8ee2a0926b..11458d92b3ab 100644
--- a/lld/MachO/Options.td
+++ b/lld/MachO/Options.td
@@ -1284,6 +1284,12 @@ def fixup_chains_section : Flag<["-"], "fixup_chains_section">,
HelpText<"This option is undocumented in ld64">,
Flags<[HelpHidden]>,
Group<grp_undocumented>;
+def objc_relative_method_lists : Flag<["-"], "objc_relative_method_lists">,
+ HelpText<"Emit relative method lists (more compact representation)">,
+ Group<grp_undocumented>;
+def no_objc_relative_method_lists : Flag<["-"], "no_objc_relative_method_lists">,
+ HelpText<"Don't emit relative method lists (use traditional representation)">,
+ Group<grp_undocumented>;
def flto_codegen_only : Flag<["-"], "flto-codegen-only">,
HelpText<"This option is undocumented in ld64">,
Flags<[HelpHidden]>,
@@ -1407,3 +1413,9 @@ def debug_variant : Flag<["-"], "debug_variant">,
HelpText<"Do not warn about issues that are only problems for binaries shipping to customers.">,
Flags<[HelpHidden]>,
Group<grp_ignored_silently>;
+
+// NOTE: This flag should be respected if -warn_duplicate_libraries is ever implemented.
+def no_warn_duplicate_libraries : Flag<["-"], "no_warn_duplicate_libraries">,
+ HelpText<"Do not warn if the input contains duplicate library options.">,
+ Flags<[HelpHidden]>,
+ Group<grp_ignored_silently>;
diff --git a/lld/MachO/SymbolTable.cpp b/lld/MachO/SymbolTable.cpp
index 825242f2cc72..755ff270e2f7 100644
--- a/lld/MachO/SymbolTable.cpp
+++ b/lld/MachO/SymbolTable.cpp
@@ -377,7 +377,7 @@ static void handleSectionBoundarySymbol(const Undefined &sym, StringRef segSect,
// live. Marking the isec live ensures an OutputSection is created that the
// start/end symbol can refer to.
assert(sym.isLive());
- isec->live = true;
+ assert(isec->live);
// This runs after gatherInputSections(), so need to explicitly set parent
// and add to inputSections.
diff --git a/lld/MachO/SyntheticSections.cpp b/lld/MachO/SyntheticSections.cpp
index 7ee3261ce307..6f6b66118b7a 100644
--- a/lld/MachO/SyntheticSections.cpp
+++ b/lld/MachO/SyntheticSections.cpp
@@ -12,6 +12,7 @@
#include "ExportTrie.h"
#include "InputFiles.h"
#include "MachOStructs.h"
+#include "ObjC.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
@@ -793,7 +794,7 @@ void StubHelperSection::setUp() {
in.imageLoaderCache->parent =
ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
- inputSections.push_back(in.imageLoaderCache);
+ addInputSection(in.imageLoaderCache);
// Since this isn't in the symbol table or in any input file, the noDeadStrip
// argument doesn't matter.
dyldPrivate =
@@ -806,10 +807,9 @@ void StubHelperSection::setUp() {
dyldPrivate->used = true;
}
-ObjCSelRefsSection::ObjCSelRefsSection()
- : SyntheticSection(segment_names::data, section_names::objcSelrefs) {}
-
-void ObjCSelRefsSection::initialize() {
+llvm::DenseMap<llvm::CachedHashStringRef, ConcatInputSection *>
+ ObjCSelRefsHelper::methnameToSelref;
+void ObjCSelRefsHelper::initialize() {
// Do not fold selrefs without ICF.
if (config->icfLevel == ICFLevel::none)
return;
@@ -836,7 +836,9 @@ void ObjCSelRefsSection::initialize() {
}
}
-ConcatInputSection *ObjCSelRefsSection::makeSelRef(StringRef methname) {
+void ObjCSelRefsHelper::cleanup() { methnameToSelref.clear(); }
+
+ConcatInputSection *ObjCSelRefsHelper::makeSelRef(StringRef methname) {
auto methnameOffset =
in.objcMethnameSection->getStringOffset(methname).outSecOff;
@@ -848,20 +850,20 @@ ConcatInputSection *ObjCSelRefsSection::makeSelRef(StringRef methname) {
S_LITERAL_POINTERS | S_ATTR_NO_DEAD_STRIP,
ArrayRef<uint8_t>{selrefData, wordSize},
/*align=*/wordSize);
- objcSelref->live = true;
+ assert(objcSelref->live);
objcSelref->relocs.push_back({/*type=*/target->unsignedRelocType,
/*pcrel=*/false, /*length=*/3,
/*offset=*/0,
/*addend=*/static_cast<int64_t>(methnameOffset),
/*referent=*/in.objcMethnameSection->isec});
objcSelref->parent = ConcatOutputSection::getOrCreateForInput(objcSelref);
- inputSections.push_back(objcSelref);
+ addInputSection(objcSelref);
objcSelref->isFinal = true;
methnameToSelref[CachedHashStringRef(methname)] = objcSelref;
return objcSelref;
}
-ConcatInputSection *ObjCSelRefsSection::getSelRef(StringRef methname) {
+ConcatInputSection *ObjCSelRefsHelper::getSelRef(StringRef methname) {
auto it = methnameToSelref.find(CachedHashStringRef(methname));
if (it == methnameToSelref.end())
return nullptr;
@@ -890,8 +892,8 @@ StringRef ObjCStubsSection::getMethname(Symbol *sym) {
void ObjCStubsSection::addEntry(Symbol *sym) {
StringRef methname = getMethname(sym);
// We create a selref entry for each unique methname.
- if (!in.objcSelRefs->getSelRef(methname))
- in.objcSelRefs->makeSelRef(methname);
+ if (!ObjCSelRefsHelper::getSelRef(methname))
+ ObjCSelRefsHelper::makeSelRef(methname);
auto stubSize = config->objcStubsMode == ObjCStubsMode::fast
? target->objcStubsFastSize
@@ -940,7 +942,7 @@ void ObjCStubsSection::writeTo(uint8_t *buf) const {
Defined *sym = symbols[i];
auto methname = getMethname(sym);
- InputSection *selRef = in.objcSelRefs->getSelRef(methname);
+ InputSection *selRef = ObjCSelRefsHelper::getSelRef(methname);
assert(selRef != nullptr && "no selref for methname");
auto selrefAddr = selRef->getVA(0);
target->writeObjCMsgSendStub(buf + stubOffset, sym, in.objcStubs->addr,
@@ -1974,6 +1976,241 @@ void InitOffsetsSection::setUp() {
}
}
+ObjCMethListSection::ObjCMethListSection()
+ : SyntheticSection(segment_names::text, section_names::objcMethList) {
+ flags = S_ATTR_NO_DEAD_STRIP;
+ align = relativeOffsetSize;
+}
+
+// Go through all input method lists and ensure that we have selrefs for all
+// their method names. The selrefs will be needed later by ::writeTo. We need to
+// create them early on here to ensure they are processed correctly by the lld
+// pipeline.
+void ObjCMethListSection::setUp() {
+ for (const ConcatInputSection *isec : inputs) {
+ uint32_t structSizeAndFlags = 0, structCount = 0;
+ readMethodListHeader(isec->data.data(), structSizeAndFlags, structCount);
+ uint32_t originalStructSize = structSizeAndFlags & structSizeMask;
+ // Method name is immediately after header
+ uint32_t methodNameOff = methodListHeaderSize;
+
+ // Loop through all methods, and ensure a selref for each of them exists.
+ while (methodNameOff < isec->data.size()) {
+ const Reloc *reloc = isec->getRelocAt(methodNameOff);
+ assert(reloc && "Relocation expected at method list name slot");
+ auto *def = dyn_cast_or_null<Defined>(reloc->referent.get<Symbol *>());
+ assert(def && "Expected valid Defined at method list name slot");
+ auto *cisec = cast<CStringInputSection>(def->isec);
+ assert(cisec && "Expected method name to be in a CStringInputSection");
+ auto methname = cisec->getStringRefAtOffset(def->value);
+ if (!ObjCSelRefsHelper::getSelRef(methname))
+ ObjCSelRefsHelper::makeSelRef(methname);
+
+ // Jump to method name offset in next struct
+ methodNameOff += originalStructSize;
+ }
+ }
+}
+
+// Calculate section size and final offsets for where InputSection's need to be
+// written.
+void ObjCMethListSection::finalize() {
+ // sectionSize will be the total size of the __objc_methlist section
+ sectionSize = 0;
+ for (ConcatInputSection *isec : inputs) {
+ // We can also use sectionSize as write offset for isec
+ assert(sectionSize == alignToPowerOf2(sectionSize, relativeOffsetSize) &&
+ "expected __objc_methlist to be aligned by default with the "
+ "required section alignment");
+ isec->outSecOff = sectionSize;
+
+ isec->isFinal = true;
+ uint32_t relativeListSize =
+ computeRelativeMethodListSize(isec->data.size());
+ sectionSize += relativeListSize;
+
+ // If encoding the method list in relative offset format shrinks the size,
+ // then we also need to adjust symbol sizes to match the new size. Note that
+ // on 32bit platforms the size of the method list will remain the same when
+ // encoded in relative offset format.
+ if (relativeListSize != isec->data.size()) {
+ for (Symbol *sym : isec->symbols) {
+ assert(isa<Defined>(sym) &&
+ "Unexpected undefined symbol in ObjC method list");
+ auto *def = cast<Defined>(sym);
+ // There can be 0-size symbols, check if this is the case and ignore
+ // them.
+ if (def->size) {
+ assert(
+ def->size == isec->data.size() &&
+ "Invalid ObjC method list symbol size: expected symbol size to "
+ "match isec size");
+ def->size = relativeListSize;
+ }
+ }
+ }
+ }
+}
+
+void ObjCMethListSection::writeTo(uint8_t *bufStart) const {
+ uint8_t *buf = bufStart;
+ for (const ConcatInputSection *isec : inputs) {
+ assert(buf - bufStart == long(isec->outSecOff) &&
+ "Writing at unexpected offset");
+ uint32_t writtenSize = writeRelativeMethodList(isec, buf);
+ buf += writtenSize;
+ }
+ assert(buf - bufStart == sectionSize &&
+ "Written size does not match expected section size");
+}
+
+// Check if an InputSection is a method list. To do this we scan the
+// InputSection for any symbols whose names match the patterns we expect clang
+// to generate for method lists.
+bool ObjCMethListSection::isMethodList(const ConcatInputSection *isec) {
+ const char *symPrefixes[] = {objc::symbol_names::classMethods,
+ objc::symbol_names::instanceMethods,
+ objc::symbol_names::categoryInstanceMethods,
+ objc::symbol_names::categoryClassMethods};
+ if (!isec)
+ return false;
+ for (const Symbol *sym : isec->symbols) {
+ auto *def = dyn_cast_or_null<Defined>(sym);
+ if (!def)
+ continue;
+ for (const char *prefix : symPrefixes) {
+ if (def->getName().starts_with(prefix)) {
+ assert(def->size == isec->data.size() &&
+ "Invalid ObjC method list symbol size: expected symbol size to "
+ "match isec size");
+ assert(def->value == 0 &&
+ "Offset of ObjC method list symbol must be 0");
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+// Encode a single relative offset value. The input is the data/symbol at
+// (&isec->data[inSecOff]). The output is written to (&buf[outSecOff]).
+// 'createSelRef' indicates that we should not directly use the specified
+// symbol, but instead get the selRef for the symbol and use that instead.
+void ObjCMethListSection::writeRelativeOffsetForIsec(
+ const ConcatInputSection *isec, uint8_t *buf, uint32_t &inSecOff,
+ uint32_t &outSecOff, bool useSelRef) const {
+ const Reloc *reloc = isec->getRelocAt(inSecOff);
+ assert(reloc && "Relocation expected at __objc_methlist Offset");
+ auto *def = dyn_cast_or_null<Defined>(reloc->referent.get<Symbol *>());
+ assert(def && "Expected all syms in __objc_methlist to be defined");
+ uint32_t symVA = def->getVA();
+
+ if (useSelRef) {
+ auto *cisec = cast<CStringInputSection>(def->isec);
+ auto methname = cisec->getStringRefAtOffset(def->value);
+ ConcatInputSection *selRef = ObjCSelRefsHelper::getSelRef(methname);
+ assert(selRef && "Expected all selector names to already be already be "
+ "present in __objc_selrefs");
+ symVA = selRef->getVA();
+ assert(selRef->data.size() == sizeof(target->wordSize) &&
+ "Expected one selref per ConcatInputSection");
+ }
+
+ uint32_t currentVA = isec->getVA() + outSecOff;
+ uint32_t delta = symVA - currentVA;
+ write32le(buf + outSecOff, delta);
+
+ // Move one pointer forward in the absolute method list
+ inSecOff += target->wordSize;
+ // Move one relative offset forward in the relative method list (32 bits)
+ outSecOff += relativeOffsetSize;
+}
+
+// Write a relative method list to buf, return the size of the written
+// information
+uint32_t
+ObjCMethListSection::writeRelativeMethodList(const ConcatInputSection *isec,
+ uint8_t *buf) const {
+ // Copy over the header, and add the "this is a relative method list" magic
+ // value flag
+ uint32_t structSizeAndFlags = 0, structCount = 0;
+ readMethodListHeader(isec->data.data(), structSizeAndFlags, structCount);
+ // Set the struct size for the relative method list
+ uint32_t relativeStructSizeAndFlags =
+ (relativeOffsetSize * pointersPerStruct) & structSizeMask;
+ // Carry over the old flags from the input struct
+ relativeStructSizeAndFlags |= structSizeAndFlags & structFlagsMask;
+ // Set the relative method list flag
+ relativeStructSizeAndFlags |= relMethodHeaderFlag;
+
+ writeMethodListHeader(buf, relativeStructSizeAndFlags, structCount);
+
+ assert(methodListHeaderSize +
+ (structCount * pointersPerStruct * target->wordSize) ==
+ isec->data.size() &&
+ "Invalid computed ObjC method list size");
+
+ uint32_t inSecOff = methodListHeaderSize;
+ uint32_t outSecOff = methodListHeaderSize;
+
+ // Go through the method list and encode input absolute pointers as relative
+ // offsets. writeRelativeOffsetForIsec will be incrementing inSecOff and
+ // outSecOff
+ for (uint32_t i = 0; i < structCount; i++) {
+ // Write the name of the method
+ writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, true);
+ // Write the type of the method
+ writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, false);
+ // Write reference to the selector of the method
+ writeRelativeOffsetForIsec(isec, buf, inSecOff, outSecOff, false);
+ }
+
+ // Expecting to have read all the data in the isec
+ assert(inSecOff == isec->data.size() &&
+ "Invalid actual ObjC method list size");
+ assert(
+ outSecOff == computeRelativeMethodListSize(inSecOff) &&
+ "Mismatch between input & output size when writing relative method list");
+ return outSecOff;
+}
+
+// Given the size of an ObjC method list InputSection, return the size of the
+// method list when encoded in relative offsets format. We can do this without
+// decoding the actual data, as it can be directly inferred from the size of the
+// isec.
+uint32_t ObjCMethListSection::computeRelativeMethodListSize(
+ uint32_t absoluteMethodListSize) const {
+ uint32_t oldPointersSize = absoluteMethodListSize - methodListHeaderSize;
+ uint32_t pointerCount = oldPointersSize / target->wordSize;
+ assert(((pointerCount % pointersPerStruct) == 0) &&
+ "__objc_methlist expects method lists to have multiple-of-3 pointers");
+
+ uint32_t newPointersSize = pointerCount * relativeOffsetSize;
+ uint32_t newTotalSize = methodListHeaderSize + newPointersSize;
+
+ assert((newTotalSize <= absoluteMethodListSize) &&
+ "Expected relative method list size to be smaller or equal than "
+ "original size");
+ return newTotalSize;
+}
+
+// Read a method list header from buf
+void ObjCMethListSection::readMethodListHeader(const uint8_t *buf,
+ uint32_t &structSizeAndFlags,
+ uint32_t &structCount) const {
+ structSizeAndFlags = read32le(buf);
+ structCount = read32le(buf + sizeof(uint32_t));
+}
+
+// Write a method list header to buf
+void ObjCMethListSection::writeMethodListHeader(uint8_t *buf,
+ uint32_t structSizeAndFlags,
+ uint32_t structCount) const {
+ write32le(buf, structSizeAndFlags);
+ write32le(buf + sizeof(structSizeAndFlags), structCount);
+}
+
void macho::createSyntheticSymbols() {
auto addHeaderSymbol = [](const char *name) {
symtab->addSynthetic(name, in.header->isec, /*value=*/0,
diff --git a/lld/MachO/SyntheticSections.h b/lld/MachO/SyntheticSections.h
index 6d85f0aea8e0..e8fadfef56d4 100644
--- a/lld/MachO/SyntheticSections.h
+++ b/lld/MachO/SyntheticSections.h
@@ -315,24 +315,16 @@ public:
Defined *dyldPrivate = nullptr;
};
-class ObjCSelRefsSection final : public SyntheticSection {
+class ObjCSelRefsHelper {
public:
- ObjCSelRefsSection();
- void initialize();
-
- // This SyntheticSection does not do directly write data to the output, it is
- // just a placeholder for easily creating SyntheticInputSection's which will
- // be inserted into inputSections and handeled by the default writing
- // mechanism.
- uint64_t getSize() const override { return 0; }
- bool isNeeded() const override { return false; }
- void writeTo(uint8_t *buf) const override {}
+ static void initialize();
+ static void cleanup();
- ConcatInputSection *getSelRef(StringRef methname);
- ConcatInputSection *makeSelRef(StringRef methname);
+ static ConcatInputSection *getSelRef(StringRef methname);
+ static ConcatInputSection *makeSelRef(StringRef methname);
private:
- llvm::DenseMap<llvm::CachedHashStringRef, ConcatInputSection *>
+ static llvm::DenseMap<llvm::CachedHashStringRef, ConcatInputSection *>
methnameToSelref;
};
@@ -692,6 +684,54 @@ private:
std::vector<ConcatInputSection *> sections;
};
+// This SyntheticSection is for the __objc_methlist section, which contains
+// relative method lists if the -objc_relative_method_lists option is enabled.
+class ObjCMethListSection final : public SyntheticSection {
+public:
+ ObjCMethListSection();
+
+ static bool isMethodList(const ConcatInputSection *isec);
+ void addInput(ConcatInputSection *isec) { inputs.push_back(isec); }
+ std::vector<ConcatInputSection *> getInputs() { return inputs; }
+
+ void setUp();
+ void finalize() override;
+ bool isNeeded() const override { return !inputs.empty(); }
+ uint64_t getSize() const override { return sectionSize; }
+ void writeTo(uint8_t *bufStart) const override;
+
+private:
+ void readMethodListHeader(const uint8_t *buf, uint32_t &structSizeAndFlags,
+ uint32_t &structCount) const;
+ void writeMethodListHeader(uint8_t *buf, uint32_t structSizeAndFlags,
+ uint32_t structCount) const;
+ uint32_t computeRelativeMethodListSize(uint32_t absoluteMethodListSize) const;
+ void writeRelativeOffsetForIsec(const ConcatInputSection *isec, uint8_t *buf,
+ uint32_t &inSecOff, uint32_t &outSecOff,
+ bool useSelRef) const;
+ uint32_t writeRelativeMethodList(const ConcatInputSection *isec,
+ uint8_t *buf) const;
+
+ static constexpr uint32_t methodListHeaderSize =
+ /*structSizeAndFlags*/ sizeof(uint32_t) +
+ /*structCount*/ sizeof(uint32_t);
+ // Relative method lists are supported only for 3-pointer method lists
+ static constexpr uint32_t pointersPerStruct = 3;
+ // The runtime identifies relative method lists via this magic value
+ static constexpr uint32_t relMethodHeaderFlag = 0x80000000;
+ // In the method list header, the first 2 bytes are the size of struct
+ static constexpr uint32_t structSizeMask = 0x0000FFFF;
+ // In the method list header, the last 2 bytes are the flags for the struct
+ static constexpr uint32_t structFlagsMask = 0xFFFF0000;
+ // Relative method lists have 4 byte alignment as all data in the InputSection
+ // is 4 byte
+ static constexpr uint32_t relativeOffsetSize = sizeof(uint32_t);
+
+ // The output size of the __objc_methlist section, computed during finalize()
+ uint32_t sectionSize = 0;
+ std::vector<ConcatInputSection *> inputs;
+};
+
// Chained fixups are a replacement for classic dyld opcodes. In this format,
// most of the metadata necessary for binding symbols and rebasing addresses is
// stored directly in the memory location that will have the fixup applied.
@@ -813,12 +853,12 @@ struct InStruct {
LazyPointerSection *lazyPointers = nullptr;
StubsSection *stubs = nullptr;
StubHelperSection *stubHelper = nullptr;
- ObjCSelRefsSection *objcSelRefs = nullptr;
ObjCStubsSection *objcStubs = nullptr;
UnwindInfoSection *unwindInfo = nullptr;
ObjCImageInfoSection *objCImageInfo = nullptr;
ConcatInputSection *imageLoaderCache = nullptr;
InitOffsetsSection *initOffsets = nullptr;
+ ObjCMethListSection *objcMethList = nullptr;
ChainedFixupsSection *chainedFixups = nullptr;
};
diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp
index 8f335188e12c..1c054912551e 100644
--- a/lld/MachO/Writer.cpp
+++ b/lld/MachO/Writer.cpp
@@ -720,7 +720,7 @@ static void addNonWeakDefinition(const Defined *defined) {
void Writer::scanSymbols() {
TimeTraceScope timeScope("Scan symbols");
- in.objcSelRefs->initialize();
+ ObjCSelRefsHelper::initialize();
for (Symbol *sym : symtab->getSymbols()) {
if (auto *defined = dyn_cast<Defined>(sym)) {
if (!defined->isLive())
@@ -1292,6 +1292,8 @@ template <class LP> void Writer::run() {
scanSymbols();
if (in.objcStubs->isNeeded())
in.objcStubs->setUp();
+ if (in.objcMethList->isNeeded())
+ in.objcMethList->setUp();
scanRelocations();
if (in.initOffsets->isNeeded())
in.initOffsets->setUp();
@@ -1359,11 +1361,11 @@ void macho::createSyntheticSections() {
in.got = make<GotSection>();
in.tlvPointers = make<TlvPointerSection>();
in.stubs = make<StubsSection>();
- in.objcSelRefs = make<ObjCSelRefsSection>();
in.objcStubs = make<ObjCStubsSection>();
in.unwindInfo = makeUnwindInfoSection();
in.objCImageInfo = make<ObjCImageInfoSection>();
in.initOffsets = make<InitOffsetsSection>();
+ in.objcMethList = make<ObjCMethListSection>();
// This section contains space for just a single word, and will be used by
// dyld to cache an address to the image loader it uses.
@@ -1373,9 +1375,7 @@ void macho::createSyntheticSections() {
segment_names::data, section_names::data, S_REGULAR,
ArrayRef<uint8_t>{arr, target->wordSize},
/*align=*/target->wordSize);
- // References from dyld are not visible to us, so ensure this section is
- // always treated as live.
- in.imageLoaderCache->live = true;
+ assert(in.imageLoaderCache->live);
}
OutputSection *macho::firstTLVDataSection = nullptr;
diff --git a/lld/MinGW/Driver.cpp b/lld/MinGW/Driver.cpp
index efd643f9a322..bb08c77b2e11 100644
--- a/lld/MinGW/Driver.cpp
+++ b/lld/MinGW/Driver.cpp
@@ -455,6 +455,8 @@ bool link(ArrayRef<const char *> argsArr, llvm::raw_ostream &stdoutOS,
add("-lldemit:llvm");
if (args.hasArg(OPT_lto_emit_asm))
add("-lldemit:asm");
+ if (auto *arg = args.getLastArg(OPT_lto_sample_profile))
+ add("-lto-sample-profile:" + StringRef(arg->getValue()));
if (auto *a = args.getLastArg(OPT_thinlto_cache_dir))
add("-lldltocache:" + StringRef(a->getValue()));
diff --git a/lld/MinGW/Options.td b/lld/MinGW/Options.td
index 9a0a96aac7f1..56f67e3dd96c 100644
--- a/lld/MinGW/Options.td
+++ b/lld/MinGW/Options.td
@@ -160,6 +160,8 @@ def lto_cs_profile_file: JJ<"lto-cs-profile-file=">,
HelpText<"Context sensitive profile file path">;
def lto_emit_asm: FF<"lto-emit-asm">,
HelpText<"Emit assembly code">;
+def lto_sample_profile: JJ<"lto-sample-profile=">,
+ HelpText<"Sample profile file path">;
def thinlto_cache_dir: JJ<"thinlto-cache-dir=">,
HelpText<"Path to ThinLTO cached object file directory">;
diff --git a/lld/test/COFF/export.test b/lld/test/COFF/export.test
index d340e0174b56..041b328dec9c 100644
--- a/lld/test/COFF/export.test
+++ b/lld/test/COFF/export.test
@@ -76,18 +76,55 @@ SYMTAB: exportfn3 in export.test.tmp.DLL
# RUN: lld-link /out:%t.dll /dll %t.obj /export:foo=kernel32.foobar
# RUN: llvm-objdump -p %t.dll | FileCheck --check-prefix=FORWARDER %s
+# RUN: llvm-nm -M %t.lib | FileCheck --check-prefix=SYMTAB-FWD %s
# RUN: echo "EXPORTS foo=kernel32.foobar" > %t.def
-# RUN: lld-link /out:%t.dll /dll %t.obj /def:%t.def
-# RUN: llvm-objdump -p %t.dll | FileCheck --check-prefix=FORWARDER %s
+# RUN: lld-link /out:%t-def.dll /dll %t.obj /def:%t.def
+# RUN: llvm-objdump -p %t-def.dll | FileCheck --check-prefix=FORWARDER %s
+# RUN: llvm-nm -M %t-def.lib | FileCheck --check-prefix=SYMTAB-FWD %s
FORWARDER: Export Table:
-FORWARDER: DLL name: export.test.tmp.dll
+FORWARDER: DLL name: export.test.tmp
FORWARDER: Ordinal base: 1
FORWARDER: Ordinal RVA Name
FORWARDER: 1 0x1010 exportfn
FORWARDER: 2 foo (forwarded to kernel32.foobar)
+SYMTAB-FWD: __imp_exportfn3 in export.test.tmp
+SYMTAB-FWD-NEXT: __imp_foo in export.test.tmp
+SYMTAB-FWD-NEXT: exportfn3 in export.test.tmp
+SYMTAB-FWD-NEXT: foo in export.test.tmp
+
+# RUN: lld-link /out:%t-fwd-priv.dll /dll %t.obj /export:foo=kernel32.foobar,DATA,PRIVATE
+# RUN: llvm-objdump -p %t-fwd-priv.dll | FileCheck --check-prefix=FORWARDER %s
+# RUN: llvm-nm -M %t-fwd-priv.lib | FileCheck --check-prefix=SYMTAB-FWD-PRIV %s
+
+SYMTAB-FWD-PRIV: __imp_exportfn3 in export.test.tmp-fwd-priv
+SYMTAB-FWD-PRIV-NOT: __imp_foo
+SYMTAB-FWD-PRIV-NEXT: exportfn3 in export.test.tmp-fwd-priv
+SYMTAB-FWD-PRIV-NOT: foo
+
+# RUN: echo "EXPORTS foo=kernel32.foobar DATA PRIVATE" > %t-fwd-priv.def
+# RUN: lld-link /out:%t-fwd-priv-def.dll /dll %t.obj /def:%t-fwd-priv.def
+# RUN: llvm-objdump -p %t-fwd-priv-def.dll | FileCheck --check-prefix=FORWARDER %s
+# RUN: llvm-nm -M %t-fwd-priv-def.lib | FileCheck --check-prefix=SYMTAB-FWD-PRIV %s
+
+# RUN: lld-link /out:%t-fwd-ord.dll /dll %t.obj /export:foo=kernel32.foobar,@3,NONAME
+# RUN: llvm-objdump -p %t-fwd-ord.dll | FileCheck --check-prefix=FORWARDER-ORD %s
+# RUN: llvm-nm -M %t-fwd-ord.lib | FileCheck --check-prefix=SYMTAB-FWD %s
+
+FORWARDER-ORD: Export Table:
+FORWARDER-ORD-NEXT: DLL name: export.test.tmp-fwd-ord
+FORWARDER-ORD-NEXT: Ordinal base: 3
+FORWARDER-ORD-NEXT: Ordinal RVA Name
+FORWARDER-ORD-NEXT: 3 (forwarded to kernel32.foobar)
+FORWARDER-ORD-NEXT: 4 0x1010 exportfn3
+
+# RUN: echo "EXPORTS foo=kernel32.foobar @3 NONAME" > %t-fwd-ord.def
+# RUN: lld-link /out:%t-fwd-ord-def.dll /dll %t.obj /def:%t-fwd-ord.def
+# RUN: llvm-objdump -p %t-fwd-ord-def.dll | FileCheck --check-prefix=FORWARDER-ORD %s
+# RUN: llvm-nm -M %t-fwd-ord-def.lib | FileCheck --check-prefix=SYMTAB-FWD %s
+
# RUN: lld-link /out:%t.dll /dll %t.obj /merge:.rdata=.text /export:exportfn1 /export:exportfn2
# RUN: llvm-objdump -p %t.dll | FileCheck --check-prefix=MERGE --match-full-lines %s
diff --git a/lld/test/COFF/exportas.test b/lld/test/COFF/exportas.test
index c0295c3d7fb7..d70547c39b40 100644
--- a/lld/test/COFF/exportas.test
+++ b/lld/test/COFF/exportas.test
@@ -9,6 +9,77 @@ RUN: lld-link -out:out1.dll -dll -noentry test.obj test.lib
RUN: llvm-readobj --coff-imports out1.dll | FileCheck --check-prefix=IMPORT %s
IMPORT: Symbol: expfunc
+Pass -export argument with EXPORTAS.
+
+RUN: llvm-mc -filetype=obj -triple=x86_64-windows func.s -o func.obj
+RUN: lld-link -out:out2.dll -dll -noentry func.obj -export:func,EXPORTAS,expfunc
+RUN: llvm-readobj --coff-exports out2.dll | FileCheck --check-prefix=EXPORT %s
+EXPORT: Name: expfunc
+
+RUN: llvm-readobj out2.lib | FileCheck --check-prefix=IMPLIB %s
+IMPLIB: Name type: export as
+IMPLIB-NEXT: Export name: expfunc
+IMPLIB-NEXT: Symbol: __imp_func
+IMPLIB-NEXT: Symbol: func
+
+Use .drectve section with EXPORTAS.
+
+RUN: llvm-mc -filetype=obj -triple=x86_64-windows drectve.s -o drectve.obj
+RUN: lld-link -out:out3.dll -dll -noentry func.obj drectve.obj
+RUN: llvm-readobj --coff-exports out3.dll | FileCheck --check-prefix=EXPORT %s
+RUN: llvm-readobj out3.lib | FileCheck --check-prefix=IMPLIB %s
+
+Use a .def file with EXPORTAS.
+
+RUN: lld-link -out:out4.dll -dll -noentry func.obj -def:test.def
+RUN: llvm-readobj --coff-exports out4.dll | FileCheck --check-prefix=EXPORT %s
+RUN: llvm-readobj out4.lib | FileCheck --check-prefix=IMPLIB %s
+
+Use a .def file with EXPORTAS in a forwarding export.
+
+RUN: lld-link -out:out5.dll -dll -noentry func.obj -def:test2.def
+RUN: llvm-readobj --coff-exports out5.dll | FileCheck --check-prefix=FORWARD-EXPORT %s
+FORWARD-EXPORT: Export {
+FORWARD-EXPORT-NEXT: Ordinal: 1
+FORWARD-EXPORT-NEXT: Name: expfunc
+FORWARD-EXPORT-NEXT: ForwardedTo: otherdll.otherfunc
+FORWARD-EXPORT-NEXT: }
+
+RUN: llvm-readobj out5.lib | FileCheck --check-prefix=FORWARD-IMPLIB %s
+FORWARD-IMPLIB: Name type: export as
+FORWARD-IMPLIB-NEXT: Export name: expfunc
+FORWARD-IMPLIB-NEXT: Symbol: __imp_func
+FORWARD-IMPLIB-NEXT: Symbol: func
+
+Pass -export argument with EXPORTAS in a forwarding export.
+
+RUN: lld-link -out:out6.dll -dll -noentry func.obj -export:func=otherdll.otherfunc,EXPORTAS,expfunc
+RUN: llvm-readobj --coff-exports out6.dll | FileCheck --check-prefix=FORWARD-EXPORT %s
+RUN: llvm-readobj out6.lib | FileCheck --check-prefix=FORWARD-IMPLIB %s
+
+Pass -export argument with EXPORTAS in a data export.
+
+RUN: lld-link -out:out7.dll -dll -noentry func.obj -export:func,DATA,@5,EXPORTAS,expfunc
+RUN: llvm-readobj --coff-exports out7.dll | FileCheck --check-prefix=ORD %s
+ORD: Ordinal: 5
+ORD-NEXT: Name: expfunc
+
+RUN: llvm-readobj out7.lib | FileCheck --check-prefix=ORD-IMPLIB %s
+ORD-IMPLIB: Type: data
+ORD-IMPLIB-NEXT: Name type: export as
+ORD-IMPLIB-NEXT: Export name: expfunc
+ORD-IMPLIB-NEXT: Symbol: __imp_func
+
+Check invalid EXPORTAS syntax.
+
+RUN: not lld-link -out:err1.dll -dll -noentry func.obj -export:func,EXPORTAS, 2>&1 | \
+RUN: FileCheck --check-prefix=ERR1 %s
+ERR1: error: invalid EXPORTAS value: {{$}}
+
+RUN: not lld-link -out:err2.dll -dll -noentry func.obj -export:func,EXPORTAS,expfunc,DATA 2>&1 | \
+RUN: FileCheck --check-prefix=ERR2 %s
+ERR2: error: invalid EXPORTAS value: expfunc,DATA
+
#--- test.s
.section ".test", "rd"
.rva __imp_func
@@ -17,3 +88,20 @@ IMPORT: Symbol: expfunc
LIBRARY test.dll
EXPORTS
func EXPORTAS expfunc
+
+#--- test2.def
+LIBRARY test.dll
+EXPORTS
+ func=otherdll.otherfunc EXPORTAS expfunc
+
+#--- func.s
+ .text
+ .globl func
+ .p2align 2, 0x0
+func:
+ movl $1, %eax
+ retq
+
+#--- drectve.s
+ .section .drectve, "yn"
+ .ascii " -export:func,EXPORTAS,expfunc"
diff --git a/lld/test/ELF/aarch64-gnu-ifunc-nonpreemptable.s b/lld/test/ELF/aarch64-gnu-ifunc-nonpreemptable.s
index 4f33dde9d1a9..54a27a8cc0d9 100644
--- a/lld/test/ELF/aarch64-gnu-ifunc-nonpreemptable.s
+++ b/lld/test/ELF/aarch64-gnu-ifunc-nonpreemptable.s
@@ -65,14 +65,14 @@ main:
# PIE-EMPTY:
# PIE-NEXT: <myfunc>:
# PIE-NEXT: 10270: adrp x16, 0x30000
-# PIE-NEXT: 10274: ldr x17, [x16, #896]
-# PIE-NEXT: 10278: add x16, x16, #896
-# PIE-NEXT: 1027c: br x17
+# PIE-NEXT: ldr x17, [x16, #832]
+# PIE-NEXT: add x16, x16, #832
+# PIE-NEXT: br x17
# PIE-RELOC: .rela.dyn {
-# PIE-RELOC-NEXT: 0x30380 R_AARCH64_IRELATIVE - 0x10260
+# PIE-RELOC-NEXT: 0x30340 R_AARCH64_IRELATIVE - 0x10260
# PIE-RELOC-NEXT: }
# PIE-RELOC: Hex dump of section '.got.plt':
-# NO-APPLY: 0x00030380 00000000 00000000
-# APPLY: 0x00030380 60020100 00000000
+# NO-APPLY: 0x00030340 00000000 00000000
+# APPLY: 0x00030340 60020100 00000000
# PIE-RELOC-EMPTY:
diff --git a/lld/test/ELF/aarch64-gnu-ifunc.s b/lld/test/ELF/aarch64-gnu-ifunc.s
index dee24779d913..d76b54eabf8a 100644
--- a/lld/test/ELF/aarch64-gnu-ifunc.s
+++ b/lld/test/ELF/aarch64-gnu-ifunc.s
@@ -11,13 +11,12 @@
// CHECK-NEXT: Type: SHT_RELA
// CHECK-NEXT: Flags [
// CHECK-NEXT: SHF_ALLOC
-// CHECK-NEXT: SHF_INFO_LINK
// CHECK-NEXT: ]
// CHECK-NEXT: Address: [[RELA:.*]]
// CHECK-NEXT: Offset: 0x158
// CHECK-NEXT: Size: 48
// CHECK-NEXT: Link: 0
-// CHECK-NEXT: Info: 4
+// CHECK-NEXT: Info: 0
// CHECK-NEXT: AddressAlignment: 8
// CHECK-NEXT: EntrySize: 24
// CHECK-NEXT: }
diff --git a/lld/test/ELF/allow-multiple-definition.s b/lld/test/ELF/allow-multiple-definition.s
index 492784a3601d..96fa2627e1bf 100644
--- a/lld/test/ELF/allow-multiple-definition.s
+++ b/lld/test/ELF/allow-multiple-definition.s
@@ -9,6 +9,9 @@
# RUN: llvm-objdump --no-print-imm-hex -d %t3 | FileCheck %s
# RUN: llvm-objdump --no-print-imm-hex -d %t4 | FileCheck --check-prefix=REVERT %s
+# RUN: ld.lld --noinhibit-exec %t2 %t1 -o /dev/null 2>&1 | FileCheck %s --check-prefix=WARN
+# WARN: warning: duplicate symbol: _bar
+
# RUN: ld.lld -z muldefs --fatal-warnings %t1 %t2 -o %t3
# RUN: ld.lld -z muldefs --fatal-warnings %t2 %t1 -o %t4
# RUN: llvm-objdump --no-print-imm-hex -d %t3 | FileCheck %s
diff --git a/lld/test/ELF/allow-shlib-undefined.s b/lld/test/ELF/allow-shlib-undefined.s
index 4b7151c8bc0d..c69c1ea20ce3 100644
--- a/lld/test/ELF/allow-shlib-undefined.s
+++ b/lld/test/ELF/allow-shlib-undefined.s
@@ -31,10 +31,12 @@
## Test some cases when a relocatable object file provides a non-exported definition.
# RUN: not ld.lld main.o a.so def-hidden.o -o /dev/null 2>&1 | FileCheck %s --check-prefix=NONEXPORTED
+# RUN: not ld.lld main.o def-hidden.o a.so -o /dev/null 2>&1 | FileCheck %s --check-prefix=NONEXPORTED
# RUN: not ld.lld main.o a.so def-hidden.o -shared --no-allow-shlib-undefined -o /dev/null 2>&1 | FileCheck %s --check-prefix=NONEXPORTED
# RUN: ld.lld main.o a.so def-hidden.o --allow-shlib-undefined --fatal-warnings -o /dev/null
## Test a relocatable object file definition that is converted to STB_LOCAL.
# RUN: not ld.lld main.o a.so def-hidden.o --version-script=local.ver -o /dev/null 2>&1 | FileCheck %s --check-prefix=NONEXPORTED
+# RUN: not ld.lld main.o def-hidden.o a.so --version-script=local.ver -o /dev/null 2>&1 | FileCheck %s --check-prefix=NONEXPORTED
## The section containing the definition is discarded, and we report an error.
# RUN: not ld.lld --gc-sections main.o a.so def-hidden.o -o /dev/null 2>&1 | FileCheck %s
diff --git a/lld/test/ELF/arm-gnu-ifunc.s b/lld/test/ELF/arm-gnu-ifunc.s
index 562478256fd8..d49ca18e991e 100644
--- a/lld/test/ELF/arm-gnu-ifunc.s
+++ b/lld/test/ELF/arm-gnu-ifunc.s
@@ -30,13 +30,12 @@ _start:
// CHECK-NEXT: Type: SHT_REL
// CHECK-NEXT: Flags [
// CHECK-NEXT: SHF_ALLOC
-// CHECK-NEXT: SHF_INFO_LINK
// CHECK-NEXT: ]
// CHECK-NEXT: Address: 0x100F4
// CHECK-NEXT: Offset: 0xF4
// CHECK-NEXT: Size: 16
// CHECK-NEXT: Link:
-// CHECK-NEXT: Info: 4
+// CHECK-NEXT: Info: 0
// CHECK: Name: .iplt
// CHECK-NEXT: Type: SHT_PROGBITS
// CHECK-NEXT: Flags [
diff --git a/lld/test/ELF/common-gc2.s b/lld/test/ELF/common-gc2.s
index fec1c4be86b5..1ecaef7d9af5 100644
--- a/lld/test/ELF/common-gc2.s
+++ b/lld/test/ELF/common-gc2.s
@@ -1,7 +1,9 @@
# REQUIRES: x86
-# RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux %s -o %t
-# RUN: ld.lld -gc-sections -export-dynamic %t -o %t1
-# RUN: llvm-readobj --dyn-symbols %t1 | FileCheck %s
+# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o %t.o
+# RUN: llvm-mc -filetype=obj -triple=x86_64 /dev/null -o %t2.o
+# RUN: ld.lld -shared -soname=t2 %t2.o -o %t2.so
+# RUN: ld.lld -gc-sections -export-dynamic %t.o %t2.so -o %t
+# RUN: llvm-readobj --dyn-symbols %t | FileCheck %s
# CHECK: Name: bar
# CHECK: Name: foo
diff --git a/lld/test/ELF/driver.test b/lld/test/ELF/driver.test
index b8f4584a3267..49deb902aa0f 100644
--- a/lld/test/ELF/driver.test
+++ b/lld/test/ELF/driver.test
@@ -3,55 +3,47 @@
# RUN: not ld.lld --unknown1 --unkn=own2 -m foo /no/such/file -lnosuchlib \
# RUN: 2>&1 | FileCheck -check-prefix=UNKNOWN %s
-# UNKNOWN: unknown argument '--unknown1'
-# UNKNOWN: unknown argument '--unkn=own2'
-# UNKNOWN: unknown emulation: foo
-# UNKNOWN: cannot open /no/such/file
-# UNKNOWN: unable to find library -lnosuchlib
+# UNKNOWN: error: unknown argument '--unknown1'
+# UNKNOWN: error: unknown argument '--unkn=own2'
+# UNKNOWN: error: unknown emulation: foo
+# UNKNOWN: error: cannot open /no/such/file
+# UNKNOWN: error: unable to find library -lnosuchlib
# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t
# RUN: not ld.lld %t -o /no/such/file 2>&1 | FileCheck -check-prefix=MISSING %s
-# MISSING: cannot open output file /no/such/file
+# MISSING: error: cannot open output file /no/such/file
# RUN: ld.lld --help 2>&1 | FileCheck -check-prefix=HELP %s
# HELP: USAGE:
# HELP: : supported targets:{{.*}} elf
# RUN: not ld.lld --versin 2>&1 | FileCheck -check-prefix=SPELLVERSION %s
-# SPELLVERSION: unknown argument '--versin', did you mean '--version'
+# SPELLVERSION: error: unknown argument '--versin', did you mean '--version'
## Attempt to link DSO with -r
# RUN: ld.lld -shared %t -o %t.so
# RUN: not ld.lld -r %t.so %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR %s
-# ERR: attempted static link of dynamic object
+# ERR: error: attempted static link of dynamic object
-## Attempt to use -r and -shared together
-# RUN: not ld.lld -r -shared %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR2 %s
-# ERR2: -r and -shared may not be used together
+# RUN: not ld.lld -r -shared -pie --export-dynamic %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR2 %s
+# ERR2: error: -r and -shared may not be used together
+# ERR2: error: -r and -pie may not be used together
+# ERR2: error: -r and --export-dynamic may not be used together
-## Attempt to use -r and --gdb-index together
-# RUN: not ld.lld -r --gdb-index %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR4 %s
-# ERR4: -r and --gdb-index may not be used together
+# RUN: not ld.lld -r --icf=all --gdb-index %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR4 %s
+# ERR4: error: -r and --gdb-index may not be used together
+# ERR4: error: -r and --icf may not be used together
-## Attempt to use -r and --icf together
-# RUN: not ld.lld -r --icf=all %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR5 %s
-# ERR5: -r and --icf may not be used together
-
-## Attempt to use -r and -pie together
-# RUN: not ld.lld -r -pie %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR6 %s
-# ERR6: -r and -pie may not be used together
-
-## Attempt to use -shared and -pie together
# RUN: not ld.lld -shared -pie %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR7 %s
-# ERR7: -shared and -pie may not be used together
+# ERR7: error: -shared and -pie may not be used together
## "--output=foo" is equivalent to "-o foo".
# RUN: not ld.lld %t --output=/no/such/file 2>&1 | FileCheck -check-prefix=ERR8 %s
-# ERR8: cannot open output file /no/such/file
+# ERR8: error: cannot open output file /no/such/file
## "-output=foo" is equivalent to "-o utput=foo".
# RUN: not ld.lld %t -output=/no/such/file 2>&1 | FileCheck -check-prefix=ERR9 %s
-# ERR9: cannot open output file utput=/no/such/file
+# ERR9: error: cannot open output file utput=/no/such/file
# RUN: ld.lld %t -z foo -o /dev/null 2>&1 | FileCheck -check-prefix=ERR10 %s --implicit-check-not=warning:
# RUN: ld.lld %t -z foo -z rel -z rela -z max-page-size=1 -z common-page-size=1 -o /dev/null --version 2>&1 | \
@@ -68,10 +60,6 @@
# RUN: not ld.lld %t -z max-page-size 2>&1 | FileCheck -check-prefix=ERR11 %s
# ERR11: error: invalid max-page-size:
-## Attempt to use -r and --export-dynamic together
-# RUN: not ld.lld -r -export-dynamic %t -o /dev/null 2>&1 | FileCheck -check-prefix=ERR12 %s
-# ERR12: -r and --export-dynamic may not be used together
-
.globl _start
_start:
nop
diff --git a/lld/test/ELF/executable-undefined-ignoreall.s b/lld/test/ELF/executable-undefined-ignoreall.s
index cc38e17cdf61..073b22bd8454 100644
--- a/lld/test/ELF/executable-undefined-ignoreall.s
+++ b/lld/test/ELF/executable-undefined-ignoreall.s
@@ -7,8 +7,6 @@
# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t.o
# RUN: ld.lld %t.o -o %t --unresolved-symbols=ignore-all -pie
# RUN: llvm-readobj -r %t | FileCheck %s
-# RUN: ld.lld %t.o -o %t --unresolved-symbols=ignore-all --export-dynamic
-# RUN: llvm-readobj -r %t | FileCheck %s
# CHECK: Relocations [
# CHECK-NEXT: Section ({{.*}}) .rela.plt {
diff --git a/lld/test/ELF/gc-sections-with-provide.s b/lld/test/ELF/gc-sections-with-provide.s
new file mode 100644
index 000000000000..3e5b1b1efe6c
--- /dev/null
+++ b/lld/test/ELF/gc-sections-with-provide.s
@@ -0,0 +1,60 @@
+# REQUIRES: x86
+
+# This test verifies that --gc-sections correctly collects an unused
+# section when the symbol defined in that section is only referenced by
+# an unused PROVIDE symbol.
+
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=x86_64 a.s -o a.o
+# RUN: ld.lld -o a_nogc a.o -T script.t
+# RUN: llvm-nm a_nogc | FileCheck -check-prefix=NOGC %s
+# RUN: ld.lld -o a_gc a.o --gc-sections --print-gc-sections -T script.t | FileCheck --check-prefix=GC_LINK %s
+# RUN: llvm-nm a_gc | FileCheck -check-prefix=GC %s
+
+NOGC-NOT: another_unused
+NOGC: another_used
+NOGC: bar
+NOGC: baz
+NOGC: baz_ref
+NOGC: foo
+NOGC-NOT: unused
+NOGC: used
+
+GC_LINK: removing unused section a.o:(.text.bar)
+
+GC-NOT: another_unused
+GC: another_used
+GC-NOT: bar
+GC: baz
+GC: baz_ref
+GC: foo
+GC-NOT: unused
+GC: used
+
+#--- a.s
+.global _start
+_start:
+ call foo
+ call used
+
+.section .text.foo,"ax",@progbits
+foo:
+ nop
+
+.section .text.bar,"ax",@progbits
+.global bar
+bar:
+ nop
+
+.section .text.baz,"ax",@progbits
+.global baz
+baz:
+ nop
+
+
+#--- script.t
+PROVIDE(unused = bar + used);
+PROVIDE(used = another_used);
+PROVIDE(baz_ref = baz);
+PROVIDE(another_used = baz_ref);
+PROVIDE(another_unused = unused + bar + 0x1);
diff --git a/lld/test/ELF/gnu-ifunc-dyntags.s b/lld/test/ELF/gnu-ifunc-dyntags.s
index fd80dc24f2f8..57a17245a3e8 100644
--- a/lld/test/ELF/gnu-ifunc-dyntags.s
+++ b/lld/test/ELF/gnu-ifunc-dyntags.s
@@ -9,15 +9,13 @@
# CHECK: Name Size VMA
# CHECK: .rela.dyn 00000030 0000000000000248
-# CHECK: .got.plt 00000010 00000000000033b0
+# CHECK: .got.plt 00000010 0000000000003370
# TAGS: Tag Type Name/Value
# TAGS: 0x0000000000000007 RELA 0x248
# TAGS: 0x0000000000000008 RELASZ 48 (bytes)
-# TAGS: 0x0000000000000017 JMPREL 0x0
-# TAGS: 0x0000000000000002 PLTRELSZ 0 (bytes)
-# TAGS: 0x0000000000000003 PLTGOT 0x33B0
-# TAGS: 0x0000000000000014 PLTREL RELA
+# TAGS-NOT: JMPREL
+# TAGS-NOT: PLTREL
# TAGS: Relocations [
# TAGS-NEXT: Section {{.*}} .rela.dyn {
diff --git a/lld/test/ELF/gnu-ifunc-i386.s b/lld/test/ELF/gnu-ifunc-i386.s
index b502fd6e9ae2..43b19b27ea4e 100644
--- a/lld/test/ELF/gnu-ifunc-i386.s
+++ b/lld/test/ELF/gnu-ifunc-i386.s
@@ -11,13 +11,12 @@
// CHECK-NEXT: Type: SHT_REL
// CHECK-NEXT: Flags [
// CHECK-NEXT: SHF_ALLOC
-// CHECK-NEXT: SHF_INFO_LINK
// CHECK-NEXT: ]
// CHECK-NEXT: Address: [[RELA:.*]]
// CHECK-NEXT: Offset: 0xD4
// CHECK-NEXT: Size: 16
// CHECK-NEXT: Link: 0
-// CHECK-NEXT: Info: 4
+// CHECK-NEXT: Info: 0
// CHECK-NEXT: AddressAlignment: 4
// CHECK-NEXT: EntrySize: 8
// CHECK-NEXT: }
diff --git a/lld/test/ELF/linkerscript/symbolreferenced.s b/lld/test/ELF/linkerscript/symbolreferenced.s
index ba7a7721ea96..6f583d20e276 100644
--- a/lld/test/ELF/linkerscript/symbolreferenced.s
+++ b/lld/test/ELF/linkerscript/symbolreferenced.s
@@ -21,11 +21,31 @@
# RUN: ld.lld -o chain -T chain.t a.o
# RUN: llvm-nm chain | FileCheck %s
-# CHECK: 0000000000001000 a f1
-# CHECK-NEXT: 0000000000001000 A f2
-# CHECK-NEXT: 0000000000001000 a g1
-# CHECK-NEXT: 0000000000001000 A g2
-# CHECK-NEXT: 0000000000001000 A newsym
+# CHECK-NOT: another_unused
+# CHECK: 0000000000007000 a f1
+# CHECK-NEXT: 0000000000007000 A f2
+# CHECK-NEXT: 0000000000007000 A f3
+# CHECK-NEXT: 0000000000007000 A f4
+# CHECK-NEXT: 0000000000006000 A f5
+# CHECK-NEXT: 0000000000003000 A f6
+# CHECK-NEXT: 0000000000001000 A f7
+# CHECK-NOT: g1
+# CHECK-NOT: g2
+# CHECK-NEXT: 0000000000007500 A newsym
+# CHECK: 0000000000002000 A u
+# CHECK-NOT: unused
+# CHECK-NEXT: 0000000000002000 A v
+# CHECK-NEXT: 0000000000002000 A w
+
+
+# RUN: ld.lld -o chain_with_cycle -T chain_with_cycle.t a.o
+# RUN: llvm-nm chain_with_cycle | FileCheck %s --check-prefix=CHAIN_WITH_CYCLE
+
+# CHAIN_WITH_CYCLE: 000 A f1
+# CHAIN_WITH_CYCLE: 000 A f2
+# CHAIN_WITH_CYCLE: 000 A f3
+# CHAIN_WITH_CYCLE: 000 A f4
+# CHAIN_WITH_CYCLE: 000 A newsym
# RUN: not ld.lld -T chain2.t a.o 2>&1 | FileCheck %s --check-prefix=ERR --implicit-check-not=error:
# ERR-COUNT-3: error: chain2.t:1: symbol not found: undef
@@ -40,13 +60,30 @@ patatino:
movl newsym, %eax
#--- chain.t
-PROVIDE(f2 = 0x1000);
+PROVIDE(f7 = 0x1000);
+PROVIDE(f5 = f6 + 0x3000);
+PROVIDE(f6 = f7 + 0x2000);
+PROVIDE(f4 = f5 + 0x1000);
+PROVIDE(f3 = f4);
+PROVIDE(f2 = f3);
PROVIDE_HIDDEN(f1 = f2);
-PROVIDE(newsym = f1);
+PROVIDE(newsym = f1 + 0x500);
+
+u = v;
+PROVIDE(w = 0x2000);
+PROVIDE(v = w);
PROVIDE(g2 = 0x1000);
PROVIDE_HIDDEN(g1 = g2);
PROVIDE(unused = g1);
+PROVIDE_HIDDEN(another_unused = g1);
+
+#--- chain_with_cycle.t
+PROVIDE(f1 = f2 + f3);
+PROVIDE(f2 = f3 + f4);
+PROVIDE(f3 = f4);
+PROVIDE(f4 = f1);
+PROVIDE(newsym = f1);
#--- chain2.t
PROVIDE(f2 = undef);
diff --git a/lld/test/ELF/lto/libcall-archive.ll b/lld/test/ELF/lto/libcall-archive.ll
index bd91d0391dc2..0f3d9c37d729 100644
--- a/lld/test/ELF/lto/libcall-archive.ll
+++ b/lld/test/ELF/lto/libcall-archive.ll
@@ -4,11 +4,15 @@
; RUN: llvm-as -o %t2.o %S/Inputs/libcall-archive.ll
; RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux -o %t3.o %S/Inputs/libcall-archive.s
; RUN: llvm-ar rcs %t.a %t2.o %t3.o
-; RUN: ld.lld -o %t %t.o %t.a
+; RUN: ld.lld --why-extract=%t.why.txt -o %t %t.o %t.a
+; RUN: FileCheck %s --input-file=%t.why.txt --check-prefix=CHECK-WHY
; RUN: llvm-nm %t | FileCheck %s
; RUN: ld.lld -o %t2 %t.o --start-lib %t2.o %t3.o --end-lib
; RUN: llvm-nm %t2 | FileCheck %s
+; CHECK-WHY: reference extracted symbol
+; CHECK-WHY-NEXT: <libcall> {{.*}}tmp.a({{.*}}tmp2.o) memcpy
+
; CHECK-NOT: T __sync_val_compare_and_swap_8
; CHECK: T _start
; CHECK: T memcpy
diff --git a/lld/test/ELF/pack-dyn-relocs-ifunc.s b/lld/test/ELF/pack-dyn-relocs-ifunc.s
new file mode 100644
index 000000000000..6168d06f99d9
--- /dev/null
+++ b/lld/test/ELF/pack-dyn-relocs-ifunc.s
@@ -0,0 +1,49 @@
+# REQUIRES: aarch64
+## Prior to Android V, there was a bug that caused RELR relocations to be
+## applied after packed relocations. This meant that resolvers referenced by
+## IRELATIVE relocations in the packed relocation section would read unrelocated
+## globals when --pack-relative-relocs=android+relr is enabled. Work around this
+## by placing IRELATIVE in .rela.plt.
+
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-android a.s -o a.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-android b.s -o b.o
+# RUN: ld.lld -shared b.o -o b.so
+# RUN: ld.lld -pie --pack-dyn-relocs=android+relr -z separate-loadable-segments a.o b.so -o a
+# RUN: llvm-readobj -r a | FileCheck %s
+# RUN: llvm-objdump -d a | FileCheck %s --check-prefix=ASM
+
+# CHECK: .relr.dyn {
+# CHECK-NEXT: 0x30000 R_AARCH64_RELATIVE -
+# CHECK-NEXT: }
+# CHECK: .rela.plt {
+# CHECK-NEXT: 0x30020 R_AARCH64_JUMP_SLOT bar 0x0
+# CHECK-NEXT: 0x30028 R_AARCH64_IRELATIVE - 0x10000
+# CHECK-NEXT: }
+
+# ASM: <.iplt>:
+# ASM-NEXT: adrp x16, 0x30000
+# ASM-NEXT: ldr x17, [x16, #0x28]
+# ASM-NEXT: add x16, x16, #0x28
+# ASM-NEXT: br x17
+
+#--- a.s
+.text
+.type foo, %gnu_indirect_function
+.globl foo
+foo:
+ ret
+
+.globl _start
+_start:
+ bl foo
+ bl bar
+
+.data
+.balign 8
+.quad .data
+
+#--- b.s
+.globl bar
+bar:
+ ret
diff --git a/lld/test/ELF/ppc32-ifunc-nonpreemptible-pic.s b/lld/test/ELF/ppc32-ifunc-nonpreemptible-pic.s
index a93f3cecb0c6..c9a0381b610a 100644
--- a/lld/test/ELF/ppc32-ifunc-nonpreemptible-pic.s
+++ b/lld/test/ELF/ppc32-ifunc-nonpreemptible-pic.s
@@ -10,16 +10,16 @@
# RUN: llvm-readelf -x .got2 %t | FileCheck --check-prefix=HEX2 %s
# RELOC: .rela.dyn {
-# RELOC-NEXT: 0x3024C R_PPC_RELATIVE - 0x101A0
-# RELOC-NEXT: 0x30250 R_PPC_IRELATIVE - 0x10188
+# RELOC-NEXT: 0x3022C R_PPC_RELATIVE - 0x101A0
+# RELOC-NEXT: 0x30230 R_PPC_IRELATIVE - 0x10188
# RELOC-NEXT: }
# SYM: 000101a0 0 FUNC GLOBAL DEFAULT {{.*}} func
# HEX: Hex dump of section '.got2':
-# HEX-NEXT: 0x0003024c 00000000 ....
+# HEX-NEXT: 0x0003022c 00000000 ....
# HEX2: Hex dump of section '.got2':
-# HEX2-NEXT: 0x0003024c 000101a0 ....
+# HEX2-NEXT: 0x0003022c 000101a0 ....
.section .got2,"aw"
.long func
diff --git a/lld/test/ELF/relro-non-contiguous-script-data.s b/lld/test/ELF/relro-non-contiguous-script-data.s
index fd485e89167f..530fc7c84eb9 100644
--- a/lld/test/ELF/relro-non-contiguous-script-data.s
+++ b/lld/test/ELF/relro-non-contiguous-script-data.s
@@ -1,19 +1,21 @@
// REQUIRES: x86
+// RUN: llvm-mc -filetype=obj -triple=x86_64 /dev/null -o %t2.o
+// RUN: ld.lld -shared -soname=t2 %t2.o -o %t2.so
// RUN: echo "SECTIONS { \
// RUN: .dynamic : { *(.dynamic) } \
// RUN: .non_ro : { . += 1; } \
// RUN: .jcr : { *(.jcr) } \
// RUN: } " > %t.script
// RUN: llvm-mc -filetype=obj -triple=x86_64-pc-linux %s -o %t.o
-// RUN: not ld.lld --export-dynamic %t.o -o /dev/null --script=%t.script 2>&1 | FileCheck %s
+// RUN: not ld.lld %t.o %t2.so -o /dev/null --script=%t.script 2>&1 | FileCheck %s
// RUN: echo "SECTIONS { \
// RUN: .dynamic : { *(.dynamic) } \
// RUN: .non_ro : { BYTE(1); } \
// RUN: .jcr : { *(.jcr) } \
// RUN: } " > %t2.script
-// RUN: not ld.lld --export-dynamic %t.o -o /dev/null --script=%t2.script 2>&1 | FileCheck %s
+// RUN: not ld.lld %t.o %t2.so -o /dev/null --script=%t2.script 2>&1 | FileCheck %s
// CHECK: error: section: .jcr is not contiguous with other relro sections
diff --git a/lld/test/ELF/riscv-ifunc-nonpreemptible.s b/lld/test/ELF/riscv-ifunc-nonpreemptible.s
index 21c607545110..eda5548eef8b 100644
--- a/lld/test/ELF/riscv-ifunc-nonpreemptible.s
+++ b/lld/test/ELF/riscv-ifunc-nonpreemptible.s
@@ -16,11 +16,11 @@
# RUN: llvm-objdump -d --no-show-raw-insn %t.64 | FileCheck --check-prefix=DIS64 %s
# RELOC32: .rela.dyn {
-# RELOC32-NEXT: 0x3220 R_RISCV_IRELATIVE - 0x117C
+# RELOC32-NEXT: 0x3200 R_RISCV_IRELATIVE - 0x117C
# RELOC32-NEXT: }
# RELOC32-LABEL: Hex dump of section '.got.plt':
-# NO-APPLY-RELOC32: 0x00003220 00000000
-# APPLY-RELOC32: 0x00003220 7c110000
+# NO-APPLY-RELOC32: 0x00003200 00000000
+# APPLY-RELOC32: 0x00003200 7c110000
# RELOC32-EMPTY:
# SYM32: 0001190 0 FUNC GLOBAL DEFAULT {{.*}} func
@@ -30,18 +30,18 @@
# DIS32-NEXT: addi a0, a0, 0x10
# DIS32: Disassembly of section .iplt:
# DIS32: <func>:
-## 32-bit: &.got.plt[func]-. = 0x3220-0x1190 = 4096*2+144
+## 32-bit: &.got.plt[func]-. = 0x3200-0x1190 = 4096*2+0x70
# DIS32-NEXT: 1190: auipc t3, 0x2
-# DIS32-NEXT: lw t3, 0x90(t3)
+# DIS32-NEXT: lw t3, 0x70(t3)
# DIS32-NEXT: jalr t1, t3
# DIS32-NEXT: nop
# RELOC64: .rela.dyn {
-# RELOC64-NEXT: 0x3380 R_RISCV_IRELATIVE - 0x1260
+# RELOC64-NEXT: 0x3340 R_RISCV_IRELATIVE - 0x1260
# RELOC64-NEXT: }
# RELOC64-LABEL: Hex dump of section '.got.plt':
-# NO-APPLY-RELOC64: 0x00003380 00000000 00000000
-# APPLY-RELOC64: 0x00003380 60120000 00000000
+# NO-APPLY-RELOC64: 0x00003340 00000000 00000000
+# APPLY-RELOC64: 0x00003340 60120000 00000000
# RELOC64-EMPTY:
# SYM64: 000000000001270 0 FUNC GLOBAL DEFAULT {{.*}} func
@@ -51,9 +51,9 @@
# DIS64-NEXT: addi a0, a0, 0xc
# DIS64: Disassembly of section .iplt:
# DIS64: <func>:
-## 64-bit: &.got.plt[func]-. = 0x3380-0x1270 = 4096*2+272
+## 64-bit: &.got.plt[func]-. = 0x3340-0x1270 = 4096*2+0xd0
# DIS64-NEXT: 1270: auipc t3, 0x2
-# DIS64-NEXT: ld t3, 0x110(t3)
+# DIS64-NEXT: ld t3, 0xd0(t3)
# DIS64-NEXT: jalr t1, t3
# DIS64-NEXT: nop
diff --git a/lld/test/ELF/riscv-tlsdesc-relax.s b/lld/test/ELF/riscv-tlsdesc-relax.s
index fb24317e6535..5718d4175be1 100644
--- a/lld/test/ELF/riscv-tlsdesc-relax.s
+++ b/lld/test/ELF/riscv-tlsdesc-relax.s
@@ -33,12 +33,14 @@
# GD64-NEXT: c.add a0, tp
# GD64-NEXT: jal {{.*}} <foo>
## &.got[c]-. = 0x20c0+8 - 0x1020 = 0x10a8
+# GD64-LABEL: <.Ltlsdesc_hi1>:
# GD64-NEXT: 1020: auipc a4, 0x1
# GD64-NEXT: ld a5, 0xa8(a4)
# GD64-NEXT: addi a0, a4, 0xa8
# GD64-NEXT: jalr t0, 0x0(a5)
# GD64-NEXT: c.add a0, tp
## &.got[c]-. = 0x20c0+8 - 0x1032 = 0x1096
+# GD64-LABEL: <.Ltlsdesc_hi2>:
# GD64-NEXT: 1032: auipc a6, 0x1
# GD64-NEXT: ld a7, 0x96(a6)
# GD64-NEXT: addi a0, a6, 0x96
@@ -64,6 +66,7 @@
# LE64-NEXT: jal {{.*}} <foo>
# LE64-NEXT: R_RISCV_JAL foo
# LE64-NEXT: R_RISCV_RELAX *ABS*
+# LE64-LABEL: <.Ltlsdesc_hi1>:
# LE64-NEXT: addi a0, zero, 0x7ff
# LE64-NEXT: R_RISCV_TLSDESC_HI20 b
# LE64-NEXT: R_RISCV_RELAX *ABS*
@@ -71,6 +74,7 @@
# LE64-NEXT: R_RISCV_TLSDESC_ADD_LO12 .Ltlsdesc_hi1
# LE64-NEXT: R_RISCV_TLSDESC_CALL .Ltlsdesc_hi1
# LE64-NEXT: c.add a0, tp
+# LE64-LABEL: <.Ltlsdesc_hi2>:
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: R_RISCV_TLSDESC_HI20 b
# LE64-NEXT: addi zero, zero, 0x0
@@ -93,9 +97,11 @@
# LE64A-NEXT: addi a0, a0, -0x479
# LE64A-NEXT: c.add a0, tp
# LE64A-NEXT: jal {{.*}} <foo>
+# LE64A-LABEL: <.Ltlsdesc_hi1>:
# LE64A-NEXT: lui a0, 0x2
# LE64A-NEXT: addi a0, a0, -0x479
# LE64A-NEXT: c.add a0, tp
+# LE64A-LABEL: <.Ltlsdesc_hi2>:
# LE64A-NEXT: addi zero, zero, 0x0
# LE64A-NEXT: addi zero, zero, 0x0
# LE64A-NEXT: lui a0, 0x2
@@ -115,10 +121,12 @@
# IE64-NEXT: c.add a0, tp
# IE64-NEXT: jal {{.*}} <foo>
## &.got[c]-. = 0x120e0+8 - 0x11018 = 0x10d0
+# IE64-LABEL: <.Ltlsdesc_hi1>:
# IE64-NEXT: 11018: auipc a0, 0x1
# IE64-NEXT: ld a0, 0xd0(a0)
# IE64-NEXT: c.add a0, tp
## &.got[c]-. = 0x120e0+8 - 0x1102a = 0x10be
+# IE64-LABEL: <.Ltlsdesc_hi2>:
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: 1102a: auipc a0, 0x1
diff --git a/lld/test/ELF/riscv-tlsdesc.s b/lld/test/ELF/riscv-tlsdesc.s
index c583e15cf30c..935ecbddfbff 100644
--- a/lld/test/ELF/riscv-tlsdesc.s
+++ b/lld/test/ELF/riscv-tlsdesc.s
@@ -29,11 +29,13 @@
# RUN: ld.lld -e 0 -z now a.32.o c.32.so -o a.32.ie
# RUN: llvm-objdump --no-show-raw-insn -M no-aliases -h -d a.32.ie | FileCheck %s --check-prefix=IE32
-# RUN: llvm-mc -triple=riscv64 -filetype=obj d.s -o d.64.o
-# RUN: not ld.lld -shared -soname=d.64.so -o d.64.so d.64.o 2>&1 | FileCheck %s --check-prefix=BADTLSLABEL
+## Prior to https://github.com/llvm/llvm-project/pull/85817 the local TLSDESC
+## labels would be marked STT_TLS, resulting in an error "has an STT_TLS symbol but doesn't have an SHF_TLS section"
+# RUN: llvm-mc -triple=riscv64 -filetype=obj d.s -o d.64.o
+# RUN: ld.lld -shared -soname=d.64.so -o d.64.so d.64.o --fatal-warnings
# RUN: llvm-mc -triple=riscv32 -filetype=obj d.s -o d.32.o --defsym ELF32=1
-# RUN: not ld.lld -shared -soname=d.32.so -o d.32.so d.32.o 2>&1 | FileCheck %s --check-prefix=BADTLSLABEL
+# RUN: ld.lld -shared -soname=d.32.so -o d.32.so d.32.o --fatal-warnings
# GD64-RELA: .rela.dyn {
# GD64-RELA-NEXT: 0x2408 R_RISCV_TLSDESC - 0x7FF
@@ -74,14 +76,14 @@
# GD64-NEXT: add a0, a0, tp
## &.got[b]-. = 0x23e0+40 - 0x12f4 = 0x1114
-# GD64-NEXT: 12f4: auipc a2, 0x1
+# GD64: 12f4: auipc a2, 0x1
# GD64-NEXT: ld a3, 0x114(a2)
# GD64-NEXT: addi a0, a2, 0x114
# GD64-NEXT: jalr t0, 0x0(a3)
# GD64-NEXT: add a0, a0, tp
## &.got[c]-. = 0x23e0+24 - 0x1308 = 0x10f0
-# GD64-NEXT: 1308: auipc a4, 0x1
+# GD64: 1308: auipc a4, 0x1
# GD64-NEXT: ld a5, 0xf0(a4)
# GD64-NEXT: addi a0, a4, 0xf0
# GD64-NEXT: jalr t0, 0x0(a5)
@@ -89,7 +91,7 @@
# NOREL: no relocations
-# LE64-LABEL: <.text>:
+# LE64-LABEL: <.Ltlsdesc_hi0>:
## st_value(a) = 8
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: addi zero, zero, 0x0
@@ -97,12 +99,14 @@
# LE64-NEXT: addi a0, zero, 0x8
# LE64-NEXT: add a0, a0, tp
## st_value(b) = 2047
+# LE64-LABEL: <.Ltlsdesc_hi1>:
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: addi a0, zero, 0x7ff
# LE64-NEXT: add a0, a0, tp
## st_value(c) = 2048
+# LE64-LABEL: <.Ltlsdesc_hi2>:
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: addi zero, zero, 0x0
# LE64-NEXT: lui a0, 0x1
@@ -116,18 +120,20 @@
# IE64: .got 00000010 00000000000123a8
## a and b are optimized to use LE. c is optimized to IE.
-# IE64-LABEL: <.text>:
+# IE64-LABEL: <.Ltlsdesc_hi0>:
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi a0, zero, 0x8
# IE64-NEXT: add a0, a0, tp
+# IE64-LABEL: <.Ltlsdesc_hi1>:
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi a0, zero, 0x7ff
# IE64-NEXT: add a0, a0, tp
## &.got[c]-. = 0x123a8+8 - 0x112b8 = 0x10f8
+# IE64-LABEL: <.Ltlsdesc_hi2>:
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: addi zero, zero, 0x0
# IE64-NEXT: 112b8: auipc a0, 0x1
@@ -136,7 +142,7 @@
# IE32: .got 00000008 00012248
-# IE32-LABEL: <.text>:
+# IE32-LABEL: <.Ltlsdesc_hi0>:
## st_value(a) = 8
# IE32-NEXT: addi zero, zero, 0x0
# IE32-NEXT: addi zero, zero, 0x0
@@ -144,21 +150,20 @@
# IE32-NEXT: addi a0, zero, 0x8
# IE32-NEXT: add a0, a0, tp
## st_value(b) = 2047
+# IE32-LABEL: <.Ltlsdesc_hi1>:
# IE32-NEXT: addi zero, zero, 0x0
# IE32-NEXT: addi zero, zero, 0x0
# IE32-NEXT: addi zero, zero, 0x0
# IE32-NEXT: addi a0, zero, 0x7ff
# IE32-NEXT: add a0, a0, tp
## &.got[c]-. = 0x12248+4 - 0x111cc = 0x1080
+# IE32-LABEL: <.Ltlsdesc_hi2>:
# IE32-NEXT: addi zero, zero, 0x0
# IE32-NEXT: addi zero, zero, 0x0
# IE32-NEXT: 111cc: auipc a0, 0x1
# IE32-NEXT: lw a0, 0x80(a0)
# IE32-NEXT: add a0, a0, tp
-## FIXME This should not pass, but the code MC layer needs a fix to prevent this.
-# BADTLSLABEL: error: d.{{.*}}.o has an STT_TLS symbol but doesn't have an SHF_TLS section
-
#--- a.s
.macro load dst, src
.ifdef ELF32
diff --git a/lld/test/ELF/riscv-undefined-weak.s b/lld/test/ELF/riscv-undefined-weak.s
index 303a27f920c5..8a78e1f83833 100644
--- a/lld/test/ELF/riscv-undefined-weak.s
+++ b/lld/test/ELF/riscv-undefined-weak.s
@@ -1,4 +1,6 @@
# REQUIRES: riscv
+# RUN: llvm-mc -filetype=obj -triple=riscv64 /dev/null -o %t2.o
+# RUN: ld.lld -shared -soname=t2 %t2.o -o %t2.so
# RUN: llvm-mc -filetype=obj -triple=riscv64 -riscv-asm-relax-branches=0 %s -o %t.o
# RUN: llvm-readobj -r %t.o | FileCheck --check-prefix=RELOC %s
@@ -6,7 +8,7 @@
# RUN: llvm-objdump -d --no-show-raw-insn %t | FileCheck --check-prefixes=CHECK,PC %s
# RUN: llvm-readelf -x .data %t | FileCheck --check-prefixes=HEX,HEX-WITHOUT-PLT %s
-# RUN: ld.lld -e absolute %t.o -o %t --export-dynamic
+# RUN: ld.lld -e absolute %t.o -o %t %t2.so
# RUN: llvm-objdump -d --no-show-raw-insn %t | FileCheck --check-prefixes=CHECK,PLT %s
# RUN: llvm-readelf -x .data %t | FileCheck --check-prefixes=HEX,HEX-WITH-PLT %s
@@ -34,11 +36,11 @@ absolute:
# CHECK-LABEL: <relative>:
# CHECK-NEXT: 11{{...}}: auipc a1, 0xfffef
# PC-NEXT: addi a1, a1, -0x160
-# PLT-NEXT: addi a1, a1, -0x318
+# PLT-NEXT: addi a1, a1, -0x290
# CHECK-LABEL: <.Lpcrel_hi1>:
# CHECK-NEXT: 11{{...}}: auipc t1, 0xfffef
# PC-NEXT: sd a2, -0x166(t1)
-# PLT-NEXT: sd a2, -0x31e(t1)
+# PLT-NEXT: sd a2, -0x296(t1)
relative:
la a1, target
sd a2, target+2, t1
@@ -62,7 +64,7 @@ relative:
## We create a PLT entry and redirect the reference to it.
# PLT-LABEL: <branch>:
# PLT-NEXT: auipc ra, 0x0
-# PLT-NEXT: jalr 0x38(ra)
+# PLT-NEXT: jalr 0x30(ra)
# PLT-NEXT: [[#%x,ADDR:]]:
# PLT-SAME: j 0x[[#ADDR]]
# PLT-NEXT: [[#%x,ADDR:]]:
@@ -84,12 +86,8 @@ branch:
## A plt entry is created for target, so this is the offset between the
## plt entry and this address.
##
-## S = 0x11360 (the address of the plt entry for target)
-## A = 0
-## P = 0x1343c (the address of `.`)
-##
-## S - A + P = -0x0x20dc = 0xffffdf24
-# HEX-WITH-PLT-SAME: 24dfffff
+## S - A + P = -0x0x20ec = 0xffffdf14
+# HEX-WITH-PLT-SAME: 14dfffff
.data
.p2align 3
diff --git a/lld/test/ELF/shlib-undefined-local.s b/lld/test/ELF/shlib-undefined-local.s
index 8fceec1bf60f..6d3e8da34e29 100644
--- a/lld/test/ELF/shlib-undefined-local.s
+++ b/lld/test/ELF/shlib-undefined-local.s
@@ -5,10 +5,9 @@
# RUN: llvm-mc -filetype=obj -triple=x86_64-linux-gnu -o %t2.o %s
# RUN: echo "{ local: *; };" > %t.script
-# RUN: ld.lld -version-script %t.script -o %t %t2.o %t.so
-# RUN: llvm-nm -g %t | FileCheck -allow-empty %s
+# RUN: not ld.lld -version-script %t.script %t2.o %t.so -o /dev/null 2>&1 | FileCheck %s --check-prefix=ERR
-# CHECK-NOT: should_not_be_exported
+# ERR: error: non-exported symbol 'should_not_be_exported' in '{{.*}}tmp2.o' is referenced by DSO '{{.*}}tmp.so'
.globl should_not_be_exported
should_not_be_exported:
diff --git a/lld/test/ELF/static-with-export-dynamic.s b/lld/test/ELF/static-with-export-dynamic.s
deleted file mode 100644
index b0349b85e303..000000000000
--- a/lld/test/ELF/static-with-export-dynamic.s
+++ /dev/null
@@ -1,32 +0,0 @@
-// REQUIRES: x86
-// RUN: llvm-mc -filetype=obj -triple=i686-unknown-cloudabi %s -o %t.o
-// RUN: ld.lld --export-dynamic %t.o -o %t
-// RUN: llvm-readobj --dyn-syms %t | FileCheck %s
-
-// Ensure that a dynamic symbol table is present when --export-dynamic
-// is passed in, even when creating statically linked executables.
-//
-// CHECK: DynamicSymbols [
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name:
-// CHECK-NEXT: Value: 0x0
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Local
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: Undefined
-// CHECK-NEXT: }
-// CHECK-NEXT: Symbol {
-// CHECK-NEXT: Name: _start
-// CHECK-NEXT: Value:
-// CHECK-NEXT: Size: 0
-// CHECK-NEXT: Binding: Global
-// CHECK-NEXT: Type: None
-// CHECK-NEXT: Other: 0
-// CHECK-NEXT: Section: .text
-// CHECK-NEXT: }
-// CHECK-NEXT: ]
-
-.global _start
-_start:
- ret
diff --git a/lld/test/ELF/systemz-ifunc-nonpreemptible.s b/lld/test/ELF/systemz-ifunc-nonpreemptible.s
index 5056db302ca1..892bbde8d9c7 100644
--- a/lld/test/ELF/systemz-ifunc-nonpreemptible.s
+++ b/lld/test/ELF/systemz-ifunc-nonpreemptible.s
@@ -10,7 +10,7 @@
# CHECK: Section Headers:
# CHECK-NEXT: [Nr] Name Type Address Off Size ES Flg Lk Inf Al
# CHECK-NEXT: [ 0] NULL 0000000000000000 000000 000000 00 0 0 0
-# CHECK-NEXT: [ 1] .rela.dyn RELA 0000000001000158 000158 000030 18 AI 0 4 8
+# CHECK-NEXT: [ 1] .rela.dyn RELA 0000000001000158 000158 000030 18 A 0 0 8
# CHECK-NEXT: [ 2] .text PROGBITS 0000000001001188 000188 00001c 00 AX 0 0 4
# CHECK-NEXT: [ 3] .iplt PROGBITS 00000000010011b0 0001b0 000040 00 AX 0 0 16
# CHECK-NEXT: [ 4] .got.plt PROGBITS 00000000010021f0 0001f0 000010 00 WA 0 0 8
diff --git a/lld/test/ELF/weak-undef.s b/lld/test/ELF/weak-undef.s
index 3a9d5f462c21..21488023a79e 100644
--- a/lld/test/ELF/weak-undef.s
+++ b/lld/test/ELF/weak-undef.s
@@ -16,10 +16,11 @@
# RELOC-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
# RELOC-NEXT: {{.*}} 0000000100000001 R_X86_64_64 0000000000000000 foo + 0
-# COMMON: Symbol table '.dynsym' contains 2 entries:
-# COMMON-NEXT: Num: Value Size Type Bind Vis Ndx Name
-# COMMON-NEXT: 0: 0000000000000000 0 NOTYPE LOCAL DEFAULT UND
-# COMMON-NEXT: 1: 0000000000000000 0 NOTYPE WEAK DEFAULT UND foo
+# NORELOC-NOT: Symbol table '.dynsym'
+# RELOC: Symbol table '.dynsym' contains 2 entries:
+# RELOC-NEXT: Num: Value Size Type Bind Vis Ndx Name
+# RELOC-NEXT: 0: 0000000000000000 0 NOTYPE LOCAL DEFAULT UND
+# RELOC-NEXT: 1: 0000000000000000 0 NOTYPE WEAK DEFAULT UND foo
# COMMON: Hex dump of section '.data':
# COMMON-NEXT: {{.*}} 00000000 00000000
# COMMON-EMPTY:
diff --git a/lld/test/ELF/x86-64-dyn-rel-error.s b/lld/test/ELF/x86-64-dyn-rel-error.s
index a03adf89072f..1590045312d4 100644
--- a/lld/test/ELF/x86-64-dyn-rel-error.s
+++ b/lld/test/ELF/x86-64-dyn-rel-error.s
@@ -19,7 +19,7 @@
# CHECK-NOT: error:
# RUN: ld.lld --noinhibit-exec %t.o %t2.so -o /dev/null 2>&1 | FileCheck --check-prefix=WARN %s
-# RUN: not ld.lld --export-dynamic --unresolved-symbols=ignore-all %t.o -o /dev/null 2>&1 | FileCheck --check-prefix=WARN %s
+# RUN: not ld.lld --export-dynamic --unresolved-symbols=ignore-all %t.o %t2.so -o /dev/null 2>&1 | FileCheck --check-prefix=WARN %s
# WARN: relocation R_X86_64_32 cannot be used against symbol 'zed'; recompile with -fPIC
# WARN: relocation R_X86_64_PC32 cannot be used against symbol 'zed'; recompile with -fPIC
diff --git a/lld/test/ELF/x86-64-gotpc-relax-too-far.s b/lld/test/ELF/x86-64-gotpc-relax-too-far.s
index 74aa6d8f65a0..ba41faab67de 100644
--- a/lld/test/ELF/x86-64-gotpc-relax-too-far.s
+++ b/lld/test/ELF/x86-64-gotpc-relax-too-far.s
@@ -5,7 +5,10 @@
# RUN: llvm-objdump --no-print-imm-hex -d %t/bin | FileCheck --check-prefix=DISASM %s
# RUN: llvm-readelf -S %t/bin | FileCheck --check-prefixes=GOT %s
# RUN: ld.lld -T %t/lds2 %t/a.o -o %t/bin2
-# RUN: llvm-readelf -S %t/bin2 | FileCheck --check-prefixes=UNNECESSARY-GOT %s
+# RUN: llvm-objdump --no-print-imm-hex -d %t/bin2 | FileCheck --check-prefix=DISASM %s
+# RUN: llvm-readelf -S %t/bin2 | FileCheck --check-prefixes=GOT %s
+# RUN: ld.lld -T %t/lds3 %t/a.o -o %t/bin3
+# RUN: llvm-readelf -S %t/bin3 | FileCheck --check-prefixes=UNNECESSARY-GOT %s
# DISASM: <_foo>:
# DISASM-NEXT: movl 2097146(%rip), %eax
@@ -49,6 +52,13 @@ SECTIONS {
#--- lds2
SECTIONS {
.text.foo 0x100000 : { *(.text.foo) }
+ .text 0x1ff000 : { . = . + 0x1000 ; *(.text) }
+ .got 0x300000 : { *(.got) }
+ data 0x80200000 : { *(data) }
+}
+#--- lds3
+SECTIONS {
+ .text.foo 0x100000 : { *(.text.foo) }
.text 0x200000 : { *(.text) }
.got 0x300000 : { *(.got) }
data 0x400000 : { *(data) }
diff --git a/lld/test/MachO/objc-relative-method-lists-simple.s b/lld/test/MachO/objc-relative-method-lists-simple.s
new file mode 100644
index 000000000000..9f54b5ad828a
--- /dev/null
+++ b/lld/test/MachO/objc-relative-method-lists-simple.s
@@ -0,0 +1,250 @@
+# REQUIRES: aarch64
+# UNSUPPORTED: target=arm{{.*}}-unknown-linux-gnueabihf
+# RUN: rm -rf %t; split-file %s %t && cd %t
+
+## Compile a64_rel_dylib.o
+# RUN: llvm-mc -filetype=obj -triple=arm64-apple-macos -o a64_rel_dylib.o a64_simple_class.s
+
+## Test arm64 + relative method lists
+# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -objc_relative_method_lists
+# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_REL
+
+## Test arm64 + relative method lists + dead-strip
+# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -objc_relative_method_lists -dead_strip
+# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_REL
+
+## Test arm64 + traditional method lists (no relative offsets)
+# RUN: %no-lsystem-lld a64_rel_dylib.o -o a64_rel_dylib.dylib -map a64_rel_dylib.map -dylib -arch arm64 -no_objc_relative_method_lists
+# RUN: llvm-objdump --macho --objc-meta-data a64_rel_dylib.dylib | FileCheck %s --check-prefix=CHK_NO_REL
+
+
+CHK_REL: Contents of (__DATA_CONST,__objc_classlist) section
+CHK_REL-NEXT: _OBJC_CLASS_$_MyClass
+CHK_REL: baseMethods
+CHK_REL-NEXT: entsize 12 (relative)
+CHK_REL-NEXT: count 3
+CHK_REL-NEXT: name 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) instance_method_00
+CHK_REL-NEXT: types 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) v16@0:8
+CHK_REL-NEXT: imp 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) -[MyClass instance_method_00]
+CHK_REL-NEXT: name 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) instance_method_01
+CHK_REL-NEXT: types 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) v16@0:8
+CHK_REL-NEXT: imp 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) -[MyClass instance_method_01]
+CHK_REL-NEXT: name 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) instance_method_02
+CHK_REL-NEXT: types 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) v16@0:8
+CHK_REL-NEXT: imp 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) -[MyClass instance_method_02]
+
+CHK_REL: Meta Class
+CHK_REL-NEXT: isa 0x{{[0-9a-f]*}} _OBJC_METACLASS_$_MyClass
+CHK_REL: baseMethods 0x{{[0-9a-f]*}} (struct method_list_t *)
+CHK_REL-NEXT: entsize 12 (relative)
+CHK_REL-NEXT: count 3
+CHK_REL-NEXT: name 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) class_method_00
+CHK_REL-NEXT: types 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) v16@0:8
+CHK_REL-NEXT: imp 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) +[MyClass class_method_00]
+CHK_REL-NEXT: name 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) class_method_01
+CHK_REL-NEXT: types 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) v16@0:8
+CHK_REL-NEXT: imp 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) +[MyClass class_method_01]
+CHK_REL-NEXT: name 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) class_method_02
+CHK_REL-NEXT: types 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) v16@0:8
+CHK_REL-NEXT: imp 0x{{[0-9a-f]*}} (0x{{[0-9a-f]*}}) +[MyClass class_method_02]
+
+
+CHK_NO_REL-NOT: (relative)
+
+CHK_NO_REL: Contents of (__DATA_CONST,__objc_classlist) section
+CHK_NO_REL-NEXT: _OBJC_CLASS_$_MyClass
+
+CHK_NO_REL: baseMethods 0x{{[0-9a-f]*}} (struct method_list_t *)
+CHK_NO_REL-NEXT: entsize 24
+CHK_NO_REL-NEXT: count 3
+CHK_NO_REL-NEXT: name 0x{{[0-9a-f]*}} instance_method_00
+CHK_NO_REL-NEXT: types 0x{{[0-9a-f]*}} v16@0:8
+CHK_NO_REL-NEXT: imp -[MyClass instance_method_00]
+CHK_NO_REL-NEXT: name 0x{{[0-9a-f]*}} instance_method_01
+CHK_NO_REL-NEXT: types 0x{{[0-9a-f]*}} v16@0:8
+CHK_NO_REL-NEXT: imp -[MyClass instance_method_01]
+CHK_NO_REL-NEXT: name 0x{{[0-9a-f]*}} instance_method_02
+CHK_NO_REL-NEXT: types 0x{{[0-9a-f]*}} v16@0:8
+CHK_NO_REL-NEXT: imp -[MyClass instance_method_02]
+
+
+CHK_NO_REL: Meta Class
+CHK_NO_REL-NEXT: _OBJC_METACLASS_$_MyClass
+
+CHK_NO_REL: baseMethods 0x{{[0-9a-f]*}} (struct method_list_t *)
+CHK_NO_REL-NEXT: entsize 24
+CHK_NO_REL-NEXT: count 3
+CHK_NO_REL-NEXT: name 0x{{[0-9a-f]*}} class_method_00
+CHK_NO_REL-NEXT: types 0x{{[0-9a-f]*}} v16@0:8
+CHK_NO_REL-NEXT: imp +[MyClass class_method_00]
+CHK_NO_REL-NEXT: name 0x{{[0-9a-f]*}} class_method_01
+CHK_NO_REL-NEXT: types 0x{{[0-9a-f]*}} v16@0:8
+CHK_NO_REL-NEXT: imp +[MyClass class_method_01]
+CHK_NO_REL-NEXT: name 0x{{[0-9a-f]*}} class_method_02
+CHK_NO_REL-NEXT: types 0x{{[0-9a-f]*}} v16@0:8
+CHK_NO_REL-NEXT: imp +[MyClass class_method_02]
+
+
+######################## Generate a64_simple_class.s #########################
+# clang -c simple_class.mm -s -o a64_simple_class.s -target arm64-apple-macos -arch arm64 -Oz
+
+######################## simple_class.mm ########################
+# __attribute__((objc_root_class))
+# @interface MyClass
+# - (void)instance_method_00;
+# - (void)instance_method_01;
+# - (void)instance_method_02;
+# + (void)class_method_00;
+# + (void)class_method_01;
+# + (void)class_method_02;
+# @end
+#
+# @implementation MyClass
+# - (void)instance_method_00 {}
+# - (void)instance_method_01 {}
+# - (void)instance_method_02 {}
+# + (void)class_method_00 {}
+# + (void)class_method_01 {}
+# + (void)class_method_02 {}
+# @end
+#
+# void *_objc_empty_cache;
+# void *_objc_empty_vtable;
+#
+
+#--- objc-macros.s
+.macro .objc_selector_def name
+ .p2align 2
+"\name":
+ .cfi_startproc
+ ret
+ .cfi_endproc
+.endm
+
+#--- a64_simple_class.s
+.include "objc-macros.s"
+
+.section __TEXT,__text,regular,pure_instructions
+.build_version macos, 11, 0
+
+.objc_selector_def "-[MyClass instance_method_00]"
+.objc_selector_def "-[MyClass instance_method_01]"
+.objc_selector_def "-[MyClass instance_method_02]"
+
+.objc_selector_def "+[MyClass class_method_00]"
+.objc_selector_def "+[MyClass class_method_01]"
+.objc_selector_def "+[MyClass class_method_02]"
+
+.globl __objc_empty_vtable
+.zerofill __DATA,__common,__objc_empty_vtable,8,3
+.section __DATA,__objc_data
+.globl _OBJC_CLASS_$_MyClass
+.p2align 3, 0x0
+
+_OBJC_CLASS_$_MyClass:
+ .quad _OBJC_METACLASS_$_MyClass
+ .quad 0
+ .quad __objc_empty_cache
+ .quad __objc_empty_vtable
+ .quad __OBJC_CLASS_RO_$_MyClass
+ .globl _OBJC_METACLASS_$_MyClass
+ .p2align 3, 0x0
+
+_OBJC_METACLASS_$_MyClass:
+ .quad _OBJC_METACLASS_$_MyClass
+ .quad _OBJC_CLASS_$_MyClass
+ .quad __objc_empty_cache
+ .quad __objc_empty_vtable
+ .quad __OBJC_METACLASS_RO_$_MyClass
+
+ .section __TEXT,__objc_classname,cstring_literals
+l_OBJC_CLASS_NAME_:
+ .asciz "MyClass"
+ .section __TEXT,__objc_methname,cstring_literals
+l_OBJC_METH_VAR_NAME_:
+ .asciz "class_method_00"
+ .section __TEXT,__objc_methtype,cstring_literals
+l_OBJC_METH_VAR_TYPE_:
+ .asciz "v16@0:8"
+ .section __TEXT,__objc_methname,cstring_literals
+l_OBJC_METH_VAR_NAME_.1:
+ .asciz "class_method_01"
+l_OBJC_METH_VAR_NAME_.2:
+ .asciz "class_method_02"
+ .section __DATA,__objc_const
+ .p2align 3, 0x0
+__OBJC_$_CLASS_METHODS_MyClass:
+ .long 24
+ .long 3
+ .quad l_OBJC_METH_VAR_NAME_
+ .quad l_OBJC_METH_VAR_TYPE_
+ .quad "+[MyClass class_method_00]"
+ .quad l_OBJC_METH_VAR_NAME_.1
+ .quad l_OBJC_METH_VAR_TYPE_
+ .quad "+[MyClass class_method_01]"
+ .quad l_OBJC_METH_VAR_NAME_.2
+ .quad l_OBJC_METH_VAR_TYPE_
+ .quad "+[MyClass class_method_02]"
+ .p2align 3, 0x0
+
+__OBJC_METACLASS_RO_$_MyClass:
+ .long 3
+ .long 40
+ .long 40
+ .space 4
+ .quad 0
+ .quad l_OBJC_CLASS_NAME_
+ .quad __OBJC_$_CLASS_METHODS_MyClass
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+
+ .section __TEXT,__objc_methname,cstring_literals
+l_OBJC_METH_VAR_NAME_.3:
+ .asciz "instance_method_00"
+l_OBJC_METH_VAR_NAME_.4:
+ .asciz "instance_method_01"
+l_OBJC_METH_VAR_NAME_.5:
+ .asciz "instance_method_02"
+
+ .section __DATA,__objc_const
+ .p2align 3, 0x0
+__OBJC_$_INSTANCE_METHODS_MyClass:
+ .long 24
+ .long 3
+ .quad l_OBJC_METH_VAR_NAME_.3
+ .quad l_OBJC_METH_VAR_TYPE_
+ .quad "-[MyClass instance_method_00]"
+ .quad l_OBJC_METH_VAR_NAME_.4
+ .quad l_OBJC_METH_VAR_TYPE_
+ .quad "-[MyClass instance_method_01]"
+ .quad l_OBJC_METH_VAR_NAME_.5
+ .quad l_OBJC_METH_VAR_TYPE_
+ .quad "-[MyClass instance_method_02]"
+ .p2align 3, 0x0
+
+__OBJC_CLASS_RO_$_MyClass:
+ .long 2
+ .long 0
+ .long 0
+ .space 4
+ .quad 0
+ .quad l_OBJC_CLASS_NAME_
+ .quad __OBJC_$_INSTANCE_METHODS_MyClass
+ .quad 0
+ .quad 0
+ .quad 0
+ .quad 0
+ .globl __objc_empty_cache
+
+.zerofill __DATA,__common,__objc_empty_cache,8,3
+ .section __DATA,__objc_classlist,regular,no_dead_strip
+ .p2align 3, 0x0
+l_OBJC_LABEL_CLASS_$:
+ .quad _OBJC_CLASS_$_MyClass
+ .section __DATA,__objc_imageinfo,regular,no_dead_strip
+L_OBJC_IMAGE_INFO:
+ .long 0
+ .long 64
+.subsections_via_symbols
diff --git a/lld/test/MachO/silent-ignore.s b/lld/test/MachO/silent-ignore.s
index e57342c28a7a..58c3cc148f07 100644
--- a/lld/test/MachO/silent-ignore.s
+++ b/lld/test/MachO/silent-ignore.s
@@ -20,7 +20,7 @@
## Check that we don't emit any warnings nor errors for these unimplemented flags.
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %s -o %t.o
-# RUN: %lld %t.o -o /dev/null -objc_abi_version 2 -debug_variant
+# RUN: %lld %t.o -o /dev/null -objc_abi_version 2 -debug_variant -no_warn_duplicate_libraries
.globl _main
_main:
diff --git a/lld/test/MinGW/driver.test b/lld/test/MinGW/driver.test
index a4e9e5e1b19b..619fee8dee7c 100644
--- a/lld/test/MinGW/driver.test
+++ b/lld/test/MinGW/driver.test
@@ -422,6 +422,9 @@ LTO_EMIT_ASM: -lldemit:asm
RUN: ld.lld -### foo.o -m i386pe -plugin-opt=emit-llvm 2>&1 | FileCheck -check-prefix=LTO_EMIT_LLVM %s
LTO_EMIT_LLVM: -lldemit:llvm
+RUN: ld.lld -### foo.o -m i386pep --lto-sample-profile=foo 2>&1 | FileCheck -check-prefix=LTO_SAMPLE_PROFILE %s
+LTO_SAMPLE_PROFILE: -lto-sample-profile:foo
+
Test GCC specific LTO options that GCC passes unconditionally, that we ignore.
RUN: ld.lld -### foo.o -m i386pep -plugin /usr/lib/gcc/x86_64-w64-mingw32/10-posix/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-w64-mingw32/10-posix/lto-wrapper -plugin-opt=-fresolution=/tmp/ccM9d4fP.res -plugin-opt=-pass-through=-lmingw32 2> /dev/null
diff --git a/lldb/docs/use/python-reference.rst b/lldb/docs/use/python-reference.rst
index e5195a2471d9..795e38fab379 100644
--- a/lldb/docs/use/python-reference.rst
+++ b/lldb/docs/use/python-reference.rst
@@ -491,14 +491,17 @@ which will work like all the natively defined lldb commands. This provides a
very flexible and easy way to extend LLDB to meet your debugging requirements.
To write a python function that implements a new LLDB command define the
-function to take four arguments as follows:
+function to take five arguments as follows:
::
- def command_function(debugger, command, result, internal_dict):
+ def command_function(debugger, command, exe_ctx, result, internal_dict):
# Your code goes here
-Optionally, you can also provide a Python docstring, and LLDB will use it when providing help for your command, as in:
+The meaning of the arguments is given in the table below.
+
+If you provide a Python docstring in your command function LLDB will use it
+when providing "long help" for your command, as in:
::
@@ -506,19 +509,24 @@ Optionally, you can also provide a Python docstring, and LLDB will use it when p
"""This command takes a lot of options and does many fancy things"""
# Your code goes here
-Since lldb 3.5.2, LLDB Python commands can also take an SBExecutionContext as an
-argument. This is useful in cases where the command's notion of where to act is
-independent of the currently-selected entities in the debugger.
+though providing help can also be done programmatically (see below).
-This feature is enabled if the command-implementing function can be recognized
-as taking 5 arguments, or a variable number of arguments, and it alters the
-signature as such:
+Prior to lldb 3.5.2 (April 2015), LLDB Python command definitions didn't take the SBExecutionContext
+argument. So you may still see commands where the command definition is:
::
- def command_function(debugger, command, exe_ctx, result, internal_dict):
+ def command_function(debugger, command, result, internal_dict):
# Your code goes here
+Using this form is strongly discouraged because it can only operate on the "currently selected"
+target, process, thread, frame. The command will behave as expected when run
+directly on the command line. But if the command is used in a stop-hook, breakpoint
+callback, etc. where the response to the callback determines whether we will select
+this or that particular process/frame/thread, the global "currently selected"
+entity is not necessarily the one the callback is meant to handle. In that case, this
+command definition form can't do the right thing.
+
+-------------------+--------------------------------+----------------------------------------------------------------------------------------------------------------------------------+
| Argument | Type | Description |
+-------------------+--------------------------------+----------------------------------------------------------------------------------------------------------------------------------+
diff --git a/lldb/include/lldb/Core/Disassembler.h b/lldb/include/lldb/Core/Disassembler.h
index 885ac1bb4a7e..e037a49f152c 100644
--- a/lldb/include/lldb/Core/Disassembler.h
+++ b/lldb/include/lldb/Core/Disassembler.h
@@ -538,7 +538,7 @@ protected:
ElideMixedSourceAndDisassemblyLine(const ExecutionContext &exe_ctx,
const SymbolContext &sc, LineEntry &line) {
SourceLine sl;
- sl.file = line.file;
+ sl.file = line.GetFile();
sl.line = line.line;
sl.column = line.column;
return ElideMixedSourceAndDisassemblyLine(exe_ctx, sc, sl);
diff --git a/lldb/include/lldb/Core/Progress.h b/lldb/include/lldb/Core/Progress.h
index c38f6dd0a140..cd87be79c4f0 100644
--- a/lldb/include/lldb/Core/Progress.h
+++ b/lldb/include/lldb/Core/Progress.h
@@ -9,6 +9,7 @@
#ifndef LLDB_CORE_PROGRESS_H
#define LLDB_CORE_PROGRESS_H
+#include "lldb/Host/Alarm.h"
#include "lldb/lldb-forward.h"
#include "lldb/lldb-types.h"
#include "llvm/ADT/StringMap.h"
@@ -66,7 +67,11 @@ public:
/// @param [in] title The title of this progress activity.
///
/// @param [in] details Specific information about what the progress report
- /// is currently working on.
+ /// is currently working on. Although not required, if the progress report is
+ /// updated with Progress::Increment() then this field will be overwritten
+ /// with the new set of details passed into that function, and the details
+ /// passed initially will act as an "item 0" for the total set of
+ /// items being reported on.
///
/// @param [in] total The total units of work to be done if specified, if
/// set to std::nullopt then an indeterminate progress indicator should be
@@ -146,9 +151,12 @@ public:
void Increment(const Progress::ProgressData &);
void Decrement(const Progress::ProgressData &);
+ static void Initialize();
+ static void Terminate();
+ static bool Enabled();
static ProgressManager &Instance();
-private:
+protected:
enum class EventType {
Begin,
End,
@@ -156,9 +164,32 @@ private:
static void ReportProgress(const Progress::ProgressData &progress_data,
EventType type);
- llvm::StringMap<std::pair<uint64_t, Progress::ProgressData>>
- m_progress_category_map;
- std::mutex m_progress_map_mutex;
+ static std::optional<ProgressManager> &InstanceImpl();
+
+ /// Helper function for reporting progress when the alarm in the corresponding
+ /// entry in the map expires.
+ void Expire(llvm::StringRef key);
+
+ /// Entry used for bookkeeping.
+ struct Entry {
+ /// Reference count used for overlapping events.
+ uint64_t refcount = 0;
+
+ /// Data used to emit progress events.
+ Progress::ProgressData data;
+
+ /// Alarm handle used when the refcount reaches zero.
+ Alarm::Handle handle = Alarm::INVALID_HANDLE;
+ };
+
+ /// Map used for bookkeeping.
+ llvm::StringMap<Entry> m_entries;
+
+ /// Mutex to provide the map.
+ std::mutex m_entries_mutex;
+
+ /// Alarm instance to coalesce progress events.
+ Alarm m_alarm;
};
} // namespace lldb_private
diff --git a/lldb/include/lldb/Symbol/LineEntry.h b/lldb/include/lldb/Symbol/LineEntry.h
index 31e1cd0b36f9..8da59cf0bd24 100644
--- a/lldb/include/lldb/Symbol/LineEntry.h
+++ b/lldb/include/lldb/Symbol/LineEntry.h
@@ -130,11 +130,14 @@ struct LineEntry {
/// Shared pointer to the target this LineEntry belongs to.
void ApplyFileMappings(lldb::TargetSP target_sp);
+ /// Helper to access the file.
+ const FileSpec &GetFile() const { return file_sp->GetSpecOnly(); }
+
/// The section offset address range for this line entry.
AddressRange range;
/// The source file, possibly mapped by the target.source-map setting.
- FileSpec file;
+ lldb::SupportFileSP file_sp;
/// The original source file, from debug info.
lldb::SupportFileSP original_file_sp;
diff --git a/lldb/include/lldb/Symbol/UnwindTable.h b/lldb/include/lldb/Symbol/UnwindTable.h
index f0ce7047de2d..26826e5d1b49 100644
--- a/lldb/include/lldb/Symbol/UnwindTable.h
+++ b/lldb/include/lldb/Symbol/UnwindTable.h
@@ -57,6 +57,10 @@ public:
ArchSpec GetArchitecture();
+ /// Called after a SymbolFile has been added to a Module to add any new
+ /// unwind sections that may now be available.
+ void Update();
+
private:
void Dump(Stream &s);
diff --git a/lldb/include/lldb/Utility/Scalar.h b/lldb/include/lldb/Utility/Scalar.h
index 8e087a5ddeb8..d7155884c6d1 100644
--- a/lldb/include/lldb/Utility/Scalar.h
+++ b/lldb/include/lldb/Utility/Scalar.h
@@ -71,6 +71,7 @@ public:
: m_type(e_int), m_integer(std::move(v), false), m_float(0.0f) {}
Scalar(llvm::APSInt v)
: m_type(e_int), m_integer(std::move(v)), m_float(0.0f) {}
+ Scalar(llvm::APFloat v) : m_type(e_float), m_integer(0), m_float(v) {}
bool SignExtend(uint32_t bit_pos);
@@ -186,6 +187,10 @@ public:
Status SetValueFromData(const DataExtractor &data, lldb::Encoding encoding,
size_t byte_size);
+ llvm::APFloat CreateAPFloatFromAPSInt(lldb::BasicType basic_type);
+
+ llvm::APFloat CreateAPFloatFromAPFloat(lldb::BasicType basic_type);
+
protected:
Scalar::Type m_type = e_void;
llvm::APSInt m_integer;
diff --git a/lldb/include/lldb/Utility/SupportFile.h b/lldb/include/lldb/Utility/SupportFile.h
index 0ea0ca4e7c97..7505d7f345c5 100644
--- a/lldb/include/lldb/Utility/SupportFile.h
+++ b/lldb/include/lldb/Utility/SupportFile.h
@@ -45,6 +45,9 @@ public:
/// Materialize the file to disk and return the path to that temporary file.
virtual const FileSpec &Materialize() { return m_file_spec; }
+ /// Change the file name.
+ void Update(const FileSpec &file_spec) { m_file_spec = file_spec; }
+
protected:
FileSpec m_file_spec;
Checksum m_checksum;
diff --git a/lldb/source/API/SBLineEntry.cpp b/lldb/source/API/SBLineEntry.cpp
index 28d12e65fdaf..99a7b8fe644c 100644
--- a/lldb/source/API/SBLineEntry.cpp
+++ b/lldb/source/API/SBLineEntry.cpp
@@ -81,8 +81,8 @@ SBFileSpec SBLineEntry::GetFileSpec() const {
LLDB_INSTRUMENT_VA(this);
SBFileSpec sb_file_spec;
- if (m_opaque_up.get() && m_opaque_up->file)
- sb_file_spec.SetFileSpec(m_opaque_up->file);
+ if (m_opaque_up.get() && m_opaque_up->GetFile())
+ sb_file_spec.SetFileSpec(m_opaque_up->GetFile());
return sb_file_spec;
}
@@ -109,9 +109,9 @@ void SBLineEntry::SetFileSpec(lldb::SBFileSpec filespec) {
LLDB_INSTRUMENT_VA(this, filespec);
if (filespec.IsValid())
- ref().file = filespec.ref();
+ ref().file_sp = std::make_shared<SupportFile>(filespec.ref());
else
- ref().file.Clear();
+ ref().file_sp = std::make_shared<SupportFile>();
}
void SBLineEntry::SetLine(uint32_t line) {
LLDB_INSTRUMENT_VA(this, line);
@@ -168,7 +168,7 @@ bool SBLineEntry::GetDescription(SBStream &description) {
if (m_opaque_up) {
char file_path[PATH_MAX * 2];
- m_opaque_up->file.GetPath(file_path, sizeof(file_path));
+ m_opaque_up->GetFile().GetPath(file_path, sizeof(file_path));
strm.Printf("%s:%u", file_path, GetLine());
if (GetColumn() > 0)
strm.Printf(":%u", GetColumn());
diff --git a/lldb/source/API/SBThread.cpp b/lldb/source/API/SBThread.cpp
index fa4c80e59d97..eb9cf063802c 100644
--- a/lldb/source/API/SBThread.cpp
+++ b/lldb/source/API/SBThread.cpp
@@ -819,7 +819,7 @@ SBError SBThread::StepOverUntil(lldb::SBFrame &sb_frame,
step_file_spec = sb_file_spec.ref();
} else {
if (frame_sc.line_entry.IsValid())
- step_file_spec = frame_sc.line_entry.file;
+ step_file_spec = frame_sc.line_entry.GetFile();
else {
sb_error.SetErrorString("invalid file argument or no file for frame");
return sb_error;
diff --git a/lldb/source/API/SystemInitializerFull.cpp b/lldb/source/API/SystemInitializerFull.cpp
index c48466f25ede..995d14f7c1fa 100644
--- a/lldb/source/API/SystemInitializerFull.cpp
+++ b/lldb/source/API/SystemInitializerFull.cpp
@@ -10,6 +10,7 @@
#include "lldb/API/SBCommandInterpreter.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/PluginManager.h"
+#include "lldb/Core/Progress.h"
#include "lldb/Host/Config.h"
#include "lldb/Host/Host.h"
#include "lldb/Initialization/SystemInitializerCommon.h"
@@ -57,6 +58,7 @@ llvm::Error SystemInitializerFull::Initialize() {
llvm::InitializeAllAsmPrinters();
llvm::InitializeAllTargetMCs();
llvm::InitializeAllDisassemblers();
+
// Initialize the command line parser in LLVM. This usually isn't necessary
// as we aren't dealing with command line options here, but otherwise some
// other code in Clang/LLVM might be tempted to call this function from a
@@ -65,10 +67,13 @@ llvm::Error SystemInitializerFull::Initialize() {
const char *arg0 = "lldb";
llvm::cl::ParseCommandLineOptions(1, &arg0);
+ // Initialize the progress manager.
+ ProgressManager::Initialize();
+
#define LLDB_PLUGIN(p) LLDB_PLUGIN_INITIALIZE(p);
#include "Plugins/Plugins.def"
- // Scan for any system or user LLDB plug-ins
+ // Scan for any system or user LLDB plug-ins.
PluginManager::Initialize();
// The process settings need to know about installed plug-ins, so the
@@ -84,15 +89,18 @@ llvm::Error SystemInitializerFull::Initialize() {
void SystemInitializerFull::Terminate() {
Debugger::SettingsTerminate();
- // Terminate plug-ins in core LLDB
+ // Terminate plug-ins in core LLDB.
ProcessTrace::Terminate();
- // Terminate and unload and loaded system or user LLDB plug-ins
+ // Terminate and unload and loaded system or user LLDB plug-ins.
PluginManager::Terminate();
#define LLDB_PLUGIN(p) LLDB_PLUGIN_TERMINATE(p);
#include "Plugins/Plugins.def"
+ // Terminate the progress manager.
+ ProgressManager::Terminate();
+
// Now shutdown the common parts, in reverse order.
SystemInitializerCommon::Terminate();
}
diff --git a/lldb/source/Breakpoint/BreakpointResolver.cpp b/lldb/source/Breakpoint/BreakpointResolver.cpp
index 1861a0fe7c4f..ff4e2a998519 100644
--- a/lldb/source/Breakpoint/BreakpointResolver.cpp
+++ b/lldb/source/Breakpoint/BreakpointResolver.cpp
@@ -221,7 +221,7 @@ void BreakpointResolver::SetSCMatchesByLine(
auto &match = all_scs[0];
auto worklist_begin = std::partition(
all_scs.begin(), all_scs.end(), [&](const SymbolContext &sc) {
- if (sc.line_entry.file == match.line_entry.file ||
+ if (sc.line_entry.GetFile() == match.line_entry.GetFile() ||
*sc.line_entry.original_file_sp ==
*match.line_entry.original_file_sp) {
// When a match is found, keep track of the smallest line number.
diff --git a/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp b/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp
index cc4e1d26724f..d7d8c714867e 100644
--- a/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp
+++ b/lldb/source/Breakpoint/BreakpointResolverFileLine.cpp
@@ -147,8 +147,9 @@ void BreakpointResolverFileLine::FilterContexts(SymbolContextList &sc_list) {
else
continue;
- if (file != sc.line_entry.file) {
- LLDB_LOG(log, "unexpected symbol context file {0}", sc.line_entry.file);
+ if (file != sc.line_entry.GetFile()) {
+ LLDB_LOG(log, "unexpected symbol context file {0}",
+ sc.line_entry.GetFile());
continue;
}
@@ -223,7 +224,7 @@ void BreakpointResolverFileLine::DeduceSourceMapping(
const bool case_sensitive = request_file.IsCaseSensitive();
for (const SymbolContext &sc : sc_list) {
- FileSpec sc_file = sc.line_entry.file;
+ FileSpec sc_file = sc.line_entry.GetFile();
if (FileSpec::Equal(sc_file, request_file, /*full*/ true))
continue;
diff --git a/lldb/source/Commands/CommandObjectBreakpoint.cpp b/lldb/source/Commands/CommandObjectBreakpoint.cpp
index fbece865f113..cd4c7790f447 100644
--- a/lldb/source/Commands/CommandObjectBreakpoint.cpp
+++ b/lldb/source/Commands/CommandObjectBreakpoint.cpp
@@ -780,8 +780,8 @@ private:
} else {
const SymbolContext &sc =
cur_frame->GetSymbolContext(eSymbolContextLineEntry);
- if (sc.line_entry.file) {
- file = sc.line_entry.file;
+ if (sc.line_entry.GetFile()) {
+ file = sc.line_entry.GetFile();
} else {
result.AppendError("Can't find the file for the selected frame to "
"use as the default file.");
diff --git a/lldb/source/Commands/CommandObjectSource.cpp b/lldb/source/Commands/CommandObjectSource.cpp
index fde74f02aea6..0c1267456a18 100644
--- a/lldb/source/Commands/CommandObjectSource.cpp
+++ b/lldb/source/Commands/CommandObjectSource.cpp
@@ -158,7 +158,7 @@ protected:
if (module_list.GetSize() &&
module_list.GetIndexForModule(module) == LLDB_INVALID_INDEX32)
continue;
- if (!FileSpec::Match(file_spec, line_entry.file))
+ if (!FileSpec::Match(file_spec, line_entry.GetFile()))
continue;
if (start_line > 0 && line_entry.line < start_line)
continue;
@@ -239,7 +239,7 @@ protected:
num_matches++;
if (num_lines > 0 && num_matches > num_lines)
break;
- assert(cu_file_spec == line_entry.file);
+ assert(cu_file_spec == line_entry.GetFile());
if (!cu_header_printed) {
if (num_matches > 0)
strm << "\n\n";
@@ -760,11 +760,11 @@ protected:
bool operator<(const SourceInfo &rhs) const {
if (function.GetCString() < rhs.function.GetCString())
return true;
- if (line_entry.file.GetDirectory().GetCString() <
- rhs.line_entry.file.GetDirectory().GetCString())
+ if (line_entry.GetFile().GetDirectory().GetCString() <
+ rhs.line_entry.GetFile().GetDirectory().GetCString())
return true;
- if (line_entry.file.GetFilename().GetCString() <
- rhs.line_entry.file.GetFilename().GetCString())
+ if (line_entry.GetFile().GetFilename().GetCString() <
+ rhs.line_entry.GetFile().GetFilename().GetCString())
return true;
if (line_entry.line < rhs.line_entry.line)
return true;
@@ -799,7 +799,7 @@ protected:
sc.function->GetEndLineSourceInfo(end_file, end_line);
} else {
// We have an inlined function
- start_file = source_info.line_entry.file;
+ start_file = source_info.line_entry.GetFile();
start_line = source_info.line_entry.line;
end_line = start_line + m_options.num_lines;
}
diff --git a/lldb/source/Commands/CommandObjectThread.cpp b/lldb/source/Commands/CommandObjectThread.cpp
index cf4f8ccaa0c4..3dbbfd4f9d34 100644
--- a/lldb/source/Commands/CommandObjectThread.cpp
+++ b/lldb/source/Commands/CommandObjectThread.cpp
@@ -1705,7 +1705,7 @@ protected:
line = sym_ctx.line_entry.line + m_options.m_line_offset;
// Try the current file, but override if asked.
- FileSpec file = sym_ctx.line_entry.file;
+ FileSpec file = sym_ctx.line_entry.GetFile();
if (m_options.m_filenames.GetSize() == 1)
file = m_options.m_filenames.GetFileSpecAtIndex(0);
diff --git a/lldb/source/Core/Address.cpp b/lldb/source/Core/Address.cpp
index 6f5c366ab38a..b23398883fa5 100644
--- a/lldb/source/Core/Address.cpp
+++ b/lldb/source/Core/Address.cpp
@@ -398,7 +398,7 @@ bool Address::GetDescription(Stream &s, Target &target,
"Non-brief descriptions not implemented");
LineEntry line_entry;
if (CalculateSymbolContextLineEntry(line_entry)) {
- s.Printf(" (%s:%u:%u)", line_entry.file.GetFilename().GetCString(),
+ s.Printf(" (%s:%u:%u)", line_entry.GetFile().GetFilename().GetCString(),
line_entry.line, line_entry.column);
return true;
}
diff --git a/lldb/source/Core/Disassembler.cpp b/lldb/source/Core/Disassembler.cpp
index 7b07fcb26813..e31746fa0b8b 100644
--- a/lldb/source/Core/Disassembler.cpp
+++ b/lldb/source/Core/Disassembler.cpp
@@ -201,7 +201,7 @@ Disassembler::GetFunctionDeclLineEntry(const SymbolContext &sc) {
uint32_t func_decl_line;
sc.function->GetStartLineSourceInfo(func_decl_file, func_decl_line);
- if (func_decl_file != prologue_end_line.file &&
+ if (func_decl_file != prologue_end_line.GetFile() &&
func_decl_file != prologue_end_line.original_file_sp->GetSpecOnly())
return {};
@@ -354,7 +354,7 @@ void Disassembler::PrintInstructions(Debugger &debugger, const ArchSpec &arch,
}
if (sc.line_entry.IsValid()) {
SourceLine this_line;
- this_line.file = sc.line_entry.file;
+ this_line.file = sc.line_entry.GetFile();
this_line.line = sc.line_entry.line;
this_line.column = sc.line_entry.column;
if (!ElideMixedSourceAndDisassemblyLine(exe_ctx, sc, this_line))
@@ -406,7 +406,7 @@ void Disassembler::PrintInstructions(Debugger &debugger, const ArchSpec &arch,
uint32_t func_decl_line;
sc.function->GetStartLineSourceInfo(func_decl_file,
func_decl_line);
- if (func_decl_file == prologue_end_line.file ||
+ if (func_decl_file == prologue_end_line.GetFile() ||
func_decl_file ==
prologue_end_line.original_file_sp->GetSpecOnly()) {
// Add all the lines between the function declaration and
@@ -439,7 +439,7 @@ void Disassembler::PrintInstructions(Debugger &debugger, const ArchSpec &arch,
if (sc != prev_sc && sc.comp_unit && sc.line_entry.IsValid()) {
SourceLine this_line;
- this_line.file = sc.line_entry.file;
+ this_line.file = sc.line_entry.GetFile();
this_line.line = sc.line_entry.line;
if (!ElideMixedSourceAndDisassemblyLine(exe_ctx, sc,
diff --git a/lldb/source/Core/FormatEntity.cpp b/lldb/source/Core/FormatEntity.cpp
index cf82676bedda..ba62e2625259 100644
--- a/lldb/source/Core/FormatEntity.cpp
+++ b/lldb/source/Core/FormatEntity.cpp
@@ -1792,7 +1792,7 @@ bool FormatEntity::Format(const Entry &entry, Stream &s,
if (sc && sc->line_entry.IsValid()) {
Module *module = sc->module_sp.get();
if (module) {
- if (DumpFile(s, sc->line_entry.file, (FileKind)entry.number))
+ if (DumpFile(s, sc->line_entry.GetFile(), (FileKind)entry.number))
return true;
}
}
diff --git a/lldb/source/Core/IOHandlerCursesGUI.cpp b/lldb/source/Core/IOHandlerCursesGUI.cpp
index f86dce247135..d922d32f9105 100644
--- a/lldb/source/Core/IOHandlerCursesGUI.cpp
+++ b/lldb/source/Core/IOHandlerCursesGUI.cpp
@@ -6894,7 +6894,8 @@ public:
if (context_changed)
m_selected_line = m_pc_line;
- if (m_file_sp && m_file_sp->GetFileSpec() == m_sc.line_entry.file) {
+ if (m_file_sp &&
+ m_file_sp->GetFileSpec() == m_sc.line_entry.GetFile()) {
// Same file, nothing to do, we should either have the lines or
// not (source file missing)
if (m_selected_line >= static_cast<size_t>(m_first_visible_line)) {
@@ -6909,8 +6910,8 @@ public:
} else {
// File changed, set selected line to the line with the PC
m_selected_line = m_pc_line;
- m_file_sp =
- m_debugger.GetSourceManager().GetFile(m_sc.line_entry.file);
+ m_file_sp = m_debugger.GetSourceManager().GetFile(
+ m_sc.line_entry.GetFile());
if (m_file_sp) {
const size_t num_lines = m_file_sp->GetNumLines();
m_line_width = 1;
@@ -7000,7 +7001,7 @@ public:
LineEntry bp_loc_line_entry;
if (bp_loc_sp->GetAddress().CalculateSymbolContextLineEntry(
bp_loc_line_entry)) {
- if (m_file_sp->GetFileSpec() == bp_loc_line_entry.file) {
+ if (m_file_sp->GetFileSpec() == bp_loc_line_entry.GetFile()) {
bp_lines.insert(bp_loc_line_entry.line);
}
}
@@ -7477,7 +7478,7 @@ public:
LineEntry bp_loc_line_entry;
if (bp_loc_sp->GetAddress().CalculateSymbolContextLineEntry(
bp_loc_line_entry)) {
- if (m_file_sp->GetFileSpec() == bp_loc_line_entry.file &&
+ if (m_file_sp->GetFileSpec() == bp_loc_line_entry.GetFile() &&
m_selected_line + 1 == bp_loc_line_entry.line) {
bool removed =
exe_ctx.GetTargetRef().RemoveBreakpointByID(bp_sp->GetID());
diff --git a/lldb/source/Core/Module.cpp b/lldb/source/Core/Module.cpp
index 8ffa35518b3c..9c105b3f0e57 100644
--- a/lldb/source/Core/Module.cpp
+++ b/lldb/source/Core/Module.cpp
@@ -1009,6 +1009,8 @@ SymbolFile *Module::GetSymbolFile(bool can_create, Stream *feedback_strm) {
m_symfile_up.reset(
SymbolVendor::FindPlugin(shared_from_this(), feedback_strm));
m_did_load_symfile = true;
+ if (m_unwind_table)
+ m_unwind_table->Update();
}
}
}
@@ -1239,9 +1241,9 @@ void Module::SectionFileAddressesChanged() {
UnwindTable &Module::GetUnwindTable() {
if (!m_unwind_table) {
- m_unwind_table.emplace(*this);
if (!m_symfile_spec)
SymbolLocator::DownloadSymbolFileAsync(GetUUID());
+ m_unwind_table.emplace(*this);
}
return *m_unwind_table;
}
@@ -1359,15 +1361,10 @@ void Module::SetSymbolFileFileSpec(const FileSpec &file) {
// one
obj_file->ClearSymtab();
- // Clear the unwind table too, as that may also be affected by the
- // symbol file information.
- m_unwind_table.reset();
-
// The symbol file might be a directory bundle ("/tmp/a.out.dSYM")
// instead of a full path to the symbol file within the bundle
// ("/tmp/a.out.dSYM/Contents/Resources/DWARF/a.out"). So we need to
// check this
-
if (FileSystem::Instance().IsDirectory(file)) {
std::string new_path(file.GetPath());
std::string old_path(obj_file->GetFileSpec().GetPath());
diff --git a/lldb/source/Core/Progress.cpp b/lldb/source/Core/Progress.cpp
index b4b5e98b7ba4..161038284e21 100644
--- a/lldb/source/Core/Progress.cpp
+++ b/lldb/source/Core/Progress.cpp
@@ -35,7 +35,10 @@ Progress::Progress(std::string title, std::string details,
std::lock_guard<std::mutex> guard(m_mutex);
ReportProgress();
- ProgressManager::Instance().Increment(m_progress_data);
+
+ // Report to the ProgressManager if that subsystem is enabled.
+ if (ProgressManager::Enabled())
+ ProgressManager::Instance().Increment(m_progress_data);
}
Progress::~Progress() {
@@ -45,7 +48,10 @@ Progress::~Progress() {
if (!m_completed)
m_completed = m_total;
ReportProgress();
- ProgressManager::Instance().Decrement(m_progress_data);
+
+ // Report to the ProgressManager if that subsystem is enabled.
+ if (ProgressManager::Enabled())
+ ProgressManager::Instance().Decrement(m_progress_data);
}
void Progress::Increment(uint64_t amount,
@@ -75,45 +81,84 @@ void Progress::ReportProgress() {
}
}
-ProgressManager::ProgressManager() : m_progress_category_map() {}
+ProgressManager::ProgressManager()
+ : m_entries(), m_alarm(std::chrono::milliseconds(100)) {}
ProgressManager::~ProgressManager() {}
+void ProgressManager::Initialize() {
+ assert(!InstanceImpl() && "Already initialized.");
+ InstanceImpl().emplace();
+}
+
+void ProgressManager::Terminate() {
+ assert(InstanceImpl() && "Already terminated.");
+ InstanceImpl().reset();
+}
+
+bool ProgressManager::Enabled() { return InstanceImpl().operator bool(); }
+
ProgressManager &ProgressManager::Instance() {
- static std::once_flag g_once_flag;
- static ProgressManager *g_progress_manager = nullptr;
- std::call_once(g_once_flag, []() {
- // NOTE: known leak to avoid global destructor chain issues.
- g_progress_manager = new ProgressManager();
- });
- return *g_progress_manager;
+ assert(InstanceImpl() && "ProgressManager must be initialized");
+ return *InstanceImpl();
+}
+
+std::optional<ProgressManager> &ProgressManager::InstanceImpl() {
+ static std::optional<ProgressManager> g_progress_manager;
+ return g_progress_manager;
}
void ProgressManager::Increment(const Progress::ProgressData &progress_data) {
- std::lock_guard<std::mutex> lock(m_progress_map_mutex);
- // If the current category exists in the map then it is not an initial report,
- // therefore don't broadcast to the category bit. Also, store the current
- // progress data in the map so that we have a note of the ID used for the
- // initial progress report.
- if (!m_progress_category_map.contains(progress_data.title)) {
- m_progress_category_map[progress_data.title].second = progress_data;
+ std::lock_guard<std::mutex> lock(m_entries_mutex);
+
+ llvm::StringRef key = progress_data.title;
+ bool new_entry = !m_entries.contains(key);
+ Entry &entry = m_entries[progress_data.title];
+
+ if (new_entry) {
+ // This is a new progress event. Report progress and store the progress
+ // data.
ReportProgress(progress_data, EventType::Begin);
+ entry.data = progress_data;
+ } else if (entry.refcount == 0) {
+ // This is an existing entry that was scheduled to be deleted but a new one
+ // came in before the timer expired.
+ assert(entry.handle != Alarm::INVALID_HANDLE);
+
+ if (!m_alarm.Cancel(entry.handle)) {
+ // The timer expired before we had a chance to cancel it. We have to treat
+ // this as an entirely new progress event.
+ ReportProgress(progress_data, EventType::Begin);
+ }
+ // Clear the alarm handle.
+ entry.handle = Alarm::INVALID_HANDLE;
}
- m_progress_category_map[progress_data.title].first++;
+
+ // Regardless of how we got here, we need to bump the reference count.
+ entry.refcount++;
}
void ProgressManager::Decrement(const Progress::ProgressData &progress_data) {
- std::lock_guard<std::mutex> lock(m_progress_map_mutex);
- auto pos = m_progress_category_map.find(progress_data.title);
+ std::lock_guard<std::mutex> lock(m_entries_mutex);
+ llvm::StringRef key = progress_data.title;
- if (pos == m_progress_category_map.end())
+ if (!m_entries.contains(key))
return;
- if (pos->second.first <= 1) {
- ReportProgress(pos->second.second, EventType::End);
- m_progress_category_map.erase(progress_data.title);
- } else {
- --pos->second.first;
+ Entry &entry = m_entries[key];
+ entry.refcount--;
+
+ if (entry.refcount == 0) {
+ assert(entry.handle == Alarm::INVALID_HANDLE);
+
+ // Copy the key to a std::string so we can pass it by value to the lambda.
+ // The underlying StringRef will not exist by the time the callback is
+ // called.
+ std::string key_str = std::string(key);
+
+ // Start a timer. If it expires before we see another progress event, it
+ // will be reported.
+ entry.handle = m_alarm.Create([=]() { Expire(key_str); });
}
}
@@ -129,3 +174,20 @@ void ProgressManager::ReportProgress(
progress_data.debugger_id,
Debugger::eBroadcastBitProgressCategory);
}
+
+void ProgressManager::Expire(llvm::StringRef key) {
+ std::lock_guard<std::mutex> lock(m_entries_mutex);
+
+ // This shouldn't happen but be resilient anyway.
+ if (!m_entries.contains(key))
+ return;
+
+ // A new event came in and the alarm fired before we had a chance to restart
+ // it.
+ if (m_entries[key].refcount != 0)
+ return;
+
+ // We're done with this entry.
+ ReportProgress(m_entries[key].data, EventType::End);
+ m_entries.erase(key);
+}
diff --git a/lldb/source/Core/SourceManager.cpp b/lldb/source/Core/SourceManager.cpp
index 517a4b0268d2..0d70c554e534 100644
--- a/lldb/source/Core/SourceManager.cpp
+++ b/lldb/source/Core/SourceManager.cpp
@@ -418,7 +418,7 @@ bool SourceManager::GetDefaultFileAndLine(FileSpec &file_spec, uint32_t &line) {
if (sc.function->GetAddressRange()
.GetBaseAddress()
.CalculateSymbolContextLineEntry(line_entry)) {
- SetDefaultFileAndLine(line_entry.file, line_entry.line);
+ SetDefaultFileAndLine(line_entry.GetFile(), line_entry.line);
file_spec = m_last_file_spec;
line = m_last_line;
return true;
diff --git a/lldb/source/Host/common/Alarm.cpp b/lldb/source/Host/common/Alarm.cpp
index 245cdc7ae5c2..afc770d20d7b 100644
--- a/lldb/source/Host/common/Alarm.cpp
+++ b/lldb/source/Host/common/Alarm.cpp
@@ -154,54 +154,60 @@ lldb::thread_result_t Alarm::AlarmThread() {
//
// Below we only deal with the timeout expiring and fall through for dealing
// with the rest.
- std::unique_lock<std::mutex> alarm_lock(m_alarm_mutex);
- if (next_alarm) {
- if (!m_alarm_cv.wait_until(alarm_lock, *next_alarm, predicate)) {
- // The timeout for the next alarm expired.
-
- // Clear the next timeout to signal that we need to recompute the next
- // timeout.
- next_alarm.reset();
-
- // Iterate over all the callbacks. Call the ones that have expired
- // and remove them from the list.
- const TimePoint now = std::chrono::system_clock::now();
- auto it = m_entries.begin();
- while (it != m_entries.end()) {
- if (it->expiration <= now) {
- it->callback();
- it = m_entries.erase(it);
- } else {
- it++;
+ llvm::SmallVector<Callback, 1> callbacks;
+ {
+ std::unique_lock<std::mutex> alarm_lock(m_alarm_mutex);
+ if (next_alarm) {
+ if (!m_alarm_cv.wait_until(alarm_lock, *next_alarm, predicate)) {
+ // The timeout for the next alarm expired.
+
+ // Clear the next timeout to signal that we need to recompute the next
+ // timeout.
+ next_alarm.reset();
+
+ // Iterate over all the callbacks. Call the ones that have expired
+ // and remove them from the list.
+ const TimePoint now = std::chrono::system_clock::now();
+ auto it = m_entries.begin();
+ while (it != m_entries.end()) {
+ if (it->expiration <= now) {
+ callbacks.emplace_back(std::move(it->callback));
+ it = m_entries.erase(it);
+ } else {
+ it++;
+ }
}
}
+ } else {
+ m_alarm_cv.wait(alarm_lock, predicate);
}
- } else {
- m_alarm_cv.wait(alarm_lock, predicate);
- }
- // Fall through after waiting on the condition variable. At this point
- // either the predicate is true or we woke up because an alarm expired.
+ // Fall through after waiting on the condition variable. At this point
+ // either the predicate is true or we woke up because an alarm expired.
- // The alarm thread is shutting down.
- if (m_exit) {
- exit = true;
- if (m_run_callbacks_on_exit) {
- for (Entry &entry : m_entries)
- entry.callback();
+ // The alarm thread is shutting down.
+ if (m_exit) {
+ exit = true;
+ if (m_run_callbacks_on_exit) {
+ for (Entry &entry : m_entries)
+ callbacks.emplace_back(std::move(entry.callback));
+ }
}
- continue;
- }
- // A new alarm was added or an alarm expired. Either way we need to
- // recompute when this thread should wake up for the next alarm.
- if (m_recompute_next_alarm || !next_alarm) {
- for (Entry &entry : m_entries) {
- if (!next_alarm || entry.expiration < *next_alarm)
- next_alarm = entry.expiration;
+ // A new alarm was added or an alarm expired. Either way we need to
+ // recompute when this thread should wake up for the next alarm.
+ if (m_recompute_next_alarm || !next_alarm) {
+ for (Entry &entry : m_entries) {
+ if (!next_alarm || entry.expiration < *next_alarm)
+ next_alarm = entry.expiration;
+ }
+ m_recompute_next_alarm = false;
}
- m_recompute_next_alarm = false;
}
+
+ // Outside the lock, call the callbacks.
+ for (Callback &callback : callbacks)
+ callback();
}
return {};
}
diff --git a/lldb/source/Interpreter/OptionArgParser.cpp b/lldb/source/Interpreter/OptionArgParser.cpp
index 75ccad87467e..9a8275128ede 100644
--- a/lldb/source/Interpreter/OptionArgParser.cpp
+++ b/lldb/source/Interpreter/OptionArgParser.cpp
@@ -9,7 +9,9 @@
#include "lldb/Interpreter/OptionArgParser.h"
#include "lldb/DataFormatters/FormatManager.h"
#include "lldb/Target/ABI.h"
+#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Target.h"
+#include "lldb/Utility/RegisterValue.h"
#include "lldb/Utility/Status.h"
#include "lldb/Utility/StreamString.h"
@@ -233,24 +235,68 @@ OptionArgParser::DoToAddress(const ExecutionContext *exe_ctx, llvm::StringRef s,
// Since the compiler can't handle things like "main + 12" we should try to
// do this for now. The compiler doesn't like adding offsets to function
// pointer types.
+ // Some languages also don't have a natural representation for register
+ // values (e.g. swift) so handle simple uses of them here as well.
+ // We use a regex to parse these forms, the regex handles:
+ // $reg_name
+ // $reg_name+offset
+ // symbol_name+offset
+ //
+ // The important matching elements in the regex below are:
+ // 1: The reg name if there's no +offset
+ // 3: The symbol/reg name if there is an offset
+ // 4: +/-
+ // 5: The offset value.
static RegularExpression g_symbol_plus_offset_regex(
- "^(.*)([-\\+])[[:space:]]*(0x[0-9A-Fa-f]+|[0-9]+)[[:space:]]*$");
+ "^(\\$[^ +-]+)|(([^ +-]+)([-\\+])[[:space:]]*(0x[0-9A-Fa-f]+|[0-9]+)[[:space:]]*)$");
llvm::SmallVector<llvm::StringRef, 4> matches;
if (g_symbol_plus_offset_regex.Execute(sref, &matches)) {
uint64_t offset = 0;
- llvm::StringRef name = matches[1];
- llvm::StringRef sign = matches[2];
- llvm::StringRef str_offset = matches[3];
- if (!str_offset.getAsInteger(0, offset)) {
+ llvm::StringRef name;
+ if (!matches[1].empty())
+ name = matches[1];
+ else
+ name = matches[3];
+
+ llvm::StringRef sign = matches[4];
+ llvm::StringRef str_offset = matches[5];
+
+ // Some languages don't have a natural type for register values, but it
+ // is still useful to look them up here:
+ std::optional<lldb::addr_t> register_value;
+ StackFrame *frame = exe_ctx->GetFramePtr();
+ llvm::StringRef reg_name = name;
+ if (frame && reg_name.consume_front("$")) {
+ RegisterContextSP reg_ctx_sp = frame->GetRegisterContext();
+ if (reg_ctx_sp) {
+ const RegisterInfo *reg_info = reg_ctx_sp->GetRegisterInfoByName(reg_name);
+ if (reg_info) {
+ RegisterValue reg_val;
+ bool success = reg_ctx_sp->ReadRegister(reg_info, reg_val);
+ if (success && reg_val.GetType() != RegisterValue::eTypeInvalid) {
+ register_value = reg_val.GetAsUInt64(0, &success);
+ if (!success)
+ register_value.reset();
+ }
+ }
+ }
+ }
+ if (!str_offset.empty() && !str_offset.getAsInteger(0, offset)) {
Status error;
- addr = ToAddress(exe_ctx, name, LLDB_INVALID_ADDRESS, &error);
+ if (register_value)
+ addr = register_value.value();
+ else
+ addr = ToAddress(exe_ctx, name, LLDB_INVALID_ADDRESS, &error);
if (addr != LLDB_INVALID_ADDRESS) {
if (sign[0] == '+')
return addr + offset;
return addr - offset;
}
- }
+ } else if (register_value)
+ // In the case of register values, someone might just want to get the
+ // value in a language whose expression parser doesn't support registers.
+ return register_value.value();
}
if (error_ptr)
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp
index 3d43ed3f99ff..3b601726388d 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp
@@ -417,7 +417,7 @@ bool ClangExpressionSourceCode::GetText(
if (sc.comp_unit && sc.line_entry.IsValid()) {
DebugMacros *dm = sc.comp_unit->GetDebugMacros();
if (dm) {
- AddMacroState state(sc.line_entry.file, sc.line_entry.line);
+ AddMacroState state(sc.line_entry.GetFile(), sc.line_entry.line);
AddMacros(dm, sc.comp_unit, state, debug_macros_stream);
}
}
diff --git a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
index 10a1fe039189..d2d50152c07c 100644
--- a/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
+++ b/lldb/source/Plugins/Language/CPlusPlus/LibCxx.cpp
@@ -106,13 +106,13 @@ bool lldb_private::formatters::LibcxxFunctionSummaryProvider(
case CPPLanguageRuntime::LibCppStdFunctionCallableCase::Lambda:
stream.Printf(
" Lambda in File %s at Line %u",
- callable_info.callable_line_entry.file.GetFilename().GetCString(),
+ callable_info.callable_line_entry.GetFile().GetFilename().GetCString(),
callable_info.callable_line_entry.line);
break;
case CPPLanguageRuntime::LibCppStdFunctionCallableCase::CallableObject:
stream.Printf(
" Function in File %s at Line %u",
- callable_info.callable_line_entry.file.GetFilename().GetCString(),
+ callable_info.callable_line_entry.GetFile().GetFilename().GetCString(),
callable_info.callable_line_entry.line);
break;
case CPPLanguageRuntime::LibCppStdFunctionCallableCase::FreeOrMemberFunction:
@@ -1192,7 +1192,7 @@ bool lldb_private::formatters::LibcxxChronoWeekdaySummaryProvider(
return false;
const unsigned weekday = ptr_sp->GetValueAsUnsigned(0);
- if (weekday >= 0 && weekday < 7)
+ if (weekday < 7)
stream << "weekday=" << weekdays[weekday];
else
stream.Printf("weekday=%u", weekday);
diff --git a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
index 3e5ee6f66373..d3fc487aed43 100644
--- a/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
+++ b/lldb/source/Plugins/LanguageRuntime/ObjC/AppleObjCRuntime/AppleObjCRuntimeV2.cpp
@@ -3154,7 +3154,7 @@ AppleObjCRuntimeV2::TaggedPointerVendorExtended::GetClassDescriptor(
<< m_objc_debug_taggedpointer_ext_payload_lshift) >>
m_objc_debug_taggedpointer_ext_payload_rshift);
int64_t data_payload_signed =
- ((int64_t)((int64_t)unobfuscated
+ ((int64_t)((uint64_t)unobfuscated
<< m_objc_debug_taggedpointer_ext_payload_lshift) >>
m_objc_debug_taggedpointer_ext_payload_rshift);
diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
index bcf3a3274cf3..1caf93659956 100644
--- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
+++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
@@ -905,6 +905,11 @@ ConstString ObjectFileMachO::GetSegmentNameDWARF() {
return g_section_name;
}
+ConstString ObjectFileMachO::GetSegmentNameLLVM_COV() {
+ static ConstString g_section_name("__LLVM_COV");
+ return g_section_name;
+}
+
ConstString ObjectFileMachO::GetSectionNameEHFrame() {
static ConstString g_section_name_eh_frame("__eh_frame");
return g_section_name_eh_frame;
@@ -6145,6 +6150,13 @@ bool ObjectFileMachO::SectionIsLoadable(const Section *section) {
return false;
if (GetModule().get() != section->GetModule().get())
return false;
+ // firmware style binaries with llvm gcov segment do
+ // not have that segment mapped into memory.
+ if (section->GetName() == GetSegmentNameLLVM_COV()) {
+ const Strata strata = GetStrata();
+ if (strata == eStrataKernel || strata == eStrataRawImage)
+ return false;
+ }
// Be careful with __LINKEDIT and __DWARF segments
if (section->GetName() == GetSegmentNameLINKEDIT() ||
section->GetName() == GetSegmentNameDWARF()) {
diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.h b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.h
index 0a47f3a7dd18..55bc688126eb 100644
--- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.h
+++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.h
@@ -271,6 +271,7 @@ protected:
static lldb_private::ConstString GetSegmentNameOBJC();
static lldb_private::ConstString GetSegmentNameLINKEDIT();
static lldb_private::ConstString GetSegmentNameDWARF();
+ static lldb_private::ConstString GetSegmentNameLLVM_COV();
static lldb_private::ConstString GetSectionNameEHFrame();
llvm::MachO::dysymtab_command m_dysymtab;
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 5f67658f86ea..49f13d2c89e3 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -693,6 +693,7 @@ llvm::DWARFDebugAbbrev *SymbolFileDWARF::DebugAbbrev() {
if (debug_abbrev_data.GetByteSize() == 0)
return nullptr;
+ ElapsedTime elapsed(m_parse_time);
auto abbr =
std::make_unique<llvm::DWARFDebugAbbrev>(debug_abbrev_data.GetAsLLVM());
llvm::Error error = abbr->parse();
@@ -3419,8 +3420,8 @@ static DWARFExpressionList GetExprListFromAtLocation(DWARFFormValue form_value,
if (DWARFFormValue::IsBlockForm(form_value.Form())) {
const DWARFDataExtractor &data = die.GetData();
- uint32_t block_offset = form_value.BlockData() - data.GetDataStart();
- uint32_t block_length = form_value.Unsigned();
+ uint64_t block_offset = form_value.BlockData() - data.GetDataStart();
+ uint64_t block_length = form_value.Unsigned();
return DWARFExpressionList(
module, DataExtractor(data, block_offset, block_length), die.GetCU());
}
@@ -3449,9 +3450,9 @@ GetExprListFromAtConstValue(DWARFFormValue form_value, ModuleSP module,
const DWARFDataExtractor &debug_info_data = die.GetData();
if (DWARFFormValue::IsBlockForm(form_value.Form())) {
// Retrieve the value as a block expression.
- uint32_t block_offset =
+ uint64_t block_offset =
form_value.BlockData() - debug_info_data.GetDataStart();
- uint32_t block_length = form_value.Unsigned();
+ uint64_t block_length = form_value.Unsigned();
return DWARFExpressionList(
module, DataExtractor(debug_info_data, block_offset, block_length),
die.GetCU());
@@ -4060,8 +4061,8 @@ CollectCallSiteParameters(ModuleSP module, DWARFDIE call_site_die) {
if (!DWARFFormValue::IsBlockForm(form_value.Form()))
return {};
auto data = child.GetData();
- uint32_t block_offset = form_value.BlockData() - data.GetDataStart();
- uint32_t block_length = form_value.Unsigned();
+ uint64_t block_offset = form_value.BlockData() - data.GetDataStart();
+ uint64_t block_length = form_value.Unsigned();
return DWARFExpressionList(
module, DataExtractor(data, block_offset, block_length),
child.GetCU());
@@ -4166,8 +4167,8 @@ SymbolFileDWARF::CollectCallEdges(ModuleSP module, DWARFDIE function_die) {
}
auto data = child.GetData();
- uint32_t block_offset = form_value.BlockData() - data.GetDataStart();
- uint32_t block_length = form_value.Unsigned();
+ uint64_t block_offset = form_value.BlockData() - data.GetDataStart();
+ uint64_t block_length = form_value.Unsigned();
call_target = DWARFExpressionList(
module, DataExtractor(data, block_offset, block_length),
child.GetCU());
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
index 6dd3eb3677b7..4bc2cfd60688 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
@@ -1233,7 +1233,7 @@ void SymbolFileDWARFDebugMap::FindTypes(const TypeQuery &query,
std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
ForEachSymbolFile([&](SymbolFileDWARF *oso_dwarf) -> bool {
oso_dwarf->FindTypes(query, results);
- return !results.Done(query); // Keep iterating if we aren't done.
+ return results.Done(query); // Keep iterating if we aren't done.
});
}
@@ -1391,7 +1391,7 @@ void SymbolFileDWARFDebugMap::ParseDeclsForContext(
lldb_private::CompilerDeclContext decl_ctx) {
ForEachSymbolFile([&](SymbolFileDWARF *oso_dwarf) -> bool {
oso_dwarf->ParseDeclsForContext(decl_ctx);
- return true; // Keep iterating
+ return false; // Keep iterating
});
}
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 3ac1cf91932c..ebcc3bc99a80 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -477,6 +477,7 @@ static void ParseLangArgs(LangOptions &Opts, InputKind IK, const char *triple) {
// Based on the base language, pick one.
switch (IK.getLanguage()) {
case clang::Language::Unknown:
+ case clang::Language::CIR:
case clang::Language::LLVM_IR:
case clang::Language::RenderScript:
llvm_unreachable("Invalid input kind!");
diff --git a/lldb/source/Symbol/CompileUnit.cpp b/lldb/source/Symbol/CompileUnit.cpp
index 1b3cd23d9400..ddeacf18e855 100644
--- a/lldb/source/Symbol/CompileUnit.cpp
+++ b/lldb/source/Symbol/CompileUnit.cpp
@@ -320,7 +320,7 @@ void CompileUnit::ResolveSymbolContext(
src_location_spec.GetColumn() ? std::optional<uint16_t>(line_entry.column)
: std::nullopt;
- SourceLocationSpec found_entry(line_entry.file, line_entry.line, column,
+ SourceLocationSpec found_entry(line_entry.GetFile(), line_entry.line, column,
inlines, exact);
while (line_idx != UINT32_MAX) {
diff --git a/lldb/source/Symbol/Function.cpp b/lldb/source/Symbol/Function.cpp
index fdc090355771..194f89bc51d8 100644
--- a/lldb/source/Symbol/Function.cpp
+++ b/lldb/source/Symbol/Function.cpp
@@ -289,7 +289,7 @@ void Function::GetStartLineSourceInfo(FileSpec &source_file,
if (line_table->FindLineEntryByAddress(GetAddressRange().GetBaseAddress(),
line_entry, nullptr)) {
line_no = line_entry.line;
- source_file = line_entry.file;
+ source_file = line_entry.GetFile();
}
}
}
@@ -311,7 +311,7 @@ void Function::GetEndLineSourceInfo(FileSpec &source_file, uint32_t &line_no) {
LineEntry line_entry;
if (line_table->FindLineEntryByAddress(scratch_addr, line_entry, nullptr)) {
line_no = line_entry.line;
- source_file = line_entry.file;
+ source_file = line_entry.GetFile();
}
}
diff --git a/lldb/source/Symbol/LineEntry.cpp b/lldb/source/Symbol/LineEntry.cpp
index 389f8dcb65d8..461399e0326e 100644
--- a/lldb/source/Symbol/LineEntry.cpp
+++ b/lldb/source/Symbol/LineEntry.cpp
@@ -14,12 +14,14 @@
using namespace lldb_private;
LineEntry::LineEntry()
- : range(), file(), is_start_of_statement(0), is_start_of_basic_block(0),
- is_prologue_end(0), is_epilogue_begin(0), is_terminal_entry(0) {}
+ : range(), file_sp(std::make_shared<SupportFile>()),
+ original_file_sp(std::make_shared<SupportFile>()),
+ is_start_of_statement(0), is_start_of_basic_block(0), is_prologue_end(0),
+ is_epilogue_begin(0), is_terminal_entry(0) {}
void LineEntry::Clear() {
range.Clear();
- file.Clear();
+ file_sp = std::make_shared<SupportFile>();
original_file_sp = std::make_shared<SupportFile>();
line = LLDB_INVALID_LINE_NUMBER;
column = 0;
@@ -35,6 +37,7 @@ bool LineEntry::IsValid() const {
}
bool LineEntry::DumpStopContext(Stream *s, bool show_fullpaths) const {
+ const FileSpec &file = file_sp->GetSpecOnly();
if (file) {
if (show_fullpaths)
file.Dump(s->AsRawOstream());
@@ -67,7 +70,7 @@ bool LineEntry::Dump(Stream *s, Target *target, bool show_file,
return false;
}
if (show_file)
- *s << ", file = " << file;
+ *s << ", file = " << GetFile();
if (line)
s->Printf(", line = %u", line);
if (column)
@@ -103,7 +106,7 @@ bool LineEntry::GetDescription(Stream *s, lldb::DescriptionLevel level,
Address::DumpStyleFileAddress);
}
- *s << ": " << file;
+ *s << ": " << GetFile();
if (line) {
s->Printf(":%u", line);
@@ -173,7 +176,7 @@ int LineEntry::Compare(const LineEntry &a, const LineEntry &b) {
if (a.column > b.column)
return +1;
- return FileSpec::Compare(a.file, b.file, true);
+ return FileSpec::Compare(a.GetFile(), b.GetFile(), true);
}
AddressRange LineEntry::GetSameLineContiguousAddressRange(
@@ -242,6 +245,6 @@ void LineEntry::ApplyFileMappings(lldb::TargetSP target_sp) {
// Apply any file remappings to our file.
if (auto new_file_spec = target_sp->GetSourcePathMap().FindFile(
original_file_sp->GetSpecOnly()))
- file = *new_file_spec;
+ file_sp->Update(*new_file_spec);
}
}
diff --git a/lldb/source/Symbol/LineTable.cpp b/lldb/source/Symbol/LineTable.cpp
index 444135f63bc0..06cf4f698316 100644
--- a/lldb/source/Symbol/LineTable.cpp
+++ b/lldb/source/Symbol/LineTable.cpp
@@ -288,8 +288,8 @@ bool LineTable::ConvertEntryAtIndexToLineEntry(uint32_t idx,
else
line_entry.range.SetByteSize(0);
- line_entry.file =
- m_comp_unit->GetSupportFiles().GetFileSpecAtIndex(entry.file_idx);
+ line_entry.file_sp = std::make_shared<SupportFile>(
+ m_comp_unit->GetSupportFiles().GetFileSpecAtIndex(entry.file_idx));
line_entry.original_file_sp =
m_comp_unit->GetSupportFiles().GetSupportFileAtIndex(entry.file_idx);
line_entry.line = entry.line;
diff --git a/lldb/source/Symbol/SymbolContext.cpp b/lldb/source/Symbol/SymbolContext.cpp
index 3c70b8d8743c..f368896fbad4 100644
--- a/lldb/source/Symbol/SymbolContext.cpp
+++ b/lldb/source/Symbol/SymbolContext.cpp
@@ -472,8 +472,8 @@ bool SymbolContext::GetParentOfInlinedScope(const Address &curr_frame_pc,
curr_inlined_block->GetInlinedFunctionInfo();
next_frame_pc = range.GetBaseAddress();
next_frame_sc.line_entry.range.GetBaseAddress() = next_frame_pc;
- next_frame_sc.line_entry.file =
- curr_inlined_block_inlined_info->GetCallSite().GetFile();
+ next_frame_sc.line_entry.file_sp = std::make_shared<SupportFile>(
+ curr_inlined_block_inlined_info->GetCallSite().GetFile());
next_frame_sc.line_entry.original_file_sp =
std::make_shared<SupportFile>(
curr_inlined_block_inlined_info->GetCallSite().GetFile());
diff --git a/lldb/source/Symbol/UnwindTable.cpp b/lldb/source/Symbol/UnwindTable.cpp
index 3c1a5187b110..11bedf3d6052 100644
--- a/lldb/source/Symbol/UnwindTable.cpp
+++ b/lldb/source/Symbol/UnwindTable.cpp
@@ -84,6 +84,51 @@ void UnwindTable::Initialize() {
}
}
+void UnwindTable::Update() {
+ if (!m_initialized)
+ return Initialize();
+
+ std::lock_guard<std::mutex> guard(m_mutex);
+
+ ObjectFile *object_file = m_module.GetObjectFile();
+ if (!object_file)
+ return;
+
+ if (!m_object_file_unwind_up)
+ m_object_file_unwind_up = object_file->CreateCallFrameInfo();
+
+ SectionList *sl = m_module.GetSectionList();
+ if (!sl)
+ return;
+
+ SectionSP sect = sl->FindSectionByType(eSectionTypeEHFrame, true);
+ if (!m_eh_frame_up && sect) {
+ m_eh_frame_up = std::make_unique<DWARFCallFrameInfo>(
+ *object_file, sect, DWARFCallFrameInfo::EH);
+ }
+
+ sect = sl->FindSectionByType(eSectionTypeDWARFDebugFrame, true);
+ if (!m_debug_frame_up && sect) {
+ m_debug_frame_up = std::make_unique<DWARFCallFrameInfo>(
+ *object_file, sect, DWARFCallFrameInfo::DWARF);
+ }
+
+ sect = sl->FindSectionByType(eSectionTypeCompactUnwind, true);
+ if (!m_compact_unwind_up && sect) {
+ m_compact_unwind_up =
+ std::make_unique<CompactUnwindInfo>(*object_file, sect);
+ }
+
+ sect = sl->FindSectionByType(eSectionTypeARMexidx, true);
+ if (!m_arm_unwind_up && sect) {
+ SectionSP sect_extab = sl->FindSectionByType(eSectionTypeARMextab, true);
+ if (sect_extab.get()) {
+ m_arm_unwind_up =
+ std::make_unique<ArmUnwindInfo>(*object_file, sect, sect_extab);
+ }
+ }
+}
+
UnwindTable::~UnwindTable() = default;
std::optional<AddressRange>
diff --git a/lldb/source/Target/StackFrame.cpp b/lldb/source/Target/StackFrame.cpp
index c29a71d92572..03a74f29e76e 100644
--- a/lldb/source/Target/StackFrame.cpp
+++ b/lldb/source/Target/StackFrame.cpp
@@ -1800,7 +1800,6 @@ void StackFrame::DumpUsingSettingsFormat(Stream *strm, bool show_unique,
return;
ExecutionContext exe_ctx(shared_from_this());
- StreamString s;
const FormatEntity::Entry *frame_format = nullptr;
Target *target = exe_ctx.GetTargetPtr();
@@ -1922,7 +1921,7 @@ bool StackFrame::GetStatus(Stream &strm, bool show_frame_info, bool show_source,
size_t num_lines =
target->GetSourceManager().DisplaySourceLinesWithLineNumbers(
- m_sc.line_entry.file, start_line, m_sc.line_entry.column,
+ m_sc.line_entry.GetFile(), start_line, m_sc.line_entry.column,
source_lines_before, source_lines_after, "->", &strm);
if (num_lines != 0)
have_source = true;
diff --git a/lldb/source/Target/StackFrameList.cpp b/lldb/source/Target/StackFrameList.cpp
index 2273e52e2e04..314b5e39c716 100644
--- a/lldb/source/Target/StackFrameList.cpp
+++ b/lldb/source/Target/StackFrameList.cpp
@@ -884,9 +884,9 @@ void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
GetFrameAtIndex(GetSelectedFrameIndex(DoNoSelectMostRelevantFrame)));
if (frame_sp) {
SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
- if (sc.line_entry.file)
+ if (sc.line_entry.GetFile())
m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
- sc.line_entry.file, sc.line_entry.line);
+ sc.line_entry.GetFile(), sc.line_entry.line);
}
}
}
diff --git a/lldb/source/Target/Thread.cpp b/lldb/source/Target/Thread.cpp
index 4dfad23b56e2..412e44ede9c1 100644
--- a/lldb/source/Target/Thread.cpp
+++ b/lldb/source/Target/Thread.cpp
@@ -302,10 +302,10 @@ bool Thread::SetSelectedFrameByIndexNoisily(uint32_t frame_idx,
SymbolContext frame_sc(
frame_sp->GetSymbolContext(eSymbolContextLineEntry));
const Debugger &debugger = GetProcess()->GetTarget().GetDebugger();
- if (debugger.GetUseExternalEditor() && frame_sc.line_entry.file &&
+ if (debugger.GetUseExternalEditor() && frame_sc.line_entry.GetFile() &&
frame_sc.line_entry.line != 0) {
if (llvm::Error e = Host::OpenFileInExternalEditor(
- debugger.GetExternalEditor(), frame_sc.line_entry.file,
+ debugger.GetExternalEditor(), frame_sc.line_entry.GetFile(),
frame_sc.line_entry.line)) {
LLDB_LOG_ERROR(GetLog(LLDBLog::Host), std::move(e),
"OpenFileInExternalEditor failed: {0}");
@@ -1753,10 +1753,10 @@ size_t Thread::GetStatus(Stream &strm, uint32_t start_frame,
if (frame_sp) {
SymbolContext frame_sc(
frame_sp->GetSymbolContext(eSymbolContextLineEntry));
- if (frame_sc.line_entry.line != 0 && frame_sc.line_entry.file) {
+ if (frame_sc.line_entry.line != 0 && frame_sc.line_entry.GetFile()) {
if (llvm::Error e = Host::OpenFileInExternalEditor(
target->GetDebugger().GetExternalEditor(),
- frame_sc.line_entry.file, frame_sc.line_entry.line)) {
+ frame_sc.line_entry.GetFile(), frame_sc.line_entry.line)) {
LLDB_LOG_ERROR(GetLog(LLDBLog::Host), std::move(e),
"OpenFileInExternalEditor failed: {0}");
}
diff --git a/lldb/source/Target/TraceDumper.cpp b/lldb/source/Target/TraceDumper.cpp
index e92419e70b32..4ef8efc1a676 100644
--- a/lldb/source/Target/TraceDumper.cpp
+++ b/lldb/source/Target/TraceDumper.cpp
@@ -57,7 +57,7 @@ static bool FileLineAndColumnMatches(const LineEntry &a, const LineEntry &b) {
return false;
if (a.column != b.column)
return false;
- return a.file == b.file;
+ return a.GetFile() == b.GetFile();
}
/// Compare the symbol contexts of the provided \a SymbolInfo
@@ -396,7 +396,7 @@ public:
m_j.attribute(
"source",
ToOptionalString(
- item.symbol_info->sc.line_entry.file.GetPath().c_str()));
+ item.symbol_info->sc.line_entry.GetFile().GetPath().c_str()));
m_j.attribute("line", item.symbol_info->sc.line_entry.line);
m_j.attribute("column", item.symbol_info->sc.line_entry.column);
}
diff --git a/lldb/source/Utility/Scalar.cpp b/lldb/source/Utility/Scalar.cpp
index 5ad68065bce1..e94fd4596236 100644
--- a/lldb/source/Utility/Scalar.cpp
+++ b/lldb/source/Utility/Scalar.cpp
@@ -813,6 +813,48 @@ bool Scalar::ExtractBitfield(uint32_t bit_size, uint32_t bit_offset) {
return false;
}
+llvm::APFloat Scalar::CreateAPFloatFromAPSInt(lldb::BasicType basic_type) {
+ switch (basic_type) {
+ case lldb::eBasicTypeFloat:
+ return llvm::APFloat(
+ m_integer.isSigned()
+ ? llvm::APIntOps::RoundSignedAPIntToFloat(m_integer)
+ : llvm::APIntOps::RoundAPIntToFloat(m_integer));
+ case lldb::eBasicTypeDouble:
+ // No way to get more precision at the moment.
+ case lldb::eBasicTypeLongDouble:
+ return llvm::APFloat(
+ m_integer.isSigned()
+ ? llvm::APIntOps::RoundSignedAPIntToDouble(m_integer)
+ : llvm::APIntOps::RoundAPIntToDouble(m_integer));
+ default:
+ const llvm::fltSemantics &sem = APFloat::IEEEsingle();
+ return llvm::APFloat::getNaN(sem);
+ }
+}
+
+llvm::APFloat Scalar::CreateAPFloatFromAPFloat(lldb::BasicType basic_type) {
+ switch (basic_type) {
+ case lldb::eBasicTypeFloat: {
+ bool loses_info;
+ m_float.convert(llvm::APFloat::IEEEsingle(),
+ llvm::APFloat::rmNearestTiesToEven, &loses_info);
+ return m_float;
+ }
+ case lldb::eBasicTypeDouble:
+ // No way to get more precision at the moment.
+ case lldb::eBasicTypeLongDouble: {
+ bool loses_info;
+ m_float.convert(llvm::APFloat::IEEEdouble(),
+ llvm::APFloat::rmNearestTiesToEven, &loses_info);
+ return m_float;
+ }
+ default:
+ const llvm::fltSemantics &sem = APFloat::IEEEsingle();
+ return llvm::APFloat::getNaN(sem);
+ }
+}
+
bool lldb_private::operator==(Scalar lhs, Scalar rhs) {
// If either entry is void then we can just compare the types
if (lhs.m_type == Scalar::e_void || rhs.m_type == Scalar::e_void)
diff --git a/lldb/test/API/commands/target/modules/lookup/Makefile b/lldb/test/API/commands/target/modules/lookup/Makefile
new file mode 100644
index 000000000000..695335e068c0
--- /dev/null
+++ b/lldb/test/API/commands/target/modules/lookup/Makefile
@@ -0,0 +1,4 @@
+C_SOURCES := main.c
+CFLAGS_EXTRAS := -std=c99
+
+include Makefile.rules
diff --git a/lldb/test/API/commands/target/modules/lookup/TestImageLookupPCExpression.py b/lldb/test/API/commands/target/modules/lookup/TestImageLookupPCExpression.py
new file mode 100644
index 000000000000..9872e057cbbf
--- /dev/null
+++ b/lldb/test/API/commands/target/modules/lookup/TestImageLookupPCExpression.py
@@ -0,0 +1,27 @@
+"""
+Make sure that "target modules lookup -va $pc" works
+"""
+
+
+import lldb
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+
+
+class TestImageLookupPCInC(TestBase):
+ def test_sample_rename_this(self):
+ """There can be many tests in a test case - describe this test here."""
+ self.build()
+ self.main_source_file = lldb.SBFileSpec("main.c")
+ self.sample_test()
+
+ def sample_test(self):
+ """Make sure the address expression resolves to the right function"""
+
+ (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
+ self, "Set a breakpoint here", self.main_source_file
+ )
+
+ self.expect("target modules lookup -va $pc", substrs=["doSomething"])
+ self.expect("target modules lookup -va $pc+4", substrs=["doSomething"])
+
diff --git a/lldb/test/API/commands/target/modules/lookup/main.c b/lldb/test/API/commands/target/modules/lookup/main.c
new file mode 100644
index 000000000000..afe962f30916
--- /dev/null
+++ b/lldb/test/API/commands/target/modules/lookup/main.c
@@ -0,0 +1,15 @@
+#include <stdio.h>
+
+void
+doSomething()
+{
+ printf ("Set a breakpoint here.\n");
+ printf ("Need a bit more code.\n");
+}
+
+int
+main()
+{
+ doSomething();
+ return 0;
+}
diff --git a/lldb/test/API/functionalities/type_find_first/Makefile b/lldb/test/API/functionalities/type_find_first/Makefile
index 3d0b98f13f3d..e027553c7a6b 100644
--- a/lldb/test/API/functionalities/type_find_first/Makefile
+++ b/lldb/test/API/functionalities/type_find_first/Makefile
@@ -1,2 +1,2 @@
-CXX_SOURCES := main.cpp
+CXX_SOURCES := main.cpp other.cpp
include Makefile.rules
diff --git a/lldb/test/API/functionalities/type_find_first/TestFindFirstType.py b/lldb/test/API/functionalities/type_find_first/TestFindFirstType.py
index 6347a35e72ea..b1c5659a324a 100644
--- a/lldb/test/API/functionalities/type_find_first/TestFindFirstType.py
+++ b/lldb/test/API/functionalities/type_find_first/TestFindFirstType.py
@@ -8,8 +8,6 @@ from lldbsuite.test import lldbutil
class TypeFindFirstTestCase(TestBase):
- NO_DEBUG_INFO_TESTCASE = True
-
def test_find_first_type(self):
"""
Test SBTarget::FindFirstType() and SBModule::FindFirstType() APIs.
@@ -19,19 +17,22 @@ class TypeFindFirstTestCase(TestBase):
basename, FindFirstType() could end up failing depending on which
type was found first in the debug info indexes. This test will
ensure this doesn't regress in the future.
+
+ The test also looks for a type defined in a different compilation unit
+ to verify that SymbolFileDWARFDebugMap searches each symbol file in a
+ module.
"""
self.build()
target = self.createTestTarget()
- # Test the SBTarget APIs for FindFirstType
- integer_type = target.FindFirstType("Integer::Point")
- self.assertTrue(integer_type.IsValid())
- float_type = target.FindFirstType("Float::Point")
- self.assertTrue(float_type.IsValid())
-
- # Test the SBModule APIs for FindFirstType
exe_module = target.GetModuleAtIndex(0)
self.assertTrue(exe_module.IsValid())
- integer_type = exe_module.FindFirstType("Integer::Point")
- self.assertTrue(integer_type.IsValid())
- float_type = exe_module.FindFirstType("Float::Point")
- self.assertTrue(float_type.IsValid())
+ # Test the SBTarget and SBModule APIs for FindFirstType
+ for api in [target, exe_module]:
+ integer_type = api.FindFirstType("Integer::Point")
+ self.assertTrue(integer_type.IsValid())
+ float_type = api.FindFirstType("Float::Point")
+ self.assertTrue(float_type.IsValid())
+ external_type = api.FindFirstType("OtherCompilationUnit::Type")
+ self.assertTrue(external_type.IsValid())
+ nonexistent_type = api.FindFirstType("NonexistentType")
+ self.assertFalse(nonexistent_type.IsValid())
diff --git a/lldb/test/API/functionalities/type_find_first/main.cpp b/lldb/test/API/functionalities/type_find_first/main.cpp
index f4e467286004..bbb060872a1e 100644
--- a/lldb/test/API/functionalities/type_find_first/main.cpp
+++ b/lldb/test/API/functionalities/type_find_first/main.cpp
@@ -10,8 +10,13 @@ struct Point {
};
} // namespace Float
+namespace OtherCompilationUnit {
+void Function();
+} // namespace OtherCompilationUnit
+
int main(int argc, char const *argv[]) {
Integer::Point ip = {2, 3};
Float::Point fp = {2.0, 3.0};
+ OtherCompilationUnit::Function();
return 0;
}
diff --git a/lldb/test/API/functionalities/type_find_first/other.cpp b/lldb/test/API/functionalities/type_find_first/other.cpp
new file mode 100644
index 000000000000..b91edcd8e1d7
--- /dev/null
+++ b/lldb/test/API/functionalities/type_find_first/other.cpp
@@ -0,0 +1,4 @@
+namespace OtherCompilationUnit {
+struct Type {};
+void Function() { Type typeIsActuallyUsed; }
+} // namespace OtherCompilationUnit \ No newline at end of file
diff --git a/lldb/test/CMakeLists.txt b/lldb/test/CMakeLists.txt
index 0ef2eb1c42ce..b6ec5bc4d819 100644
--- a/lldb/test/CMakeLists.txt
+++ b/lldb/test/CMakeLists.txt
@@ -31,23 +31,6 @@ if(LLDB_ENFORCE_STRICT_TEST_REQUIREMENTS)
endforeach()
endif()
-# The "pexpect" package should come from the system environment, not from the
-# LLDB tree. However, we delay the deletion of it from the tree in case
-# users/buildbots don't have the package yet and need some time to install it.
-# Windows is configured to skip all pexpect tests, and guards all
-# "import pexpect" calls, so we do not need pexpect installed there.
-if (NOT LLDB_TEST_USE_VENDOR_PACKAGES AND NOT WIN32)
- unset(PY_pexpect_FOUND CACHE)
- lldb_find_python_module(pexpect)
- if (NOT PY_pexpect_FOUND)
- message(FATAL_ERROR
- "Python module 'pexpect' not found. Please install it via pip or via "
- "your operating system's package manager. For a temporary workaround, "
- "use a version from the LLDB tree with "
- "`LLDB_TEST_USE_VENDOR_PACKAGES=ON`")
- endif()
-endif()
-
if(LLDB_BUILT_STANDALONE)
# In order to run check-lldb-* we need the correct map_config directives in
# llvm-lit. Because this is a standalone build, LLVM doesn't know about LLDB,
diff --git a/lldb/unittests/Core/ProgressReportTest.cpp b/lldb/unittests/Core/ProgressReportTest.cpp
index 1f993180fd83..f0d253be9bf6 100644
--- a/lldb/unittests/Core/ProgressReportTest.cpp
+++ b/lldb/unittests/Core/ProgressReportTest.cpp
@@ -22,7 +22,7 @@
using namespace lldb;
using namespace lldb_private;
-static std::chrono::milliseconds TIMEOUT(100);
+static std::chrono::milliseconds TIMEOUT(500);
class ProgressReportTest : public ::testing::Test {
public:
@@ -56,7 +56,8 @@ protected:
DebuggerSP m_debugger_sp;
ListenerSP m_listener_sp;
- SubsystemRAII<FileSystem, HostInfo, PlatformMacOSX> subsystems;
+ SubsystemRAII<FileSystem, HostInfo, PlatformMacOSX, ProgressManager>
+ subsystems;
};
TEST_F(ProgressReportTest, TestReportCreation) {
@@ -210,3 +211,37 @@ TEST_F(ProgressReportTest, TestOverlappingEvents) {
// initial report.
EXPECT_EQ(data->GetID(), expected_progress_id);
}
+
+TEST_F(ProgressReportTest, TestProgressManagerDisjointReports) {
+ ListenerSP listener_sp =
+ CreateListenerFor(Debugger::eBroadcastBitProgressCategory);
+ EventSP event_sp;
+ const ProgressEventData *data;
+ uint64_t expected_progress_id;
+
+ { Progress progress("Coalesced report 1", "Starting report 1"); }
+ { Progress progress("Coalesced report 1", "Starting report 2"); }
+ { Progress progress("Coalesced report 1", "Starting report 3"); }
+
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+ expected_progress_id = data->GetID();
+
+ EXPECT_EQ(data->GetDetails(), "");
+ EXPECT_FALSE(data->IsFinite());
+ EXPECT_FALSE(data->GetCompleted());
+ EXPECT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ EXPECT_EQ(data->GetMessage(), "Coalesced report 1");
+
+ ASSERT_TRUE(listener_sp->GetEvent(event_sp, TIMEOUT));
+ data = ProgressEventData::GetEventDataFromEvent(event_sp.get());
+
+ EXPECT_EQ(data->GetID(), expected_progress_id);
+ EXPECT_EQ(data->GetDetails(), "");
+ EXPECT_FALSE(data->IsFinite());
+ EXPECT_TRUE(data->GetCompleted());
+ EXPECT_EQ(data->GetTotal(), Progress::kNonDeterministicTotal);
+ EXPECT_EQ(data->GetMessage(), "Coalesced report 1");
+
+ ASSERT_FALSE(listener_sp->GetEvent(event_sp, TIMEOUT));
+}
diff --git a/lldb/unittests/SymbolFile/PDB/SymbolFilePDBTests.cpp b/lldb/unittests/SymbolFile/PDB/SymbolFilePDBTests.cpp
index f237dd63ab1c..4379ffac9d74 100644
--- a/lldb/unittests/SymbolFile/PDB/SymbolFilePDBTests.cpp
+++ b/lldb/unittests/SymbolFile/PDB/SymbolFilePDBTests.cpp
@@ -102,7 +102,7 @@ protected:
EXPECT_EQ(line, entry.line);
EXPECT_EQ(address, entry.range.GetBaseAddress());
- EXPECT_TRUE(FileSpecMatchesAsBaseOrFull(spec, entry.file));
+ EXPECT_TRUE(FileSpecMatchesAsBaseOrFull(spec, entry.GetFile()));
}
bool ContainsCompileUnit(const SymbolContextList &sc_list,
diff --git a/lldb/unittests/Utility/ScalarTest.cpp b/lldb/unittests/Utility/ScalarTest.cpp
index 29a4bcd356f1..8d957d16593e 100644
--- a/lldb/unittests/Utility/ScalarTest.cpp
+++ b/lldb/unittests/Utility/ScalarTest.cpp
@@ -402,3 +402,61 @@ TEST(ScalarTest, TruncOrExtendTo) {
S.TruncOrExtendTo(16, false);
EXPECT_EQ(S.UInt128(APInt()), APInt(16, 0xffffu));
}
+
+TEST(ScalarTest, APFloatConstructor) {
+ llvm::APFloat my_single(llvm::APFloatBase::IEEEsingle(), "3.14159");
+ llvm::APFloat my_double(llvm::APFloatBase::IEEEdouble(), "3.14159");
+ Scalar S(my_single);
+ Scalar D(my_double);
+
+ EXPECT_EQ(S.GetType(), Scalar::e_float);
+ EXPECT_EQ(D.GetType(), Scalar::e_float);
+ ASSERT_TRUE(S != D);
+}
+
+TEST(ScalarTest, CreateAPFloats) {
+ llvm::APFloat ap_float(llvm::APFloatBase::IEEEsingle(), "3.14159");
+ llvm::APFloat ap_nan = llvm::APFloat::getNaN(llvm::APFloat::IEEEsingle());
+ llvm::APSInt int1("12");
+ llvm::APSInt int2("-4");
+ Scalar I1(int1);
+ Scalar I2(int2);
+ Scalar F(ap_float);
+
+ llvm::APFloat out1_float = I1.CreateAPFloatFromAPSInt(lldb::eBasicTypeFloat);
+ llvm::APFloat out1_double =
+ I1.CreateAPFloatFromAPSInt(lldb::eBasicTypeDouble);
+ llvm::APFloat out1_longdouble =
+ I1.CreateAPFloatFromAPSInt(lldb::eBasicTypeLongDouble);
+ llvm::APFloat out1_nan =
+ I1.CreateAPFloatFromAPSInt(lldb::eBasicTypeFloatComplex);
+ EXPECT_TRUE(!out1_float.isNegative());
+ EXPECT_TRUE(!out1_double.isNegative());
+ EXPECT_TRUE(out1_double.bitwiseIsEqual(out1_longdouble));
+ EXPECT_FALSE(out1_double.bitwiseIsEqual(out1_float));
+ EXPECT_TRUE(out1_nan.bitwiseIsEqual(ap_nan));
+
+ llvm::APFloat out2_float = I2.CreateAPFloatFromAPSInt(lldb::eBasicTypeFloat);
+ llvm::APFloat out2_double =
+ I2.CreateAPFloatFromAPSInt(lldb::eBasicTypeDouble);
+ llvm::APFloat out2_longdouble =
+ I2.CreateAPFloatFromAPSInt(lldb::eBasicTypeLongDouble);
+ llvm::APFloat out2_nan =
+ I2.CreateAPFloatFromAPSInt(lldb::eBasicTypeFloatComplex);
+ EXPECT_TRUE(out2_float.isNegative());
+ EXPECT_TRUE(out2_double.isNegative());
+ EXPECT_TRUE(out2_double.bitwiseIsEqual(out2_longdouble));
+ EXPECT_FALSE(out2_double.bitwiseIsEqual(out2_float));
+ EXPECT_TRUE(out2_nan.bitwiseIsEqual(ap_nan));
+
+ llvm::APFloat out3_float = F.CreateAPFloatFromAPFloat(lldb::eBasicTypeFloat);
+ llvm::APFloat out3_double =
+ F.CreateAPFloatFromAPFloat(lldb::eBasicTypeDouble);
+ llvm::APFloat out3_longdouble =
+ F.CreateAPFloatFromAPFloat(lldb::eBasicTypeLongDouble);
+ llvm::APFloat out3_nan =
+ F.CreateAPFloatFromAPFloat(lldb::eBasicTypeFloatComplex);
+ EXPECT_TRUE(out3_double.bitwiseIsEqual(out3_longdouble));
+ EXPECT_FALSE(out3_double.bitwiseIsEqual(out3_float));
+ EXPECT_TRUE(out3_nan.bitwiseIsEqual(ap_nan));
+}
diff --git a/llvm/bindings/ocaml/debuginfo/debuginfo_ocaml.c b/llvm/bindings/ocaml/debuginfo/debuginfo_ocaml.c
index a793e893524f..fbe45c0c1e0b 100644
--- a/llvm/bindings/ocaml/debuginfo/debuginfo_ocaml.c
+++ b/llvm/bindings/ocaml/debuginfo/debuginfo_ocaml.c
@@ -972,7 +972,7 @@ value llvm_dibuild_create_parameter_variable_bytecode(value *argv, int arg) {
value llvm_dibuild_insert_declare_before_native(value Builder, value Storage,
value VarInfo, value Expr,
value DebugLoc, value Instr) {
- LLVMValueRef Value = LLVMDIBuilderInsertDeclareBefore(
+ LLVMDbgRecordRef Value = LLVMDIBuilderInsertDeclareBefore(
DIBuilder_val(Builder), Value_val(Storage), Metadata_val(VarInfo),
Metadata_val(Expr), Metadata_val(DebugLoc), Value_val(Instr));
return to_val(Value);
@@ -992,7 +992,7 @@ value llvm_dibuild_insert_declare_before_bytecode(value *argv, int arg) {
value llvm_dibuild_insert_declare_at_end_native(value Builder, value Storage,
value VarInfo, value Expr,
value DebugLoc, value Block) {
- LLVMValueRef Value = LLVMDIBuilderInsertDeclareAtEnd(
+ LLVMDbgRecordRef Value = LLVMDIBuilderInsertDeclareAtEnd(
DIBuilder_val(Builder), Value_val(Storage), Metadata_val(VarInfo),
Metadata_val(Expr), Metadata_val(DebugLoc), BasicBlock_val(Block));
return to_val(Value);
@@ -1012,3 +1012,14 @@ value llvm_dibuild_expression(value Builder, value Addr) {
return to_val(LLVMDIBuilderCreateExpression(
DIBuilder_val(Builder), (uint64_t *)Op_val(Addr), Wosize_val(Addr)));
}
+
+/* llmodule -> bool */
+value llvm_is_new_dbg_info_format(value Module) {
+ return Val_bool(LLVMIsNewDbgInfoFormat(Module_val(Module)));
+}
+
+/* llmodule -> bool -> unit */
+value llvm_set_is_new_dbg_info_format(value Module, value UseNewFormat) {
+ LLVMSetIsNewDbgInfoFormat(Module_val(Module), Bool_val(UseNewFormat));
+ return Val_unit;
+}
diff --git a/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.ml b/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.ml
index a6d74ed0eb81..8bb5edb17a2c 100644
--- a/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.ml
+++ b/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.ml
@@ -599,7 +599,7 @@ external dibuild_insert_declare_before :
expr:Llvm.llmetadata ->
location:Llvm.llmetadata ->
instr:Llvm.llvalue ->
- Llvm.llvalue
+ Llvm.lldbgrecord
= "llvm_dibuild_insert_declare_before_bytecode" "llvm_dibuild_insert_declare_before_native"
external dibuild_insert_declare_at_end :
@@ -609,7 +609,7 @@ external dibuild_insert_declare_at_end :
expr:Llvm.llmetadata ->
location:Llvm.llmetadata ->
block:Llvm.llbasicblock ->
- Llvm.llvalue
+ Llvm.lldbgrecord
= "llvm_dibuild_insert_declare_at_end_bytecode" "llvm_dibuild_insert_declare_at_end_native"
external dibuild_expression :
@@ -617,3 +617,9 @@ external dibuild_expression :
Int64.t array ->
Llvm.llmetadata
= "llvm_dibuild_expression"
+
+external is_new_dbg_info_format : Llvm.llmodule -> bool
+ = "llvm_is_new_dbg_info_format"
+
+external set_is_new_dbg_info_format : Llvm.llmodule -> bool -> unit
+ = "llvm_set_is_new_dbg_info_format"
diff --git a/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.mli b/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.mli
index e92778b07589..7c7882ccce85 100644
--- a/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.mli
+++ b/llvm/bindings/ocaml/debuginfo/llvm_debuginfo.mli
@@ -659,7 +659,7 @@ val dibuild_insert_declare_before :
expr:Llvm.llmetadata ->
location:Llvm.llmetadata ->
instr:Llvm.llvalue ->
- Llvm.llvalue
+ Llvm.lldbgrecord
(** [dibuild_insert_declare_before] Insert a new llvm.dbg.declare
intrinsic call before the given instruction [instr]. *)
@@ -670,7 +670,7 @@ val dibuild_insert_declare_at_end :
expr:Llvm.llmetadata ->
location:Llvm.llmetadata ->
block:Llvm.llbasicblock ->
- Llvm.llvalue
+ Llvm.lldbgrecord
(** [dibuild_insert_declare_at_end] Insert a new llvm.dbg.declare
intrinsic call at the end of basic block [block]. If [block]
has a terminator instruction, the intrinsic is inserted
@@ -680,3 +680,9 @@ val dibuild_expression : lldibuilder -> Int64.t array -> Llvm.llmetadata
(** [dibuild_expression] Create a new descriptor for the specified variable
which has a complex address expression for its address.
See LLVMDIBuilderCreateExpression. *)
+
+val is_new_dbg_info_format : Llvm.llmodule -> bool
+(** [is_new_dbg_info_format] See LLVMIsNewDbgInfoFormat *)
+
+val set_is_new_dbg_info_format : Llvm.llmodule -> bool -> unit
+(** [set_is_new_dbg_info_format] See LLVMSetIsNewDbgInfoFormat *)
diff --git a/llvm/bindings/ocaml/llvm/llvm.ml b/llvm/bindings/ocaml/llvm/llvm.ml
index 057798fc0cea..003fd750cd9f 100644
--- a/llvm/bindings/ocaml/llvm/llvm.ml
+++ b/llvm/bindings/ocaml/llvm/llvm.ml
@@ -12,6 +12,7 @@ type llmodule
type llmetadata
type lltype
type llvalue
+type lldbgrecord
type lluse
type llbasicblock
type llbuilder
@@ -528,6 +529,7 @@ external value_name : llvalue -> string = "llvm_value_name"
external set_value_name : string -> llvalue -> unit = "llvm_set_value_name"
external dump_value : llvalue -> unit = "llvm_dump_value"
external string_of_llvalue : llvalue -> string = "llvm_string_of_llvalue"
+external string_of_lldbgrecord : lldbgrecord -> string = "llvm_string_of_lldbgrecord"
external replace_all_uses_with : llvalue -> llvalue -> unit
= "llvm_replace_all_uses_with"
diff --git a/llvm/bindings/ocaml/llvm/llvm.mli b/llvm/bindings/ocaml/llvm/llvm.mli
index e0febb79a2b6..93540c619efb 100644
--- a/llvm/bindings/ocaml/llvm/llvm.mli
+++ b/llvm/bindings/ocaml/llvm/llvm.mli
@@ -36,6 +36,9 @@ type lltype
This type covers a wide range of subclasses. *)
type llvalue
+(** Non-instruction debug info record. See the [llvm::DbgRecord] class.*)
+type lldbgrecord
+
(** Used to store users and usees of values. See the [llvm::Use] class. *)
type lluse
@@ -793,6 +796,9 @@ val dump_value : llvalue -> unit
(** [string_of_llvalue v] returns a string describing the value [v]. *)
val string_of_llvalue : llvalue -> string
+(** [string_of_lldbgrecord r] returns a string describing the DbgRecord [r]. *)
+val string_of_lldbgrecord : lldbgrecord -> string
+
(** [replace_all_uses_with old new] replaces all uses of the value [old]
with the value [new]. See the method [llvm::Value::replaceAllUsesWith]. *)
val replace_all_uses_with : llvalue -> llvalue -> unit
diff --git a/llvm/bindings/ocaml/llvm/llvm_ocaml.c b/llvm/bindings/ocaml/llvm/llvm_ocaml.c
index 55679f218b30..6d08d78b8445 100644
--- a/llvm/bindings/ocaml/llvm/llvm_ocaml.c
+++ b/llvm/bindings/ocaml/llvm/llvm_ocaml.c
@@ -800,6 +800,15 @@ value llvm_string_of_llvalue(value M) {
return ValueStr;
}
+/* lldbgrecord -> string */
+value llvm_string_of_lldbgrecord(value Record) {
+ char *ValueCStr = LLVMPrintDbgRecordToString(DbgRecord_val(Record));
+ value ValueStr = caml_copy_string(ValueCStr);
+ LLVMDisposeMessage(ValueCStr);
+
+ return ValueStr;
+}
+
/* llvalue -> llvalue -> unit */
value llvm_replace_all_uses_with(value OldVal, value NewVal) {
LLVMReplaceAllUsesWith(Value_val(OldVal), Value_val(NewVal));
diff --git a/llvm/bindings/ocaml/llvm/llvm_ocaml.h b/llvm/bindings/ocaml/llvm/llvm_ocaml.h
index a3791744e647..ec60d6a5dad6 100644
--- a/llvm/bindings/ocaml/llvm/llvm_ocaml.h
+++ b/llvm/bindings/ocaml/llvm/llvm_ocaml.h
@@ -53,6 +53,7 @@ void *from_val_array(value Elements);
#define Metadata_val(v) ((LLVMMetadataRef)from_val(v))
#define Type_val(v) ((LLVMTypeRef)from_val(v))
#define Value_val(v) ((LLVMValueRef)from_val(v))
+#define DbgRecord_val(v) ((LLVMDbgRecordRef)from_val(v))
#define Use_val(v) ((LLVMUseRef)from_val(v))
#define BasicBlock_val(v) ((LLVMBasicBlockRef)from_val(v))
#define MemoryBuffer_val(v) ((LLVMMemoryBufferRef)from_val(v))
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 29ea5005c0c4..22c1d1f186ea 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1449,11 +1449,52 @@ The AMDGPU backend supports the following LLVM IR attributes.
the frame. This is an internal detail of how LDS variables are lowered,
language front ends should not set this attribute.
+ "amdgpu-gds-size" Bytes expected to be allocated at the start of GDS memory at entry.
+
+ "amdgpu-git-ptr-high" The hard-wired high half of the address of the global information table
+ for AMDPAL OS type. 0xffffffff represents no hard-wired high half, since
+ current hardware only allows a 16 bit value.
+
+ "amdgpu-32bit-address-high-bits" Assumed high 32-bits for 32-bit address spaces which are really truncated
+ 64-bit addresses (i.e., addrspace(6))
+
+ "amdgpu-color-export" Indicates shader exports color information if set to 1.
+ Defaults to 1 for :ref:`amdgpu_ps <amdgpu-cc>`, and 0 for other calling
+ conventions. Determines the necessity and type of null exports when a shader
+ terminates early by killing lanes.
+
+ "amdgpu-depth-export" Indicates shader exports depth information if set to 1. Determines the
+ necessity and type of null exports when a shader terminates early by killing
+ lanes. A depth-only shader will export to depth channel when no null export
+ target is available (GFX11+).
+
+ "InitialPSInputAddr" Set the initial value of the `spi_ps_input_addr` register for
+ :ref:`amdgpu_ps <amdgpu-cc>` shaders. Any bits enabled by this value will
+ be enabled in the final register value.
+
+ "amdgpu-wave-priority-threshold" VALU instruction count threshold for adjusting wave priority. If exceeded,
+ temporarily raise the wave priority at the start of the shader function
+ until its last VMEM instructions to allow younger waves to issue their VMEM
+ instructions as well.
+
+ "amdgpu-memory-bound" Set internally by backend
+
+ "amdgpu-wave-limiter" Set internally by backend
+
+ "amdgpu-unroll-threshold" Set base cost threshold preference for loop unrolling within this function,
+ default is 300. Actual threshold may be varied by per-loop metadata or
+ reduced by heuristics.
+
"amdgpu-max-num-workgroups"="x,y,z" Specify the maximum number of work groups for the kernel dispatch in the
X, Y, and Z dimensions. Generated by the ``amdgpu_max_num_work_groups``
CLANG attribute [CLANG-ATTR]_. Clang only emits this attribute when all
the three numbers are >= 1.
+ "amdgpu-no-agpr" Indicates the function will not require allocating AGPRs. This is only
+ relevant on subtargets with AGPRs. The behavior is undefined if a
+ function which requires AGPRs is reached through any function marked
+ with this attribute.
+
======================================= ==========================================================
Calling Conventions
diff --git a/llvm/docs/CommandGuide/llvm-debuginfo-analyzer.rst b/llvm/docs/CommandGuide/llvm-debuginfo-analyzer.rst
index 54622cc61fdf..5b3200a4b782 100644
--- a/llvm/docs/CommandGuide/llvm-debuginfo-analyzer.rst
+++ b/llvm/docs/CommandGuide/llvm-debuginfo-analyzer.rst
@@ -2267,6 +2267,10 @@ EXIT STATUS
:program:`llvm-debuginfo-analyzer` returns 0 if the input files were
parsed and printed successfully. Otherwise, it returns 1.
+LIMITATIONS AND KNOWN ISSUES
+----------------------------
+See :download:`Limitations <../../tools/llvm-debuginfo-analyzer/README.md>`.
+
SEE ALSO
--------
:manpage:`llvm-dwarfdump`
diff --git a/llvm/docs/CommandGuide/llvm-objcopy.rst b/llvm/docs/CommandGuide/llvm-objcopy.rst
index 9d0cb7ad1195..985d16eb11cf 100644
--- a/llvm/docs/CommandGuide/llvm-objcopy.rst
+++ b/llvm/docs/CommandGuide/llvm-objcopy.rst
@@ -464,6 +464,19 @@ them.
Read a list of symbols from <filename> and change their visibility to the
specified value. Visibility values: default, internal, hidden, protected.
+.. option:: --skip-symbol <symbol>
+
+ Do not change the parameters of symbol ``<symbol>`` when executing other
+ options that can change the symbol's name, binding or visibility.
+
+.. option:: --skip-symbols <filename>
+
+ Do not change the parameters of symbols named in the file ``<filename>`` when
+ executing other options that can change the symbol's name, binding or
+ visibility. In the file, each line represents a single symbol, with leading
+ and trailing whitespace ignored, as is anything following a '#'.
+ Can be specified multiple times to read names from multiple files.
+
.. option:: --split-dwo <dwo-file>
Equivalent to running :program:`llvm-objcopy` with :option:`--extract-dwo` and
diff --git a/llvm/docs/DirectX/DXILArchitecture.rst b/llvm/docs/DirectX/DXILArchitecture.rst
index d6712bea4f77..32b1e72deae7 100644
--- a/llvm/docs/DirectX/DXILArchitecture.rst
+++ b/llvm/docs/DirectX/DXILArchitecture.rst
@@ -61,6 +61,19 @@ on the utilities described in "Common Code" above in order to share
logic with both the DirectX backend and with Clang's codegen of HLSL
support as much as possible.
+The DirectX Intrinsic Expansion Pass
+====================================
+There are intrinsics that don't map directly to DXIL Ops. In some cases
+an intrinsic needs to be expanded to a set of LLVM IR instructions. In
+other cases an intrinsic needs modifications to the arguments or return
+values of a DXIL Op. The `DXILIntrinsicExpansion` pass handles all
+the cases where our intrinsics don't have a one to one mapping. This
+pass may also be used when the expansion is specific to DXIL to keep
+implementation details out of CodeGen. Finally, there is an expectation
+that we maintain vector types through this pass. Therefore, best
+practice would be to avoid scalarization in this pass.
+
+
The DirectX Backend
===================
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index ac6217d08e6a..cae2c21b80d7 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -939,6 +939,25 @@ The _CONVERGENT variant corresponds to an LLVM IR intrinsic marked `convergent`.
Unlike SelectionDAG, there is no _VOID variant. Both of these are permitted
to have zero, one, or multiple results.
+G_TRAP, G_DEBUGTRAP, G_UBSANTRAP
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Represents :ref:`llvm.trap <llvm.trap>`, :ref:`llvm.debugtrap <llvm.debugtrap>`
+and :ref:`llvm.ubsantrap <llvm.ubsantrap>` that generate target dependent
+trap instructions.
+
+.. code-block:: none
+
+ G_TRAP
+
+.. code-block:: none
+
+ G_DEBUGTRAP
+
+.. code-block:: none
+
+ G_UBSANTRAP 12
+
Variadic Arguments
------------------
diff --git a/llvm/docs/InstCombineContributorGuide.md b/llvm/docs/InstCombineContributorGuide.md
new file mode 100644
index 000000000000..2416fd0920f6
--- /dev/null
+++ b/llvm/docs/InstCombineContributorGuide.md
@@ -0,0 +1,556 @@
+# InstCombine contributor guide
+
+This guide lays out a series of rules that contributions to InstCombine should
+follow. **Following these rules will result in much faster PR approvals.**
+
+## Tests
+
+### Precommit tests
+
+Tests for new optimizations or miscompilation fixes should be pre-committed.
+This means that you first commit the test with CHECK lines showing the behavior
+*without* your change. Your actual change will then only contain CHECK line
+diffs relative to that baseline.
+
+This means that pull requests should generally contain two commits: First,
+one commit adding new tests with baseline check lines. Second, a commit with
+functional changes and test diffs.
+
+If the second commit in your PR does not contain test diffs, you did something
+wrong. Either you made a mistake when generating CHECK lines, or your tests are
+not actually affected by your patch.
+
+Exceptions: When fixing assertion failures or infinite loops, do not pre-commit
+tests.
+
+### Use `update_test_checks.py`
+
+CHECK lines should be generated using the `update_test_checks.py` script. Do
+**not** manually edit check lines after using it.
+
+Be sure to use the correct opt binary when using the script. For example, if
+your build directory is `build`, then you'll want to run:
+
+```sh
+llvm/utils/update_test_checks.py --opt-binary build/bin/opt \
+ llvm/test/Transforms/InstCombine/the_test.ll
+```
+
+Exceptions: Hand-written CHECK lines are allowed for debuginfo tests.
+
+### General testing considerations
+
+Place all tests relating to a transform into a single file. If you are adding
+a regression test for a crash/miscompile in an existing transform, find the
+file where the existing tests are located. A good way to do that is to comment
+out the transform and see which tests fail.
+
+Make tests minimal. Only test exactly the pattern being transformed. If your
+original motivating case is a larger pattern that your fold enables to
+optimize in some non-trivial way, you may add it as well -- however, the bulk
+of the test coverage should be minimal.
+
+Give tests short, but meaningful names. Don't call them `@test1`, `@test2` etc.
+For example, a test checking multi-use behavior of a fold involving the
+addition of two selects might be called `@add_of_selects_multi_use`.
+
+Add representative tests for each test category (discussed below), but don't
+test all combinations of everything. If you have multi-use tests, and you have
+commuted tests, you shouldn't also add commuted multi-use tests.
+
+Prefer to keep bit-widths for tests low to improve performance of proof checking using alive2. Using `i8` is better than `i128` where possible.
+
+### Add negative tests
+
+Make sure to add tests for which your transform does **not** apply. Start with
+one of the test cases that succeeds and then create a sequence of negative
+tests, such that **exactly one** different pre-condition of your transform is
+not satisfied in each test.
+
+### Add multi-use tests
+
+Add multi-use tests that ensure your transform does not increase instruction
+count if some instructions have additional uses. The standard pattern is to
+introduce extra uses with function calls:
+
+```llvm
+declare void @use(i8)
+
+define i8 @add_mul_const_multi_use(i8 %x) {
+ %add = add i8 %x, 1
+ call void @use(i8 %add)
+ %mul = mul i8 %add, 3
+ ret i8 %mul
+}
+```
+
+Exceptions: For transforms that only produce one instruction, multi-use tests
+may be omitted.
+
+### Add commuted tests
+
+If the transform involves commutative operations, add tests with commuted
+(swapped) operands.
+
+Make sure that the operand order stays intact in the CHECK lines of your
+pre-committed tests. You should not see something like this:
+
+```llvm
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X]], [[Y]]
+; ...
+%or = or i8 %y, %x
+```
+
+If this happens, you may need to change one of the operands to have higher
+complexity (include the "thwart" comment in that case):
+
+```llvm
+%y2 = mul i8 %y, %y ; thwart complexity-based canonicalization
+%or = or i8 %y, %x
+```
+
+### Add vector tests
+
+When possible, it is recommended to add at least one test that uses vectors
+instead of scalars.
+
+For patterns that include constants, we distinguish three kinds of tests.
+The first are "splat" vectors, where all the vector elements are the same.
+These tests *should* usually fold without additional effort.
+
+```llvm
+define <2 x i8> @add_mul_const_vec_splat(<2 x i8> %x) {
+ %add = add <2 x i8> %x, <i8 1, i8 1>
+ %mul = mul <2 x i8> %add, <i8 3, i8 3>
+ ret <2 x i8> %mul
+}
+```
+
+A minor variant is to replace some of the splat elements with poison. These
+will often also fold without additional effort.
+
+```llvm
+define <2 x i8> @add_mul_const_vec_splat_poison(<2 x i8> %x) {
+ %add = add <2 x i8> %x, <i8 1, i8 poison>
+ %mul = mul <2 x i8> %add, <i8 3, i8 poison>
+ ret <2 x i8> %mul
+}
+```
+
+Finally, you can have non-splat vectors, where the vector elements are not
+the same:
+
+```llvm
+define <2 x i8> @add_mul_const_vec_non_splat(<2 x i8> %x) {
+ %add = add <2 x i8> %x, <i8 1, i8 5>
+ %mul = mul <2 x i8> %add, <i8 3, i8 6>
+ ret <2 x i8> %mul
+}
+```
+
+Non-splat vectors will often not fold by default. You should **not** try to
+make them fold, unless doing so does not add **any** additional complexity.
+You should still add the test though, even if it does not fold.
+
+### Flag tests
+
+If your transform involves instructions that can have poison-generating flags,
+such as `nuw` and `nsw` on `add`, you should test how these interact with the
+transform.
+
+If your transform *requires* a certain flag for correctness, make sure to add
+negative tests missing the required flag.
+
+If your transform doesn't require flags for correctness, you should have tests
+for preservation behavior. If the input instructions have certain flags, are
+they preserved in the output instructions, if it is valid to preserve them?
+(This depends on the transform. Check with alive2.)
+
+The same also applies to fast-math-flags (FMF). In that case, please always
+test specific flags like `nnan`, `nsz` or `reassoc`, rather than the umbrella
+`fast` flag.
+
+### Other tests
+
+The test categories mentioned above are non-exhaustive. There may be more tests
+to be added, depending on the instructions involved in the transform. Some
+examples:
+
+ * For folds involving memory accesses like load/store, check that scalable vectors and non-byte-size types (like i3) are handled correctly. Also check that volatile/atomic are handled.
+ * For folds that interact with the bitwidth in some non-trivial way, check an illegal type like i13. Also confirm that the transform is correct for i1.
+ * For folds that involve phis, you may want to check that the case of multiple incoming values from one block is handled correctly.
+
+## Proofs
+
+Your pull request description should contain one or more
+[alive2 proofs](https://alive2.llvm.org/ce/) for the correctness of the
+proposed transform.
+
+### Basics
+
+Proofs are written using LLVM IR, by specifying a `@src` and `@tgt` function.
+It is possible to include multiple proofs in a single file by giving the src
+and tgt functions matching suffixes.
+
+For example, here is a pair of proofs that both `(x-y)+y` and `(x+y)-y` can
+be simplified to `x` ([online](https://alive2.llvm.org/ce/z/MsPPGz)):
+
+```llvm
+define i8 @src_add_sub(i8 %x, i8 %y) {
+ %add = add i8 %x, %y
+ %sub = sub i8 %add, %y
+ ret i8 %sub
+}
+
+define i8 @tgt_add_sub(i8 %x, i8 %y) {
+ ret i8 %x
+}
+
+
+define i8 @src_sub_add(i8 %x, i8 %y) {
+ %sub = sub i8 %x, %y
+ %add = add i8 %sub, %y
+ ret i8 %add
+}
+
+define i8 @tgt_sub_add(i8 %x, i8 %y) {
+ ret i8 %x
+}
+```
+
+### Use generic values in proofs
+
+Proofs should operate on generic values, rather than specific constants, to the degree that this is possible.
+
+For example, if we want to fold `X s/ C s< X` to `X s> 0`, the following would
+be a *bad* proof:
+
+```llvm
+; Don't do this!
+define i1 @src(i8 %x) {
+ %div = sdiv i8 %x, 123
+ %cmp = icmp slt i8 %div, %x
+ ret i1 %cmp
+}
+
+define i1 @tgt(i8 %x) {
+ %cmp = icmp sgt i8 %x, 0
+ ret i1 %cmp
+}
+```
+
+This is because it only proves that the transform is correct for the specific
+constant 123. Maybe there are some constants for which the transform is
+incorrect?
+
+The correct way to write this proof is as follows
+([online](https://alive2.llvm.org/ce/z/acjwb6)):
+
+```llvm
+define i1 @src(i8 %x, i8 %C) {
+ %precond = icmp ne i8 %C, 1
+ call void @llvm.assume(i1 %precond)
+ %div = sdiv i8 %x, %C
+ %cmp = icmp slt i8 %div, %x
+ ret i1 %cmp
+}
+
+define i1 @tgt(i8 %x, i8 %C) {
+ %cmp = icmp sgt i8 %x, 0
+ ret i1 %cmp
+}
+```
+
+Note that the `@llvm.assume` intrinsic is used to specify pre-conditions for
+the transform. In this case, the proof will fail unless we specify `C != 1` as
+a pre-condition.
+
+It should be emphasized that there is, in general, no expectation that the
+IR in the proofs will be transformed by the implemented fold. In the above
+example, the transform would only apply if `%C` is actually a constant, but we
+need to use non-constants in the proof.
+
+### Common pre-conditions
+
+Here are some examples of common preconditions.
+
+```llvm
+; %x is non-negative:
+%nonneg = icmp sgt i8 %x, -1
+call void @llvm.assume(i1 %nonneg)
+
+; %x is a power of two:
+%ctpop = call i8 @llvm.ctpop.i8(i8 %x)
+%pow2 = icmp eq i8 %ctpop, 1
+call void @llvm.assume(i1 %pow2)
+
+; %x is a power of two or zero:
+%ctpop = call i8 @llvm.ctpop.i8(i8 %x)
+%pow2orzero = icmp ult i8 %ctpop, 2
+call void @llvm.assume(i1 %pow2orzero)
+
+; Adding %x and %y does not overflow in a signed sense:
+%wo = call { i8, i1 } @llvm.sadd.with.overflow(i8 %x, i8 %y)
+%ov = extractvalue { i8, i1 } %wo, 1
+%ov.not = xor i1 %ov, true
+call void @llvm.assume(i1 %ov.not)
+```
+
+### Timeouts
+
+Alive2 proofs will sometimes produce a timeout with the following message:
+
+```
+Alive2 timed out while processing your query.
+There are a few things you can try:
+
+- remove extraneous instructions, if any
+
+- reduce variable widths, for example to i16, i8, or i4
+
+- add the --disable-undef-input command line flag, which
+ allows Alive2 to assume that arguments to your IR are not
+ undef. This is, in general, unsound: it can cause Alive2
+ to miss bugs.
+```
+
+This is good advice, follow it!
+
+Reducing the bitwidth usually helps. For floating point numbers, you can use
+the `half` type for bitwidth reduction purposes. For pointers, you can reduce
+the bitwidth by specifying a custom data layout:
+
+```llvm
+; For 16-bit pointers
+target datalayout = "p:16:16"
+```
+
+If reducing the bitwidth does not help, try `-disable-undef-input`. This will
+often significantly improve performance, but also implies that the correctness
+of the transform with `undef` values is no longer verified. This is usually
+fine if the transform does not increase the number of uses of any value.
+
+Finally, it's possible to build alive2 locally and use `-smt-to=<m>` to verify
+the proof with a larger timeout. If you don't want to do this (or it still
+does not work), please submit the proof you have despite the timeout.
+
+## Implementation
+
+### Real-world usefulness
+
+There is a very large number of transforms that *could* be implemented, but
+only a tiny fraction of them are useful for real-world code.
+
+Transforms that do not have real-world usefulness provide *negative* value to
+the LLVM project, by taking up valuable reviewer time, increasing code
+complexity and increasing compile-time overhead.
+
+We do not require explicit proof of real-world usefulness for every transform
+-- in most cases the usefulness is fairly "obvious". However, the question may
+come up for complex or unusual folds. Keep this in mind when choosing what you
+work on.
+
+In particular, fixes for fuzzer-generated missed optimization reports will
+likely be rejected if there is no evidence of real-world usefulness.
+
+### Pick the correct optimization pass
+
+There are a number of passes and utilities in the InstCombine family, and it
+is important to pick the right place when implementing a fold.
+
+ * `ConstantFolding`: For folding instructions with constant arguments to a constant. (Mainly relevant for intrinsics.)
+ * `ValueTracking`: For analyzing instructions, e.g. for known bits, non-zero, etc. Tests should usually use `-passes=instsimplify`.
+ * `InstructionSimplify`: For folds that do not create new instructions (either fold to existing value or constant).
+ * `InstCombine`: For folds that create or modify instructions.
+ * `AggressiveInstCombine`: For folds that are expensive, or violate InstCombine requirements.
+ * `VectorCombine`: For folds of vector operations that require target-dependent cost-modelling.
+
+Sometimes, folds that logically belong in InstSimplify are placed in InstCombine instead, for example because they are too expensive, or because they are structurally simpler to implement in InstCombine.
+
+For example, if a fold produces new instructions in some cases but returns an existing value in others, it may be preferable to keep all cases in InstCombine, rather than trying to split them among InstCombine and InstSimplify.
+
+### Canonicalization and target-independence
+
+InstCombine is a target-independent canonicalization pass. This means that it
+tries to bring IR into a "canonical form" that other optimizations (both inside
+and outside of InstCombine) can rely on. For this reason, the chosen canonical
+form needs to be the same for all targets, and not depend on target-specific
+cost modelling.
+
+In many cases, "canonicalization" and "optimization" coincide. For example, if
+we convert `x * 2` into `x << 1`, this both makes the IR more canonical
+(because there is now only one way to express the same operation, rather than
+two) and faster (because shifts will usually have lower latency than
+multiplies).
+
+However, there are also canonicalizations that don't serve any direct
+optimization purpose. For example, InstCombine will canonicalize non-strict
+predicates like `ule` to strict predicates like `ult`. `icmp ule i8 %x, 7`
+becomes `icmp ult i8 %x, 8`. This is not an optimization in any meaningful
+sense, but it does reduce the number of cases that other transforms need to
+handle.
+
+If some canonicalization is not profitable for a specific target, then a reverse
+transform needs to be added in the backend. Patches to disable specific
+InstCombine transforms on certain targets, or to drive them using
+target-specific cost-modelling, **will not be accepted**. The only permitted
+target-dependence is on DataLayout and TargetLibraryInfo.
+
+The use of TargetTransformInfo is only allowed for hooks for target-specific
+intrinsics, such as `TargetTransformInfo::instCombineIntrinsic()`. These are
+already inherently target-dependent anyway.
+
+For vector-specific transforms that require cost-modelling, the VectorCombine
+pass can be used instead. In very rare circumstances, if there are no other
+alternatives, target-dependent transforms may be accepted into
+AggressiveInstCombine.
+
+### PatternMatch
+
+Many transforms make use of the matching infrastructure defined in
+[PatternMatch.h](https://github.com/llvm/llvm-project/blame/main/llvm/include/llvm/IR/PatternMatch.h).
+
+Here is a typical usage example:
+
+```
+// Fold (A - B) + B and B + (A - B) to A.
+Value *A, *B;
+if (match(V, m_c_Add(m_Sub(m_Value(A), m_Value(B)), m_Deferred(B))))
+ return A;
+```
+
+And another:
+
+```
+// Fold A + C1 == C2 to A == C1+C2
+Value *A;
+if (match(V, m_ICmp(Pred, m_Add(m_Value(A), m_APInt(C1)), m_APInt(C2))) &&
+ ICmpInst::isEquality(Pred))
+ return Builder.CreateICmp(Pred, A,
+ ConstantInt::get(A->getType(), *C1 + *C2));
+```
+
+Some common matchers are:
+
+ * `m_Value(A)`: Match any value and write it into `Value *A`.
+ * `m_Specific(A)`: Check that the operand equals A. Use this if A is
+ assigned **outside** the pattern.
+ * `m_Deferred(A)`: Check that the operand equals A. Use this if A is
+ assigned **inside** the pattern, for example via `m_Value(A)`.
+ * `m_APInt(C)`: Match a scalar integer constant or splat vector constant into
+ `const APInt *C`. Does not permit undef/poison values.
+ * `m_ImmConstant(C)`: Match any non-constant-expression constant into
+ `Constant *C`.
+ * `m_Constant(C)`: Match any constant into `Constant *C`. Don't use this unless
+ you know what you're doing.
+ * `m_Add(M1, M2)`, `m_Sub(M1, M2)`, etc: Match an add/sub/etc where the first
+ operand matches M1 and the second M2.
+ * `m_c_Add(M1, M2)`, etc: Match an add commutatively. The operands must match
+ either M1 and M2 or M2 and M1. Most instruction matchers have a commutative
+ variant.
+ * `m_ICmp(Pred, M1, M2)` and `m_c_ICmp(Pred, M1, M2)`: Match an icmp, writing
+   the predicate into `ICmpInst::Predicate Pred`. If the commutative version
+   is used, and the operands match in order M2, M1, then `Pred` will be the
+   swapped predicate.
+ * `m_OneUse(M)`: Check that the value only has one use, and also matches M.
+ For example `m_OneUse(m_Add(...))`. See the next section for more
+ information.
+
+See the header for the full list of available matchers.
+
+### InstCombine APIs
+
+InstCombine transforms are handled by `visitXYZ()` methods, where XYZ
+corresponds to the root instruction of your transform. If the outermost
+instruction of the pattern you are matching is an icmp, the fold will be
+located somewhere inside `visitICmpInst()`.
+
+The return value of the visit method is an instruction. You can either return
+a new instruction, in which case it will be inserted before the old one, and
+uses of the old one will be replaced by it. Or you can return the original
+instruction to indicate that *some* kind of change has been made. Finally, a
+nullptr return value indicates that no change occurred.
+
+For example, if your transform produces a single new icmp instruction, you could
+write the following:
+
+```
+if (...)
+ return new ICmpInst(Pred, X, Y);
+```
+
+In this case the main InstCombine loop takes care of inserting the instruction
+and replacing uses of the old instruction.
+
+Alternatively, you can also write it like this:
+
+```
+if (...)
+ return replaceInstUsesWith(OrigI, Builder.CreateICmp(Pred, X, Y));
+```
+
+In this case `IRBuilder` will insert the instruction and `replaceInstUsesWith()`
+will replace the uses of the old instruction, and return it to indicate that
+a change occurred.
+
+Both forms are equivalent, and you can use whichever is more convenient in
+context. For example, it's common that folds are inside helper functions that
+return `Value *` and then `replaceInstUsesWith()` is invoked on the result of
+that helper.
+
+InstCombine makes use of a worklist, which needs to be correctly updated during
+transforms. This usually happens automatically, but there are some things to
+keep in mind:
+
+ * Don't use the `Value::replaceAllUsesWith()` API. Use InstCombine's
+ `replaceInstUsesWith()` helper instead.
+ * Don't use the `Instruction::eraseFromParent()` API. Use InstCombine's
+   `eraseInstFromFunction()` helper instead. (Explicitly erasing instructions
+   is usually not necessary, as side-effect free instructions without users
+   are automatically removed.)
+ * Apart from the "directly return an instruction" pattern above, use IRBuilder
+   to create all instructions. Do not manually create and insert them.
+ * When replacing operands or uses of instructions, use `replaceOperand()`
+ and `replaceUse()` instead of `setOperand()`.
+
+### Multi-use handling
+
+Transforms should usually not increase the total number of instructions. This
+is not a hard requirement: For example, it is usually worthwhile to replace a
+single division instruction with multiple other instructions.
+
+For example, if you have a transform that replaces two instructions, with two
+other instructions, this is (usually) only profitable if *both* the original
+instructions can be removed. To ensure that both instructions are removed, you
+need to add a one-use check for the inner instruction.
+
+One-use checks can be performed using the `m_OneUse()` matcher, or the
+`V->hasOneUse()` method.
+
+### Generalization
+
+Transforms can both be too specific (only handling some odd subset of patterns,
+leading to unexpected optimization cliffs) and too general (introducing
+complexity to handle cases with no real-world relevance). The right level of
+generality is quite subjective, so this section only provides some broad
+guidelines.
+
+ * Avoid transforms that are hardcoded to specific constants. Try to figure
+ out what the general rule for arbitrary constants is.
+ * Add handling for conjugate patterns. For example, if you implement a fold
+ for `icmp eq`, you almost certainly also want to support `icmp ne`, with the
+ inverse result. Similarly, if you implement a pattern for `and` of `icmp`s,
+ you should also handle the de-Morgan conjugate using `or`.
+ * Handle non-splat vector constants if doing so is free, but do not add
+ handling for them if it adds any additional complexity to the code.
+ * Do not handle non-canonical patterns, unless there is a specific motivation
+ to do so. For example, it may sometimes be worthwhile to handle a pattern
+ that would normally be converted into a different canonical form, but can
+ still occur in multi-use scenarios. This is fine to do if there is specific
+ real-world motivation, but you should not go out of your way to do this
+ otherwise.
+ * Sometimes the motivating pattern uses a constant value with certain
+ properties, but the fold can be generalized to non-constant values by making
+ use of ValueTracking queries. Whether this makes sense depends on the case,
+ but it's usually a good idea to only handle the constant pattern first, and
+ then generalize later if it seems useful.
diff --git a/llvm/docs/InstrProfileFormat.rst b/llvm/docs/InstrProfileFormat.rst
index 2069b87a245a..3b33c09f8c7a 100644
--- a/llvm/docs/InstrProfileFormat.rst
+++ b/llvm/docs/InstrProfileFormat.rst
@@ -150,6 +150,13 @@ Header
Records the in-memory address of name section. Not used except for raw profile
reader error checking.
+``NumVTables``
+ Records the number of instrumented vtable entries in the binary. Used for
+ `type profiling`_.
+
+``VNamesSize``
+ Records the byte size in the virtual table names section. Used for `type profiling`_.
+
``ValueKindLast``
Records the number of value kinds. Macro `VALUE_PROF_KIND`_ defines the value
kinds with a description of the kind.
@@ -323,6 +330,8 @@ for the design.
.. _`Modified Condition/Decision Coverage`: https://en.wikipedia.org/wiki/Modified_condition/decision_coverage
.. _`Bitmap RFC`: https://discourse.llvm.org/t/rfc-source-based-mc-dc-code-coverage/59244
+.. _`function names`:
+
Names
^^^^^^
@@ -333,6 +342,37 @@ Function names serve as keys in the PGO data hash table when raw profiles are
converted into indexed profiles. They are also crucial for ``llvm-profdata`` to
show the profiles in a human-readable way.
+Virtual Table Profile Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This section is used for `type profiling`_. Each entry corresponds to one virtual
+table and is defined by the following C++ struct
+
+.. code-block:: c++
+
+ struct VTableProfData {
+ // The start address of the vtable, collected at runtime.
+ uint64_t StartAddress;
+ // The byte size of the vtable. `StartAddress` and `ByteSize` specifies an address range to look up.
+ uint32_t ByteSize;
+ // The hash of vtable's (PGO) name
+ uint64_t MD5HashOfName;
+ };
+
+At profile use time, the compiler looks up a profiled address in the sorted vtable
+address ranges and maps the address to a specific vtable through hashed name.
+
+Virtual Table Names
+^^^^^^^^^^^^^^^^^^^^
+
+This section is similar to `function names`_ section above, except it contains the PGO
+names of profiled virtual tables. It's a standalone section such that raw profile
+readers could directly find each name set by accessing the corresponding profile
+data section.
+
+This section is stored in raw profiles such that `llvm-profdata` could show the
+profiles in a human-readable way.
+
Value Profile Data
^^^^^^^^^^^^^^^^^^^^
@@ -360,6 +400,10 @@ profiles generated by older tools or compilers.
General Storage Layout
-----------------------
+The ASCII art depicts the general storage layout of indexed profiles.
+Specifically, the indexed profile header describes the byte offset of individual
+payload sections.
+
::
+-----------------------+---+
@@ -369,55 +413,49 @@ General Storage Layout
+-----------------------+ |
| HashType | H
+-----------------------+ E
- +-------| HashOffset | A
- | +-----------------------+ D
- +-----------| MemProfOffset | E
- | | +-----------------------+ R
- | | +--| BinaryIdOffset | |
- | | | +-----------------------+ |
- +---------------| TemporalProf- | |
- | | | | | TracesOffset | |
- | | | | +-----------------------+---+
- | | | | | Profile Summary | |
- | | | | +-----------------------+ P
- | | +------>| Function data | A
- | | | +-----------------------+ Y
- | +---------->| MemProf profile data | L
- | | +-----------------------+ O
- | +->| Binary Ids | A
+ | Byte Offset | A
+ +------ | of section A | D
+ | +-----------------------+ E
+ | | Byte Offset | R
+ +-----------| of section B | |
+ | | +-----------------------+ |
+ | | | ... | |
+ | | +-----------------------+ |
+ | | | Byte Offset | |
+ +---------------| of section Z | |
+ | | | +-----------------------+---+
+ | | | | Profile Summary | |
+ | | | +-----------------------+ P
+ | | +------>| Section A | A
+ | | +-----------------------+ Y
+ | +---------->| Section B | L
+ | +-----------------------+ O
+ | | ... | A
| +-----------------------+ D
- +-------------->| Temporal profiles | |
+ +-------------->| Section Z | |
+-----------------------+---+
-Header
---------
+.. note::
-``Magic``
- The purpose of the magic number is to be able to tell if the profile is an
- indexed profile.
+ Profile summary section is at the beginning of payload. It's right after the
+ header so its position is implicitly known after reading the header.
-``Version``
- Similar to raw profile version, the lower 32 bits specify the version of the
- indexed profile and the most significant 32 bits are reserved to specify the
- variant types of the profile.
+Header
+--------
-``HashType``
- The hashing scheme for on-disk hash table keys. Only MD5 hashing is used as of
- writing.
+The `Header struct`_ is the source of truth and struct fields should explain
+what's in the header. At a high level, `*Offset` fields record section byte
+offsets, which are used by readers to locate interesting sections and skip
+uninteresting ones.
-``HashOffset``
- An on-disk hash table stores the per-function profile records. This field records
- the offset of this hash table's metadata (i.e., the number of buckets and
- entries), which follows right after the payload of the entire hash table.
+.. note::
-``MemProfOffset``
- Records the byte offset of MemProf profiling data.
+ To maintain backward compatibility of the indexed profiles, existing fields
+ shouldn't be deleted from struct definition; the field order shouldn't be
+ modified. New fields should be appended.
-``BinaryIdOffset``
- Records the byte offset of binary id sections.
+.. _`Header struct`: https://github.com/llvm/llvm-project/blob/1a2960bab6381f2b288328e2371829b460ac020c/llvm/include/llvm/ProfileData/InstrProf.h#L1053-L1080
-``TemporalProfTracesOffset``
- Records the byte offset of temporal profiles.
Payload Sections
------------------
@@ -428,6 +466,8 @@ This section is right after profile header. It stores the serialized profile
summary. For context-sensitive IR-based instrumentation PGO, this section stores
an additional profile summary corresponding to the context-sensitive profiles.
+.. _`function data`:
+
Function data
^^^^^^^^^^^^^^^^^^
This section stores functions and their profiling data as an on-disk hash table.
@@ -455,6 +495,16 @@ Temporal Profile Traces
The section is used to carry on temporal profile information from raw profiles.
See `temporal profiling`_ for the design.
+Virtual Table Names
+^^^^^^^^^^^^^^^^^^^^
+This section is used to store the names of vtables from raw profile in the indexed
+profile.
+
+Unlike function names which are stored as keys of `function data`_ hash table,
+vtable names need to be stored in a standalone section in indexed profiles.
+This way, `llvm-profdata` could show the profiled vtable information in a
+human-readable way.
+
Profile Data Usage
=======================================
@@ -478,3 +528,4 @@ based profile data. For supported usages, check out `llvm-profdata documentation
.. _`single-byte counters`: https://discourse.llvm.org/t/rfc-single-byte-counters-for-source-based-code-coverage/75685
.. _`binary profile correlation`: https://discourse.llvm.org/t/rfc-add-binary-profile-correlation-to-not-load-profile-metadata-sections-into-memory-at-runtime/74565
.. _`binary id`: https://lists.llvm.org/pipermail/llvm-dev/2021-June/151154.html
+.. _`type profiling`: https://discourse.llvm.org/t/rfc-dynamic-type-profiling-and-optimizations-in-llvm/74600
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 773eb756ed71..1d4ff5238226 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -2571,7 +2571,7 @@ are grouped into a single :ref:`attribute group <attrgrp>`.
``sanitize_memtag``
This attribute indicates that the global variable should have AArch64 memory
tags (MTE) instrumentation applied to it. This attribute causes the
- suppression of certain optimisations, like GlobalMerge, as well as ensuring
+ suppression of certain optimizations, like GlobalMerge, as well as ensuring
extra directives are emitted in the assembly and extra bits of metadata are
placed in the object file so that the linker can ensure the accesses are
protected by MTE. This attribute is added by clang when
@@ -3638,7 +3638,7 @@ floating-point transformations.
``contract``
Allow floating-point contraction (e.g. fusing a multiply followed by an
- addition into a fused multiply-and-add). This does not enable reassociating
+ addition into a fused multiply-and-add). This does not enable reassociation
to form arbitrary contractions. For example, ``(a*b) + (c*d) + e`` can not
be transformed into ``(a*b) + ((c*d) + e)`` to create two fma operations.
@@ -5622,34 +5622,106 @@ occurs on.
Metadata
========
-LLVM IR allows metadata to be attached to instructions and global objects in the
-program that can convey extra information about the code to the optimizers and
-code generator. One example application of metadata is source-level
-debug information. There are two metadata primitives: strings and nodes.
+LLVM IR allows metadata to be attached to instructions and global objects in
+the program that can convey extra information about the code to the optimizers
+and code generator.
-Metadata does not have a type, and is not a value. If referenced from a
-``call`` instruction, it uses the ``metadata`` type.
+There are two metadata primitives: strings and nodes. There are
+also specialized nodes which have a distinguished name and a set of named
+arguments.
+
+.. note::
+
+ One example application of metadata is source-level debug information,
+ which is currently the only user of specialized nodes.
+
+Metadata does not have a type, and is not a value.
+
+A value of non-\ ``metadata`` type can be used in a metadata context using the
+syntax '``<type> <value>``'.
+
+All other metadata is identified in syntax as starting with an exclamation
+point ('``!``').
+
+Metadata may be used in the following value contexts by using the ``metadata``
+type:
+
+- Arguments to certain intrinsic functions, as described in their specification.
+- Arguments to the ``catchpad``/``cleanuppad`` instructions.
+
+.. note::
+
+ Metadata can be "wrapped" in a ``MetadataAsValue`` so it can be referenced
+ in a value context: ``MetadataAsValue`` is-a ``Value``.
+
+ A typed value can be "wrapped" in ``ValueAsMetadata`` so it can be
+ referenced in a metadata context: ``ValueAsMetadata`` is-a ``Metadata``.
+
+ There is no explicit syntax for a ``ValueAsMetadata``, and instead
+ the fact that a type identifier cannot begin with an exclamation point
+ is used to resolve ambiguity.
+
+ A ``metadata`` type implies a ``MetadataAsValue``, and when followed with a
+ '``<type> <value>``' pair it wraps the typed value in a ``ValueAsMetadata``.
+
+ For example, the first argument
+ to this call is a ``MetadataAsValue(ValueAsMetadata(Value))``:
+
+ .. code-block:: llvm
-All metadata are identified in syntax by an exclamation point ('``!``').
+ call void @llvm.foo(metadata i32 1)
+
+ Whereas the first argument to this call is a ``MetadataAsValue(MDNode)``:
+
+ .. code-block:: llvm
+
+ call void @llvm.foo(metadata !0)
+
+ The first element of this ``MDTuple`` is a ``MDNode``:
+
+ .. code-block:: llvm
+
+ !{!0}
+
+ And the first element of this ``MDTuple`` is a ``ValueAsMetadata(Value)``:
+
+ .. code-block:: llvm
+
+ !{i32 1}
.. _metadata-string:
-Metadata Nodes and Metadata Strings
------------------------------------
+Metadata Strings (``MDString``)
+-------------------------------
+
+.. FIXME Either fix all references to "MDString" in the docs, or make that
+ identifier a formal part of the document.
A metadata string is a string surrounded by double quotes. It can
contain any character by escaping non-printable characters with
"``\xx``" where "``xx``" is the two digit hex code. For example:
"``!"test\00"``".
-Metadata nodes are represented with notation similar to structure
-constants (a comma separated list of elements, surrounded by braces and
-preceded by an exclamation point). Metadata nodes can have any values as
+.. note::
+
+ A metadata string is metadata, but is not a metadata node.
+
+.. _metadata-node:
+
+Metadata Nodes (``MDNode``)
+---------------------------
+
+.. FIXME Either fix all references to "MDNode" in the docs, or make that
+ identifier a formal part of the document.
+
+Metadata tuples are represented with notation similar to structure
+constants: a comma separated list of elements, surrounded by braces and
+preceded by an exclamation point. Metadata nodes can have any values as
their operand. For example:
.. code-block:: llvm
- !{ !"test\00", i32 10}
+ !{!"test\00", i32 10}
Metadata nodes that aren't uniqued use the ``distinct`` keyword. For example:
@@ -5676,6 +5748,12 @@ intrinsic is using three metadata arguments:
call void @llvm.dbg.value(metadata !24, metadata !25, metadata !26)
+
+.. FIXME Attachments cannot be ValueAsMetadata, but we don't have a
+ particularly clear way to refer to ValueAsMetadata without getting into
+ implementation details. Ideally the restriction would be explicit somewhere,
+ though?
+
Metadata can be attached to an instruction. Here metadata ``!21`` is attached
to the ``add`` instruction using the ``!dbg`` identifier:
@@ -6309,7 +6387,7 @@ valid debug intrinsic.
!5 = !DIExpression(DW_OP_constu, 42, DW_OP_stack_value)
DIAssignID
-""""""""""""
+""""""""""
``DIAssignID`` nodes have no operands and are always distinct. They are used to
link together `@llvm.dbg.assign` intrinsics (:ref:`debug
@@ -6324,7 +6402,13 @@ Assignment Tracking <AssignmentTracking.html>`_ for more info.
!2 = distinct !DIAssignID()
DIArgList
-""""""""""""
+"""""""""
+
+.. FIXME In the implementation this is not a "node", but as it can only appear
+ inline in a function context that distinction isn't observable anyway. Even
+ if it is not required, it would be nice to be more clear about what is a
+ "node", and what that actually means. The names in the implementation could
+ also be updated to mirror whatever we decide here.
``DIArgList`` nodes hold a list of constant or SSA value references. These are
used in :ref:`debug intrinsics<dbg_intrinsics>` (currently only in
@@ -6340,7 +6424,7 @@ inlined, and cannot appear in named metadata.
metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus))
DIFlags
-"""""""""""""""
+"""""""
These flags encode various properties of DINodes.
@@ -6416,6 +6500,46 @@ within the file where the label is declared.
!2 = !DILabel(scope: !0, name: "foo", file: !1, line: 7)
+DICommonBlock
+"""""""""""""
+
+``DICommonBlock`` nodes represent Fortran common blocks. The ``scope:`` field
+is mandatory and points to a :ref:`DILexicalBlockFile`, a
+:ref:`DILexicalBlock`, or a :ref:`DISubprogram`. The ``declaration:``,
+``name:``, ``file:``, and ``line:`` fields are optional.
+
+DIModule
+""""""""
+
+``DIModule`` nodes represent a source language module, for example, a Clang
+module, or a Fortran module. The ``scope:`` field is mandatory and points to a
+:ref:`DILexicalBlockFile`, a :ref:`DILexicalBlock`, or a :ref:`DISubprogram`.
+The ``name:`` field is mandatory. The ``configMacros:``, ``includePath:``,
+``apinotes:``, ``file:``, ``line:``, and ``isDecl:`` fields are optional.
+
+DIStringType
+""""""""""""
+
+``DIStringType`` nodes represent a Fortran ``CHARACTER(n)`` type, with a
+dynamic length and location encoded as an expression.
+The ``tag:`` field is optional and defaults to ``DW_TAG_string_type``. The ``name:``,
+``stringLength:``, ``stringLengthExpression:``, ``stringLocationExpression:``,
+``size:``, ``align:``, and ``encoding:`` fields are optional.
+
+If not present, the ``size:`` and ``align:`` fields default to the value zero.
+
+The length in bits of the string is specified by the first of the following
+fields present:
+
+- ``stringLength:``, which points to a ``DIVariable`` whose value is the string
+ length in bits.
+- ``stringLengthExpression:``, which points to a ``DIExpression`` which
+ computes the length in bits.
+- ``size:``, which contains the literal length in bits.
+
+The ``stringLocationExpression:`` points to a ``DIExpression`` which describes
+the "data location" of the string object, if present.
+
'``tbaa``' Metadata
^^^^^^^^^^^^^^^^^^^
@@ -7365,7 +7489,7 @@ matches the ``llvm.loop.parallel_accesses`` list.
If all memory-accessing instructions in a loop have
``llvm.access.group`` metadata that each refer to one of the access
groups of a loop's ``llvm.loop.parallel_accesses`` metadata, then the
-loop has no loop carried memory dependences and is considered to be a
+loop has no loop carried memory dependencies and is considered to be a
parallel loop.
Note that if not all memory access instructions belong to an access
@@ -11295,6 +11419,9 @@ Syntax:
::
<result> = trunc <ty> <value> to <ty2> ; yields ty2
+ <result> = trunc nsw <ty> <value> to <ty2> ; yields ty2
+ <result> = trunc nuw <ty> <value> to <ty2> ; yields ty2
+ <result> = trunc nuw nsw <ty> <value> to <ty2> ; yields ty2
Overview:
"""""""""
@@ -11318,6 +11445,11 @@ and converts the remaining bits to ``ty2``. Since the source size must
be larger than the destination size, ``trunc`` cannot be a *no-op cast*.
It will always truncate bits.
+If the ``nuw`` keyword is present, and any of the truncated bits are zero,
+the result is a :ref:`poison value <poisonvalues>`. If the ``nsw`` keyword
+is present, and any of the truncated bits are not the same as the top bit
+of the truncation result, the result is a :ref:`poison value <poisonvalues>`.
+
Example:
""""""""
@@ -12815,10 +12947,11 @@ Variable argument support is defined in LLVM with the
functions. These functions are related to the similarly named macros
defined in the ``<stdarg.h>`` header file.
-All of these functions operate on arguments that use a target-specific
+All of these functions take as arguments pointers to a target-specific
value type "``va_list``". The LLVM assembly language reference manual
does not define what this type is, so all transformations should be
-prepared to handle these functions regardless of the type used.
+prepared to handle these functions regardless of the type used. The intrinsics
+are overloaded, and can be used for pointers to different address spaces.
This example shows how the :ref:`va_arg <i_va_arg>` instruction and the
variable argument handling intrinsic functions are used.
@@ -12835,24 +12968,24 @@ variable argument handling intrinsic functions are used.
define i32 @test(i32 %X, ...) {
; Initialize variable argument processing
%ap = alloca %struct.va_list
- call void @llvm.va_start(ptr %ap)
+ call void @llvm.va_start.p0(ptr %ap)
; Read a single integer argument
%tmp = va_arg ptr %ap, i32
; Demonstrate usage of llvm.va_copy and llvm.va_end
%aq = alloca ptr
- call void @llvm.va_copy(ptr %aq, ptr %ap)
- call void @llvm.va_end(ptr %aq)
+ call void @llvm.va_copy.p0(ptr %aq, ptr %ap)
+ call void @llvm.va_end.p0(ptr %aq)
; Stop processing of arguments.
- call void @llvm.va_end(ptr %ap)
+ call void @llvm.va_end.p0(ptr %ap)
ret i32 %tmp
}
- declare void @llvm.va_start(ptr)
- declare void @llvm.va_copy(ptr, ptr)
- declare void @llvm.va_end(ptr)
+ declare void @llvm.va_start.p0(ptr)
+ declare void @llvm.va_copy.p0(ptr, ptr)
+ declare void @llvm.va_end.p0(ptr)
.. _int_va_start:
@@ -12864,7 +12997,8 @@ Syntax:
::
- declare void @llvm.va_start(ptr <arglist>)
+ declare void @llvm.va_start.p0(ptr <arglist>)
+ declare void @llvm.va_start.p5(ptr addrspace(5) <arglist>)
Overview:
"""""""""
@@ -12896,7 +13030,8 @@ Syntax:
::
- declare void @llvm.va_end(ptr <arglist>)
+ declare void @llvm.va_end.p0(ptr <arglist>)
+ declare void @llvm.va_end.p5(ptr addrspace(5) <arglist>)
Overview:
"""""""""
@@ -12929,7 +13064,8 @@ Syntax:
::
- declare void @llvm.va_copy(ptr <destarglist>, ptr <srcarglist>)
+ declare void @llvm.va_copy.p0(ptr <destarglist>, ptr <srcarglist>)
+ declare void @llvm.va_copy.p5(ptr addrspace(5) <destarglist>, ptr addrspace(5) <srcarglist>)
Overview:
"""""""""
@@ -12942,6 +13078,7 @@ Arguments:
The first argument is a pointer to a ``va_list`` element to initialize.
The second argument is a pointer to a ``va_list`` element to copy from.
+The address spaces of the two arguments must match.
Semantics:
""""""""""
@@ -13577,12 +13714,12 @@ Overview:
"""""""""
The '``llvm.seh.try.begin``' and '``llvm.seh.try.end``' intrinsics mark
-the boundary of a _try region for Windows SEH Asynchrous Exception Handling.
+the boundary of a _try region for Windows SEH Asynchronous Exception Handling.
Semantics:
""""""""""
-When a C-function is compiled with Windows SEH Asynchrous Exception option,
+When a C-function is compiled with Windows SEH Asynchronous Exception option,
-feh_asynch (aka MSVC -EHa), these two intrinsics are injected to mark _try
boundary and to prevent potential exceptions from being moved across boundary.
Any set of operations can then be confined to the region by reading their leaf
@@ -13603,7 +13740,7 @@ Overview:
"""""""""
The '``llvm.seh.scope.begin``' and '``llvm.seh.scope.end``' intrinsics mark
-the boundary of a CPP object lifetime for Windows SEH Asynchrous Exception
+the boundary of a CPP object lifetime for Windows SEH Asynchronous Exception
Handling (MSVC option -EHa).
Semantics:
@@ -17713,7 +17850,7 @@ Examples
%res = call i4 @llvm.udiv.fix.sat.i4(i4 8, i4 2, i32 2) ; %res = 15 (2 / 0.5 = 4 => 3.75)
-Specialised Arithmetic Intrinsics
+Specialized Arithmetic Intrinsics
---------------------------------
.. _i_intr_llvm_canonicalize:
@@ -18037,9 +18174,9 @@ The '``llvm.loop.decrement.reg.*``' intrinsics do an integer ``SUB`` of its
two operands, which is not allowed to wrap. They return the remaining number of
iterations still to be executed, and can be used together with a ``PHI``,
``ICMP`` and ``BR`` to control the number of loop iterations executed. Any
-optimisations are allowed to treat it is a ``SUB``, and it is supported by
+optimizations are allowed to treat it as a ``SUB``, and it is supported by
SCEV, so it's the backends responsibility to handle cases where it may be
-optimised. These intrinsics are marked as ``IntrNoDuplicate`` to avoid
+optimized. These intrinsics are marked as ``IntrNoDuplicate`` to avoid
optimizers duplicating these instructions.
@@ -18741,7 +18878,7 @@ Arguments:
The first argument is the vector to be counted. This argument must be a vector
with integer element type. The return type must also be an integer type which is
wide enough to hold the maximum number of elements of the source vector. The
-behaviour of this intrinsic is undefined if the return type is not wide enough
+behavior of this intrinsic is undefined if the return type is not wide enough
for the number of elements in the input vector.
The second argument is a constant flag that indicates whether the intrinsic
@@ -22026,7 +22163,7 @@ and ``evl2`` are unsigned integers indicating the explicit vector lengths of
``vec1`` and ``vec2`` respectively. ``imm``, ``evl1`` and ``evl2`` should
respect the following constraints: ``-evl1 <= imm < evl1``, ``0 <= evl1 <= VL``
and ``0 <= evl2 <= VL``, where ``VL`` is the runtime vector factor. If these
-constraints are not satisfied the intrinsic has undefined behaviour.
+constraints are not satisfied the intrinsic has undefined behavior.
Semantics:
""""""""""
@@ -24442,7 +24579,7 @@ operand. The pointer alignment defaults to 1.
Semantics:
""""""""""
-The '``llvm.masked.compressstore``' intrinsic is designed for compressing data in memory. It allows to collect elements from possibly non-adjacent lanes of a vector and store them contiguously in memory in one IR operation. It is useful for targets that support compressing store operations and allows vectorizing loops with cross-iteration dependences like in the following example:
+The '``llvm.masked.compressstore``' intrinsic is designed for compressing data in memory. It allows to collect elements from possibly non-adjacent lanes of a vector and store them contiguously in memory in one IR operation. It is useful for targets that support compressing store operations and allows vectorizing loops with cross-iteration dependencies like in the following example:
.. code-block:: c
@@ -26584,7 +26721,7 @@ Semantics:
The '``llvm.set.fpenv``' intrinsic sets the current floating-point environment
to the state specified by the argument. The state may be previously obtained by a
-call to '``llvm.get.fpenv``' or synthesised in a platform-dependent way.
+call to '``llvm.get.fpenv``' or synthesized in a platform-dependent way.
'``llvm.reset.fpenv``' Intrinsic
@@ -26926,6 +27063,8 @@ Arguments:
The argument should be an MDTuple containing any number of MDStrings.
+.. _llvm.trap:
+
'``llvm.trap``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -26953,6 +27092,8 @@ This intrinsic is lowered to the target dependent trap instruction. If
the target does not have a trap instruction, this intrinsic will be
lowered to a call of the ``abort()`` function.
+.. _llvm.debugtrap:
+
'``llvm.debugtrap``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -26980,6 +27121,8 @@ This intrinsic is lowered to code which is intended to cause an
execution trap with the intention of requesting the attention of a
debugger.
+.. _llvm.ubsantrap:
+
'``llvm.ubsantrap``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -27007,7 +27150,7 @@ This intrinsic is lowered to code which is intended to cause an execution trap,
embedding the argument into encoding of that trap somehow to discriminate
crashes if possible.
-Equivalent to ``@llvm.trap`` for targets that do not support this behaviour.
+Equivalent to ``@llvm.trap`` for targets that do not support this behavior.
'``llvm.stackprotector``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -27731,12 +27874,11 @@ The intrinsic ``@llvm.allow.ubsan.check()`` returns either ``true`` or
For each evaluation of a call to this intrinsic, the program must be valid and
correct both if it returns ``true`` and if it returns ``false``.
-When used in a branch condition, it allows us to choose between
-two alternative correct solutions for the same problem.
+When used in a branch condition, it selects one of the two paths:
-If the intrinsic is evaluated as ``true``, program must check ubsan condition,
-and report if needed. If the intrinsic is evaluated as ``false``, program must
-avoid checking ubsan condition and assume it passed.
+* ``true``: Executes the UBSan check and reports any failures.
+
+* ``false``: Bypasses the check, assuming it always succeeds.
Example:
@@ -27785,7 +27927,7 @@ string can be used to control rules to allow checks.
Semantics:
""""""""""
-The intrinsic ``@llvm.allow.ubsan.check()`` returns either ``true`` or
+The intrinsic ``@llvm.allow.runtime.check()`` returns either ``true`` or
``false``, depending on compiler options.
For each evaluation of a call to this intrinsic, the program must be valid and
@@ -27795,7 +27937,7 @@ When used in a branch condition, it allows us to choose between
two alternative correct solutions for the same problem.
If the intrinsic is evaluated as ``true``, program should execute a guarded
-checks. If the intrinsic is evaluated as ``false``, the program should avoid any
+check. If the intrinsic is evaluated as ``false``, the program should avoid any
unnecessary checks.
Example:
@@ -28063,7 +28205,7 @@ Arguments:
The first three arguments are the same as they are in the :ref:`@llvm.memcpy <int_memcpy>`
intrinsic, with the added constraint that ``len`` is required to be a positive integer
multiple of the ``element_size``. If ``len`` is not a positive integer multiple of
-``element_size``, then the behaviour of the intrinsic is undefined.
+``element_size``, then the behavior of the intrinsic is undefined.
``element_size`` must be a compile-time constant positive power of two no greater than
target-specific atomic access size limit.
@@ -28139,7 +28281,7 @@ The first three arguments are the same as they are in the
:ref:`@llvm.memmove <int_memmove>` intrinsic, with the added constraint that
``len`` is required to be a positive integer multiple of the ``element_size``.
If ``len`` is not a positive integer multiple of ``element_size``, then the
-behaviour of the intrinsic is undefined.
+behavior of the intrinsic is undefined.
``element_size`` must be a compile-time constant positive power of two no
greater than a target-specific atomic access size limit.
@@ -28218,7 +28360,7 @@ Arguments:
The first three arguments are the same as they are in the :ref:`@llvm.memset <int_memset>`
intrinsic, with the added constraint that ``len`` is required to be a positive integer
multiple of the ``element_size``. If ``len`` is not a positive integer multiple of
-``element_size``, then the behaviour of the intrinsic is undefined.
+``element_size``, then the behavior of the intrinsic is undefined.
``element_size`` must be a compile-time constant positive power of two no greater than
target-specific atomic access size limit.
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 03691efe836f..7588048334d7 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -76,6 +76,7 @@ Changes to the AMDGPU Backend
Changes to the ARM Backend
--------------------------
+* FEAT_F32MM is no longer activated by default when using `+sve` on v8.6-A or greater. The feature is still available and can be used by adding `+f32mm` to the command line options.
Changes to the AVR Backend
--------------------------
@@ -146,6 +147,8 @@ Changes to the C API
* ``LLVMGetPrologueData``
* ``LLVMSetPrologueData``
+* Deprecated ``LLVMConstNUWNeg`` and ``LLVMBuildNUWNeg``.
+
Changes to the CodeGen infrastructure
-------------------------------------
@@ -175,6 +178,15 @@ Changes to the LLVM tools
``--set-symbols-visibility`` options for ELF input to change the
visibility of symbols.
+* llvm-objcopy now supports ``--skip-symbol`` and ``--skip-symbols`` options
+ for ELF input to skip the specified symbols when executing other options
+ that can change a symbol's name, binding or visibility.
+
+* llvm-profgen now supports COFF+DWARF binaries. This enables Sample-based PGO
+ on Windows using Intel VTune's SEP. For details on usage, see the `end-user
+ documentation for SPGO
+ <https://clang.llvm.org/docs/UsersManual.html#using-sampling-profilers>`_.
+
Changes to LLDB
---------------------------------
diff --git a/llvm/docs/RemoveDIsDebugInfo.md b/llvm/docs/RemoveDIsDebugInfo.md
index a2f1e173d9d9..9e50a2a604aa 100644
--- a/llvm/docs/RemoveDIsDebugInfo.md
+++ b/llvm/docs/RemoveDIsDebugInfo.md
@@ -40,15 +40,22 @@ New functions (all to be deprecated)
LLVMIsNewDbgInfoFormat # Returns true if the module is in the new non-instruction mode.
LLVMSetIsNewDbgInfoFormat # Convert to the requested debug info format.
-LLVMDIBuilderInsertDeclareIntrinsicBefore # Insert a debug intrinsic (old debug info format).
+LLVMDIBuilderInsertDeclareIntrinsicBefore # Insert a debug intrinsic (old debug info format).
LLVMDIBuilderInsertDeclareIntrinsicAtEnd # Same as above.
LLVMDIBuilderInsertDbgValueIntrinsicBefore # Same as above.
LLVMDIBuilderInsertDbgValueIntrinsicAtEnd # Same as above.
-LLVMDIBuilderInsertDeclareRecordBefore # Insert a debug record (new debug info format).
+LLVMDIBuilderInsertDeclareRecordBefore # Insert a debug record (new debug info format).
LLVMDIBuilderInsertDeclareRecordAtEnd # Same as above.
LLVMDIBuilderInsertDbgValueRecordBefore # Same as above.
LLVMDIBuilderInsertDbgValueRecordAtEnd # Same as above.
+
+Existing functions (behaviour change)
+-------------------------------------
+LLVMDIBuilderInsertDeclareBefore # Insert a debug record (new debug info format) instead of a debug intrinsic (old debug info format).
+LLVMDIBuilderInsertDeclareAtEnd # Same as above.
+LLVMDIBuilderInsertDbgValueBefore # Same as above.
+LLVMDIBuilderInsertDbgValueAtEnd # Same as above.
```
# Anything else?
diff --git a/llvm/docs/SPIRVUsage.rst b/llvm/docs/SPIRVUsage.rst
index 5ca035b68392..a183877c48cb 100644
--- a/llvm/docs/SPIRVUsage.rst
+++ b/llvm/docs/SPIRVUsage.rst
@@ -14,10 +14,43 @@ Introduction
The SPIR-V target provides code generation for the SPIR-V binary format described
in `the official SPIR-V specification <https://www.khronos.org/registry/SPIR-V/>`_.
+Usage
+=====
+
+The SPIR-V backend can be invoked either from LLVM's Static Compiler (llc) or Clang,
+allowing developers to compile LLVM intermediate language (IL) files or OpenCL kernel
+sources directly to SPIR-V. This section outlines the usage of various commands to
+leverage the SPIR-V backend for different purposes.
+
+Static Compiler Commands
+------------------------
+
+1. **Basic SPIR-V Compilation**
+ Command: `llc -mtriple=spirv32-unknown-unknown input.ll -o output.spvt`
+ Description: This command compiles an LLVM IL file (`input.ll`) to a SPIR-V binary (`output.spvt`) for a 32-bit architecture.
+
+2. **Compilation with Extensions and Optimization**
+ Command: `llc -O1 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers input.ll -o output.spvt`
+ Description: Compiles an LLVM IL file to SPIR-V with (`-O1`) optimizations, targeting a 64-bit architecture. It enables the SPV_INTEL_arbitrary_precision_integers extension.
+
+3. **SPIR-V Binary Generation**
+ Command: `llc -O0 -mtriple=spirv64-unknown-unknown -filetype=obj input.ll -o output.spvt`
+ Description: Generates a SPIR-V object file (`output.spvt`) from an LLVM module, targeting a 64-bit SPIR-V architecture with no optimizations.
+
+Clang Commands
+--------------
+
+1. **SPIR-V Generation**
+   Command: `clang --target=spirv64 input.cl`
+ Description: Generates a SPIR-V file directly from an OpenCL kernel source file (`input.cl`).
+
+Compiler Options
+================
+
.. _spirv-target-triples:
Target Triples
-==============
+--------------
For cross-compilation into SPIR-V use option
@@ -32,6 +65,7 @@ to specify the target triple:
============ ==============================================================
``spirv32`` SPIR-V with 32-bit pointer width.
``spirv64`` SPIR-V with 64-bit pointer width.
+ ``spirv`` SPIR-V with logical memory layout.
============ ==============================================================
.. table:: SPIR-V Subarchitectures
@@ -39,13 +73,14 @@ to specify the target triple:
=============== ==============================================================
Subarchitecture Description
=============== ==============================================================
- *<empty>* SPIR-V version deduced by tools based on the compiled input.
+ *<empty>* SPIR-V version deduced by backend based on the input.
``v1.0`` SPIR-V version 1.0.
``v1.1`` SPIR-V version 1.1.
``v1.2`` SPIR-V version 1.2.
``v1.3`` SPIR-V version 1.3.
``v1.4`` SPIR-V version 1.4.
``v1.5`` SPIR-V version 1.5.
+ ``v1.6`` SPIR-V version 1.6.
=============== ==============================================================
.. table:: SPIR-V Vendors
@@ -62,6 +97,9 @@ to specify the target triple:
OS Description
===================== ============================================================
*<empty>*/``unknown`` Defaults to the OpenCL runtime.
+ ``vulkan`` Vulkan shader runtime.
+ ``vulkan1.2`` Vulkan 1.2 runtime, corresponding to SPIR-V 1.5.
+ ``vulkan1.3`` Vulkan 1.3 runtime, corresponding to SPIR-V 1.6.
===================== ============================================================
.. table:: SPIR-V Environments
@@ -69,20 +107,95 @@ to specify the target triple:
===================== ==============================================================
Environment Description
===================== ==============================================================
- *<empty>*/``unknown`` Defaults to the OpenCL environment.
+ *<empty>*/``unknown`` OpenCL environment or deduced by backend based on the input.
===================== ==============================================================
Example:
``-target spirv64v1.0`` can be used to compile for SPIR-V version 1.0 with 64-bit pointer width.
-.. _spirv-types:
+.. _spirv-extensions:
+
+Extensions
+----------
+
+The SPIR-V backend supports a variety of `extensions <https://github.com/KhronosGroup/SPIRV-Registry/tree/main/extensions>`_
+that enable or enhance features beyond the core SPIR-V specification.
+These extensions can be enabled using the ``-spirv-ext`` option
+followed by the name of the extension(s) you wish to enable. Below is a
+list of supported SPIR-V extensions, sorted alphabetically by their extension names:
+
+.. list-table:: Supported SPIR-V Extensions
+ :widths: 50 150
+ :header-rows: 1
+
+ * - Extension Name
+ - Description
+ * - ``SPV_EXT_shader_atomic_float16_add``
+ - Extends the SPV_EXT_shader_atomic_float_add extension to support atomically adding to 16-bit floating-point numbers in memory.
+ * - ``SPV_EXT_shader_atomic_float_add``
+ - Adds atomic add instruction on floating-point numbers.
+ * - ``SPV_EXT_shader_atomic_float_min_max``
+ - Adds atomic min and max instruction on floating-point numbers.
+ * - ``SPV_INTEL_arbitrary_precision_integers``
+ - Allows generating arbitrary width integer types.
+ * - ``SPV_INTEL_bfloat16_conversion``
+ - Adds instructions to convert between single-precision 32-bit floating-point values and 16-bit bfloat16 values.
+ * - ``SPV_INTEL_function_pointers``
+ - Allows translation of function pointers.
+ * - ``SPV_INTEL_optnone``
+ - Adds OptNoneINTEL value for Function Control mask that indicates a request to not optimize the function.
+ * - ``SPV_INTEL_subgroups``
+ - Allows work items in a subgroup to share data without the use of local memory and work group barriers, and to utilize specialized hardware to load and store blocks of data from images or buffers.
+ * - ``SPV_INTEL_usm_storage_classes``
+ - Introduces two new storage classes that are subclasses of the CrossWorkgroup storage class that provides additional information that can enable optimization.
+ * - ``SPV_INTEL_variable_length_array``
+ - Allows to allocate local arrays whose number of elements is unknown at compile time.
+ * - ``SPV_KHR_bit_instructions``
+ - Enables bit instructions to be used by SPIR-V modules without requiring the Shader capability.
+ * - ``SPV_KHR_expect_assume``
+ - Provides additional information to a compiler, similar to the llvm.assume and llvm.expect intrinsics.
+ * - ``SPV_KHR_float_controls``
+ - Provides new execution modes to control floating-point computations by overriding an implementation’s default behavior for rounding modes, denormals, signed zero, and infinities.
+ * - ``SPV_KHR_linkonce_odr``
+ - Allows to use the LinkOnceODR linkage type that lets a function or global variable to be merged with other functions or global variables of the same name when linkage occurs.
+ * - ``SPV_KHR_no_integer_wrap_decoration``
+ - Adds decorations to indicate that a given instruction does not cause integer wrapping.
+ * - ``SPV_KHR_subgroup_rotate``
+ - Adds a new instruction that enables rotating values across invocations within a subgroup.
+ * - ``SPV_KHR_uniform_group_instructions``
+ - Allows support for additional group operations within uniform control flow.
-Representing special types in SPIR-V
-====================================
+To enable multiple extensions, list them separated by commas. For example, to enable support for atomic operations on floating-point numbers and arbitrary precision integers, use:
+
+``-spirv-ext=+SPV_EXT_shader_atomic_float_add,+SPV_INTEL_arbitrary_precision_integers``
+
+To enable all extensions, use the following option:
+``-spirv-ext=all``
+
+To enable all extensions except specified, specify ``all`` followed by a list of disallowed extensions. For example:
+``-spirv-ext=all,-SPV_INTEL_arbitrary_precision_integers``
+
+SPIR-V representation in LLVM IR
+================================
+
+SPIR-V is intentionally designed for seamless integration with various Intermediate
+Representations (IRs), including LLVM IR, facilitating straightforward mappings for
+most of its entities. The development of the SPIR-V backend has been guided by a
+principle of compatibility with the `Khronos Group SPIR-V LLVM Translator <https://github.com/KhronosGroup/SPIRV-LLVM-Translator>`_.
+Consequently, the input representation accepted by the SPIR-V backend aligns closely
+with that detailed in `the SPIR-V Representation in LLVM document <https://github.com/KhronosGroup/SPIRV-LLVM-Translator/blob/main/docs/SPIRVRepresentationInLLVM.rst>`_.
+This document, along with the sections that follow, delineate the main points and focus
+on any differences between the LLVM IR that this backend processes and the conventions
+used by other tools.
+
+.. _spirv-special-types:
+
+Special types
+-------------
SPIR-V specifies several kinds of opaque types. These types are represented
-using target extension types. These types are represented as follows:
+using target extension types and are represented as follows:
.. table:: SPIR-V Opaque Types
@@ -108,3 +221,210 @@ dimensionality parameter as ``1`` meaning 2D. Sampled image types include the
parameters of its underlying image type, so that a sampled image for the
previous type has the representation
``target("spirv.SampledImage, void, 1, 1, 0, 0, 0, 0, 0)``.
+
+.. _spirv-intrinsics:
+
+Target Intrinsics
+-----------------
+
+The SPIR-V backend employs several LLVM IR intrinsics that facilitate various low-level
+operations essential for generating correct and efficient SPIR-V code. These intrinsics
+cover a range of functionalities from type assignment and memory management to control
+flow and atomic operations. Below is a detailed table of selected intrinsics used in the
+SPIR-V backend, along with their descriptions and argument details.
+
+.. list-table:: LLVM IR Intrinsics for SPIR-V
+ :widths: 25 15 20 40
+ :header-rows: 1
+
+ * - Intrinsic ID
+ - Return Type
+ - Argument Types
+ - Description
+ * - `int_spv_assign_type`
+ - None
+ - `[Type, Metadata]`
+ - Associates a type with metadata, crucial for maintaining type information in SPIR-V structures. Not emitted directly but supports the type system internally.
+ * - `int_spv_assign_ptr_type`
+ - None
+ - `[Type, Metadata, Integer]`
+ - Similar to `int_spv_assign_type`, but for pointer types with an additional integer specifying the storage class. Supports SPIR-V's detailed pointer type system. Not emitted directly.
+ * - `int_spv_assign_name`
+ - None
+ - `[Type, Vararg]`
+ - Assigns names to types or values, enhancing readability and debuggability of SPIR-V code. Not emitted directly but used for metadata enrichment.
+ * - `int_spv_track_constant`
+ - Type
+ - `[Type, Metadata]`
+ - Tracks constants in the SPIR-V module. Essential for optimizing and reducing redundancy. Emitted for internal use only.
+ * - `int_spv_init_global`
+ - None
+ - `[Type, Type]`
+ - Initializes global variables, a necessary step for ensuring correct global state management in SPIR-V. Emitted for internal use only.
+ * - `int_spv_unref_global`
+ - None
+ - `[Type]`
+ - Manages the lifetime of global variables by marking them as unreferenced, thus enabling optimizations related to global variable usage. Emitted for internal use only.
+ * - `int_spv_gep`
+ - Pointer
+ - `[Boolean, Type, Vararg]`
+ - Computes the address of a sub-element of an aggregate type. Critical for accessing array elements and structure fields. Supports conditionally addressing elements in a generic way.
+ * - `int_spv_load`
+ - 32-bit Integer
+ - `[Pointer, 16-bit Integer, 8-bit Integer]`
+ - Loads a value from a memory location. The additional integers specify memory access and alignment details, vital for ensuring correct and efficient memory operations.
+ * - `int_spv_store`
+ - None
+ - `[Type, Pointer, 16-bit Integer, 8-bit Integer]`
+ - Stores a value to a memory location. Like `int_spv_load`, it includes specifications for memory access and alignment, essential for memory operations.
+ * - `int_spv_extractv`
+ - Type
+ - `[32-bit Integer, Vararg]`
+ - Extracts a value from a vector, allowing for vector operations within SPIR-V. Enables manipulation of vector components.
+ * - `int_spv_insertv`
+ - 32-bit Integer
+ - `[32-bit Integer, Type, Vararg]`
+ - Inserts a value into a vector. Complementary to `int_spv_extractv`, it facilitates the construction and manipulation of vectors.
+ * - `int_spv_extractelt`
+ - Type
+ - `[Type, Any Integer]`
+ - Extracts an element from an aggregate type based on an index. Essential for operations on arrays and vectors.
+ * - `int_spv_insertelt`
+ - Type
+ - `[Type, Type, Any Integer]`
+ - Inserts an element into an aggregate type at a specified index. Allows for building and modifying arrays and vectors.
+ * - `int_spv_const_composite`
+ - 32-bit Integer
+ - `[Vararg]`
+ - Constructs a composite type from given elements. Key for creating arrays, structs, and vectors from individual components.
+ * - `int_spv_bitcast`
+ - Type
+ - `[Type]`
+ - Performs a bit-wise cast between types. Critical for type conversions that do not change the bit representation.
+ * - `int_spv_ptrcast`
+ - Type
+ - `[Type, Metadata, Integer]`
+ - Casts pointers between different types. Similar to `int_spv_bitcast` but specifically for pointers, taking into account SPIR-V's strict type system.
+ * - `int_spv_switch`
+ - None
+ - `[Type, Vararg]`
+ - Implements a multi-way branch based on a value. Enables complex control flow structures, similar to the switch statement in high-level languages.
+ * - `int_spv_cmpxchg`
+ - 32-bit Integer
+ - `[Type, Vararg]`
+ - Performs an atomic compare-and-exchange operation. Crucial for synchronization and concurrency control in compute shaders.
+ * - `int_spv_unreachable`
+ - None
+ - `[]`
+ - Marks a point in the code that should never be reached, enabling optimizations by indicating unreachable code paths.
+ * - `int_spv_alloca`
+ - Type
+ - `[]`
+ - Allocates memory on the stack. Fundamental for local variable storage in functions.
+ * - `int_spv_alloca_array`
+ - Type
+ - `[Any Integer]`
+ - Allocates an array on the stack. Extends `int_spv_alloca` to support array allocations, essential for temporary arrays.
+ * - `int_spv_undef`
+ - 32-bit Integer
+ - `[]`
+ - Generates an undefined value. Useful for optimizations and indicating uninitialized variables.
+ * - `int_spv_assume`
+ - None
+ - `[1-bit Integer]`
+ - Provides hints to the optimizer about assumptions that can be made about program state. Improves optimization potential.
+ * - `int_spv_expect`
+ - Any Integer Type
+ - `[Type, Type]`
+ - Guides branch prediction by indicating expected branch paths. Enhances performance by optimizing common code paths.
+ * - `int_spv_thread_id`
+ - 32-bit Integer
+ - `[32-bit Integer]`
+ - Retrieves the thread ID within a workgroup. Essential for identifying execution context in parallel compute operations.
+ * - `int_spv_create_handle`
+ - Pointer
+ - `[8-bit Integer]`
+ - Creates a resource handle for graphics or compute resources. Facilitates the management and use of resources in shaders.
+
+.. _spirv-builtin-functions:
+
+Builtin Functions
+-----------------
+
+The following section highlights the representation of SPIR-V builtins in LLVM IR,
+emphasizing builtins that do not have direct counterparts in LLVM.
+
+Instructions as Function Calls
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SPIR-V builtins without direct LLVM counterparts are represented as LLVM function calls.
+These functions, termed SPIR-V builtin functions, follow an IA64 mangling scheme with
+SPIR-V-specific extensions. Parsing non-mangled calls to builtins is supported in some cases,
+but not tested extensively. The general format is:
+
+.. code-block:: c
+
+ __spirv_{OpCodeName}{_OptionalPostfixes}
+
+Where `{OpCodeName}` is the SPIR-V opcode name sans the "Op" prefix, and
+`{OptionalPostfixes}` are decoration-specific postfixes, if any. The mangling and
+postfixes allow for the representation of SPIR-V's rich instruction set within LLVM's
+framework.
+
+Extended Instruction Sets
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SPIR-V defines several extended instruction sets for additional functionalities, such as
+OpenCL-specific operations. In LLVM IR, these are represented by function calls to
+mangled builtins and selected based on the environment. For example:
+
+.. code-block:: c
+
+ acos_f32
+
+represents the `acos` function from the OpenCL extended instruction set for a float32
+input.
+
+Builtin Variables
+~~~~~~~~~~~~~~~~~
+
+SPIR-V builtin variables, which provide access to special hardware or execution model
+properties, are mapped to either LLVM function calls or LLVM global variables. The
+representation follows the naming convention:
+
+.. code-block:: c
+
+ __spirv_BuiltIn{VariableName}
+
+For instance, the SPIR-V builtin `GlobalInvocationId` is accessible in LLVM IR as
+`__spirv_BuiltInGlobalInvocationId`.
+
+Vector Load and Store Builtins
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SPIR-V's capabilities for loading and storing vectors are represented in LLVM IR using
+functions that mimic the SPIR-V instructions. These builtins handle cases that LLVM's
+native instructions do not directly support, enabling fine-grained control over memory
+operations.
+
+Atomic Operations
+~~~~~~~~~~~~~~~~~
+
+SPIR-V's atomic operations, especially those operating on floating-point data, are
+represented in LLVM IR with corresponding function calls. These builtins ensure
+atomicity in operations where LLVM might not have direct support, essential for parallel
+execution and synchronization.
+
+Image Operations
+~~~~~~~~~~~~~~~~
+
+SPIR-V provides extensive support for image and sampler operations, which LLVM
+represents through function calls to builtins. These include image reads, writes, and
+queries, allowing detailed manipulation of image data and parameters.
+
+Group and Subgroup Operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For workgroup and subgroup operations, LLVM uses function calls to represent SPIR-V's
+group-based instructions. These builtins facilitate group synchronization, data sharing,
+and collective operations essential for efficient parallel computation.
diff --git a/llvm/docs/UserGuides.rst b/llvm/docs/UserGuides.rst
index 155cb3361669..f40a04d414a2 100644
--- a/llvm/docs/UserGuides.rst
+++ b/llvm/docs/UserGuides.rst
@@ -43,6 +43,7 @@ intermediate LLVM representation.
HowToCrossCompileBuiltinsOnArm
HowToCrossCompileLLVM
HowToUpdateDebugInfo
+ InstCombineContributorGuide
InstrProfileFormat
InstrRefDebugInfo
LinkTimeOptimization
@@ -186,6 +187,10 @@ Optimizations
:doc:`InstrProfileFormat`
This document explains two binary formats of instrumentation-based profiles.
+:doc:`InstCombineContributorGuide`
+ This document specifies guidelines for contributions for InstCombine and
+ related passes.
+
Code Generation
---------------
diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h
index f56a6c961aad..6be5957ce610 100644
--- a/llvm/include/llvm-c/Core.h
+++ b/llvm/include/llvm-c/Core.h
@@ -1867,6 +1867,14 @@ void LLVMDumpValue(LLVMValueRef Val);
char *LLVMPrintValueToString(LLVMValueRef Val);
/**
+ * Return a string representation of the DbgRecord. Use
+ * LLVMDisposeMessage to free the string.
+ *
+ * @see llvm::DbgRecord::print()
+ */
+char *LLVMPrintDbgRecordToString(LLVMDbgRecordRef Record);
+
+/**
* Replace all uses of a value with another one.
*
* @see llvm::Value::replaceAllUsesWith()
@@ -2316,7 +2324,9 @@ LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty);
LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty);
LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal);
LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal);
-LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal);
+LLVM_ATTRIBUTE_C_DEPRECATED(
+ LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal),
+ "Use LLVMConstNull instead.");
LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal);
LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
LLVMValueRef LLVMConstNSWAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
@@ -4152,8 +4162,10 @@ LLVMValueRef LLVMBuildBinOp(LLVMBuilderRef B, LLVMOpcode Op,
LLVMValueRef LLVMBuildNeg(LLVMBuilderRef, LLVMValueRef V, const char *Name);
LLVMValueRef LLVMBuildNSWNeg(LLVMBuilderRef B, LLVMValueRef V,
const char *Name);
-LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B, LLVMValueRef V,
- const char *Name);
+LLVM_ATTRIBUTE_C_DEPRECATED(LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B,
+ LLVMValueRef V,
+ const char *Name),
+ "Use LLVMBuildNeg + LLVMSetNUW instead.");
LLVMValueRef LLVMBuildFNeg(LLVMBuilderRef, LLVMValueRef V, const char *Name);
LLVMValueRef LLVMBuildNot(LLVMBuilderRef, LLVMValueRef V, const char *Name);
diff --git a/llvm/include/llvm-c/DebugInfo.h b/llvm/include/llvm-c/DebugInfo.h
index b23ff63c862f..dab1d697761b 100644
--- a/llvm/include/llvm-c/DebugInfo.h
+++ b/llvm/include/llvm-c/DebugInfo.h
@@ -1249,7 +1249,12 @@ LLVMMetadataRef LLVMDIBuilderCreateTempGlobalVariableFwdDecl(
LLVMMetadataRef Decl, uint32_t AlignInBits);
/*
- * Insert a new llvm.dbg.declare intrinsic call before the given instruction.
+ * Insert a new Declare DbgRecord before the given instruction.
+ *
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
+ * Use LLVMSetIsNewDbgInfoFormat(LLVMBool) to convert between formats.
+ * See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
+ *
* \param Builder The DIBuilder.
* \param Storage The storage of the variable to declare.
* \param VarInfo The variable's debug info descriptor.
@@ -1257,13 +1262,13 @@ LLVMMetadataRef LLVMDIBuilderCreateTempGlobalVariableFwdDecl(
* \param DebugLoc Debug info location.
* \param Instr Instruction acting as a location for the new intrinsic.
*/
-LLVMValueRef
+LLVMDbgRecordRef
LLVMDIBuilderInsertDeclareBefore(LLVMDIBuilderRef Builder, LLVMValueRef Storage,
LLVMMetadataRef VarInfo, LLVMMetadataRef Expr,
LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
* Soon to be deprecated.
- * Only use in "old debug mode" (LLVMIsNewDbgFormat() is false).
+ * Only use in "old debug mode" (LLVMIsNewDbgInfoFormat() is false).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a new llvm.dbg.declare intrinsic call before the given instruction.
@@ -1279,7 +1284,7 @@ LLVMValueRef LLVMDIBuilderInsertDeclareIntrinsicBefore(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
* Soon to be deprecated.
- * Only use in "new debug mode" (LLVMIsNewDbgFormat() is true).
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a Declare DbgRecord before the given instruction.
@@ -1295,9 +1300,14 @@ LLVMDbgRecordRef LLVMDIBuilderInsertDeclareRecordBefore(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
- * Insert a new llvm.dbg.declare intrinsic call at the end of the given basic
- * block. If the basic block has a terminator instruction, the intrinsic is
- * inserted before that terminator instruction.
+ * Insert a new Declare DbgRecord at the end of the given basic block. If the
+ * basic block has a terminator instruction, the intrinsic is inserted before
+ * that terminator instruction.
+ *
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
+ * Use LLVMSetIsNewDbgInfoFormat(LLVMBool) to convert between formats.
+ * See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
+ *
* \param Builder The DIBuilder.
* \param Storage The storage of the variable to declare.
* \param VarInfo The variable's debug info descriptor.
@@ -1305,12 +1315,12 @@ LLVMDbgRecordRef LLVMDIBuilderInsertDeclareRecordBefore(
* \param DebugLoc Debug info location.
* \param Block Basic block acting as a location for the new intrinsic.
*/
-LLVMValueRef LLVMDIBuilderInsertDeclareAtEnd(
+LLVMDbgRecordRef LLVMDIBuilderInsertDeclareAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block);
/**
* Soon to be deprecated.
- * Only use in "old debug mode" (LLVMIsNewDbgFormat() is false).
+ * Only use in "old debug mode" (LLVMIsNewDbgInfoFormat() is false).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a new llvm.dbg.declare intrinsic call at the end of the given basic
@@ -1328,7 +1338,7 @@ LLVMValueRef LLVMDIBuilderInsertDeclareIntrinsicAtEnd(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block);
/**
* Soon to be deprecated.
- * Only use in "new debug mode" (LLVMIsNewDbgFormat() is true).
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a Declare DbgRecord at the end of the given basic block. If the basic
@@ -1346,7 +1356,12 @@ LLVMDbgRecordRef LLVMDIBuilderInsertDeclareRecordAtEnd(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block);
/**
- * Insert a new llvm.dbg.value intrinsic call before the given instruction.
+ * Insert a new Value DbgRecord before the given instruction.
+ *
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
+ * Use LLVMSetIsNewDbgInfoFormat(LLVMBool) to convert between formats.
+ * See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
+ *
* \param Builder The DIBuilder.
* \param Val The value of the variable.
* \param VarInfo The variable's debug info descriptor.
@@ -1354,13 +1369,13 @@ LLVMDbgRecordRef LLVMDIBuilderInsertDeclareRecordAtEnd(
* \param DebugLoc Debug info location.
* \param Instr Instruction acting as a location for the new intrinsic.
*/
-LLVMValueRef
+LLVMDbgRecordRef
LLVMDIBuilderInsertDbgValueBefore(LLVMDIBuilderRef Builder, LLVMValueRef Val,
LLVMMetadataRef VarInfo, LLVMMetadataRef Expr,
LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
* Soon to be deprecated.
- * Only use in "old debug mode" (Module::IsNewDbgInfoFormat is false).
+ * Only use in "old debug mode" (LLVMIsNewDbgInfoFormat() is false).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a new llvm.dbg.value intrinsic call before the given instruction.
@@ -1376,7 +1391,7 @@ LLVMValueRef LLVMDIBuilderInsertDbgValueIntrinsicBefore(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
* Soon to be deprecated.
- * Only use in "new debug mode" (Module::IsNewDbgInfoFormat is true).
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a new llvm.dbg.value intrinsic call before the given instruction.
@@ -1392,9 +1407,14 @@ LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueRecordBefore(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
- * Insert a new llvm.dbg.value intrinsic call at the end of the given basic
- * block. If the basic block has a terminator instruction, the intrinsic is
- * inserted before that terminator instruction.
+ * Insert a new Value DbgRecord at the end of the given basic block. If the
+ * basic block has a terminator instruction, the intrinsic is inserted before
+ * that terminator instruction.
+ *
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
+ * Use LLVMSetIsNewDbgInfoFormat(LLVMBool) to convert between formats.
+ * See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
+ *
* \param Builder The DIBuilder.
* \param Val The value of the variable.
* \param VarInfo The variable's debug info descriptor.
@@ -1402,12 +1422,12 @@ LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueRecordBefore(
* \param DebugLoc Debug info location.
* \param Block Basic block acting as a location for the new intrinsic.
*/
-LLVMValueRef LLVMDIBuilderInsertDbgValueAtEnd(
+LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block);
/**
* Soon to be deprecated.
- * Only use in "old debug mode" (Module::IsNewDbgInfoFormat is false).
+ * Only use in "old debug mode" (LLVMIsNewDbgInfoFormat() is false).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a new llvm.dbg.value intrinsic call at the end of the given basic
@@ -1425,7 +1445,7 @@ LLVMValueRef LLVMDIBuilderInsertDbgValueIntrinsicAtEnd(
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block);
/**
* Soon to be deprecated.
- * Only use in "new debug mode" (Module::IsNewDbgInfoFormat is true).
+ * Only use in "new debug mode" (LLVMIsNewDbgInfoFormat() is true).
* See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes
*
* Insert a new llvm.dbg.value intrinsic call at the end of the given basic
diff --git a/llvm/include/llvm/ADT/SCCIterator.h b/llvm/include/llvm/ADT/SCCIterator.h
index e743ae7c11ed..3bd103c13f19 100644
--- a/llvm/include/llvm/ADT/SCCIterator.h
+++ b/llvm/include/llvm/ADT/SCCIterator.h
@@ -281,14 +281,14 @@ class scc_member_iterator {
if (G1 == G2)
return false;
- // Make the smaller rank tree a direct child or the root of high rank tree.
- if (G1->Rank < G1->Rank)
+ // Make the smaller rank tree a direct child of high rank tree.
+ if (G1->Rank < G2->Rank)
G1->Group = G2;
else {
G2->Group = G1;
// If the ranks are the same, increment root of one tree by one.
if (G1->Rank == G2->Rank)
- G2->Rank++;
+ G1->Rank++;
}
return true;
}
diff --git a/llvm/include/llvm/Analysis/InlineCost.h b/llvm/include/llvm/Analysis/InlineCost.h
index 3a760e0a85ce..c5978ce54fc1 100644
--- a/llvm/include/llvm/Analysis/InlineCost.h
+++ b/llvm/include/llvm/Analysis/InlineCost.h
@@ -65,7 +65,8 @@ const char MaxInlineStackSizeAttributeName[] = "inline-max-stacksize";
// The cost-benefit pair computed by cost-benefit analysis.
class CostBenefitPair {
public:
- CostBenefitPair(APInt Cost, APInt Benefit) : Cost(Cost), Benefit(Benefit) {}
+ CostBenefitPair(APInt Cost, APInt Benefit)
+ : Cost(std::move(Cost)), Benefit(std::move(Benefit)) {}
const APInt &getCost() const { return Cost; }
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 37ce1518f00c..bb282a1b73d3 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -196,7 +196,8 @@ public:
T Offset;
SizeOffsetType() = default;
- SizeOffsetType(T Size, T Offset) : Size(Size), Offset(Offset) {}
+ SizeOffsetType(T Size, T Offset)
+ : Size(std::move(Size)), Offset(std::move(Offset)) {}
bool knownSize() const { return C::known(Size); }
bool knownOffset() const { return C::known(Offset); }
@@ -215,9 +216,10 @@ public:
/// \p APInts.
struct SizeOffsetAPInt : public SizeOffsetType<APInt, SizeOffsetAPInt> {
SizeOffsetAPInt() = default;
- SizeOffsetAPInt(APInt Size, APInt Offset) : SizeOffsetType(Size, Offset) {}
+ SizeOffsetAPInt(APInt Size, APInt Offset)
+ : SizeOffsetType(std::move(Size), std::move(Offset)) {}
- static bool known(APInt V) { return V.getBitWidth() > 1; }
+ static bool known(const APInt &V) { return V.getBitWidth() > 1; }
};
/// Evaluate the size and offset of an object pointed to by a Value*
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index 830eed5d60ee..7d896c44f467 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -297,13 +297,6 @@ public:
return MemoryLocation(Ptr, LocationSize::beforeOrAfterPointer(), AATags);
}
- // Return the exact size if the exact size is known at compiletime,
- // otherwise return LocationSize::beforeOrAfterPointer().
- static LocationSize getSizeOrUnknown(const TypeSize &T) {
- return T.isScalable() ? LocationSize::beforeOrAfterPointer()
- : LocationSize::precise(T.getFixedValue());
- }
-
MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}
explicit MemoryLocation(const Value *Ptr, LocationSize Size,
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index 5863a8d6e8ee..65ccb1b81b3a 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -181,6 +181,7 @@ enum Kind {
kw_tailcc,
kw_m68k_rtdcc,
kw_graalcc,
+ kw_riscv_vector_cc,
// Attributes:
kw_attributes,
diff --git a/llvm/include/llvm/BinaryFormat/COFF.h b/llvm/include/llvm/BinaryFormat/COFF.h
index 72461d0d9c31..4c31cd847bdf 100644
--- a/llvm/include/llvm/BinaryFormat/COFF.h
+++ b/llvm/include/llvm/BinaryFormat/COFF.h
@@ -806,6 +806,12 @@ enum Feat00Flags : uint32_t {
Kernel = 0x40000000,
};
+enum class Arm64ECThunkType : uint8_t {
+ GuestExit = 0,
+ Entry = 1,
+ Exit = 4,
+};
+
inline bool isReservedSectionNumber(int32_t SectionNumber) {
return SectionNumber <= 0;
}
diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h
index 532f9481766a..e8d03f806715 100644
--- a/llvm/include/llvm/BinaryFormat/DXContainer.h
+++ b/llvm/include/llvm/BinaryFormat/DXContainer.h
@@ -424,6 +424,22 @@ struct ResourceBindInfo : public v0::ResourceBindInfo {
};
} // namespace v2
+
+namespace v3 {
+struct RuntimeInfo : public v2::RuntimeInfo {
+ uint32_t EntryNameOffset;
+
+ void swapBytes() {
+ v2::RuntimeInfo::swapBytes();
+ sys::swapByteOrder(EntryNameOffset);
+ }
+
+ void swapBytes(Triple::EnvironmentType Stage) {
+ v2::RuntimeInfo::swapBytes(Stage);
+ }
+};
+
+} // namespace v3
} // namespace PSV
#define COMPONENT_PRECISION(Val, Enum) Enum = Val,
diff --git a/llvm/include/llvm/BinaryFormat/Dwarf.def b/llvm/include/llvm/BinaryFormat/Dwarf.def
index e70b58d5ea50..8cf90de637a3 100644
--- a/llvm/include/llvm/BinaryFormat/Dwarf.def
+++ b/llvm/include/llvm/BinaryFormat/Dwarf.def
@@ -1040,6 +1040,8 @@ HANDLE_DW_CC(0xca, LLVM_PreserveAll)
HANDLE_DW_CC(0xcb, LLVM_X86RegCall)
HANDLE_DW_CC(0xcc, LLVM_M68kRTD)
HANDLE_DW_CC(0xcd, LLVM_PreserveNone)
+HANDLE_DW_CC(0xce, LLVM_RISCVVectorCall)
+HANDLE_DW_CC(0xcf, LLVM_SwiftTail)
// From GCC source code (include/dwarf2.h): This DW_CC_ value is not currently
// generated by any toolchain. It is used internally to GDB to indicate OpenCL
// C functions that have been compiled with the IBM XL C for OpenCL compiler and
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 4018ef03f960..909eb833c601 100644
--- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -492,6 +492,13 @@ enum OverflowingBinaryOperatorOptionalFlags {
OBO_NO_SIGNED_WRAP = 1
};
+/// TruncInstOptionalFlags - Flags for serializing
+/// TruncInst's SubclassOptionalData contents.
+enum TruncInstOptionalFlags {
+ TIO_NO_UNSIGNED_WRAP = 0,
+ TIO_NO_SIGNED_WRAP = 1
+};
+
/// FastMath Flags
/// This is a fixed layout derived from the bitcode emitted by LLVM 5.0
/// intended to decouple the in-memory representation from the serialization.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 9e8fc5d635c5..28d9cf6260d6 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -673,6 +673,14 @@ public:
bool matchSDivByConst(MachineInstr &MI);
void applySDivByConst(MachineInstr &MI);
+  /// Given a G_SDIV \p MI expressing a signed value divided by a pow2
+  /// constant, return expressions that implement it by shifting.
+ bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
+ void applySDivByPow2(MachineInstr &MI);
+  /// Given a G_UDIV \p MI expressing an unsigned value divided by a pow2
+  /// constant, return expressions that implement it by shifting.
+ void applyUDivByPow2(MachineInstr &MI);
+
// G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
bool matchUMulHToLShr(MachineInstr &MI);
void applyUMulHToLShr(MachineInstr &MI);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 6ae7c1440907..5f28908e998a 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -243,6 +243,10 @@ private:
bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
unsigned Opcode);
+ /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
+ bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
+ unsigned Opcode);
+
// Translate @llvm.experimental.vector.interleave2 and
// @llvm.experimental.vector.deinterleave2 intrinsics for fixed-width vector
// types into vector shuffles.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 5bb3692f0a46..284f434fbb9b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -429,6 +429,7 @@ public:
LegalizeResult lowerDIVREM(MachineInstr &MI);
LegalizeResult lowerAbsToAddXor(MachineInstr &MI);
LegalizeResult lowerAbsToMaxNeg(MachineInstr &MI);
+ LegalizeResult lowerAbsToCNeg(MachineInstr &MI);
LegalizeResult lowerVectorReduction(MachineInstr &MI);
LegalizeResult lowerMemcpyInline(MachineInstr &MI);
LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index aaa81342845b..4c9d85fd9f51 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1165,6 +1165,17 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, const ConstantInt &MinElts);
+ /// Build and insert \p Res = G_VSCALE \p MinElts
+ ///
+ /// G_VSCALE puts the value of the runtime vscale multiplied by \p MinElts
+ /// into \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with scalar type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildVScale(const DstOp &Res, const APInt &MinElts);
+
/// Build and insert a G_INTRINSIC instruction.
///
/// There are four different opcodes based on combinations of whether the
@@ -1322,9 +1333,9 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder
- buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes,
- Register Addr, Register CmpVal, Register NewVal,
- MachineMemOperand &MMO);
+ buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes,
+ const SrcOp &Addr, const SrcOp &CmpVal,
+ const SrcOp &NewVal, MachineMemOperand &MMO);
/// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
/// MMO`.
@@ -1340,8 +1351,9 @@ public:
/// registers of the same type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr,
- Register CmpVal, Register NewVal,
+ MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes,
+ const SrcOp &Addr, const SrcOp &CmpVal,
+ const SrcOp &NewVal,
MachineMemOperand &MMO);
/// Build and insert `OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO`.
@@ -2113,6 +2125,11 @@ public:
DstMMO, SrcMMO);
}
+ /// Build and insert G_TRAP or G_DEBUGTRAP
+ MachineInstrBuilder buildTrap(bool Debug = false) {
+ return buildInstr(Debug ? TargetOpcode::G_DEBUGTRAP : TargetOpcode::G_TRAP);
+ }
+
/// Build and insert \p Dst = G_SBFX \p Src, \p LSB, \p Width.
MachineInstrBuilder buildSbfx(const DstOp &Dst, const SrcOp &Src,
const SrcOp &LSB, const SrcOp &Width) {
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index f8900f3434cc..807cec3c177d 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -308,10 +308,16 @@ std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
Register Src,
const MachineRegisterInfo &MRI);
-/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
-/// then it tries to do an element-wise constant fold.
+/// Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on \p
+/// Src. If \p Src is a vector then it tries to do an element-wise constant
+/// fold.
std::optional<SmallVector<unsigned>>
-ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);
+ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
+ std::function<unsigned(APInt)> CB);
+
+std::optional<SmallVector<APInt>>
+ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
+ const MachineRegisterInfo &MRI);
/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index dfbf7a1e7aae..c2bff2794493 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -1058,8 +1058,9 @@ public:
int64_t Offset, LocationSize Size) {
return getMachineMemOperand(
MMO, Offset,
- !Size.hasValue() || Size.isScalable()
- ? LLT()
+ !Size.hasValue() ? LLT()
+ : Size.isScalable()
+ ? LLT::scalable_vector(1, 8 * Size.getValue().getKnownMinValue())
: LLT::scalar(8 * Size.getValue().getKnownMinValue()));
}
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
index fcdd73d8b65f..7249f812d2cc 100644
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -115,6 +115,8 @@ public:
// this instruction.
Unpredictable = 1 << 16, // Instruction with unpredictable condition.
NoConvergent = 1 << 17, // Call does not require convergence guarantees.
+ NonNeg = 1 << 18, // The operand is non-negative.
+ Disjoint = 1 << 19, // Each bit is zero in at least one of the inputs.
};
private:
diff --git a/llvm/include/llvm/CodeGen/MachinePassManager.h b/llvm/include/llvm/CodeGen/MachinePassManager.h
index 3faffe5c4cab..8689fd19030f 100644
--- a/llvm/include/llvm/CodeGen/MachinePassManager.h
+++ b/llvm/include/llvm/CodeGen/MachinePassManager.h
@@ -16,8 +16,6 @@
// their respective analysis managers such as ModuleAnalysisManager and
// FunctionAnalysisManager.
//
-// TODO: Add MachineFunctionProperties support.
-//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEPASSMANAGER_H
@@ -44,23 +42,67 @@ using MachineFunctionAnalysisManager = AnalysisManager<MachineFunction>;
/// automatically mixes in \c PassInfoMixin.
template <typename DerivedT>
struct MachinePassInfoMixin : public PassInfoMixin<DerivedT> {
- // TODO: Add MachineFunctionProperties support.
+protected:
+ class PropertyChanger {
+ MachineFunction &MF;
+
+ template <typename T>
+ using has_get_required_properties_t =
+ decltype(std::declval<T &>().getRequiredProperties());
+
+ template <typename T>
+ using has_get_set_properties_t =
+ decltype(std::declval<T &>().getSetProperties());
+
+ template <typename T>
+ using has_get_cleared_properties_t =
+ decltype(std::declval<T &>().getClearedProperties());
+
+ public:
+ PropertyChanger(MachineFunction &MF) : MF(MF) {
+#ifndef NDEBUG
+ if constexpr (is_detected<has_get_required_properties_t,
+ DerivedT>::value) {
+ auto &MFProps = MF.getProperties();
+ auto RequiredProperties = DerivedT::getRequiredProperties();
+ if (!MFProps.verifyRequiredProperties(RequiredProperties)) {
+ errs() << "MachineFunctionProperties required by " << DerivedT::name()
+ << " pass are not met by function " << MF.getName() << ".\n"
+ << "Required properties: ";
+ RequiredProperties.print(errs());
+ errs() << "\nCurrent properties: ";
+ MFProps.print(errs());
+ errs() << '\n';
+ report_fatal_error("MachineFunctionProperties check failed");
+ }
+ }
+#endif
+ }
+
+ ~PropertyChanger() {
+ if constexpr (is_detected<has_get_set_properties_t, DerivedT>::value)
+ MF.getProperties().set(DerivedT::getSetProperties());
+ if constexpr (is_detected<has_get_cleared_properties_t, DerivedT>::value)
+ MF.getProperties().reset(DerivedT::getClearedProperties());
+ }
+ };
+
+public:
+ PreservedAnalyses runImpl(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM) {
+ PropertyChanger PC(MF);
+ return static_cast<DerivedT *>(this)->run(MF, MFAM);
+ }
};
namespace detail {
-struct MachinePassConcept
- : PassConcept<MachineFunction, MachineFunctionAnalysisManager> {
- virtual MachineFunctionProperties getRequiredProperties() const = 0;
- virtual MachineFunctionProperties getSetProperties() const = 0;
- virtual MachineFunctionProperties getClearedProperties() const = 0;
-};
-template <typename PassT> struct MachinePassModel : MachinePassConcept {
- explicit MachinePassModel(PassT &&Pass) : Pass(std::move(Pass)) {}
- // We have to explicitly define all the special member functions because MSVC
- // refuses to generate them.
- MachinePassModel(const MachinePassModel &Arg) : Pass(Arg.Pass) {}
- MachinePassModel(MachinePassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+template <typename PassT>
+struct MachinePassModel
+ : PassModel<MachineFunction, PassT, MachineFunctionAnalysisManager> {
+ explicit MachinePassModel(PassT &&Pass)
+ : PassModel<MachineFunction, PassT, MachineFunctionAnalysisManager>(
+ std::move(Pass)) {}
friend void swap(MachinePassModel &LHS, MachinePassModel &RHS) {
using std::swap;
@@ -75,89 +117,8 @@ template <typename PassT> struct MachinePassModel : MachinePassConcept {
MachinePassModel &operator=(const MachinePassModel &) = delete;
PreservedAnalyses run(MachineFunction &IR,
MachineFunctionAnalysisManager &AM) override {
- return Pass.run(IR, AM);
- }
-
- void printPipeline(
- raw_ostream &OS,
- function_ref<StringRef(StringRef)> MapClassName2PassName) override {
- Pass.printPipeline(OS, MapClassName2PassName);
- }
-
- StringRef name() const override { return PassT::name(); }
-
- template <typename T>
- using has_required_t = decltype(std::declval<T &>().isRequired());
- template <typename T>
- static std::enable_if_t<is_detected<has_required_t, T>::value, bool>
- passIsRequiredImpl() {
- return T::isRequired();
+ return this->Pass.runImpl(IR, AM);
}
- template <typename T>
- static std::enable_if_t<!is_detected<has_required_t, T>::value, bool>
- passIsRequiredImpl() {
- return false;
- }
- bool isRequired() const override { return passIsRequiredImpl<PassT>(); }
-
- template <typename T>
- using has_get_required_properties_t =
- decltype(std::declval<T &>().getRequiredProperties());
- template <typename T>
- static std::enable_if_t<is_detected<has_get_required_properties_t, T>::value,
- MachineFunctionProperties>
- getRequiredPropertiesImpl() {
- return PassT::getRequiredProperties();
- }
- template <typename T>
- static std::enable_if_t<!is_detected<has_get_required_properties_t, T>::value,
- MachineFunctionProperties>
- getRequiredPropertiesImpl() {
- return MachineFunctionProperties();
- }
- MachineFunctionProperties getRequiredProperties() const override {
- return getRequiredPropertiesImpl<PassT>();
- }
-
- template <typename T>
- using has_get_set_properties_t =
- decltype(std::declval<T &>().getSetProperties());
- template <typename T>
- static std::enable_if_t<is_detected<has_get_set_properties_t, T>::value,
- MachineFunctionProperties>
- getSetPropertiesImpl() {
- return PassT::getSetProperties();
- }
- template <typename T>
- static std::enable_if_t<!is_detected<has_get_set_properties_t, T>::value,
- MachineFunctionProperties>
- getSetPropertiesImpl() {
- return MachineFunctionProperties();
- }
- MachineFunctionProperties getSetProperties() const override {
- return getSetPropertiesImpl<PassT>();
- }
-
- template <typename T>
- using has_get_cleared_properties_t =
- decltype(std::declval<T &>().getClearedProperties());
- template <typename T>
- static std::enable_if_t<is_detected<has_get_cleared_properties_t, T>::value,
- MachineFunctionProperties>
- getClearedPropertiesImpl() {
- return PassT::getClearedProperties();
- }
- template <typename T>
- static std::enable_if_t<!is_detected<has_get_cleared_properties_t, T>::value,
- MachineFunctionProperties>
- getClearedPropertiesImpl() {
- return MachineFunctionProperties();
- }
- MachineFunctionProperties getClearedProperties() const override {
- return getClearedPropertiesImpl<PassT>();
- }
-
- PassT Pass;
};
} // namespace detail
@@ -251,11 +212,12 @@ private:
class ModuleToMachineFunctionPassAdaptor
: public PassInfoMixin<ModuleToMachineFunctionPassAdaptor> {
- using MachinePassConcept = detail::MachinePassConcept;
-
public:
+ using PassConceptT =
+ detail::PassConcept<MachineFunction, MachineFunctionAnalysisManager>;
+
explicit ModuleToMachineFunctionPassAdaptor(
- std::unique_ptr<MachinePassConcept> Pass)
+ std::unique_ptr<PassConceptT> Pass)
: Pass(std::move(Pass)) {}
/// Runs the function pass across every function in the module.
@@ -266,21 +228,42 @@ public:
static bool isRequired() { return true; }
private:
- std::unique_ptr<MachinePassConcept> Pass;
+ std::unique_ptr<PassConceptT> Pass;
};
template <typename MachineFunctionPassT>
ModuleToMachineFunctionPassAdaptor
createModuleToMachineFunctionPassAdaptor(MachineFunctionPassT &&Pass) {
- using PassModelT = detail::MachinePassModel<MachineFunctionPassT>;
+ using PassModelT = detail::PassModel<MachineFunction, MachineFunctionPassT,
+ MachineFunctionAnalysisManager>;
// Do not use make_unique, it causes too many template instantiations,
// causing terrible compile times.
return ModuleToMachineFunctionPassAdaptor(
- std::unique_ptr<detail::MachinePassConcept>(
+ std::unique_ptr<ModuleToMachineFunctionPassAdaptor::PassConceptT>(
new PassModelT(std::forward<MachineFunctionPassT>(Pass))));
}
template <>
+template <typename PassT>
+void PassManager<MachineFunction>::addPass(PassT &&Pass) {
+ using PassModelT =
+ detail::PassModel<MachineFunction, PassT, MachineFunctionAnalysisManager>;
+ using MachinePassModelT = detail::MachinePassModel<PassT>;
+ // Do not use make_unique or emplace_back, they cause too many template
+ // instantiations, causing terrible compile times.
+ if constexpr (std::is_base_of_v<MachinePassInfoMixin<PassT>, PassT>) {
+ Passes.push_back(std::unique_ptr<PassConceptT>(
+ new MachinePassModelT(std::forward<PassT>(Pass))));
+ } else if constexpr (std::is_same_v<PassT, PassManager<MachineFunction>>) {
+ for (auto &P : Pass.Passes)
+ Passes.push_back(std::move(P));
+ } else {
+ Passes.push_back(std::unique_ptr<PassConceptT>(
+ new PassModelT(std::forward<PassT>(Pass))));
+ }
+}
+
+template <>
PreservedAnalyses
PassManager<MachineFunction>::run(MachineFunction &,
AnalysisManager<MachineFunction> &);
diff --git a/llvm/include/llvm/CodeGen/MachineScheduler.h b/llvm/include/llvm/CodeGen/MachineScheduler.h
index 25703dd6b61f..9cca6b3a571c 100644
--- a/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -1296,6 +1296,11 @@ protected:
SchedBoundary Bot;
MachineSchedPolicy RegionPolicy;
+ /// Candidate last picked from Top boundary.
+ SchedCandidate TopCand;
+ /// Candidate last picked from Bot boundary.
+ SchedCandidate BotCand;
+
public:
PostGenericScheduler(const MachineSchedContext *C)
: GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
@@ -1316,6 +1321,8 @@ public:
SUnit *pickNode(bool &IsTopNode) override;
+ SUnit *pickNodeBidirectional(bool &IsTopNode);
+
void scheduleTree(unsigned SubtreeID) override {
llvm_unreachable("PostRA scheduler does not support subtree analysis.");
}
@@ -1326,12 +1333,14 @@ public:
if (SU->isScheduled)
return;
Top.releaseNode(SU, SU->TopReadyCycle, false);
+ TopCand.SU = nullptr;
}
void releaseBottomNode(SUnit *SU) override {
if (SU->isScheduled)
return;
Bot.releaseNode(SU, SU->BotReadyCycle, false);
+ BotCand.SU = nullptr;
}
protected:
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 4785e93d72d1..574c63552ce0 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -883,7 +883,7 @@ public:
/// Returns a vector of type ResVT whose elements contain the linear sequence
/// <0, Step, Step * 2, Step * 3, ...>
- SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal);
+ SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal);
/// Returns a vector of type ResVT whose elements contain the linear sequence
/// <0, 1, 2, 3, ...>
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 59fad88f91b1..a4dc09744618 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1454,6 +1454,28 @@ public:
getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
}
+ /// Same as getLoadExtAction, but for atomic loads.
+ LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT,
+ EVT MemVT) const {
+ if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+ unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
+ unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
+ MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
+ unsigned Shift = 4 * ExtType;
+ LegalizeAction Action =
+ (LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
+ assert((Action == Legal || Action == Expand) &&
+ "Unsupported atomic load extension action.");
+ return Action;
+ }
+
+ /// Return true if the specified atomic load with extension is legal on
+ /// this target.
+ bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+ return getAtomicLoadExtAction(ExtType, ValVT, MemVT) == Legal;
+ }
+
/// Return how this store with truncation should be treated: either it is
/// legal, needs to be promoted to a larger size, needs to be expanded to some
/// other code sequence, or the target has a custom expander for it.
@@ -2536,6 +2558,30 @@ protected:
setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
}
+ /// Let target indicate that an extending atomic load of the specified type
+ /// is legal.
+ void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
+ LegalizeAction Action) {
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
+ MemVT.isValid() && "Table isn't big enough!");
+ assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
+ unsigned Shift = 4 * ExtType;
+ AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &=
+ ~((uint16_t)0xF << Shift);
+ AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |=
+ ((uint16_t)Action << Shift);
+ }
+ void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
+ LegalizeAction Action) {
+ for (auto ExtType : ExtTypes)
+ setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
+ }
+ void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
+ ArrayRef<MVT> MemVTs, LegalizeAction Action) {
+ for (auto MemVT : MemVTs)
+ setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
+ }
+
/// Indicate that the specified truncating store does not work with the
/// specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
@@ -3521,6 +3567,10 @@ private:
/// for each of the 4 load ext types.
uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
+ /// Similar to LoadExtActions, but for atomic loads. Only Legal or Expand
+ /// (default) values are supported.
+ uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
+
/// For each value type pair keep a LegalizeAction that indicates whether a
/// truncating store of a specific value type and truncating type is legal.
LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 117d3f718297..33c4c745c341 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -243,9 +243,20 @@ public:
unsigned RegSize, SpillSize, SpillAlignment;
unsigned VTListOffset;
};
+
+ /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
+ /// index; a value of -1 in either field marks the range as invalid.
+ struct SubRegCoveredBits {
+ uint16_t Offset;
+ uint16_t Size;
+ };
+
private:
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
const char *const *SubRegIndexNames; // Names of subreg indexes.
+ const SubRegCoveredBits *SubRegIdxRanges; // Pointer to the subreg covered
+ // bit ranges array.
+
// Pointer to array of lane masks, one per sub-reg index.
const LaneBitmask *SubRegIndexLaneMasks;
@@ -256,12 +267,10 @@ private:
unsigned HwMode;
protected:
- TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
- regclass_iterator RCB,
- regclass_iterator RCE,
- const char *const *SRINames,
- const LaneBitmask *SRILaneMasks,
- LaneBitmask CoveringLanes,
+ TargetRegisterInfo(const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
+ regclass_iterator RCE, const char *const *SRINames,
+ const SubRegCoveredBits *SubIdxRanges,
+ const LaneBitmask *SRILaneMasks, LaneBitmask CoveringLanes,
const RegClassInfo *const RCIs,
const MVT::SimpleValueType *const RCVTLists,
unsigned Mode = 0);
@@ -382,6 +391,16 @@ public:
return SubRegIndexNames[SubIdx-1];
}
+ /// Get the size of the bit range covered by a sub-register index.
+ /// If the index isn't continuous, return the sum of the sizes of its parts.
+ /// If the index is used to access subregisters of different sizes, return -1.
+ unsigned getSubRegIdxSize(unsigned Idx) const;
+
+ /// Get the offset of the bit range covered by a sub-register index.
+ /// If an Offset doesn't make sense (the index isn't continuous, or is used to
+ /// access sub-registers at different offsets), return -1.
+ unsigned getSubRegIdxOffset(unsigned Idx) const;
+
/// Return a bitmask representing the parts of a register that are covered by
/// SubIdx \see LaneBitmask.
///
diff --git a/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVCodeViewReader.h b/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVCodeViewReader.h
index 8a32210bac3c..4dd7c967ddc1 100644
--- a/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVCodeViewReader.h
+++ b/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVCodeViewReader.h
@@ -58,7 +58,7 @@ class LVSymbolVisitorDelegate;
using LVNames = SmallVector<StringRef, 16>;
-// The ELF reader uses the DWARF constants to create the logical elements.
+// The DWARF reader uses the DWARF constants to create the logical elements.
// The DW_TAG_* and DW_AT_* are used to select the logical object and to
// set specific attributes, such as name, type, etc.
// As the CodeView constants are different to the DWARF constants, the
diff --git a/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVDWARFReader.h b/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVDWARFReader.h
index 22e804a459f8..fdc97249d8e5 100644
--- a/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVDWARFReader.h
+++ b/llvm/include/llvm/DebugInfo/LogicalView/Readers/LVDWARFReader.h
@@ -49,7 +49,7 @@ class LVDWARFReader final : public LVBinaryReader {
// In DWARF v4, the files are 1-indexed.
// In DWARF v5, the files are 0-indexed.
- // The ELF reader expects the indexes as 1-indexed.
+ // The DWARF reader expects the indexes as 1-indexed.
bool IncrementFileIndex = false;
// Address ranges collected for current DIE.
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h b/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h
index 30a9383dc0bc..4ce0c8d9fd17 100644
--- a/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/JITLink.h
@@ -567,7 +567,7 @@ public:
orc::ExecutorAddrDiff getOffset() const { return Offset; }
void setOffset(orc::ExecutorAddrDiff NewOffset) {
- assert(NewOffset < getBlock().getSize() && "Offset out of range");
+ assert(NewOffset <= getBlock().getSize() && "Offset out of range");
Offset = NewOffset;
}
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index f7c286bec778..ed30a792e9e9 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -352,7 +352,7 @@ private:
: ES(ES), L(L) {}
static Expected<unsigned> getTargetPointerSize(const Triple &TT);
- static Expected<llvm::endianness> getTargetEndianness(const Triple &TT);
+ static Expected<llvm::endianness> getEndianness(const Triple &TT);
Expected<std::unique_ptr<jitlink::LinkGraph>>
createStubsGraph(const SymbolMap &Resolved);
diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
new file mode 100644
index 000000000000..6ce972adcf0f
--- /dev/null
+++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
@@ -0,0 +1,1268 @@
+//===- ClauseT.h -- clause template definitions ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file contains template classes that represent OpenMP clauses, as
+// described in the OpenMP API specification.
+//
+// The general structure of any specific clause class is that it is either
+// empty, or it consists of a single data member, which can take one of these
+// three forms:
+// - a value member, named `v`, or
+// - a tuple of values, named `t`, or
+// - a variant (i.e. union) of values, named `u`.
+// To assist with generic visit algorithms, classes define one of the following
+// traits:
+// - EmptyTrait: the class has no data members.
+// - WrapperTrait: the class has a single member `v`
+// - TupleTrait: the class has a tuple member `t`
+// - UnionTrait: the class has a variant member `u`
+// - IncompleteTrait: the class is a placeholder class that is currently empty,
+// but will be completed at a later time.
+// Note: This structure follows the one used in flang parser.
+//
+// The types used in the class definitions follow the names used in the spec
+// (there are a few exceptions to this). For example, given
+// Clause `foo`
+// - foo-modifier : description...
+// - list : list of variables
+// the corresponding class would be
+// template <...>
+// struct FooT {
+// using FooModifier = type that can represent the modifier
+// using List = ListT<ObjectT<...>>;
+// using TupleTrait = std::true_type;
+// std::tuple<std::optional<FooModifier>, List> t;
+// };
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_FRONTEND_OPENMP_CLAUSET_H
+#define LLVM_FRONTEND_OPENMP_CLAUSET_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Frontend/OpenMP/OMP.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <algorithm>
+#include <iterator>
+#include <optional>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <variant>
+
+#define ENUM(Name, ...) enum class Name { __VA_ARGS__ }
+#define OPT(x) std::optional<x>
+
+// A number of OpenMP clauses contain values that come from a given set of
+// possibilities. In the IR these are usually represented by enums. Both
+// clang and flang use different types for the enums, and the enum elements
+// representing the same thing may have different values between clang and
+// flang.
+// Since the representation below tries to adhere to the spec, and be source
+// language agnostic, it defines its own enums, independent from any language
+// frontend. As a consequence, when instantiating the templates below,
+// frontend-specific enums need to be translated into the representation
+// used here. The macros below are intended to assist with the conversion.
+
+// Helper macro for enum-class conversion.
+#define CLAUSET_SCOPED_ENUM_MEMBER_CONVERT(Ov, Tv) \
+ if (v == OtherEnum::Ov) { \
+ return ThisEnum::Tv; \
+ }
+
+// Helper macro for enum (non-class) conversion.
+#define CLAUSET_UNSCOPED_ENUM_MEMBER_CONVERT(Ov, Tv) \
+ if (v == Ov) { \
+ return ThisEnum::Tv; \
+ }
+
+#define CLAUSET_ENUM_CONVERT(func, OtherE, ThisE, Maps) \
+ auto func = [](OtherE v) -> ThisE { \
+ using ThisEnum = ThisE; \
+ using OtherEnum = OtherE; \
+ (void)sizeof(OtherEnum); /*Avoid "unused local typedef" warning*/ \
+ Maps; \
+ llvm_unreachable("Unexpected value in " #OtherE); \
+ }
+
+// Usage:
+//
+// Given two enums,
+// enum class Other { o1, o2 };
+// enum class This { t1, t2 };
+// generate conversion function "Func : Other -> This" with
+// CLAUSET_ENUM_CONVERT(
+// Func, Other, This,
+// CLAUSET_SCOPED_ENUM_MEMBER_CONVERT(o1, t1) // <- No comma
+// CLAUSET_SCOPED_ENUM_MEMBER_CONVERT(o2, t2)
+// ...
+// )
+//
+// Note that the sequence of M(other-value, this-value) is separated
+// with _spaces_, not commas.
+
+namespace detail {
+// Type trait to determine whether T is a specialization of std::variant.
+template <typename T> struct is_variant {
+ static constexpr bool value = false;
+};
+
+template <typename... Ts> struct is_variant<std::variant<Ts...>> {
+ static constexpr bool value = true;
+};
+
+template <typename T> constexpr bool is_variant_v = is_variant<T>::value;
+
+// Helper utility to create a type which is a union of two given variants.
+template <typename...> struct UnionOfTwo;
+
+template <typename... Types1, typename... Types2>
+struct UnionOfTwo<std::variant<Types1...>, std::variant<Types2...>> {
+ using type = std::variant<Types1..., Types2...>;
+};
+} // namespace detail
+
+namespace tomp {
+namespace type {
+
+// Helper utility to create a type which is a union of an arbitrary number
+// of variants.
+template <typename...> struct Union;
+
+template <> struct Union<> {
+ // Legal to define, illegal to instantiate.
+ using type = std::variant<>;
+};
+
+template <typename T, typename... Ts> struct Union<T, Ts...> {
+ static_assert(detail::is_variant_v<T>);
+ using type =
+ typename detail::UnionOfTwo<T, typename Union<Ts...>::type>::type;
+};
+
+template <typename T> using ListT = llvm::SmallVector<T, 0>;
+
+// The ObjectT class represents a variable (as defined in the OpenMP spec).
+//
+// A specialization of ObjectT<Id, Expr> must provide the following definitions:
+// {
+// using IdTy = Id;
+// using ExprTy = Expr;
+//
+// auto id() const -> IdTy {
+// return the identifier of the object (for use in tests for
+// presence/absence of the object)
+// }
+//
+// auto ref() const -> (e.g. const ExprTy&) {
+// return the expression accessing (referencing) the object
+// }
+// }
+//
+// For example, the ObjectT instance created for "var[x+1]" would have
+// the `id()` return the identifier for `var`, and the `ref()` return the
+// representation of the array-access `var[x+1]`.
+//
+// The identity of an object must always be present, i.e. it cannot be
+// nullptr, std::nullopt, etc. The reference is optional.
+//
+// Note: the ObjectT template is not defined. Any user of it is expected to
+// provide their own specialization that conforms to the above requirements.
+template <typename IdType, typename ExprType> struct ObjectT;
+
+template <typename I, typename E> using ObjectListT = ListT<ObjectT<I, E>>;
+
+using DirectiveName = llvm::omp::Directive;
+
+template <typename I, typename E> //
+struct DefinedOperatorT {
+ struct DefinedOpName {
+ using WrapperTrait = std::true_type;
+ ObjectT<I, E> v;
+ };
+ ENUM(IntrinsicOperator, Power, Multiply, Divide, Add, Subtract, Concat, LT,
+ LE, EQ, NE, GE, GT, NOT, AND, OR, EQV, NEQV, Min, Max);
+ using UnionTrait = std::true_type;
+ std::variant<DefinedOpName, IntrinsicOperator> u;
+};
+
+// V5.2: [3.2.6] `iterator` modifier
+template <typename E> //
+struct RangeT {
+ // range-specification: begin : end[: step]
+ using TupleTrait = std::true_type;
+ std::tuple<E, E, OPT(E)> t;
+};
+
+// V5.2: [3.2.6] `iterator` modifier
+template <typename TypeType, typename IdType, typename ExprType> //
+struct IteratorSpecifierT {
+ // iterators-specifier: [ iterator-type ] identifier = range-specification
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(TypeType), ObjectT<IdType, ExprType>, RangeT<ExprType>> t;
+};
+
+// Note:
+// For motion or map clauses the OpenMP spec allows a unique mapper modifier.
+// In practice, since these clauses apply to multiple objects, there can be
+// multiple effective mappers applicable to these objects (due to overloads,
+// etc.). Because of that store a list of mappers every time a mapper modifier
+// is allowed. If the mapper list contains a single element, it applies to
+// all objects in the clause, otherwise there should be as many mappers as
+// there are objects.
+// V5.2: [5.8.2] Mapper identifiers and `mapper` modifiers
+template <typename I, typename E> //
+struct MapperT {
+ using MapperIdentifier = ObjectT<I, E>;
+ using WrapperTrait = std::true_type;
+ MapperIdentifier v;
+};
+
+// V5.2: [15.8.1] `memory-order` clauses
+// When used as arguments for other clauses, e.g. `fail`.
+ENUM(MemoryOrder, AcqRel, Acquire, Relaxed, Release, SeqCst);
+ENUM(MotionExpectation, Present);
+// V5.2: [15.9.1] `task-dependence-type` modifier
+ENUM(TaskDependenceType, In, Out, Inout, Mutexinoutset, Inoutset, Depobj);
+
+template <typename I, typename E> //
+struct LoopIterationT {
+ struct Distance {
+ using TupleTrait = std::true_type;
+ std::tuple<DefinedOperatorT<I, E>, E> t;
+ };
+ using TupleTrait = std::true_type;
+ std::tuple<ObjectT<I, E>, OPT(Distance)> t;
+};
+
+template <typename I, typename E> //
+struct ProcedureDesignatorT {
+ using WrapperTrait = std::true_type;
+ ObjectT<I, E> v;
+};
+
+// Note:
+// For reduction clauses the OpenMP spec allows a unique reduction identifier.
+// For reasons analogous to those listed for the MapperT type, clauses that
+// according to the spec contain a reduction identifier will contain a list of
+// reduction identifiers. The same constraints apply: there is either a single
+// identifier that applies to all objects, or there are as many identifiers
+// as there are objects.
+template <typename I, typename E> //
+struct ReductionIdentifierT {
+ using UnionTrait = std::true_type;
+ std::variant<DefinedOperatorT<I, E>, ProcedureDesignatorT<I, E>> u;
+};
+
+template <typename T, typename I, typename E> //
+using IteratorT = ListT<IteratorSpecifierT<T, I, E>>;
+} // namespace type
+
+template <typename T> using ListT = type::ListT<T>;
+
+template <typename I, typename E> using ObjectT = type::ObjectT<I, E>;
+template <typename I, typename E> using ObjectListT = type::ObjectListT<I, E>;
+
+template <typename T, typename I, typename E>
+using IteratorT = type::IteratorT<T, I, E>;
+
+template <
+ typename ContainerTy, typename FunctionTy,
+ typename ElemTy = typename llvm::remove_cvref_t<ContainerTy>::value_type,
+ typename ResultTy = std::invoke_result_t<FunctionTy, ElemTy>>
+ListT<ResultTy> makeList(ContainerTy &&container, FunctionTy &&func) {
+ ListT<ResultTy> v;
+ llvm::transform(container, std::back_inserter(v), func);
+ return v;
+}
+
+namespace clause {
+// V5.2: [8.3.1] `assumption` clauses
+template <typename T, typename I, typename E> //
+struct AbsentT {
+ using List = ListT<type::DirectiveName>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [15.8.1] `memory-order` clauses
+template <typename T, typename I, typename E> //
+struct AcqRelT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [15.8.1] `memory-order` clauses
+template <typename T, typename I, typename E> //
+struct AcquireT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [7.5.2] `adjust_args` clause
+template <typename T, typename I, typename E> //
+struct AdjustArgsT {
+ using IncompleteTrait = std::true_type;
+};
+
+// V5.2: [12.5.1] `affinity` clause
+template <typename T, typename I, typename E> //
+struct AffinityT {
+ using Iterator = type::IteratorT<T, I, E>;
+ using LocatorList = ObjectListT<I, E>;
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Iterator), LocatorList> t;
+};
+
+// V5.2: [6.3] `align` clause
+template <typename T, typename I, typename E> //
+struct AlignT {
+ using Alignment = E;
+
+ using WrapperTrait = std::true_type;
+ Alignment v;
+};
+
+// V5.2: [5.11] `aligned` clause
+template <typename T, typename I, typename E> //
+struct AlignedT {
+ using Alignment = E;
+ using List = ObjectListT<I, E>;
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Alignment), List> t;
+};
+
+template <typename T, typename I, typename E> //
+struct AllocatorT;
+
+// V5.2: [6.6] `allocate` clause
+template <typename T, typename I, typename E> //
+struct AllocateT {
+ using AllocatorSimpleModifier = E;
+ using AllocatorComplexModifier = AllocatorT<T, I, E>;
+ using AlignModifier = AlignT<T, I, E>;
+ using List = ObjectListT<I, E>;
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(AllocatorSimpleModifier), OPT(AllocatorComplexModifier),
+ OPT(AlignModifier), List>
+ t;
+};
+
+// V5.2: [6.4] `allocator` clause
+template <typename T, typename I, typename E> //
+struct AllocatorT {
+ using Allocator = E;
+ using WrapperTrait = std::true_type;
+ Allocator v;
+};
+
+// V5.2: [7.5.3] `append_args` clause
+template <typename T, typename I, typename E> //
+struct AppendArgsT {
+ using IncompleteTrait = std::true_type;
+};
+
+// V5.2: [8.1] `at` clause
+template <typename T, typename I, typename E> //
+struct AtT {
+ ENUM(ActionTime, Compilation, Execution);
+ using WrapperTrait = std::true_type;
+ ActionTime v;
+};
+
+// V5.2: [8.2.1] `requirement` clauses
+template <typename T, typename I, typename E> //
+struct AtomicDefaultMemOrderT {
+ using MemoryOrder = type::MemoryOrder;
+ using WrapperTrait = std::true_type;
+ MemoryOrder v; // Name not provided in spec
+};
+
+// V5.2: [11.7.1] `bind` clause
+template <typename T, typename I, typename E> //
+struct BindT {
+ ENUM(Binding, Teams, Parallel, Thread);
+ using WrapperTrait = std::true_type;
+ Binding v;
+};
+
+// V5.2: [15.8.3] `extended-atomic` clauses
+template <typename T, typename I, typename E> //
+struct CaptureT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [4.4.3] `collapse` clause
+template <typename T, typename I, typename E> //
+struct CollapseT {
+ using N = E;
+ using WrapperTrait = std::true_type;
+ N v;
+};
+
+// V5.2: [15.8.3] `extended-atomic` clauses
+template <typename T, typename I, typename E> //
+struct CompareT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.3.1] `assumption` clauses
+template <typename T, typename I, typename E> //
+struct ContainsT {
+ using List = ListT<type::DirectiveName>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.7.1] `copyin` clause
+template <typename T, typename I, typename E> //
+struct CopyinT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.7.2] `copyprivate` clause
+template <typename T, typename I, typename E> //
+struct CopyprivateT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.4.1] `default` clause
+template <typename T, typename I, typename E> //
+struct DefaultT {
+ ENUM(DataSharingAttribute, Firstprivate, None, Private, Shared);
+ using WrapperTrait = std::true_type;
+ DataSharingAttribute v;
+};
+
+// V5.2: [5.8.7] `defaultmap` clause
+template <typename T, typename I, typename E> //
+struct DefaultmapT {
+ ENUM(ImplicitBehavior, Alloc, To, From, Tofrom, Firstprivate, None, Default,
+ Present);
+ ENUM(VariableCategory, Scalar, Aggregate, Pointer, Allocatable);
+ using TupleTrait = std::true_type;
+ std::tuple<ImplicitBehavior, OPT(VariableCategory)> t;
+};
+
+template <typename T, typename I, typename E> //
+struct DoacrossT;
+
+// V5.2: [15.9.5] `depend` clause
+template <typename T, typename I, typename E> //
+struct DependT {
+ using Iterator = type::IteratorT<T, I, E>;
+ using LocatorList = ObjectListT<I, E>;
+ using TaskDependenceType = tomp::type::TaskDependenceType;
+
+ struct WithLocators { // Modern form
+ using TupleTrait = std::true_type;
+ // Empty LocatorList means "omp_all_memory".
+ std::tuple<TaskDependenceType, OPT(Iterator), LocatorList> t;
+ };
+
+ using Doacross = DoacrossT<T, I, E>;
+ using UnionTrait = std::true_type;
+ std::variant<Doacross, WithLocators> u; // Doacross form is legacy
+};
+
+// V5.2: [3.5] `destroy` clause
+template <typename T, typename I, typename E> //
+struct DestroyT {
+ using DestroyVar = ObjectT<I, E>;
+ using WrapperTrait = std::true_type;
+  // DestroyVar can be omitted in "depobj destroy".
+ OPT(DestroyVar) v;
+};
+
+// V5.2: [12.5.2] `detach` clause
+template <typename T, typename I, typename E> //
+struct DetachT {
+ using EventHandle = ObjectT<I, E>;
+ using WrapperTrait = std::true_type;
+ EventHandle v;
+};
+
+// V5.2: [13.2] `device` clause
+template <typename T, typename I, typename E> //
+struct DeviceT {
+ using DeviceDescription = E;
+ ENUM(DeviceModifier, Ancestor, DeviceNum);
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(DeviceModifier), DeviceDescription> t;
+};
+
+// V5.2: [13.1] `device_type` clause
+template <typename T, typename I, typename E> //
+struct DeviceTypeT {
+ ENUM(DeviceTypeDescription, Any, Host, Nohost);
+ using WrapperTrait = std::true_type;
+ DeviceTypeDescription v;
+};
+
+// V5.2: [11.6.1] `dist_schedule` clause
+template <typename T, typename I, typename E> //
+struct DistScheduleT {
+ ENUM(Kind, Static);
+ using ChunkSize = E;
+ using TupleTrait = std::true_type;
+ std::tuple<Kind, OPT(ChunkSize)> t;
+};
+
+// V5.2: [15.9.6] `doacross` clause
+template <typename T, typename I, typename E> //
+struct DoacrossT {
+ using Vector = ListT<type::LoopIterationT<I, E>>;
+ ENUM(DependenceType, Source, Sink);
+ using TupleTrait = std::true_type;
+ // Empty Vector means "omp_cur_iteration"
+ std::tuple<DependenceType, Vector> t;
+};
+
+// V5.2: [8.2.1] `requirement` clauses
+template <typename T, typename I, typename E> //
+struct DynamicAllocatorsT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [5.8.4] `enter` clause
+template <typename T, typename I, typename E> //
+struct EnterT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.6.2] `exclusive` clause
+template <typename T, typename I, typename E> //
+struct ExclusiveT {
+ using WrapperTrait = std::true_type;
+ using List = ObjectListT<I, E>;
+ List v;
+};
+
+// V5.2: [15.8.3] `extended-atomic` clauses
+template <typename T, typename I, typename E> //
+struct FailT {
+ using MemoryOrder = type::MemoryOrder;
+ using WrapperTrait = std::true_type;
+ MemoryOrder v;
+};
+
+// V5.2: [10.5.1] `filter` clause
+template <typename T, typename I, typename E> //
+struct FilterT {
+ using ThreadNum = E;
+ using WrapperTrait = std::true_type;
+ ThreadNum v;
+};
+
+// V5.2: [12.3] `final` clause
+template <typename T, typename I, typename E> //
+struct FinalT {
+ using Finalize = E;
+ using WrapperTrait = std::true_type;
+ Finalize v;
+};
+
+// V5.2: [5.4.4] `firstprivate` clause
+template <typename T, typename I, typename E> //
+struct FirstprivateT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.9.2] `from` clause
+template <typename T, typename I, typename E> //
+struct FromT {
+ using LocatorList = ObjectListT<I, E>;
+ using Expectation = type::MotionExpectation;
+ using Iterator = type::IteratorT<T, I, E>;
+ // See note at the definition of the MapperT type.
+ using Mappers = ListT<type::MapperT<I, E>>; // Not a spec name
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Expectation), OPT(Mappers), OPT(Iterator), LocatorList> t;
+};
+
+// V5.2: [9.2.1] `full` clause
+template <typename T, typename I, typename E> //
+struct FullT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [12.6.1] `grainsize` clause
+template <typename T, typename I, typename E> //
+struct GrainsizeT {
+ ENUM(Prescriptiveness, Strict);
+ using GrainSize = E;
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Prescriptiveness), GrainSize> t;
+};
+
+// V5.2: [5.4.9] `has_device_addr` clause
+template <typename T, typename I, typename E> //
+struct HasDeviceAddrT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [15.1.2] `hint` clause
+template <typename T, typename I, typename E> //
+struct HintT {
+ using HintExpr = E;
+ using WrapperTrait = std::true_type;
+ HintExpr v;
+};
+
+// V5.2: [8.3.1] Assumption clauses
+template <typename T, typename I, typename E> //
+struct HoldsT {
+ using WrapperTrait = std::true_type;
+ E v; // No argument name in spec 5.2
+};
+
+// V5.2: [3.4] `if` clause
+template <typename T, typename I, typename E> //
+struct IfT {
+ using DirectiveNameModifier = type::DirectiveName;
+ using IfExpression = E;
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(DirectiveNameModifier), IfExpression> t;
+};
+
+// V5.2: [7.7.1] `branch` clauses
+template <typename T, typename I, typename E> //
+struct InbranchT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [5.6.1] `inclusive` clause
+template <typename T, typename I, typename E> //
+struct InclusiveT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [7.8.3] `indirect` clause
+template <typename T, typename I, typename E> //
+struct IndirectT {
+ using InvokedByFptr = E;
+ using WrapperTrait = std::true_type;
+ InvokedByFptr v;
+};
+
+// V5.2: [14.1.2] `init` clause
+template <typename T, typename I, typename E> //
+struct InitT {
+ using ForeignRuntimeId = E;
+ using InteropVar = ObjectT<I, E>;
+ using InteropPreference = ListT<ForeignRuntimeId>;
+ ENUM(InteropType, Target, Targetsync); // Repeatable
+ using InteropTypes = ListT<InteropType>; // Not a spec name
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(InteropPreference), InteropTypes, InteropVar> t;
+};
+
+// V5.2: [5.5.4] `initializer` clause
+template <typename T, typename I, typename E> //
+struct InitializerT {
+ using InitializerExpr = E;
+ using WrapperTrait = std::true_type;
+ InitializerExpr v;
+};
+
+// V5.2: [5.5.10] `in_reduction` clause
+template <typename T, typename I, typename E> //
+struct InReductionT {
+ using List = ObjectListT<I, E>;
+ // See note at the definition of the ReductionIdentifierT type.
+ // The name ReductionIdentifiers is not a spec name.
+ using ReductionIdentifiers = ListT<type::ReductionIdentifierT<I, E>>;
+ using TupleTrait = std::true_type;
+ std::tuple<ReductionIdentifiers, List> t;
+};
+
+// V5.2: [5.4.7] `is_device_ptr` clause
+template <typename T, typename I, typename E> //
+struct IsDevicePtrT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.4.5] `lastprivate` clause
+template <typename T, typename I, typename E> //
+struct LastprivateT {
+ using List = ObjectListT<I, E>;
+ ENUM(LastprivateModifier, Conditional);
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(LastprivateModifier), List> t;
+};
+
+// V5.2: [5.4.6] `linear` clause
+template <typename T, typename I, typename E> //
+struct LinearT {
+ // std::get<type> won't work here due to duplicate types in the tuple.
+ using List = ObjectListT<I, E>;
+ using StepSimpleModifier = E;
+ using StepComplexModifier = E;
+ ENUM(LinearModifier, Ref, Val, Uval);
+
+ using TupleTrait = std::true_type;
+ // Step == nullptr means 1.
+ std::tuple<OPT(StepSimpleModifier), OPT(StepComplexModifier),
+ OPT(LinearModifier), List>
+ t;
+};
+
+// V5.2: [5.8.5] `link` clause
+template <typename T, typename I, typename E> //
+struct LinkT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.8.3] `map` clause
+template <typename T, typename I, typename E> //
+struct MapT {
+ using LocatorList = ObjectListT<I, E>;
+ ENUM(MapType, To, From, Tofrom, Alloc, Release, Delete);
+ ENUM(MapTypeModifier, Always, Close, Present, OmpxHold);
+ // See note at the definition of the MapperT type.
+ using Mappers = ListT<type::MapperT<I, E>>; // Not a spec name
+ using Iterator = type::IteratorT<T, I, E>;
+ using MapTypeModifiers = ListT<MapTypeModifier>; // Not a spec name
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(MapType), OPT(MapTypeModifiers), OPT(Mappers), OPT(Iterator),
+ LocatorList>
+ t;
+};
+
+// V5.2: [7.5.1] `match` clause
+template <typename T, typename I, typename E> //
+struct MatchT {
+ using IncompleteTrait = std::true_type;
+};
+
+// V5.2: [12.2] `mergeable` clause
+template <typename T, typename I, typename E> //
+struct MergeableT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.5.2] `message` clause
+template <typename T, typename I, typename E> //
+struct MessageT {
+ using MsgString = E;
+ using WrapperTrait = std::true_type;
+ MsgString v;
+};
+
+// V5.2: [7.6.2] `nocontext` clause
+template <typename T, typename I, typename E> //
+struct NocontextT {
+ using DoNotUpdateContext = E;
+ using WrapperTrait = std::true_type;
+ DoNotUpdateContext v;
+};
+
+// V5.2: [15.7] `nogroup` clause
+template <typename T, typename I, typename E> //
+struct NogroupT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [10.4.1] `nontemporal` clause
+template <typename T, typename I, typename E> //
+struct NontemporalT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [8.3.1] `assumption` clauses
+template <typename T, typename I, typename E> //
+struct NoOpenmpT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.3.1] `assumption` clauses
+template <typename T, typename I, typename E> //
+struct NoOpenmpRoutinesT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.3.1] `assumption` clauses
+template <typename T, typename I, typename E> //
+struct NoParallelismT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [7.7.1] `branch` clauses
+template <typename T, typename I, typename E> //
+struct NotinbranchT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [7.6.1] `novariants` clause
+template <typename T, typename I, typename E> //
+struct NovariantsT {
+ using DoNotUseVariant = E;
+ using WrapperTrait = std::true_type;
+ DoNotUseVariant v;
+};
+
+// V5.2: [15.6] `nowait` clause
+template <typename T, typename I, typename E> //
+struct NowaitT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [12.6.2] `num_tasks` clause
+template <typename T, typename I, typename E> //
+struct NumTasksT {
+ using NumTasks = E;
+ ENUM(Prescriptiveness, Strict);
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Prescriptiveness), NumTasks> t;
+};
+
+// V5.2: [10.2.1] `num_teams` clause
+template <typename T, typename I, typename E> //
+struct NumTeamsT {
+ using TupleTrait = std::true_type;
+ using LowerBound = E;
+ using UpperBound = E;
+ std::tuple<OPT(LowerBound), UpperBound> t;
+};
+
+// V5.2: [10.1.2] `num_threads` clause
+template <typename T, typename I, typename E> //
+struct NumThreadsT {
+ using Nthreads = E;
+ using WrapperTrait = std::true_type;
+ Nthreads v;
+};
+
+template <typename T, typename I, typename E> //
+struct OmpxAttributeT {
+ using EmptyTrait = std::true_type;
+};
+
+template <typename T, typename I, typename E> //
+struct OmpxBareT {
+ using EmptyTrait = std::true_type;
+};
+
+template <typename T, typename I, typename E> //
+struct OmpxDynCgroupMemT {
+ using WrapperTrait = std::true_type;
+ E v;
+};
+
+// V5.2: [10.3] `order` clause
+template <typename T, typename I, typename E> //
+struct OrderT {
+ ENUM(OrderModifier, Reproducible, Unconstrained);
+ ENUM(Ordering, Concurrent);
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(OrderModifier), Ordering> t;
+};
+
+// V5.2: [4.4.4] `ordered` clause
+template <typename T, typename I, typename E> //
+struct OrderedT {
+ using N = E;
+ using WrapperTrait = std::true_type;
+ OPT(N) v;
+};
+
+// V5.2: [7.4.2] `otherwise` clause
+template <typename T, typename I, typename E> //
+struct OtherwiseT {
+ using IncompleteTrait = std::true_type;
+};
+
+// V5.2: [9.2.2] `partial` clause
+template <typename T, typename I, typename E> //
+struct PartialT {
+ using UnrollFactor = E;
+ using WrapperTrait = std::true_type;
+ OPT(UnrollFactor) v;
+};
+
+// V5.2: [12.4] `priority` clause
+template <typename T, typename I, typename E> //
+struct PriorityT {
+ using PriorityValue = E;
+ using WrapperTrait = std::true_type;
+ PriorityValue v;
+};
+
+// V5.2: [5.4.3] `private` clause
+template <typename T, typename I, typename E> //
+struct PrivateT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [10.1.4] `proc_bind` clause
+template <typename T, typename I, typename E> //
+struct ProcBindT {
+ ENUM(AffinityPolicy, Close, Master, Spread, Primary);
+ using WrapperTrait = std::true_type;
+ AffinityPolicy v;
+};
+
+// V5.2: [15.8.2] Atomic clauses
+template <typename T, typename I, typename E> //
+struct ReadT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [5.5.8] `reduction` clause
+template <typename T, typename I, typename E> //
+struct ReductionT {
+ using List = ObjectListT<I, E>;
+ // See note at the definition of the ReductionIdentifierT type.
+ // The name ReductionIdentifiers is not a spec name.
+ using ReductionIdentifiers = ListT<type::ReductionIdentifierT<I, E>>;
+ ENUM(ReductionModifier, Default, Inscan, Task);
+ using TupleTrait = std::true_type;
+ std::tuple<ReductionIdentifiers, OPT(ReductionModifier), List> t;
+};
+
+// V5.2: [15.8.1] `memory-order` clauses
+template <typename T, typename I, typename E> //
+struct RelaxedT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [15.8.1] `memory-order` clauses
+template <typename T, typename I, typename E> //
+struct ReleaseT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.2.1] `requirement` clauses
+template <typename T, typename I, typename E> //
+struct ReverseOffloadT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [10.4.2] `safelen` clause
+template <typename T, typename I, typename E> //
+struct SafelenT {
+ using Length = E;
+ using WrapperTrait = std::true_type;
+ Length v;
+};
+
+// V5.2: [11.5.3] `schedule` clause
+template <typename T, typename I, typename E> //
+struct ScheduleT {
+ ENUM(Kind, Static, Dynamic, Guided, Auto, Runtime);
+ using ChunkSize = E;
+ ENUM(OrderingModifier, Monotonic, Nonmonotonic);
+ ENUM(ChunkModifier, Simd);
+ using TupleTrait = std::true_type;
+ std::tuple<Kind, OPT(OrderingModifier), OPT(ChunkModifier), OPT(ChunkSize)> t;
+};
+
+// V5.2: [15.8.1] Memory-order clauses
+template <typename T, typename I, typename E> //
+struct SeqCstT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.5.1] `severity` clause
+template <typename T, typename I, typename E> //
+struct SeverityT {
+ ENUM(SevLevel, Fatal, Warning);
+ using WrapperTrait = std::true_type;
+ SevLevel v;
+};
+
+// V5.2: [5.4.2] `shared` clause
+template <typename T, typename I, typename E> //
+struct SharedT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [15.10.3] `parallelization-level` clauses
+template <typename T, typename I, typename E> //
+struct SimdT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [10.4.3] `simdlen` clause
+template <typename T, typename I, typename E> //
+struct SimdlenT {
+ using Length = E;
+ using WrapperTrait = std::true_type;
+ Length v;
+};
+
+// V5.2: [9.1.1] `sizes` clause
+template <typename T, typename I, typename E> //
+struct SizesT {
+ using SizeList = ListT<E>;
+ using WrapperTrait = std::true_type;
+ SizeList v;
+};
+
+// V5.2: [5.5.9] `task_reduction` clause
+template <typename T, typename I, typename E> //
+struct TaskReductionT {
+ using List = ObjectListT<I, E>;
+ // See note at the definition of the ReductionIdentifierT type.
+ // The name ReductionIdentifiers is not a spec name.
+ using ReductionIdentifiers = ListT<type::ReductionIdentifierT<I, E>>;
+ using TupleTrait = std::true_type;
+ std::tuple<ReductionIdentifiers, List> t;
+};
+
+// V5.2: [13.3] `thread_limit` clause
+template <typename T, typename I, typename E> //
+struct ThreadLimitT {
+ using Threadlim = E;
+ using WrapperTrait = std::true_type;
+ Threadlim v;
+};
+
+// V5.2: [15.10.3] `parallelization-level` clauses
+template <typename T, typename I, typename E> //
+struct ThreadsT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [5.9.1] `to` clause
+template <typename T, typename I, typename E> //
+struct ToT {
+ using LocatorList = ObjectListT<I, E>;
+ using Expectation = type::MotionExpectation;
+ // See note at the definition of the MapperT type.
+ using Mappers = ListT<type::MapperT<I, E>>; // Not a spec name
+ using Iterator = type::IteratorT<T, I, E>;
+
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Expectation), OPT(Mappers), OPT(Iterator), LocatorList> t;
+};
+
+// V5.2: [8.2.1] `requirement` clauses
+template <typename T, typename I, typename E> //
+struct UnifiedAddressT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [8.2.1] `requirement` clauses
+template <typename T, typename I, typename E> //
+struct UnifiedSharedMemoryT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [5.10] `uniform` clause
+template <typename T, typename I, typename E> //
+struct UniformT {
+ using ParameterList = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ ParameterList v;
+};
+
+template <typename T, typename I, typename E> //
+struct UnknownT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [12.1] `untied` clause
+template <typename T, typename I, typename E> //
+struct UntiedT {
+ using EmptyTrait = std::true_type;
+};
+
+// Both of the following
+// V5.2: [15.8.2] `atomic` clauses
+// V5.2: [15.9.3] `update` clause
+template <typename T, typename I, typename E> //
+struct UpdateT {
+ using TaskDependenceType = tomp::type::TaskDependenceType;
+ using WrapperTrait = std::true_type;
+ OPT(TaskDependenceType) v;
+};
+
+// V5.2: [14.1.3] `use` clause
+template <typename T, typename I, typename E> //
+struct UseT {
+ using InteropVar = ObjectT<I, E>;
+ using WrapperTrait = std::true_type;
+ InteropVar v;
+};
+
+// V5.2: [5.4.10] `use_device_addr` clause
+template <typename T, typename I, typename E> //
+struct UseDeviceAddrT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [5.4.8] `use_device_ptr` clause
+template <typename T, typename I, typename E> //
+struct UseDevicePtrT {
+ using List = ObjectListT<I, E>;
+ using WrapperTrait = std::true_type;
+ List v;
+};
+
+// V5.2: [6.8] `uses_allocators` clause
+template <typename T, typename I, typename E> //
+struct UsesAllocatorsT {
+ using MemSpace = E;
+ using TraitsArray = ObjectT<I, E>;
+ using Allocator = E;
+ using AllocatorSpec =
+ std::tuple<OPT(MemSpace), OPT(TraitsArray), Allocator>; // Not a spec name
+ using Allocators = ListT<AllocatorSpec>; // Not a spec name
+ using WrapperTrait = std::true_type;
+ Allocators v;
+};
+
+// V5.2: [15.8.3] `extended-atomic` clauses
+template <typename T, typename I, typename E> //
+struct WeakT {
+ using EmptyTrait = std::true_type;
+};
+
+// V5.2: [7.4.1] `when` clause
+template <typename T, typename I, typename E> //
+struct WhenT {
+ using IncompleteTrait = std::true_type;
+};
+
+// V5.2: [15.8.2] Atomic clauses
+template <typename T, typename I, typename E> //
+struct WriteT {
+ using EmptyTrait = std::true_type;
+};
+
+// ---
+
+template <typename T, typename I, typename E>
+using ExtensionClausesT =
+ std::variant<OmpxAttributeT<T, I, E>, OmpxBareT<T, I, E>,
+ OmpxDynCgroupMemT<T, I, E>>;
+
+template <typename T, typename I, typename E>
+using EmptyClausesT = std::variant<
+ AcqRelT<T, I, E>, AcquireT<T, I, E>, CaptureT<T, I, E>, CompareT<T, I, E>,
+ DynamicAllocatorsT<T, I, E>, FullT<T, I, E>, InbranchT<T, I, E>,
+ MergeableT<T, I, E>, NogroupT<T, I, E>, NoOpenmpRoutinesT<T, I, E>,
+ NoOpenmpT<T, I, E>, NoParallelismT<T, I, E>, NotinbranchT<T, I, E>,
+ NowaitT<T, I, E>, ReadT<T, I, E>, RelaxedT<T, I, E>, ReleaseT<T, I, E>,
+ ReverseOffloadT<T, I, E>, SeqCstT<T, I, E>, SimdT<T, I, E>,
+ ThreadsT<T, I, E>, UnifiedAddressT<T, I, E>, UnifiedSharedMemoryT<T, I, E>,
+ UnknownT<T, I, E>, UntiedT<T, I, E>, UseT<T, I, E>, WeakT<T, I, E>,
+ WriteT<T, I, E>>;
+
+template <typename T, typename I, typename E>
+using IncompleteClausesT =
+ std::variant<AdjustArgsT<T, I, E>, AppendArgsT<T, I, E>, MatchT<T, I, E>,
+ OtherwiseT<T, I, E>, WhenT<T, I, E>>;
+
+template <typename T, typename I, typename E>
+using TupleClausesT =
+ std::variant<AffinityT<T, I, E>, AlignedT<T, I, E>, AllocateT<T, I, E>,
+ DefaultmapT<T, I, E>, DeviceT<T, I, E>, DistScheduleT<T, I, E>,
+ DoacrossT<T, I, E>, FromT<T, I, E>, GrainsizeT<T, I, E>,
+ IfT<T, I, E>, InitT<T, I, E>, InReductionT<T, I, E>,
+ LastprivateT<T, I, E>, LinearT<T, I, E>, MapT<T, I, E>,
+ NumTasksT<T, I, E>, OrderT<T, I, E>, ReductionT<T, I, E>,
+ ScheduleT<T, I, E>, TaskReductionT<T, I, E>, ToT<T, I, E>>;
+
+template <typename T, typename I, typename E>
+using UnionClausesT = std::variant<DependT<T, I, E>>;
+
+template <typename T, typename I, typename E>
+using WrapperClausesT = std::variant<
+ AbsentT<T, I, E>, AlignT<T, I, E>, AllocatorT<T, I, E>,
+ AtomicDefaultMemOrderT<T, I, E>, AtT<T, I, E>, BindT<T, I, E>,
+ CollapseT<T, I, E>, ContainsT<T, I, E>, CopyinT<T, I, E>,
+ CopyprivateT<T, I, E>, DefaultT<T, I, E>, DestroyT<T, I, E>,
+ DetachT<T, I, E>, DeviceTypeT<T, I, E>, EnterT<T, I, E>,
+ ExclusiveT<T, I, E>, FailT<T, I, E>, FilterT<T, I, E>, FinalT<T, I, E>,
+ FirstprivateT<T, I, E>, HasDeviceAddrT<T, I, E>, HintT<T, I, E>,
+ HoldsT<T, I, E>, InclusiveT<T, I, E>, IndirectT<T, I, E>,
+ InitializerT<T, I, E>, IsDevicePtrT<T, I, E>, LinkT<T, I, E>,
+ MessageT<T, I, E>, NocontextT<T, I, E>, NontemporalT<T, I, E>,
+ NovariantsT<T, I, E>, NumTeamsT<T, I, E>, NumThreadsT<T, I, E>,
+ OrderedT<T, I, E>, PartialT<T, I, E>, PriorityT<T, I, E>, PrivateT<T, I, E>,
+ ProcBindT<T, I, E>, SafelenT<T, I, E>, SeverityT<T, I, E>, SharedT<T, I, E>,
+ SimdlenT<T, I, E>, SizesT<T, I, E>, ThreadLimitT<T, I, E>,
+ UniformT<T, I, E>, UpdateT<T, I, E>, UseDeviceAddrT<T, I, E>,
+ UseDevicePtrT<T, I, E>, UsesAllocatorsT<T, I, E>>;
+
+template <typename T, typename I, typename E>
+using UnionOfAllClausesT = typename type::Union< //
+ EmptyClausesT<T, I, E>, //
+ ExtensionClausesT<T, I, E>, //
+ IncompleteClausesT<T, I, E>, //
+ TupleClausesT<T, I, E>, //
+ UnionClausesT<T, I, E>, //
+ WrapperClausesT<T, I, E> //
+ >::type;
+
+} // namespace clause
+
+// The variant wrapper that encapsulates all possible specific clauses.
+// The `Extras` arguments are additional types representing local extensions
+// to the clause set, e.g.
+//
+// using Clause = ClauseT<Type, Id, Expr,
+// MyClause1, MyClause2>;
+//
+// The member Clause::u will be a variant containing all specific clauses
+// defined above, plus MyClause1 and MyClause2.
+template <typename TypeType, typename IdType, typename ExprType,
+ typename... Extras>
+struct ClauseT {
+ using TypeTy = TypeType;
+ using IdTy = IdType;
+ using ExprTy = ExprType;
+
+ using VariantTy = typename type::Union<
+ clause::UnionOfAllClausesT<TypeType, IdType, ExprType>,
+ std::variant<Extras...>>::type;
+
+ llvm::omp::Clause id; // The numeric id of the clause
+ using UnionTrait = std::true_type;
+ VariantTy u;
+};
+
+} // namespace tomp
+
+#undef OPT
+#undef ENUM
+
+#endif // LLVM_FRONTEND_OPENMP_CLAUSET_H
diff --git a/llvm/include/llvm/IR/BasicBlock.h b/llvm/include/llvm/IR/BasicBlock.h
index 0eea4cdccca5..0c5a07bde4ec 100644
--- a/llvm/include/llvm/IR/BasicBlock.h
+++ b/llvm/include/llvm/IR/BasicBlock.h
@@ -791,7 +791,7 @@ template <> struct DenseMapInfo<BasicBlock::iterator> {
static unsigned getHashValue(const BasicBlock::iterator &It) {
return DenseMapInfo<void *>::getHashValue(
reinterpret_cast<void *>(It.getNodePtr())) ^
- It.getHeadBit();
+ (unsigned)It.getHeadBit();
}
static bool isEqual(const BasicBlock::iterator &LHS,
diff --git a/llvm/include/llvm/IR/CallingConv.h b/llvm/include/llvm/IR/CallingConv.h
index ef8aaf52f4e6..a05d1a4d5878 100644
--- a/llvm/include/llvm/IR/CallingConv.h
+++ b/llvm/include/llvm/IR/CallingConv.h
@@ -264,6 +264,9 @@ namespace CallingConv {
/// except that the first parameter is mapped to x9.
ARM64EC_Thunk_Native = 109,
+ /// Calling convention used for RISC-V V-extension.
+ RISCV_VectorCall = 110,
+
/// The highest possible ID. Must be some 2^k - 1.
MaxID = 1023
};
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index e50cd1f1c73e..4290ef4486c6 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -1046,8 +1046,7 @@ public:
///
static Constant *getSizeOf(Type *Ty);
- static Constant *getNeg(Constant *C, bool HasNUW = false,
- bool HasNSW = false);
+ static Constant *getNeg(Constant *C, bool HasNSW = false);
static Constant *getNot(Constant *C);
static Constant *getAdd(Constant *C1, Constant *C2, bool HasNUW = false,
bool HasNSW = false);
@@ -1068,8 +1067,7 @@ public:
static Constant *getAddrSpaceCast(Constant *C, Type *Ty,
bool OnlyIfReduced = false);
- static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
- static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
+ static Constant *getNSWNeg(Constant *C) { return getNeg(C, /*HasNSW=*/true); }
static Constant *getNSWAdd(Constant *C1, Constant *C2) {
return getAdd(C1, C2, false, true);
diff --git a/llvm/include/llvm/IR/DiagnosticHandler.h b/llvm/include/llvm/IR/DiagnosticHandler.h
index db7d7444f75f..1f0544b9fa9f 100644
--- a/llvm/include/llvm/IR/DiagnosticHandler.h
+++ b/llvm/include/llvm/IR/DiagnosticHandler.h
@@ -28,7 +28,7 @@ struct DiagnosticHandler {
: DiagnosticContext(DiagContext) {}
virtual ~DiagnosticHandler() = default;
- using DiagnosticHandlerTy = void (*)(const DiagnosticInfo &DI, void *Context);
+ using DiagnosticHandlerTy = void (*)(const DiagnosticInfo *DI, void *Context);
/// DiagHandlerCallback is settable from the C API and base implementation
/// of DiagnosticHandler will call it from handleDiagnostics(). Any derived
@@ -42,7 +42,7 @@ struct DiagnosticHandler {
/// with a prefix based on the severity.
virtual bool handleDiagnostics(const DiagnosticInfo &DI) {
if (DiagHandlerCallback) {
- DiagHandlerCallback(DI, DiagnosticContext);
+ DiagHandlerCallback(&DI, DiagnosticContext);
return true;
}
return false;
diff --git a/llvm/include/llvm/IR/GlobalValue.h b/llvm/include/llvm/IR/GlobalValue.h
index aa8188cd99fe..c61d502aa332 100644
--- a/llvm/include/llvm/IR/GlobalValue.h
+++ b/llvm/include/llvm/IR/GlobalValue.h
@@ -360,6 +360,7 @@ public:
// storage is shared between `G1` and `G2`.
void setSanitizerMetadata(SanitizerMetadata Meta);
void removeSanitizerMetadata();
+ void setNoSanitizeMetadata();
bool isTagged() const {
return hasSanitizerMetadata() && getSanitizerMetadata().Memtag;
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index c07ffea71151..2e2ec9a1c830 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -221,6 +221,12 @@ public:
AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
}
+ /// Set nosanitize metadata.
+ void SetNoSanitizeMetadata() {
+ AddOrRemoveMetadataToCopy(llvm::LLVMContext::MD_nosanitize,
+ llvm::MDNode::get(getContext(), std::nullopt));
+ }
+
/// Collect metadata with IDs \p MetadataKinds from \p Src which should be
/// added to all created instructions. Entries present in MedataDataToCopy but
/// not on \p Src will be dropped from MetadataToCopy.
@@ -1712,18 +1718,13 @@ public:
const Twine &Name = "", MDNode *FPMathTag = nullptr,
std::optional<fp::ExceptionBehavior> Except = std::nullopt);
- Value *CreateNeg(Value *V, const Twine &Name = "", bool HasNUW = false,
- bool HasNSW = false) {
- return CreateSub(Constant::getNullValue(V->getType()), V, Name, HasNUW,
- HasNSW);
+ Value *CreateNeg(Value *V, const Twine &Name = "", bool HasNSW = false) {
+ return CreateSub(Constant::getNullValue(V->getType()), V, Name,
+ /*HasNUW=*/0, HasNSW);
}
Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
- return CreateNeg(V, Name, false, true);
- }
-
- Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
- return CreateNeg(V, Name, true, false);
+ return CreateNeg(V, Name, /*HasNSW=*/true);
}
Value *CreateFNeg(Value *V, const Twine &Name = "",
@@ -2707,6 +2708,7 @@ public:
IRBuilder(const IRBuilder &) = delete;
InserterTy &getInserter() { return Inserter; }
+ const InserterTy &getInserter() const { return Inserter; }
};
template <typename FolderTy, typename InserterTy>
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index e8c2cba8418d..e4e5fa15c399 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -476,12 +476,6 @@ public:
Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
- static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
- BasicBlock::iterator InsertBefore);
- static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = nullptr);
- static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
- BasicBlock *InsertAtEnd);
static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
BasicBlock::iterator InsertBefore);
static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
@@ -1038,7 +1032,7 @@ public:
/// the two operands. Insert the instruction into a BasicBlock right before
/// the specified instruction.
/// Create a CmpInst
- static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ static CmpInst *Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2,
const Twine &Name, BasicBlock::iterator InsertBefore);
/// Construct a compare instruction, given the opcode, the predicate and
@@ -1046,17 +1040,28 @@ public:
/// instruction into a BasicBlock right before the specified instruction.
/// The specified Instruction is allowed to be a dereferenced end iterator.
/// Create a CmpInst
- static CmpInst *Create(OtherOps Op,
- Predicate predicate, Value *S1,
- Value *S2, const Twine &Name = "",
+ static CmpInst *Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2,
+ const Twine &Name = "",
Instruction *InsertBefore = nullptr);
/// Construct a compare instruction, given the opcode, the predicate and the
/// two operands. Also automatically insert this instruction to the end of
/// the BasicBlock specified.
/// Create a CmpInst
- static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
- Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
+ static CmpInst *Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2,
+ const Twine &Name, BasicBlock *InsertAtEnd);
+
+ /// Construct a compare instruction, given the opcode, the predicate,
+ /// the two operands and the instruction to copy the flags from. Optionally
+ /// (if InstBefore is specified) insert the instruction into a BasicBlock
+ /// right before the specified instruction. The specified Instruction is
+ /// allowed to be a dereferenced end iterator.
+ /// Create a CmpInst
+ static CmpInst *CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
+ Value *S2,
+ const Instruction *FlagsSource,
+ const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
/// Get the opcode casted to the right type
OtherOps getOpcode() const {
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 4e4cf71a349d..4ffa6349871b 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -5345,6 +5345,8 @@ protected:
TruncInst *cloneImpl() const;
public:
+ enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) };
+
/// Constructor with insert-before-instruction semantics
TruncInst(
Value *S, ///< The value to be truncated
@@ -5376,6 +5378,39 @@ public:
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
+
+ void setHasNoUnsignedWrap(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
+ }
+ void setHasNoSignedWrap(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
+ }
+
+ /// Test whether this operation is known to never
+ /// undergo unsigned overflow, aka the nuw property.
+ bool hasNoUnsignedWrap() const {
+ return SubclassOptionalData & NoUnsignedWrap;
+ }
+
+ /// Test whether this operation is known to never
+ /// undergo signed overflow, aka the nsw property.
+ bool hasNoSignedWrap() const {
+ return (SubclassOptionalData & NoSignedWrap) != 0;
+ }
+
+ /// Returns the no-wrap kind of the operation.
+ unsigned getNoWrapKind() const {
+ unsigned NoWrapKind = 0;
+ if (hasNoUnsignedWrap())
+ NoWrapKind |= NoUnsignedWrap;
+
+ if (hasNoSignedWrap())
+ NoWrapKind |= NoSignedWrap;
+
+ return NoWrapKind;
+ }
};
//===----------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index c07b83a81a63..4f22720f1c55 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1782,6 +1782,19 @@ public:
static bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
+
+ // Returns the convergence intrinsic referenced by |I|'s convergencectrl
+ // attribute if any.
+ static IntrinsicInst *getParentConvergenceToken(Instruction *I) {
+ auto *CI = dyn_cast<llvm::CallInst>(I);
+ if (!CI)
+ return nullptr;
+
+ auto Bundle = CI->getOperandBundle(llvm::LLVMContext::OB_convergencectrl);
+ assert(Bundle->Inputs.size() == 1 &&
+ Bundle->Inputs[0]->getType()->isTokenTy());
+ return dyn_cast<llvm::IntrinsicInst>(Bundle->Inputs[0].get());
+ }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index c0c447073799..c04f4c526921 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -700,10 +700,13 @@ class MSBuiltin<string name> {
//===--------------- Variable Argument Handling Intrinsics ----------------===//
//
-def int_vastart : DefaultAttrsIntrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
-def int_vacopy : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
- "llvm.va_copy">;
-def int_vaend : DefaultAttrsIntrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;
+def int_vastart : DefaultAttrsIntrinsic<[],
+ [llvm_anyptr_ty], [], "llvm.va_start">;
+def int_vacopy : DefaultAttrsIntrinsic<[],
+ [llvm_anyptr_ty, LLVMMatchType<0>], [],
+ "llvm.va_copy">;
+def int_vaend : DefaultAttrsIntrinsic<[],
+ [llvm_anyptr_ty], [], "llvm.va_end">;
//===------------------- Garbage Collection Intrinsics --------------------===//
//
@@ -1590,11 +1593,11 @@ def int_experimental_patchpoint_void : Intrinsic<[],
llvm_ptr_ty, llvm_i32_ty,
llvm_vararg_ty],
[Throws]>;
-def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
- [llvm_i64_ty, llvm_i32_ty,
- llvm_ptr_ty, llvm_i32_ty,
- llvm_vararg_ty],
- [Throws]>;
+def int_experimental_patchpoint : Intrinsic<[llvm_any_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_ptr_ty, llvm_i32_ty,
+ llvm_vararg_ty],
+ [Throws]>;
//===------------------------ Garbage Collection Intrinsics ---------------===//
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 051e603c0819..3de20bb44e0c 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2066,6 +2066,24 @@ def int_amdgcn_interp_inreg_p2_f16 :
[IntrNoMem, IntrSpeculatable,
ImmArg<ArgIndex<3>>]>;
+// llvm.amdgcn.interp.p10.rtz.f16 <p>, <i>, <p0>, <high>
+// gfx11+ fp16 interpolation intrinsic, with round-toward-zero rounding mode.
+// high selects whether high or low 16-bits are used for p and p0 operands
+def int_amdgcn_interp_p10_rtz_f16:
+ DefaultAttrsIntrinsic<[llvm_float_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
+ [IntrNoMem, IntrSpeculatable,
+ ImmArg<ArgIndex<3>>]>;
+
+// llvm.amdgcn.interp.p2.rtz.f16 <p>, <j>, <tmp>, <high>
+// gfx11+ fp16 interpolation intrinsic, with round-toward-zero rounding mode.
+// high selects whether high or low 16-bits are used for p operand
+def int_amdgcn_interp_p2_rtz_f16 :
+ DefaultAttrsIntrinsic<[llvm_half_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
+ [IntrNoMem, IntrSpeculatable,
+ ImmArg<ArgIndex<3>>]>;
+
// Deprecated: use llvm.amdgcn.live.mask instead.
def int_amdgcn_ps_live : DefaultAttrsIntrinsic <
[llvm_i1_ty],
@@ -2653,6 +2671,8 @@ class AMDGPUWmmaIntrinsicIU<LLVMType AB, LLVMType CD> :
// The OPSEL intrinsics read from and write to one half of the registers, selected by the op_sel bit.
// The tied versions of the f16/bf16 wmma intrinsics tie the destination matrix registers to the input accumulator registers.
// The content of the other 16-bit half is preserved from the input.
+
+defset list<Intrinsic> AMDGPUWMMAIntrinsicsGFX11 = {
def int_amdgcn_wmma_f16_16x16x16_f16_tied : AMDGPUWmmaIntrinsicOPSEL<llvm_anyfloat_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_bf16_16x16x16_bf16_tied : AMDGPUWmmaIntrinsicOPSEL<llvm_anyint_ty, llvm_anyint_ty>;
@@ -2668,6 +2688,7 @@ def int_amdgcn_wmma_i32_16x16x16_iu4 : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, l
// GFX12: The op_sel bit must be 0.
def int_amdgcn_wmma_f16_16x16x16_f16 : AMDGPUWmmaIntrinsicOPSEL<llvm_anyfloat_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_bf16_16x16x16_bf16 : AMDGPUWmmaIntrinsicOPSEL<llvm_anyint_ty, llvm_anyint_ty>;
+}
//===----------------------------------------------------------------------===//
// GFX12 Intrinsics
@@ -2687,20 +2708,6 @@ def int_amdgcn_permlanex16_var : ClangBuiltin<"__builtin_amdgcn_permlanex16_var"
[IntrNoMem, IntrConvergent, IntrWillReturn,
ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree]>;
-
-// WMMA (Wave Matrix Multiply-Accumulate) intrinsics
-//
-// These operations perform a matrix multiplication and accumulation of
-// the form: D = A * B + C .
-
-// A and B are <8 x fp8> or <8 x bf8>, but since fp8 and bf8 are not supported by llvm we use <2 x i32>.
-def int_amdgcn_wmma_f32_16x16x16_fp8_fp8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
-def int_amdgcn_wmma_f32_16x16x16_fp8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
-def int_amdgcn_wmma_f32_16x16x16_bf8_fp8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
-def int_amdgcn_wmma_f32_16x16x16_bf8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
-// A and B are <16 x iu4>.
-def int_amdgcn_wmma_i32_16x16x32_iu4 : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, llvm_anyint_ty>;
-
// SWMMAC (Wave Matrix(sparse) Multiply-Accumulate) intrinsics
//
// These operations perform a sparse matrix multiplication and accumulation of
@@ -2734,6 +2741,20 @@ class AMDGPUSWmmacIntrinsicIUIdx<LLVMType A, LLVMType B, LLVMType CD, LLVMType I
[IntrNoMem, IntrConvergent, IntrWillReturn, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>]
>;
+defset list<Intrinsic> AMDGPUWMMAIntrinsicsGFX12 = {
+// WMMA (Wave Matrix Multiply-Accumulate) intrinsics
+//
+// These operations perform a matrix multiplication and accumulation of
+// the form: D = A * B + C .
+
+// A and B are <8 x fp8> or <8 x bf8>, but since fp8 and bf8 are not supported by llvm we use <2 x i32>.
+def int_amdgcn_wmma_f32_16x16x16_fp8_fp8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
+def int_amdgcn_wmma_f32_16x16x16_fp8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
+def int_amdgcn_wmma_f32_16x16x16_bf8_fp8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
+def int_amdgcn_wmma_f32_16x16x16_bf8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
+// A and B are <16 x iu4>.
+def int_amdgcn_wmma_i32_16x16x32_iu4 : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, llvm_anyint_ty>;
+
def int_amdgcn_swmmac_f32_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f32_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f16_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
@@ -2745,6 +2766,7 @@ def int_amdgcn_swmmac_f32_16x16x32_fp8_fp8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyin
def int_amdgcn_swmmac_f32_16x16x32_fp8_bf8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f32_16x16x32_bf8_fp8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f32_16x16x32_bf8_bf8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
+}
def int_amdgcn_global_atomic_ordered_add_b64 : AMDGPUAtomicRtn<llvm_i64_ty, global_ptr_ty>;
@@ -2765,17 +2787,14 @@ class AMDGPULoadIntrinsic<LLVMType ptr_ty>:
>;
// Wave32
-// <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1)) -> global_load_tr_b64
-// <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1)) -> global_load_tr_b128
-// <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1)) -> global_load_tr_b128
-// <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1)) -> global_load_tr_b128
+// <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1)) -> global_load_tr_b64
+// <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1)) -> global_load_tr_b128
// Wave64
-// i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1)) -> global_load_tr_b64
-// <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1)) -> global_load_tr_b128
-// <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1)) -> global_load_tr_b128
-// <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1)) -> global_load_tr_b128
+// i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1)) -> global_load_tr_b64
+// <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1)) -> global_load_tr_b128
-def int_amdgcn_global_load_tr : AMDGPULoadIntrinsic<global_ptr_ty>;
+def int_amdgcn_global_load_tr_b64 : AMDGPULoadIntrinsic<global_ptr_ty>;
+def int_amdgcn_global_load_tr_b128 : AMDGPULoadIntrinsic<global_ptr_ty>;
// i32 @llvm.amdgcn.wave.id()
def int_amdgcn_wave_id :
@@ -3012,6 +3031,7 @@ class AMDGPUMfmaIntrinsic<LLVMType DestTy, LLVMType SrcABTy> :
[IntrConvergent, IntrNoMem,
ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
+defset list<Intrinsic> AMDGPUMFMAIntrinsics908 = {
def int_amdgcn_mfma_f32_32x32x1f32 : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_16x16x1f32 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_4x4x1f32 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_float_ty>;
@@ -3032,6 +3052,7 @@ def int_amdgcn_mfma_f32_16x16x2bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v
def int_amdgcn_mfma_f32_4x4x2bf16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_32x32x4bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_16x16x8bf16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v2i16_ty>;
+}
//===----------------------------------------------------------------------===//
// gfx90a intrinsics
@@ -3043,6 +3064,7 @@ def int_amdgcn_flat_atomic_fadd : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fmin : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fmax : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
+defset list<Intrinsic> AMDGPUMFMAIntrinsics90A = {
def int_amdgcn_mfma_f32_32x32x4bf16_1k : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_16x16x4bf16_1k : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_4x4x4bf16_1k : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v4i16_ty>;
@@ -3054,25 +3076,12 @@ def int_amdgcn_mfma_f32_16x16x16bf16_1k : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, ll
// source operand.
def int_amdgcn_mfma_f64_16x16x4f64 : AMDGPUMfmaIntrinsic<llvm_v4f64_ty, llvm_double_ty>;
def int_amdgcn_mfma_f64_4x4x4f64 : AMDGPUMfmaIntrinsic<llvm_double_ty, llvm_double_ty>;
+}
//===----------------------------------------------------------------------===//
// gfx940 intrinsics
// ===----------------------------------------------------------------------===//
-// bf16 atomics use v2i16 argument since there is no bf16 data type in the llvm.
-def int_amdgcn_global_atomic_fadd_v2bf16 : AMDGPUAtomicRtn<llvm_v2i16_ty>;
-def int_amdgcn_flat_atomic_fadd_v2bf16 : AMDGPUAtomicRtn<llvm_v2i16_ty>;
-def int_amdgcn_ds_fadd_v2bf16 : DefaultAttrsIntrinsic<
- [llvm_v2i16_ty],
- [LLVMQualPointerType<3>, llvm_v2i16_ty],
- [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
- ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;
-
-def int_amdgcn_mfma_i32_16x16x32_i8 : AMDGPUMfmaIntrinsic<llvm_v4i32_ty, llvm_i64_ty>;
-def int_amdgcn_mfma_i32_32x32x16_i8 : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i64_ty>;
-def int_amdgcn_mfma_f32_16x16x8_xf32 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v2f32_ty>;
-def int_amdgcn_mfma_f32_32x32x4_xf32 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2f32_ty>;
-
class AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> :
AMDGPUMfmaIntrinsic<DestTy, llvm_i64_ty>;
@@ -3081,9 +3090,6 @@ multiclass AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> {
def NAME#"_"#kind : AMDGPUMFp8MfmaIntrinsic<DestTy>;
}
-defm int_amdgcn_mfma_f32_16x16x32 : AMDGPUMFp8MfmaIntrinsic<llvm_v4f32_ty>;
-defm int_amdgcn_mfma_f32_32x32x16 : AMDGPUMFp8MfmaIntrinsic<llvm_v16f32_ty>;
-
// llvm.amdgcn.smfmac.?32.* vdst, srcA, srcB, srcC, index, cbsz, abid
class AMDGPUMSmfmacIntrinsic<LLVMType DestTy, LLVMType SrcA, LLVMType SrcB> :
ClangBuiltin<!subst("int", "__builtin", NAME)>,
@@ -3093,13 +3099,6 @@ class AMDGPUMSmfmacIntrinsic<LLVMType DestTy, LLVMType SrcA, LLVMType SrcB> :
[IntrConvergent, IntrNoMem,
ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
-def int_amdgcn_smfmac_f32_16x16x32_f16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
-def int_amdgcn_smfmac_f32_32x32x16_f16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
-def int_amdgcn_smfmac_f32_16x16x32_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
-def int_amdgcn_smfmac_f32_32x32x16_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
-def int_amdgcn_smfmac_i32_16x16x64_i8 : AMDGPUMSmfmacIntrinsic<llvm_v4i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;
-def int_amdgcn_smfmac_i32_32x32x32_i8 : AMDGPUMSmfmacIntrinsic<llvm_v16i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;
-
class AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> :
AMDGPUMSmfmacIntrinsic<DestTy, llvm_v2i32_ty, llvm_v4i32_ty>;
@@ -3108,8 +3107,34 @@ multiclass AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> {
def NAME#"_"#kind : AMDGPUMFp8SmfmacIntrinsic<DestTy>;
}
+// bf16 atomics use v2i16 argument since there is no bf16 data type in the llvm.
+def int_amdgcn_global_atomic_fadd_v2bf16 : AMDGPUAtomicRtn<llvm_v2i16_ty>;
+def int_amdgcn_flat_atomic_fadd_v2bf16 : AMDGPUAtomicRtn<llvm_v2i16_ty>;
+def int_amdgcn_ds_fadd_v2bf16 : DefaultAttrsIntrinsic<
+ [llvm_v2i16_ty],
+ [LLVMQualPointerType<3>, llvm_v2i16_ty],
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
+ ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;
+
+defset list<Intrinsic> AMDGPUMFMAIntrinsics940 = {
+def int_amdgcn_mfma_i32_16x16x32_i8 : AMDGPUMfmaIntrinsic<llvm_v4i32_ty, llvm_i64_ty>;
+def int_amdgcn_mfma_i32_32x32x16_i8 : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i64_ty>;
+def int_amdgcn_mfma_f32_16x16x8_xf32 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v2f32_ty>;
+def int_amdgcn_mfma_f32_32x32x4_xf32 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2f32_ty>;
+
+defm int_amdgcn_mfma_f32_16x16x32 : AMDGPUMFp8MfmaIntrinsic<llvm_v4f32_ty>;
+defm int_amdgcn_mfma_f32_32x32x16 : AMDGPUMFp8MfmaIntrinsic<llvm_v16f32_ty>;
+
+def int_amdgcn_smfmac_f32_16x16x32_f16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
+def int_amdgcn_smfmac_f32_32x32x16_f16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
+def int_amdgcn_smfmac_f32_16x16x32_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
+def int_amdgcn_smfmac_f32_32x32x16_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
+def int_amdgcn_smfmac_i32_16x16x64_i8 : AMDGPUMSmfmacIntrinsic<llvm_v4i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;
+def int_amdgcn_smfmac_i32_32x32x32_i8 : AMDGPUMSmfmacIntrinsic<llvm_v16i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;
+
defm int_amdgcn_smfmac_f32_16x16x64 : AMDGPUMFp8SmfmacIntrinsic<llvm_v4f32_ty>;
defm int_amdgcn_smfmac_f32_32x32x32 : AMDGPUMFp8SmfmacIntrinsic<llvm_v16f32_ty>;
+}
// llvm.amdgcn.cvt.f32.bf8 float vdst, int srcA, imm byte_sel [0..3]
// byte_sel selects byte from srcA.
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index 1164b241ba7b..a871fac46b9f 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -24,7 +24,15 @@ def int_dx_any : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_any_ty]>;
def int_dx_clamp : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>;
def int_dx_uclamp : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>;
-def int_dx_dot :
+def int_dx_dot2 :
+ Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyfloat_ty, LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>],
+ [IntrNoMem, IntrWillReturn, Commutative] >;
+def int_dx_dot3 :
+ Intrinsic<[LLVMVectorElementType<0>],
+ [llvm_anyfloat_ty, LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>],
+ [IntrNoMem, IntrWillReturn, Commutative] >;
+def int_dx_dot4 :
Intrinsic<[LLVMVectorElementType<0>],
[llvm_anyfloat_ty, LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>],
[IntrNoMem, IntrWillReturn, Commutative] >;
diff --git a/llvm/include/llvm/IR/Mangler.h b/llvm/include/llvm/IR/Mangler.h
index 747a4085235c..f28ffc961b6d 100644
--- a/llvm/include/llvm/IR/Mangler.h
+++ b/llvm/include/llvm/IR/Mangler.h
@@ -14,6 +14,7 @@
#define LLVM_IR_MANGLER_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
@@ -52,6 +53,9 @@ void emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
void emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
const Triple &T, Mangler &M);
+std::optional<std::string> getArm64ECMangledFunctionName(StringRef Name);
+std::optional<std::string> getArm64ECDemangledFunctionName(StringRef Name);
+
} // End llvm namespace
#endif
diff --git a/llvm/include/llvm/IR/PassManager.h b/llvm/include/llvm/IR/PassManager.h
index ec8b809d40bf..108465478d37 100644
--- a/llvm/include/llvm/IR/PassManager.h
+++ b/llvm/include/llvm/IR/PassManager.h
@@ -267,29 +267,24 @@ public:
return PA;
}
- template <typename PassT>
- LLVM_ATTRIBUTE_MINSIZE
- std::enable_if_t<!std::is_same<PassT, PassManager>::value>
- addPass(PassT &&Pass) {
+ // FIXME: Revert to enable_if style when gcc >= 11.1
+ template <typename PassT> LLVM_ATTRIBUTE_MINSIZE void addPass(PassT &&Pass) {
using PassModelT =
detail::PassModel<IRUnitT, PassT, AnalysisManagerT, ExtraArgTs...>;
- // Do not use make_unique or emplace_back, they cause too many template
- // instantiations, causing terrible compile times.
- Passes.push_back(std::unique_ptr<PassConceptT>(
- new PassModelT(std::forward<PassT>(Pass))));
- }
-
- /// When adding a pass manager pass that has the same type as this pass
- /// manager, simply move the passes over. This is because we don't have use
- /// cases rely on executing nested pass managers. Doing this could reduce
- /// implementation complexity and avoid potential invalidation issues that may
- /// happen with nested pass managers of the same type.
- template <typename PassT>
- LLVM_ATTRIBUTE_MINSIZE
- std::enable_if_t<std::is_same<PassT, PassManager>::value>
- addPass(PassT &&Pass) {
- for (auto &P : Pass.Passes)
- Passes.push_back(std::move(P));
+ if constexpr (!std::is_same_v<PassT, PassManager>) {
+ // Do not use make_unique or emplace_back, they cause too many template
+ // instantiations, causing terrible compile times.
+ Passes.push_back(std::unique_ptr<PassConceptT>(
+ new PassModelT(std::forward<PassT>(Pass))));
+ } else {
+ /// When adding a pass manager pass that has the same type as this pass
+ /// manager, simply move the passes over. This is because we don't have
+ /// use cases rely on executing nested pass managers. Doing this could
+ /// reduce implementation complexity and avoid potential invalidation
+ /// issues that may happen with nested pass managers of the same type.
+ for (auto &P : Pass.Passes)
+ Passes.push_back(std::move(P));
+ }
}
/// Returns if the pass manager contains any passes.
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 382009d9df78..92cb79d54afc 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -884,9 +884,9 @@ struct bind_const_intval_ty {
/// Match a specified integer value or vector of all elements of that
/// value.
template <bool AllowUndefs> struct specific_intval {
- APInt Val;
+ const APInt &Val;
- specific_intval(APInt V) : Val(std::move(V)) {}
+ specific_intval(const APInt &V) : Val(V) {}
template <typename ITy> bool match(ITy *V) {
const auto *CI = dyn_cast<ConstantInt>(V);
@@ -898,22 +898,37 @@ template <bool AllowUndefs> struct specific_intval {
}
};
+template <bool AllowUndefs> struct specific_intval64 {
+ uint64_t Val;
+
+ specific_intval64(uint64_t V) : Val(V) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ const auto *CI = dyn_cast<ConstantInt>(V);
+ if (!CI && V->getType()->isVectorTy())
+ if (const auto *C = dyn_cast<Constant>(V))
+ CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndefs));
+
+ return CI && CI->getValue() == Val;
+ }
+};
+
/// Match a specific integer value or vector with all elements equal to
/// the value.
-inline specific_intval<false> m_SpecificInt(APInt V) {
- return specific_intval<false>(std::move(V));
+inline specific_intval<false> m_SpecificInt(const APInt &V) {
+ return specific_intval<false>(V);
}
-inline specific_intval<false> m_SpecificInt(uint64_t V) {
- return m_SpecificInt(APInt(64, V));
+inline specific_intval64<false> m_SpecificInt(uint64_t V) {
+ return specific_intval64<false>(V);
}
-inline specific_intval<true> m_SpecificIntAllowUndef(APInt V) {
- return specific_intval<true>(std::move(V));
+inline specific_intval<true> m_SpecificIntAllowUndef(const APInt &V) {
+ return specific_intval<true>(V);
}
-inline specific_intval<true> m_SpecificIntAllowUndef(uint64_t V) {
- return m_SpecificIntAllowUndef(APInt(64, V));
+inline specific_intval64<true> m_SpecificIntAllowUndef(uint64_t V) {
+ return specific_intval64<true>(V);
}
/// Match a ConstantInt and bind to its value. This does not match
@@ -1170,7 +1185,7 @@ inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
}
template <typename LHS_t, typename RHS_t, unsigned Opcode,
- unsigned WrapFlags = 0>
+ unsigned WrapFlags = 0, bool Commutable = false>
struct OverflowingBinaryOp_match {
LHS_t L;
RHS_t R;
@@ -1188,7 +1203,9 @@ struct OverflowingBinaryOp_match {
if ((WrapFlags & OverflowingBinaryOperator::NoSignedWrap) &&
!Op->hasNoSignedWrap())
return false;
- return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
+ return (L.match(Op->getOperand(0)) && R.match(Op->getOperand(1))) ||
+ (Commutable && L.match(Op->getOperand(1)) &&
+ R.match(Op->getOperand(0)));
}
return false;
}
@@ -1235,6 +1252,16 @@ m_NUWAdd(const LHS &L, const RHS &R) {
OverflowingBinaryOperator::NoUnsignedWrap>(
L, R);
}
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<
+ LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true>
+m_c_NUWAdd(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoUnsignedWrap,
+ true>(L, R);
+}
+
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
OverflowingBinaryOperator::NoUnsignedWrap>
@@ -1319,10 +1346,31 @@ m_AddLike(const LHS &L, const RHS &R) {
return m_CombineOr(m_Add(L, R), m_DisjointOr(L, R));
}
+/// Match either "add nsw" or "or disjoint"
+template <typename LHS, typename RHS>
+inline match_combine_or<
+ OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoSignedWrap>,
+ DisjointOr_match<LHS, RHS>>
+m_NSWAddLike(const LHS &L, const RHS &R) {
+ return m_CombineOr(m_NSWAdd(L, R), m_DisjointOr(L, R));
+}
+
+/// Match either "add nuw" or "or disjoint"
+template <typename LHS, typename RHS>
+inline match_combine_or<
+ OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoUnsignedWrap>,
+ DisjointOr_match<LHS, RHS>>
+m_NUWAddLike(const LHS &L, const RHS &R) {
+ return m_CombineOr(m_NUWAdd(L, R), m_DisjointOr(L, R));
+}
+
//===----------------------------------------------------------------------===//
// Class that matches a group of binary opcodes.
//
-template <typename LHS_t, typename RHS_t, typename Predicate>
+template <typename LHS_t, typename RHS_t, typename Predicate,
+ bool Commutable = false>
struct BinOpPred_match : Predicate {
LHS_t L;
RHS_t R;
@@ -1331,8 +1379,10 @@ struct BinOpPred_match : Predicate {
template <typename OpTy> bool match(OpTy *V) {
if (auto *I = dyn_cast<Instruction>(V))
- return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
- R.match(I->getOperand(1));
+ return this->isOpType(I->getOpcode()) &&
+ ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
+ (Commutable && L.match(I->getOperand(1)) &&
+ R.match(I->getOperand(0))));
return false;
}
};
@@ -1399,6 +1449,13 @@ m_BitwiseLogic(const LHS &L, const RHS &R) {
return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
}
+/// Matches bitwise logic operations in either order.
+template <typename LHS, typename RHS>
+inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op, true>
+m_c_BitwiseLogic(const LHS &L, const RHS &R) {
+ return BinOpPred_match<LHS, RHS, is_bitwiselogic_op, true>(L, R);
+}
+
/// Matches integer division operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
diff --git a/llvm/include/llvm/IR/ProfDataUtils.h b/llvm/include/llvm/IR/ProfDataUtils.h
index 255fa2ff1c79..c0897408986f 100644
--- a/llvm/include/llvm/IR/ProfDataUtils.h
+++ b/llvm/include/llvm/IR/ProfDataUtils.h
@@ -108,5 +108,8 @@ bool extractProfTotalWeight(const Instruction &I, uint64_t &TotalWeights);
/// a `prof` metadata reference to instruction `I`.
void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights);
+/// Scaling the profile data attached to 'I' using the ratio of S/T.
+void scaleProfData(Instruction &I, uint64_t S, uint64_t T);
+
} // namespace llvm
#endif
diff --git a/llvm/include/llvm/MC/ConstantPools.h b/llvm/include/llvm/MC/ConstantPools.h
index 7eac75362eff..ff21ccda07a8 100644
--- a/llvm/include/llvm/MC/ConstantPools.h
+++ b/llvm/include/llvm/MC/ConstantPools.h
@@ -43,8 +43,13 @@ struct ConstantPoolEntry {
class ConstantPool {
using EntryVecTy = SmallVector<ConstantPoolEntry, 4>;
EntryVecTy Entries;
- std::map<int64_t, const MCSymbolRefExpr *> CachedConstantEntries;
- DenseMap<const MCSymbol *, const MCSymbolRefExpr *> CachedSymbolEntries;
+
+ // Caches of entries that already exist, indexed by their contents
+ // and also the size of the constant.
+ std::map<std::pair<int64_t, unsigned>, const MCSymbolRefExpr *>
+ CachedConstantEntries;
+ DenseMap<std::pair<const MCSymbol *, unsigned>, const MCSymbolRefExpr *>
+ CachedSymbolEntries;
public:
// Initialize a new empty constant pool
diff --git a/llvm/include/llvm/MC/DXContainerPSVInfo.h b/llvm/include/llvm/MC/DXContainerPSVInfo.h
index 7d21c18d252f..bad2fe78eb8f 100644
--- a/llvm/include/llvm/MC/DXContainerPSVInfo.h
+++ b/llvm/include/llvm/MC/DXContainerPSVInfo.h
@@ -9,9 +9,11 @@
#ifndef LLVM_MC_DXCONTAINERPSVINFO_H
#define LLVM_MC_DXCONTAINERPSVINFO_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/DXContainer.h"
+#include "llvm/MC/StringTableBuilder.h"
#include "llvm/TargetParser/Triple.h"
#include <array>
@@ -45,8 +47,9 @@ struct PSVSignatureElement {
// modifiable format, and can be used to serialize the data back into valid PSV
// RuntimeInfo.
struct PSVRuntimeInfo {
+ PSVRuntimeInfo() : DXConStrTabBuilder(StringTableBuilder::DXContainer) {}
bool IsFinalized = false;
- dxbc::PSV::v2::RuntimeInfo BaseData;
+ dxbc::PSV::v3::RuntimeInfo BaseData;
SmallVector<dxbc::PSV::v2::ResourceBindInfo> Resources;
SmallVector<PSVSignatureElement> InputElements;
SmallVector<PSVSignatureElement> OutputElements;
@@ -64,6 +67,7 @@ struct PSVRuntimeInfo {
std::array<SmallVector<uint32_t>, 4> InputOutputMap;
SmallVector<uint32_t> InputPatchMap;
SmallVector<uint32_t> PatchOutputMap;
+ llvm::StringRef EntryName;
// Serialize PSVInfo into the provided raw_ostream. The version field
// specifies the data version to encode, the default value specifies encoding
@@ -71,19 +75,12 @@ struct PSVRuntimeInfo {
void write(raw_ostream &OS,
uint32_t Version = std::numeric_limits<uint32_t>::max()) const;
- void finalize(Triple::EnvironmentType Stage) {
- IsFinalized = true;
- BaseData.SigInputElements = static_cast<uint32_t>(InputElements.size());
- BaseData.SigOutputElements = static_cast<uint32_t>(OutputElements.size());
- BaseData.SigPatchOrPrimElements =
- static_cast<uint32_t>(PatchOrPrimElements.size());
- if (!sys::IsBigEndianHost)
- return;
- BaseData.swapBytes();
- BaseData.swapBytes(Stage);
- for (auto &Res : Resources)
- Res.swapBytes();
- }
+ void finalize(Triple::EnvironmentType Stage);
+
+private:
+ SmallVector<uint32_t, 64> IndexBuffer;
+ SmallVector<llvm::dxbc::PSV::v0::SignatureElement, 32> SignatureElements;
+ StringTableBuilder DXConStrTabBuilder;
};
class Signature {
diff --git a/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index 0c9668904e82..27ecb7b85d22 100644
--- a/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -15,6 +15,7 @@
namespace llvm {
+class MCRegister;
class raw_ostream;
/// MCParsedAsmOperand - This abstract class represents a source-level assembly
@@ -57,7 +58,7 @@ public:
virtual bool isImm() const = 0;
/// isReg - Is this a register operand?
virtual bool isReg() const = 0;
- virtual unsigned getReg() const = 0;
+ virtual MCRegister getReg() const = 0;
/// isMem - Is this a memory operand?
virtual bool isMem() const = 0;
diff --git a/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h b/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h
index 7edd3f8ce490..49ce417e6fbb 100644
--- a/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h
+++ b/llvm/include/llvm/MC/MCParser/MCTargetAsmParser.h
@@ -514,9 +514,7 @@ public:
/// by the tied-operands checks in the AsmMatcher. This method can be
/// overridden to allow e.g. a sub- or super-register as the tied operand.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1,
- const MCParsedAsmOperand &Op2) const {
- return Op1.isReg() && Op2.isReg() && Op1.getReg() == Op2.getReg();
- }
+ const MCParsedAsmOperand &Op2) const;
// Return whether this parser uses assignment statements with equals tokens
virtual bool equalIsAsmAssignment() { return true; };
diff --git a/llvm/include/llvm/MC/MCRegisterInfo.h b/llvm/include/llvm/MC/MCRegisterInfo.h
index fb4d11ec1d4d..c648ef20fa84 100644
--- a/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -153,13 +153,6 @@ public:
bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
};
- /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
- /// index, -1 in any being invalid.
- struct SubRegCoveredBits {
- uint16_t Offset;
- uint16_t Size;
- };
-
private:
const MCRegisterDesc *Desc; // Pointer to the descriptor array
unsigned NumRegs; // Number of entries in the array
@@ -176,8 +169,6 @@ private:
const char *RegClassStrings; // Pointer to the class strings.
const uint16_t *SubRegIndices; // Pointer to the subreg lookup
// array.
- const SubRegCoveredBits *SubRegIdxRanges; // Pointer to the subreg covered
- // bit ranges array.
unsigned NumSubRegIndices; // Number of subreg indices.
const uint16_t *RegEncodingTable; // Pointer to array of register
// encodings.
@@ -278,7 +269,6 @@ public:
const int16_t *DL, const LaneBitmask *RUMS,
const char *Strings, const char *ClassStrings,
const uint16_t *SubIndices, unsigned NumIndices,
- const SubRegCoveredBits *SubIdxRanges,
const uint16_t *RET) {
Desc = D;
NumRegs = NR;
@@ -294,7 +284,6 @@ public:
NumRegUnits = NRU;
SubRegIndices = SubIndices;
NumSubRegIndices = NumIndices;
- SubRegIdxRanges = SubIdxRanges;
RegEncodingTable = RET;
// Initialize DWARF register mapping variables
@@ -387,16 +376,6 @@ public:
/// otherwise.
unsigned getSubRegIndex(MCRegister RegNo, MCRegister SubRegNo) const;
- /// Get the size of the bit range covered by a sub-register index.
- /// If the index isn't continuous, return the sum of the sizes of its parts.
- /// If the index is used to access subregisters of different sizes, return -1.
- unsigned getSubRegIdxSize(unsigned Idx) const;
-
- /// Get the offset of the bit range covered by a sub-register index.
- /// If an Offset doesn't make sense (the index isn't continuous, or is used to
- /// access sub-registers at different offsets), return -1.
- unsigned getSubRegIdxOffset(unsigned Idx) const;
-
/// Return the human-readable symbolic target-specific name for the
/// specified physical register.
const char *getName(MCRegister RegNo) const {
diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h
index 671511ab4b88..69867620e1bf 100644
--- a/llvm/include/llvm/MC/MCStreamer.h
+++ b/llvm/include/llvm/MC/MCStreamer.h
@@ -740,7 +740,7 @@ public:
/// Special case of EmitValue that avoids the client having
/// to pass in a MCExpr for constant integers.
virtual void emitIntValue(uint64_t Value, unsigned Size);
- virtual void emitIntValue(APInt Value);
+ virtual void emitIntValue(const APInt &Value);
/// Special case of EmitValue that avoids the client having to pass
/// in a MCExpr for constant integers & prints in Hex format for certain
diff --git a/llvm/include/llvm/MC/StringTableBuilder.h b/llvm/include/llvm/MC/StringTableBuilder.h
index 4ee421e22c17..a738683548cf 100644
--- a/llvm/include/llvm/MC/StringTableBuilder.h
+++ b/llvm/include/llvm/MC/StringTableBuilder.h
@@ -74,12 +74,8 @@ public:
/// Check if a string is contained in the string table. Since this class
/// doesn't store the string values, this function can be used to check if
/// storage needs to be done prior to adding the string.
- bool contains(StringRef S) const {
- return contains(CachedHashStringRef(S));
- }
- bool contains(CachedHashStringRef S) const {
- return StringIndexMap.count(S);
- }
+ bool contains(StringRef S) const { return contains(CachedHashStringRef(S)); }
+ bool contains(CachedHashStringRef S) const { return StringIndexMap.count(S); }
size_t getSize() const { return Size; }
void clear();
diff --git a/llvm/include/llvm/ObjCopy/CommonConfig.h b/llvm/include/llvm/ObjCopy/CommonConfig.h
index 8f69c9fbeaf5..9d6d5fb23b18 100644
--- a/llvm/include/llvm/ObjCopy/CommonConfig.h
+++ b/llvm/include/llvm/ObjCopy/CommonConfig.h
@@ -233,6 +233,7 @@ struct CommonConfig {
NameMatcher UnneededSymbolsToRemove;
NameMatcher SymbolsToWeaken;
NameMatcher SymbolsToKeepGlobal;
+ NameMatcher SymbolsToSkip;
// Map options
StringMap<SectionRename> SectionsToRename;
diff --git a/llvm/include/llvm/Object/COFF.h b/llvm/include/llvm/Object/COFF.h
index 2a5c3d8913b1..a548b2c15c5f 100644
--- a/llvm/include/llvm/Object/COFF.h
+++ b/llvm/include/llvm/Object/COFF.h
@@ -1362,47 +1362,6 @@ public:
SectionStrippedError() { setErrorCode(object_error::section_stripped); }
};
-inline std::optional<std::string>
-getArm64ECMangledFunctionName(StringRef Name) {
- bool IsCppFn = Name[0] == '?';
- if (IsCppFn && Name.find("$$h") != std::string::npos)
- return std::nullopt;
- if (!IsCppFn && Name[0] == '#')
- return std::nullopt;
-
- StringRef Prefix = "$$h";
- size_t InsertIdx = 0;
- if (IsCppFn) {
- InsertIdx = Name.find("@@");
- size_t ThreeAtSignsIdx = Name.find("@@@");
- if (InsertIdx != std::string::npos && InsertIdx != ThreeAtSignsIdx) {
- InsertIdx += 2;
- } else {
- InsertIdx = Name.find("@");
- if (InsertIdx != std::string::npos)
- InsertIdx++;
- }
- } else {
- Prefix = "#";
- }
-
- return std::optional<std::string>(
- (Name.substr(0, InsertIdx) + Prefix + Name.substr(InsertIdx)).str());
-}
-
-inline std::optional<std::string>
-getArm64ECDemangledFunctionName(StringRef Name) {
- if (Name[0] == '#')
- return std::string(Name.substr(1));
- if (Name[0] != '?')
- return std::nullopt;
-
- std::pair<StringRef, StringRef> Pair = Name.split("$$h");
- if (Pair.second.empty())
- return std::nullopt;
- return (Pair.first + Pair.second).str();
-}
-
} // end namespace object
} // end namespace llvm
diff --git a/llvm/include/llvm/Object/COFFImportFile.h b/llvm/include/llvm/Object/COFFImportFile.h
index 7268faa87eb7..2c06f529ecdf 100644
--- a/llvm/include/llvm/Object/COFFImportFile.h
+++ b/llvm/include/llvm/Object/COFFImportFile.h
@@ -17,6 +17,7 @@
#define LLVM_OBJECT_COFFIMPORTFILE_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
@@ -44,26 +45,7 @@ public:
void moveSymbolNext(DataRefImpl &Symb) const override { ++Symb.p; }
- Error printSymbolName(raw_ostream &OS, DataRefImpl Symb) const override {
- switch (Symb.p) {
- case ImpSymbol:
- OS << "__imp_";
- break;
- case ECAuxSymbol:
- OS << "__imp_aux_";
- break;
- }
- const char *Name = Data.getBufferStart() + sizeof(coff_import_header);
- if (Symb.p != ECThunkSymbol && COFF::isArm64EC(getMachine())) {
- if (std::optional<std::string> DemangledName =
- getArm64ECDemangledFunctionName(Name)) {
- OS << StringRef(*DemangledName);
- return Error::success();
- }
- }
- OS << StringRef(Name);
- return Error::success();
- }
+ Error printSymbolName(raw_ostream &OS, DataRefImpl Symb) const override;
Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override {
return SymbolRef::SF_Global;
diff --git a/llvm/include/llvm/Object/DXContainer.h b/llvm/include/llvm/Object/DXContainer.h
index b6e3d321da24..19c83ba6c6e8 100644
--- a/llvm/include/llvm/Object/DXContainer.h
+++ b/llvm/include/llvm/Object/DXContainer.h
@@ -125,7 +125,8 @@ class PSVRuntimeInfo {
uint32_t Size;
using InfoStruct =
std::variant<std::monostate, dxbc::PSV::v0::RuntimeInfo,
- dxbc::PSV::v1::RuntimeInfo, dxbc::PSV::v2::RuntimeInfo>;
+ dxbc::PSV::v1::RuntimeInfo, dxbc::PSV::v2::RuntimeInfo,
+ dxbc::PSV::v3::RuntimeInfo>;
InfoStruct BasicInfo;
ResourceArray Resources;
StringRef StringTable;
@@ -151,9 +152,11 @@ public:
ResourceArray getResources() const { return Resources; }
uint32_t getVersion() const {
- return Size >= sizeof(dxbc::PSV::v2::RuntimeInfo)
- ? 2
- : (Size >= sizeof(dxbc::PSV::v1::RuntimeInfo) ? 1 : 0);
+ return Size >= sizeof(dxbc::PSV::v3::RuntimeInfo)
+ ? 3
+ : (Size >= sizeof(dxbc::PSV::v2::RuntimeInfo) ? 2
+ : (Size >= sizeof(dxbc::PSV::v1::RuntimeInfo)) ? 1
+ : 0);
}
uint32_t getResourceStride() const { return Resources.Stride; }
@@ -161,6 +164,11 @@ public:
const InfoStruct &getInfo() const { return BasicInfo; }
template <typename T> const T *getInfoAs() const {
+ if (const auto *P = std::get_if<dxbc::PSV::v3::RuntimeInfo>(&BasicInfo))
+ return static_cast<const T *>(P);
+ if (std::is_same<T, dxbc::PSV::v3::RuntimeInfo>::value)
+ return nullptr;
+
if (const auto *P = std::get_if<dxbc::PSV::v2::RuntimeInfo>(&BasicInfo))
return static_cast<const T *>(P);
if (std::is_same<T, dxbc::PSV::v2::RuntimeInfo>::value)
diff --git a/llvm/include/llvm/Object/ELFObjectFile.h b/llvm/include/llvm/Object/ELFObjectFile.h
index f57a7ab8882a..1d457be93741 100644
--- a/llvm/include/llvm/Object/ELFObjectFile.h
+++ b/llvm/include/llvm/Object/ELFObjectFile.h
@@ -419,7 +419,7 @@ protected:
if (Contents[0] != ELFAttrs::Format_Version || Contents.size() == 1)
return Error::success();
- if (Error E = Attributes.parse(Contents, ELFT::TargetEndianness))
+ if (Error E = Attributes.parse(Contents, ELFT::Endianness))
return E;
break;
}
@@ -482,7 +482,7 @@ public:
bool isDyldType() const { return isDyldELFObject; }
static bool classof(const Binary *v) {
return v->getType() ==
- getELFType(ELFT::TargetEndianness == llvm::endianness::little,
+ getELFType(ELFT::Endianness == llvm::endianness::little,
ELFT::Is64Bits);
}
@@ -1155,10 +1155,9 @@ ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, ELFFile<ELFT> EF,
const Elf_Shdr *DotDynSymSec,
const Elf_Shdr *DotSymtabSec,
const Elf_Shdr *DotSymtabShndx)
- : ELFObjectFileBase(
- getELFType(ELFT::TargetEndianness == llvm::endianness::little,
- ELFT::Is64Bits),
- Object),
+ : ELFObjectFileBase(getELFType(ELFT::Endianness == llvm::endianness::little,
+ ELFT::Is64Bits),
+ Object),
EF(EF), DotDynSymSec(DotDynSymSec), DotSymtabSec(DotSymtabSec),
DotSymtabShndxSec(DotSymtabShndx) {}
@@ -1226,8 +1225,7 @@ uint8_t ELFObjectFile<ELFT>::getBytesInAddress() const {
template <class ELFT>
StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
- constexpr bool IsLittleEndian =
- ELFT::TargetEndianness == llvm::endianness::little;
+ constexpr bool IsLittleEndian = ELFT::Endianness == llvm::endianness::little;
switch (EF.getHeader().e_ident[ELF::EI_CLASS]) {
case ELF::ELFCLASS32:
switch (EF.getHeader().e_machine) {
@@ -1305,7 +1303,7 @@ StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
}
template <class ELFT> Triple::ArchType ELFObjectFile<ELFT>::getArch() const {
- bool IsLittleEndian = ELFT::TargetEndianness == llvm::endianness::little;
+ bool IsLittleEndian = ELFT::Endianness == llvm::endianness::little;
switch (EF.getHeader().e_machine) {
case ELF::EM_68K:
return Triple::m68k;
diff --git a/llvm/include/llvm/Object/ELFTypes.h b/llvm/include/llvm/Object/ELFTypes.h
index 4986ecf8323d..4617b70a2f12 100644
--- a/llvm/include/llvm/Object/ELFTypes.h
+++ b/llvm/include/llvm/Object/ELFTypes.h
@@ -52,6 +52,7 @@ private:
public:
static const endianness TargetEndianness = E;
+ static const endianness Endianness = E;
static const bool Is64Bits = Is64;
using uint = std::conditional_t<Is64, uint64_t, uint32_t>;
@@ -145,9 +146,9 @@ using ELF64BE = ELFType<llvm::endianness::big, true>;
// Section header.
template <class ELFT> struct Elf_Shdr_Base;
-template <endianness TargetEndianness>
-struct Elf_Shdr_Base<ELFType<TargetEndianness, false>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Shdr_Base<ELFType<Endianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
Elf_Word sh_name; // Section name (index into string table)
Elf_Word sh_type; // Section type (SHT_*)
Elf_Word sh_flags; // Section flags (SHF_*)
@@ -160,9 +161,9 @@ struct Elf_Shdr_Base<ELFType<TargetEndianness, false>> {
Elf_Word sh_entsize; // Size of records contained within the section
};
-template <endianness TargetEndianness>
-struct Elf_Shdr_Base<ELFType<TargetEndianness, true>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Shdr_Base<ELFType<Endianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
Elf_Word sh_name; // Section name (index into string table)
Elf_Word sh_type; // Section type (SHT_*)
Elf_Xword sh_flags; // Section flags (SHF_*)
@@ -190,9 +191,9 @@ struct Elf_Shdr_Impl : Elf_Shdr_Base<ELFT> {
template <class ELFT> struct Elf_Sym_Base;
-template <endianness TargetEndianness>
-struct Elf_Sym_Base<ELFType<TargetEndianness, false>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Sym_Base<ELFType<Endianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
Elf_Word st_name; // Symbol name (index into string table)
Elf_Addr st_value; // Value or address associated with the symbol
Elf_Word st_size; // Size of the symbol
@@ -201,9 +202,9 @@ struct Elf_Sym_Base<ELFType<TargetEndianness, false>> {
Elf_Half st_shndx; // Which section (header table index) it's defined in
};
-template <endianness TargetEndianness>
-struct Elf_Sym_Base<ELFType<TargetEndianness, true>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Sym_Base<ELFType<Endianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
Elf_Word st_name; // Symbol name (index into string table)
unsigned char st_info; // Symbol's type and binding attributes
unsigned char st_other; // Must be zero; reserved
@@ -349,9 +350,9 @@ struct Elf_Vernaux_Impl {
/// table section (.dynamic) look like.
template <class ELFT> struct Elf_Dyn_Base;
-template <endianness TargetEndianness>
-struct Elf_Dyn_Base<ELFType<TargetEndianness, false>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Dyn_Base<ELFType<Endianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
Elf_Sword d_tag;
union {
Elf_Word d_val;
@@ -359,9 +360,9 @@ struct Elf_Dyn_Base<ELFType<TargetEndianness, false>> {
} d_un;
};
-template <endianness TargetEndianness>
-struct Elf_Dyn_Base<ELFType<TargetEndianness, true>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Dyn_Base<ELFType<Endianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
Elf_Sxword d_tag;
union {
Elf_Xword d_val;
@@ -381,9 +382,9 @@ struct Elf_Dyn_Impl : Elf_Dyn_Base<ELFT> {
uintX_t getPtr() const { return d_un.d_ptr; }
};
-template <endianness TargetEndianness>
-struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Rel_Impl<ELFType<Endianness, false>, false> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
static const bool IsRela = false;
Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
Elf_Word r_info; // Symbol table index and type of relocation to apply
@@ -416,17 +417,17 @@ struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
}
};
-template <endianness TargetEndianness>
-struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, true>
- : public Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Rel_Impl<ELFType<Endianness, false>, true>
+ : public Elf_Rel_Impl<ELFType<Endianness, false>, false> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
static const bool IsRela = true;
Elf_Sword r_addend; // Compute value for relocatable field by adding this
};
-template <endianness TargetEndianness>
-struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Rel_Impl<ELFType<Endianness, true>, false> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
static const bool IsRela = false;
Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
Elf_Xword r_info; // Symbol table index and type of relocation to apply
@@ -469,10 +470,10 @@ struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
}
};
-template <endianness TargetEndianness>
-struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, true>
- : public Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Rel_Impl<ELFType<Endianness, true>, true>
+ : public Elf_Rel_Impl<ELFType<Endianness, true>, false> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
static const bool IsRela = true;
Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
};
@@ -504,9 +505,9 @@ struct Elf_Ehdr_Impl {
unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
};
-template <endianness TargetEndianness>
-struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Phdr_Impl<ELFType<Endianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
Elf_Word p_type; // Type of segment
Elf_Off p_offset; // FileOffset where segment is located, in bytes
Elf_Addr p_vaddr; // Virtual Address of beginning of segment
@@ -517,9 +518,9 @@ struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
Elf_Word p_align; // Segment alignment constraint
};
-template <endianness TargetEndianness>
-struct Elf_Phdr_Impl<ELFType<TargetEndianness, true>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Phdr_Impl<ELFType<Endianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
Elf_Word p_type; // Type of segment
Elf_Word p_flags; // Segment flags
Elf_Off p_offset; // FileOffset where segment is located, in bytes
@@ -574,17 +575,17 @@ struct Elf_GnuHash_Impl {
// Compressed section headers.
// http://www.sco.com/developers/gabi/latest/ch4.sheader.html#compression_header
-template <endianness TargetEndianness>
-struct Elf_Chdr_Impl<ELFType<TargetEndianness, false>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <endianness Endianness>
+struct Elf_Chdr_Impl<ELFType<Endianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
Elf_Word ch_type;
Elf_Word ch_size;
Elf_Word ch_addralign;
};
-template <endianness TargetEndianness>
-struct Elf_Chdr_Impl<ELFType<TargetEndianness, true>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <endianness Endianness>
+struct Elf_Chdr_Impl<ELFType<Endianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
Elf_Word ch_type;
Elf_Word ch_reserved;
Elf_Xword ch_size;
@@ -742,17 +743,17 @@ template <class ELFT> struct Elf_CGProfile_Impl {
template <class ELFT>
struct Elf_Mips_RegInfo;
-template <llvm::endianness TargetEndianness>
-struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+template <llvm::endianness Endianness>
+struct Elf_Mips_RegInfo<ELFType<Endianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, false)
Elf_Word ri_gprmask; // bit-mask of used general registers
Elf_Word ri_cprmask[4]; // bit-mask of used co-processor registers
Elf_Addr ri_gp_value; // gp register value
};
-template <llvm::endianness TargetEndianness>
-struct Elf_Mips_RegInfo<ELFType<TargetEndianness, true>> {
- LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+template <llvm::endianness Endianness>
+struct Elf_Mips_RegInfo<ELFType<Endianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(Endianness, true)
Elf_Word ri_gprmask; // bit-mask of used general registers
Elf_Word ri_pad; // unused padding field
Elf_Word ri_cprmask[4]; // bit-mask of used co-processor registers
diff --git a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h
index f7f8d5e6bf47..9c4d9e19f11b 100644
--- a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h
+++ b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h
@@ -107,7 +107,7 @@ struct PSVInfo {
// the format.
uint32_t Version;
- dxbc::PSV::v2::RuntimeInfo Info;
+ dxbc::PSV::v3::RuntimeInfo Info;
uint32_t ResourceStride;
SmallVector<ResourceBindInfo> Resources;
SmallVector<SignatureElement> SigInputElements;
@@ -121,12 +121,15 @@ struct PSVInfo {
MaskVector InputPatchMap;
MaskVector PatchOutputMap;
+ StringRef EntryName;
+
void mapInfoForVersion(yaml::IO &IO);
PSVInfo();
PSVInfo(const dxbc::PSV::v0::RuntimeInfo *P, uint16_t Stage);
PSVInfo(const dxbc::PSV::v1::RuntimeInfo *P);
PSVInfo(const dxbc::PSV::v2::RuntimeInfo *P);
+ PSVInfo(const dxbc::PSV::v3::RuntimeInfo *P, StringRef StringTable);
};
struct SignatureParameter {
diff --git a/llvm/include/llvm/Passes/MachinePassRegistry.def b/llvm/include/llvm/Passes/MachinePassRegistry.def
index 016602730e0e..2f77ae655d9b 100644
--- a/llvm/include/llvm/Passes/MachinePassRegistry.def
+++ b/llvm/include/llvm/Passes/MachinePassRegistry.def
@@ -127,6 +127,8 @@ MACHINE_FUNCTION_PASS("dead-mi-elimination", DeadMachineInstructionElimPass())
// MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass())
MACHINE_FUNCTION_PASS("no-op-machine-function", NoOpMachineFunctionPass())
MACHINE_FUNCTION_PASS("print", PrintMIRPass())
+MACHINE_FUNCTION_PASS("require-all-machine-function-properties",
+ RequireAllMachineFunctionPropertiesPass())
#undef MACHINE_FUNCTION_PASS
// After a pass is converted to new pass manager, its entry should be moved from
diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h
index 8817a2585646..d1232124d5d8 100644
--- a/llvm/include/llvm/Passes/PassBuilder.h
+++ b/llvm/include/llvm/Passes/PassBuilder.h
@@ -672,6 +672,13 @@ public:
return Result;
}
+ /// Handle passes only accept one bool-valued parameter.
+ ///
+ /// \return false when Params is empty.
+ static Expected<bool> parseSinglePassOption(StringRef Params,
+ StringRef OptionName,
+ StringRef PassName);
+
private:
// O1 pass pipeline
FunctionPassManager
diff --git a/llvm/include/llvm/Passes/TargetPassRegistry.inc b/llvm/include/llvm/Passes/TargetPassRegistry.inc
new file mode 100644
index 000000000000..b618331c6998
--- /dev/null
+++ b/llvm/include/llvm/Passes/TargetPassRegistry.inc
@@ -0,0 +1,194 @@
+//===- TargetPassRegistry.inc - Registry of passes --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes in registerPassBuilderCallbacks
+// Just put the following lines in the body of registerPassBuilderCallbacks:
+// #define GET_PASS_REGISTRY "<Target>PassRegistry.def"
+// #include "llvm/Passes/TargetPassRegistry.inc"
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifdef GET_PASS_REGISTRY
+
+#if !__has_include(GET_PASS_REGISTRY)
+#error "must provide <Target>PassRegistry.def"
+#endif
+
+if (PopulateClassToPassNames) {
+ auto *PIC = PB.getPassInstrumentationCallbacks();
+
+#define ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define ADD_CLASS_PASS_TO_PASS_NAME_WITH_PARAMS(NAME, CLASS) \
+ PIC->addClassToPassName(CLASS, NAME);
+
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define MODULE_PASS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define MODULE_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
+ ADD_CLASS_PASS_TO_PASS_NAME_WITH_PARAMS(NAME, CLASS)
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define FUNCTION_PASS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
+ ADD_CLASS_PASS_TO_PASS_NAME_WITH_PARAMS(NAME, CLASS)
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define MACHINE_FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define MACHINE_FUNCTION_PASS(NAME, CREATE_PASS) \
+ ADD_CLASS_PASS_TO_PASS_NAME(NAME, CREATE_PASS)
+#define MACHINE_FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, \
+ PARAMS) \
+ ADD_CLASS_PASS_TO_PASS_NAME_WITH_PARAMS(NAME, CLASS)
+#include GET_PASS_REGISTRY
+#undef MODULE_ANALYSIS
+#undef MODULE_PASS
+#undef MODULE_PASS_WITH_PARAMS
+#undef FUNCTION_ANALYSIS
+#undef FUNCTION_ALIAS_ANALYSIS
+#undef FUNCTION_PASS
+#undef FUNCTION_PASS_WITH_PARAMS
+#undef LOOP_ANALYSIS
+#undef LOOP_PASS
+#undef MACHINE_FUNCTION_ANALYSIS
+#undef MACHINE_FUNCTION_PASS
+#undef MACHINE_FUNCTION_PASS_WITH_PARAMS
+#undef ADD_CLASS_PASS_TO_PASS_NAME
+#undef ADD_CLASS_PASS_TO_PASS_NAME_WITH_PARAMS
+}
+
+#define ADD_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ PM.addPass(CREATE_PASS); \
+ return true; \
+ }
+
+#define ADD_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (PassBuilder::checkParametrizedPassName(Name, NAME)) { \
+ auto Params = PassBuilder::parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) { \
+ errs() << NAME ": " << toString(Params.takeError()) << '\n'; \
+ return false; \
+ } \
+ PM.addPass(CREATE_PASS(Params.get())); \
+ return true; \
+ }
+
+PB.registerPipelineParsingCallback([=](StringRef Name, ModulePassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define MODULE_PASS(NAME, CREATE_PASS) ADD_PASS(NAME, CREATE_PASS)
+#include GET_PASS_REGISTRY
+#undef MODULE_PASS
+ return false;
+});
+
+PB.registerPipelineParsingCallback([=](StringRef Name, ModulePassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define MODULE_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
+ ADD_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)
+#include GET_PASS_REGISTRY
+#undef MODULE_PASS_WITH_PARAMS
+ return false;
+});
+
+PB.registerPipelineParsingCallback([=](StringRef Name, FunctionPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define FUNCTION_PASS(NAME, CREATE_PASS) ADD_PASS(NAME, CREATE_PASS)
+#include GET_PASS_REGISTRY
+#undef FUNCTION_PASS
+ return false;
+});
+
+PB.registerPipelineParsingCallback([=](StringRef Name, FunctionPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \
+ ADD_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)
+#include GET_PASS_REGISTRY
+#undef FUNCTION_PASS_WITH_PARAMS
+ return false;
+});
+
+PB.registerPipelineParsingCallback([=](StringRef Name, LoopPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define LOOP_PASS(NAME, CREATE_PASS) ADD_PASS(NAME, CREATE_PASS)
+#include GET_PASS_REGISTRY
+ return false;
+});
+
+PB.registerPipelineParsingCallback([=](StringRef Name,
+ MachineFunctionPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define MACHINE_FUNCTION_PASS(NAME, CREATE_PASS) ADD_PASS(NAME, CREATE_PASS)
+#include GET_PASS_REGISTRY
+ return false;
+});
+
+PB.registerPipelineParsingCallback([=](StringRef Name, FunctionPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+#define MACHINE_FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, \
+ PARAMS) \
+ ADD_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)
+#include GET_PASS_REGISTRY
+#undef MACHINE_FUNCTION_PASS_WITH_PARAMS
+ return false;
+});
+
+#undef ADD_PASS
+#undef ADD_PASS_WITH_PARAMS
+
+PB.registerAnalysisRegistrationCallback([](ModuleAnalysisManager &AM) {
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ AM.registerPass([&] { return CREATE_PASS; });
+#include GET_PASS_REGISTRY
+#undef MODULE_ANALYSIS
+});
+
+PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &AM) {
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ AM.registerPass([&] { return CREATE_PASS; });
+#include GET_PASS_REGISTRY
+#undef FUNCTION_ANALYSIS
+});
+
+PB.registerParseAACallback([](StringRef Name, AAManager &AM) {
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ AM.registerFunctionAnalysis< \
+ std::remove_reference_t<decltype(CREATE_PASS)>>(); \
+ return true; \
+ }
+#include GET_PASS_REGISTRY
+#undef FUNCTION_ALIAS_ANALYSIS
+ return false;
+});
+
+PB.registerAnalysisRegistrationCallback([](LoopAnalysisManager &AM) {
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ AM.registerPass([&] { return CREATE_PASS; });
+#include GET_PASS_REGISTRY
+#undef LOOP_ANALYSIS
+});
+
+PB.registerAnalysisRegistrationCallback([](MachineFunctionAnalysisManager &AM) {
+#define MACHINE_FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ AM.registerPass([&] { return CREATE_PASS; });
+#include GET_PASS_REGISTRY
+#undef MACHINE_FUNCTION_ANALYSIS
+});
+
+#undef GET_PASS_REGISTRY
+#endif // GET_PASS_REGISTRY
diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
index 346ca4ad2eb3..f05b90114d75 100644
--- a/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
+++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
@@ -184,7 +184,7 @@ public:
private:
std::vector<std::string> Filenames;
std::vector<ProfileMappingRecord> MappingRecords;
- InstrProfSymtab ProfileNames;
+ std::unique_ptr<InstrProfSymtab> ProfileNames;
size_t CurrentRecord = 0;
std::vector<StringRef> FunctionsFilenames;
std::vector<CounterExpression> Expressions;
@@ -195,8 +195,9 @@ private:
// D69471, which can split up function records into multiple sections on ELF.
FuncRecordsStorage FuncRecords;
- BinaryCoverageReader(FuncRecordsStorage &&FuncRecords)
- : FuncRecords(std::move(FuncRecords)) {}
+ BinaryCoverageReader(std::unique_ptr<InstrProfSymtab> Symtab,
+ FuncRecordsStorage &&FuncRecords)
+ : ProfileNames(std::move(Symtab)), FuncRecords(std::move(FuncRecords)) {}
public:
BinaryCoverageReader(const BinaryCoverageReader &) = delete;
@@ -209,12 +210,10 @@ public:
SmallVectorImpl<object::BuildIDRef> *BinaryIDs = nullptr);
static Expected<std::unique_ptr<BinaryCoverageReader>>
- createCoverageReaderFromBuffer(StringRef Coverage,
- FuncRecordsStorage &&FuncRecords,
- InstrProfSymtab &&ProfileNames,
- uint8_t BytesInAddress,
- llvm::endianness Endian,
- StringRef CompilationDir = "");
+ createCoverageReaderFromBuffer(
+ StringRef Coverage, FuncRecordsStorage &&FuncRecords,
+ std::unique_ptr<InstrProfSymtab> ProfileNamesPtr, uint8_t BytesInAddress,
+ llvm::endianness Endian, StringRef CompilationDir = "");
Error readNextRecord(CoverageMappingRecord &Record) override;
};
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index 25ec06a73920..3a71c02d6d5c 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -471,6 +471,13 @@ private:
public:
InstrProfSymtab() = default;
+ // Not copyable or movable.
+ // Consider std::unique_ptr for move.
+ InstrProfSymtab(const InstrProfSymtab &) = delete;
+ InstrProfSymtab &operator=(const InstrProfSymtab &) = delete;
+ InstrProfSymtab(InstrProfSymtab &&) = delete;
+ InstrProfSymtab &operator=(InstrProfSymtab &&) = delete;
+
/// Create InstrProfSymtab from an object file section which
/// contains function PGO names. When section may contain raw
/// string data or string data in compressed form. This method
@@ -1055,9 +1062,15 @@ inline uint64_t ComputeHash(StringRef K) { return ComputeHash(HashType, K); }
// as appropriate when updating the indexed profile format.
struct Header {
uint64_t Magic;
+ // The lower 32 bits specify the version of the indexed profile.
+ // The most significant 32 bits are reserved to specify the variant types of
+ // the profile.
uint64_t Version;
uint64_t Unused; // Becomes unused since version 4
uint64_t HashType;
+ // This field records the offset of this hash table's metadata (i.e., the
+ // number of buckets and entries), which follows right after the payload of
+ // the entire hash table.
uint64_t HashOffset;
uint64_t MemProfOffset;
uint64_t BinaryIdOffset;
@@ -1182,6 +1195,7 @@ namespace RawInstrProf {
// Version 7: Reorder binary id and include version in signature.
// Version 8: Use relative counter pointer.
// Version 9: Added relative bitmap bytes pointer and count used by MC/DC.
+// Version 10: Added vtable, a new type of value profile data.
const uint64_t Version = INSTR_PROF_RAW_VERSION;
template <class IntPtrT> inline uint64_t getMagic();
diff --git a/llvm/include/llvm/ProfileData/InstrProfWriter.h b/llvm/include/llvm/ProfileData/InstrProfWriter.h
index f70574d1f756..d2156c867872 100644
--- a/llvm/include/llvm/ProfileData/InstrProfWriter.h
+++ b/llvm/include/llvm/ProfileData/InstrProfWriter.h
@@ -75,11 +75,14 @@ private:
// deployment of newer versions of llvm-profdata.
bool WritePrevVersion = false;
+ // The MemProf version we should write.
+ memprof::IndexedVersion MemProfVersionRequested;
+
public:
- InstrProfWriter(bool Sparse = false,
- uint64_t TemporalProfTraceReservoirSize = 0,
- uint64_t MaxTemporalProfTraceLength = 0,
- bool WritePrevVersion = false);
+ InstrProfWriter(
+ bool Sparse = false, uint64_t TemporalProfTraceReservoirSize = 0,
+ uint64_t MaxTemporalProfTraceLength = 0, bool WritePrevVersion = false,
+ memprof::IndexedVersion MemProfVersionRequested = memprof::Version0);
~InstrProfWriter();
StringMap<ProfilingData> &getProfileData() { return FunctionData; }
diff --git a/llvm/include/llvm/ProfileData/MemProf.h b/llvm/include/llvm/ProfileData/MemProf.h
index 37c19094bc2a..ff00900a1466 100644
--- a/llvm/include/llvm/ProfileData/MemProf.h
+++ b/llvm/include/llvm/ProfileData/MemProf.h
@@ -1,6 +1,7 @@
#ifndef LLVM_PROFILEDATA_MEMPROF_H_
#define LLVM_PROFILEDATA_MEMPROF_H_
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/GlobalValue.h"
@@ -15,6 +16,20 @@
namespace llvm {
namespace memprof {
+// The versions of the indexed MemProf format
+enum IndexedVersion : uint64_t {
+ // Version 0: This version didn't have a version field.
+ Version0 = 0,
+ // Version 1: Added a version field to the header.
+ Version1 = 1,
+};
+
+constexpr uint64_t MinimumSupportedVersion = Version0;
+constexpr uint64_t MaximumSupportedVersion = Version1;
+
+// Verify that the minimum and maximum satisfy the obvious constraint.
+static_assert(MinimumSupportedVersion <= MaximumSupportedVersion);
+
enum class Meta : uint64_t {
Start = 0,
#define MIBEntryDef(NameTag, Name, Type) NameTag,
@@ -252,18 +267,26 @@ struct Frame {
}
};
+// A type representing the index into the table of call stacks.
+using CallStackId = uint64_t;
+
// Holds allocation information in a space efficient format where frames are
// represented using unique identifiers.
struct IndexedAllocationInfo {
// The dynamic calling context for the allocation in bottom-up (leaf-to-root)
// order. Frame contents are stored out-of-line.
+ // TODO: Remove once we fully transition to CSId.
llvm::SmallVector<FrameId> CallStack;
+ // Conceptually the same as above. We are going to keep both CallStack and
+ // CallStackId while we are transitioning from CallStack to CallStackId.
+ CallStackId CSId = 0;
// The statistics obtained from the runtime for the allocation.
PortableMemInfoBlock Info;
IndexedAllocationInfo() = default;
- IndexedAllocationInfo(ArrayRef<FrameId> CS, const MemInfoBlock &MB)
- : CallStack(CS.begin(), CS.end()), Info(MB) {}
+ IndexedAllocationInfo(ArrayRef<FrameId> CS, CallStackId CSId,
+ const MemInfoBlock &MB)
+ : CallStack(CS.begin(), CS.end()), CSId(CSId), Info(MB) {}
// Returns the size in bytes when this allocation info struct is serialized.
size_t serializedSize() const {
@@ -622,6 +645,21 @@ public:
return Frame::deserialize(D);
}
};
+
+// Compute a CallStackId for a given call stack.
+CallStackId hashCallStack(ArrayRef<FrameId> CS);
+
+// Verify that each CallStackId is computed with hashCallStack. This function
+// is intended to help transition from CallStack to CSId in
+// IndexedAllocationInfo.
+void verifyIndexedMemProfRecord(const IndexedMemProfRecord &Record);
+
+// Verify that each CallStackId is computed with hashCallStack. This function
+// is intended to help transition from CallStack to CSId in
+// IndexedAllocationInfo.
+void verifyFunctionProfileData(
+ const llvm::MapVector<GlobalValue::GUID, IndexedMemProfRecord>
+ &FunctionProfileData);
} // namespace memprof
} // namespace llvm
diff --git a/llvm/include/llvm/Support/BalancedPartitioning.h b/llvm/include/llvm/Support/BalancedPartitioning.h
index 9738e742f7f1..539d157343fb 100644
--- a/llvm/include/llvm/Support/BalancedPartitioning.h
+++ b/llvm/include/llvm/Support/BalancedPartitioning.h
@@ -142,9 +142,8 @@ private:
std::optional<BPThreadPool> &TP) const;
/// Run bisection iterations
- void runIterations(const FunctionNodeRange Nodes, unsigned RecDepth,
- unsigned LeftBucket, unsigned RightBucket,
- std::mt19937 &RNG) const;
+ void runIterations(const FunctionNodeRange Nodes, unsigned LeftBucket,
+ unsigned RightBucket, std::mt19937 &RNG) const;
/// Run a bisection iteration to improve the optimization goal
/// \returns the total number of moved FunctionNodes
diff --git a/llvm/include/llvm/Support/DXILABI.h b/llvm/include/llvm/Support/DXILABI.h
index c1d81775b671..da4bea8fc46e 100644
--- a/llvm/include/llvm/Support/DXILABI.h
+++ b/llvm/include/llvm/Support/DXILABI.h
@@ -23,20 +23,20 @@ namespace llvm {
namespace dxil {
enum class ParameterKind : uint8_t {
- INVALID = 0,
- VOID,
- HALF,
- FLOAT,
- DOUBLE,
+ Invalid = 0,
+ Void,
+ Half,
+ Float,
+ Double,
I1,
I8,
I16,
I32,
I64,
- OVERLOAD,
- CBUFFER_RET,
- RESOURCE_RET,
- DXIL_HANDLE,
+ Overload,
+ CBufferRet,
+ ResourceRet,
+ DXILHandle,
};
/// The kind of resource for an SRV or UAV resource. Sometimes referred to as
diff --git a/llvm/include/llvm/Support/FormattedStream.h b/llvm/include/llvm/Support/FormattedStream.h
index 5f937cfa7984..850a18dbb941 100644
--- a/llvm/include/llvm/Support/FormattedStream.h
+++ b/llvm/include/llvm/Support/FormattedStream.h
@@ -52,6 +52,10 @@ class formatted_raw_ostream : public raw_ostream {
/// have the rest of it.
SmallString<4> PartialUTF8Char;
+ /// DisableScan - Temporarily disable scanning of output. Used to ignore color
+ /// codes.
+ bool DisableScan;
+
void write_impl(const char *Ptr, size_t Size) override;
/// current_pos - Return the current position within the stream,
@@ -89,9 +93,33 @@ class formatted_raw_ostream : public raw_ostream {
SetUnbuffered();
TheStream->SetUnbuffered();
+ enable_colors(TheStream->colors_enabled());
+
Scanned = nullptr;
}
+ void PreDisableScan() {
+ assert(!DisableScan);
+ ComputePosition(getBufferStart(), GetNumBytesInBuffer());
+ assert(PartialUTF8Char.empty());
+ DisableScan = true;
+ }
+
+ void PostDisableScan() {
+ assert(DisableScan);
+ DisableScan = false;
+ Scanned = getBufferStart() + GetNumBytesInBuffer();
+ }
+
+ struct DisableScanScope {
+ formatted_raw_ostream *S;
+
+ DisableScanScope(formatted_raw_ostream *FRO) : S(FRO) {
+ S->PreDisableScan();
+ }
+ ~DisableScanScope() { S->PostDisableScan(); }
+ };
+
public:
/// formatted_raw_ostream - Open the specified file for
/// writing. If an error occurs, information about the error is
@@ -104,12 +132,12 @@ public:
/// underneath it.
///
formatted_raw_ostream(raw_ostream &Stream)
- : TheStream(nullptr), Position(0, 0) {
+ : TheStream(nullptr), Position(0, 0), DisableScan(false) {
setStream(Stream);
}
- explicit formatted_raw_ostream() : TheStream(nullptr), Position(0, 0) {
- Scanned = nullptr;
- }
+ explicit formatted_raw_ostream()
+ : TheStream(nullptr), Position(0, 0), Scanned(nullptr),
+ DisableScan(false) {}
~formatted_raw_ostream() override {
flush();
@@ -136,17 +164,26 @@ public:
}
raw_ostream &resetColor() override {
- TheStream->resetColor();
+ if (colors_enabled()) {
+ DisableScanScope S(this);
+ raw_ostream::resetColor();
+ }
return *this;
}
raw_ostream &reverseColor() override {
- TheStream->reverseColor();
+ if (colors_enabled()) {
+ DisableScanScope S(this);
+ raw_ostream::reverseColor();
+ }
return *this;
}
raw_ostream &changeColor(enum Colors Color, bool Bold, bool BG) override {
- TheStream->changeColor(Color, Bold, BG);
+ if (colors_enabled()) {
+ DisableScanScope S(this);
+ raw_ostream::changeColor(Color, Bold, BG);
+ }
return *this;
}
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 899eaad5842a..5765926d6d93 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -837,6 +837,11 @@ HANDLE_TARGET_OPCODE(G_MEMMOVE)
HANDLE_TARGET_OPCODE(G_MEMSET)
HANDLE_TARGET_OPCODE(G_BZERO)
+/// llvm.trap, llvm.debugtrap and llvm.ubsantrap intrinsics
+HANDLE_TARGET_OPCODE(G_TRAP)
+HANDLE_TARGET_OPCODE(G_DEBUGTRAP)
+HANDLE_TARGET_OPCODE(G_UBSANTRAP)
+
/// Vector reductions
HANDLE_TARGET_OPCODE(G_VECREDUCE_SEQ_FADD)
HANDLE_TARGET_OPCODE(G_VECREDUCE_SEQ_FMUL)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 67d405ba96fa..d0f471eb29b6 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1576,6 +1576,28 @@ def G_BZERO : GenericInstruction {
}
//------------------------------------------------------------------------------
+// Trap intrinsics
+//------------------------------------------------------------------------------
+def G_TRAP : GenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins);
+ let hasSideEffects = true;
+ let mayStore = true;
+}
+
+def G_DEBUGTRAP : GenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins);
+ let hasSideEffects = true;
+}
+
+def G_UBSANTRAP : GenericInstruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i8imm:$kind);
+ let hasSideEffects = true;
+}
+
+//------------------------------------------------------------------------------
// Bitfield extraction.
//------------------------------------------------------------------------------
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 6980cbd04aeb..72d3c0ea69bc 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -179,6 +179,7 @@ def FmArcp : MIFlagEnum<"FmArcp">;
def FmContract : MIFlagEnum<"FmContract">;
def FmAfn : MIFlagEnum<"FmAfn">;
def FmReassoc : MIFlagEnum<"FmReassoc">;
+def IsExact : MIFlagEnum<"IsExact">;
def MIFlags;
// def not; -> Already defined as a SDNode
@@ -1036,7 +1037,20 @@ def sdiv_by_const : GICombineRule<
[{ return Helper.matchSDivByConst(*${root}); }]),
(apply [{ Helper.applySDivByConst(*${root}); }])>;
-def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;
+def sdiv_by_pow2 : GICombineRule<
+ (defs root:$root),
+ (match (G_SDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
+ [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/true); }]),
+ (apply [{ Helper.applySDivByPow2(*${root}); }])>;
+
+def udiv_by_pow2 : GICombineRule<
+ (defs root:$root),
+ (match (G_UDIV $dst, $x, $y, (MIFlags (not IsExact))):$root,
+ [{ return Helper.matchDivByPow2(*${root}, /*IsSigned=*/false); }]),
+ (apply [{ Helper.applyUDivByPow2(*${root}); }])>;
+
+def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const,
+ sdiv_by_pow2, udiv_by_pow2]>;
def reassoc_ptradd : GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index b1f3c500a1b6..3208c63fb42d 100644
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -90,6 +90,7 @@ def : GINodeEquiv<G_UDIVFIX, udivfix>;
def : GINodeEquiv<G_SDIVFIXSAT, sdivfixsat>;
def : GINodeEquiv<G_UDIVFIXSAT, udivfixsat>;
def : GINodeEquiv<G_SELECT, select>;
+def : GINodeEquiv<G_SELECT, vselect>;
def : GINodeEquiv<G_FNEG, fneg>;
def : GINodeEquiv<G_FPEXT, fpextend>;
def : GINodeEquiv<G_FPTRUNC, fpround>;
@@ -250,6 +251,9 @@ def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap>;
def : GINodeEquiv<G_FENCE, atomic_fence>;
def : GINodeEquiv<G_PREFETCH, prefetch>;
+def : GINodeEquiv<G_TRAP, trap>;
+def : GINodeEquiv<G_DEBUGTRAP, debugtrap>;
+def : GINodeEquiv<G_UBSANTRAP, ubsantrap>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
// Should be used on defs that subclass GIComplexOperandMatcher<>.
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index cb1c0ed2513d..1f7dc6922f13 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -83,10 +83,27 @@ class RegInfoByHwMode<list<HwMode> Ms = [], list<RegInfo> Ts = []>
list<RegInfo> Objects = Ts;
}
+class SubRegRange<int size, int offset = 0> {
+ int Size = size; // Sub register size in bits.
+ int Offset = offset; // Offset of the first bit of the sub-reg index.
+}
+
+class SubRegRangeByHwMode<list<HwMode> Ms = [], list<SubRegRange> Ts = []>
+ : HwModeSelect<Ms> {
+ // The length of this list must be the same as the length of Ms.
+ list<SubRegRange> Objects = Ts;
+}
+
// SubRegIndex - Use instances of SubRegIndex to identify subregisters.
class SubRegIndex<int size, int offset = 0> {
string Namespace = "";
+ // The size/offset information, parameterized by a HW mode.
+ // If the HwModes provided for SubRegRanges does not include the DefaultMode,
+ // the Size and Offset fields below will be used for the default. Otherwise,
+ // the Size and Offset fields are ignored.
+ SubRegRangeByHwMode SubRegRanges;
+
// Size - Size (in bits) of the sub-registers represented by this index.
int Size = size;
diff --git a/llvm/include/llvm/TextAPI/DylibReader.h b/llvm/include/llvm/TextAPI/DylibReader.h
index b556fbf6832a..6861d3cb1591 100644
--- a/llvm/include/llvm/TextAPI/DylibReader.h
+++ b/llvm/include/llvm/TextAPI/DylibReader.h
@@ -13,6 +13,7 @@
#ifndef LLVM_TEXTAPI_DYLIBREADER_H
#define LLVM_TEXTAPI_DYLIBREADER_H
+#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TextAPI/ArchitectureSet.h"
@@ -43,6 +44,14 @@ Expected<Records> readFile(MemoryBufferRef Buffer, const ParseOption &Opt);
/// \param Buffer Data that points to dylib.
Expected<std::unique_ptr<InterfaceFile>> get(MemoryBufferRef Buffer);
+using SymbolToSourceLocMap = llvm::StringMap<RecordLoc>;
+/// Get the source location for each symbol from dylib.
+///
+/// \param DSYM Path to DSYM file.
+/// \param T Requested target slice for dylib.
+SymbolToSourceLocMap accumulateSourceLocFromDSYM(const StringRef DSYM,
+ const Target &T);
+
} // namespace llvm::MachO::DylibReader
#endif // LLVM_TEXTAPI_DYLIBREADER_H
diff --git a/llvm/include/llvm/TextAPI/Record.h b/llvm/include/llvm/TextAPI/Record.h
index ef152ce43387..7d721988ec3d 100644
--- a/llvm/include/llvm/TextAPI/Record.h
+++ b/llvm/include/llvm/TextAPI/Record.h
@@ -27,6 +27,23 @@ LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
class RecordsSlice;
+// Defines lightweight source location for records.
+struct RecordLoc {
+ RecordLoc() = default;
+ RecordLoc(std::string File, unsigned Line)
+ : File(std::move(File)), Line(Line) {}
+
+ /// Whether there is source location tied to the RecordLoc object.
+ bool isValid() const { return !File.empty(); }
+
+ bool operator==(const RecordLoc &O) const {
+ return std::tie(File, Line) == std::tie(O.File, O.Line);
+ }
+
+ const std::string File;
+ const unsigned Line = 0;
+};
+
// Defines a list of linkage types.
enum class RecordLinkage : uint8_t {
// Unknown linkage.
diff --git a/llvm/include/llvm/TextAPI/Utils.h b/llvm/include/llvm/TextAPI/Utils.h
index 31d3c45f9e29..319e0abce25b 100644
--- a/llvm/include/llvm/TextAPI/Utils.h
+++ b/llvm/include/llvm/TextAPI/Utils.h
@@ -14,8 +14,10 @@
#define LLVM_TEXTAPI_UTILS_H
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
#if !defined(PATH_MAX)
#define PATH_MAX 1024
@@ -68,5 +70,10 @@ std::error_code make_relative(StringRef From, StringRef To,
/// \param IsSymLink Whether path points to a symlink.
bool isPrivateLibrary(StringRef Path, bool IsSymLink = false);
+/// Create a regex rule from provided glob string.
+/// \param Glob String that represents glob input.
+/// \return The equivalent regex rule.
+llvm::Expected<llvm::Regex> createRegexFromGlob(llvm::StringRef Glob);
+
} // namespace llvm::MachO
#endif // LLVM_TEXTAPI_UTILS_H
diff --git a/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h b/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h
new file mode 100644
index 000000000000..7ae6194da7c9
--- /dev/null
+++ b/llvm/include/llvm/Transforms/IPO/SampleProfileMatcher.h
@@ -0,0 +1,154 @@
+//===- Transforms/IPO/SampleProfileMatcher.h ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file provides the interface for SampleProfileMatcher.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_SAMPLEPROFILEMATCHER_H
+#define LLVM_TRANSFORMS_IPO_SAMPLEPROFILEMATCHER_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h"
+
+namespace llvm {
+
+// Sample profile matching - fuzzy match.
+class SampleProfileMatcher {
+ Module &M;
+ SampleProfileReader &Reader;
+ const PseudoProbeManager *ProbeManager;
+ const ThinOrFullLTOPhase LTOPhase;
+ SampleProfileMap FlattenedProfiles;
+ // For each function, the matcher generates a map, of which each entry is a
+ // mapping from the source location of current build to the source location in
+ // the profile.
+ StringMap<LocToLocMap> FuncMappings;
+
+ // Match state for an anchor/callsite.
+ enum class MatchState {
+ Unknown = 0,
+ // Initial match between input profile and current IR.
+ InitialMatch = 1,
+ // Initial mismatch between input profile and current IR.
+ InitialMismatch = 2,
+ // InitialMatch stays matched after fuzzy profile matching.
+ UnchangedMatch = 3,
+ // InitialMismatch stays mismatched after fuzzy profile matching.
+ UnchangedMismatch = 4,
+ // InitialMismatch is recovered after fuzzy profile matching.
+ RecoveredMismatch = 5,
+ // InitialMatch is removed and becomes mismatched after fuzzy profile
+ // matching.
+ RemovedMatch = 6,
+ };
+
+ // For each function, store every callsite and its matching state into this
+ // map, of which each entry is a pair of callsite location and MatchState.
+ // This is used for profile staleness computation and report.
+ StringMap<std::unordered_map<LineLocation, MatchState, LineLocationHash>>
+ FuncCallsiteMatchStates;
+
+ // Profile mismatch statistics:
+ uint64_t TotalProfiledFunc = 0;
+ // Num of checksum-mismatched functions.
+ uint64_t NumStaleProfileFunc = 0;
+ uint64_t TotalProfiledCallsites = 0;
+ uint64_t NumMismatchedCallsites = 0;
+ uint64_t NumRecoveredCallsites = 0;
+ // Total samples for all profiled functions.
+ uint64_t TotalFunctionSamples = 0;
+ // Total samples for all checksum-mismatched functions.
+ uint64_t MismatchedFunctionSamples = 0;
+ uint64_t MismatchedCallsiteSamples = 0;
+ uint64_t RecoveredCallsiteSamples = 0;
+
+ // A dummy name for unknown indirect callee, used to differentiate from a
+ // non-call instruction that also has an empty callee name.
+ static constexpr const char *UnknownIndirectCallee =
+ "unknown.indirect.callee";
+
+public:
+ SampleProfileMatcher(Module &M, SampleProfileReader &Reader,
+ const PseudoProbeManager *ProbeManager,
+ ThinOrFullLTOPhase LTOPhase)
+ : M(M), Reader(Reader), ProbeManager(ProbeManager), LTOPhase(LTOPhase){};
+ void runOnModule();
+ void clearMatchingData() {
+ // Do not clear FuncMappings, it stores IRLoc to ProfLoc remappings which
+ // will be used for sample loader.
+ FuncCallsiteMatchStates.clear();
+ }
+
+private:
+ FunctionSamples *getFlattenedSamplesFor(const Function &F) {
+ StringRef CanonFName = FunctionSamples::getCanonicalFnName(F);
+ auto It = FlattenedProfiles.find(FunctionId(CanonFName));
+ if (It != FlattenedProfiles.end())
+ return &It->second;
+ return nullptr;
+ }
+ void runOnFunction(Function &F);
+ void findIRAnchors(const Function &F,
+ std::map<LineLocation, StringRef> &IRAnchors);
+ void findProfileAnchors(
+ const FunctionSamples &FS,
+ std::map<LineLocation, std::unordered_set<FunctionId>> &ProfileAnchors);
+ // Record the callsite match states for profile staleness report, the result
+ // is saved in FuncCallsiteMatchStates.
+ void recordCallsiteMatchStates(
+ const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
+ const std::map<LineLocation, std::unordered_set<FunctionId>>
+ &ProfileAnchors,
+ const LocToLocMap *IRToProfileLocationMap);
+
+ bool isMismatchState(const enum MatchState &State) {
+ return State == MatchState::InitialMismatch ||
+ State == MatchState::UnchangedMismatch ||
+ State == MatchState::RemovedMatch;
+ };
+
+ bool isInitialState(const enum MatchState &State) {
+ return State == MatchState::InitialMatch ||
+ State == MatchState::InitialMismatch;
+ };
+
+ bool isFinalState(const enum MatchState &State) {
+ return State == MatchState::UnchangedMatch ||
+ State == MatchState::UnchangedMismatch ||
+ State == MatchState::RecoveredMismatch ||
+ State == MatchState::RemovedMatch;
+ };
+
+ // Count the samples of checksum mismatched function for the top-level
+ // function and all inlinees.
+ void countMismatchedFuncSamples(const FunctionSamples &FS, bool IsTopLevel);
+ // Count the number of mismatched or recovered callsites.
+ void countMismatchCallsites(const FunctionSamples &FS);
+ // Count the samples of mismatched or recovered callsites for top-level
+ // function and all inlinees.
+ void countMismatchedCallsiteSamples(const FunctionSamples &FS);
+ void computeAndReportProfileStaleness();
+
+ LocToLocMap &getIRToProfileLocationMap(const Function &F) {
+ auto Ret = FuncMappings.try_emplace(
+ FunctionSamples::getCanonicalFnName(F.getName()), LocToLocMap());
+ return Ret.first->second;
+ }
+ void distributeIRToProfileLocationMap();
+ void distributeIRToProfileLocationMap(FunctionSamples &FS);
+ void runStaleProfileMatching(
+ const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
+ const std::map<LineLocation, std::unordered_set<FunctionId>>
+ &ProfileAnchors,
+ LocToLocMap &IRToProfileLocationMap);
+ void reportOrPersistProfileStats();
+};
+} // end namespace llvm
+#endif // LLVM_TRANSFORMS_IPO_SAMPLEPROFILEMATCHER_H
diff --git a/llvm/include/llvm/Transforms/Scalar/Float2Int.h b/llvm/include/llvm/Transforms/Scalar/Float2Int.h
index 83be329bed60..337e229efcf3 100644
--- a/llvm/include/llvm/Transforms/Scalar/Float2Int.h
+++ b/llvm/include/llvm/Transforms/Scalar/Float2Int.h
@@ -44,7 +44,7 @@ private:
std::optional<ConstantRange> calcRange(Instruction *I);
void walkBackwards();
void walkForwards();
- bool validateAndTransform();
+ bool validateAndTransform(const DataLayout &DL);
Value *convert(Instruction *I, Type *ToTy);
void cleanup();
diff --git a/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
index 27b34ef023db..333ed6774d6c 100644
--- a/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -249,7 +249,7 @@ public:
Instruction *Addr, BasicBlock *ExitBlock) const;
void severSplitPHINodesOfEntry(BasicBlock *&Header);
- void severSplitPHINodesOfExits(const SmallPtrSetImpl<BasicBlock *> &Exits);
+ void severSplitPHINodesOfExits(const SetVector<BasicBlock *> &Exits);
void splitReturnBlocks();
Function *constructFunction(const ValueSet &inputs,
diff --git a/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h b/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
index 0a0e16d2a9e6..fb3ab33a0629 100644
--- a/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
+++ b/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
@@ -84,6 +84,7 @@ bool isLifetimeIntrinsic(Value *V);
Value *readRegister(IRBuilder<> &IRB, StringRef Name);
Value *getFP(IRBuilder<> &IRB);
Value *getPC(const Triple &TargetTriple, IRBuilder<> &IRB);
+Value *getAndroidSlotPtr(IRBuilder<> &IRB, int Slot);
} // namespace memtag
} // namespace llvm
diff --git a/llvm/include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h b/llvm/include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h
index 66814d395273..d898ee58307e 100644
--- a/llvm/include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h
+++ b/llvm/include/llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h
@@ -86,9 +86,12 @@ template <> struct IRTraits<BasicBlock> {
// SampleProfileProber.
class PseudoProbeManager {
DenseMap<uint64_t, PseudoProbeDescriptor> GUIDToProbeDescMap;
+ const ThinOrFullLTOPhase LTOPhase;
public:
- PseudoProbeManager(const Module &M) {
+ PseudoProbeManager(const Module &M,
+ ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
+ : LTOPhase(LTOPhase) {
if (NamedMDNode *FuncInfo =
M.getNamedMetadata(PseudoProbeDescMetadataName)) {
for (const auto *Operand : FuncInfo->operands()) {
@@ -126,17 +129,16 @@ public:
bool profileIsValid(const Function &F, const FunctionSamples &Samples) const {
const auto *Desc = getDesc(F);
- if (!Desc) {
- LLVM_DEBUG(dbgs() << "Probe descriptor missing for Function "
- << F.getName() << "\n");
- return false;
- }
- if (Desc->getFunctionHash() != Samples.getFunctionHash()) {
- LLVM_DEBUG(dbgs() << "Hash mismatch for Function " << F.getName()
- << "\n");
- return false;
- }
- return true;
+ assert((LTOPhase != ThinOrFullLTOPhase::ThinLTOPostLink || !Desc ||
+ profileIsHashMismatched(*Desc, Samples) ==
+ F.hasFnAttribute("profile-checksum-mismatch")) &&
+ "In post-link, profile checksum matching state doesn't match "
+ "function 'profile-checksum-mismatch' attribute.");
+ (void)LTOPhase;
+ // The desc for import function is unavailable. Check the function attribute
+ // for mismatch.
+ return (!Desc && !F.hasFnAttribute("profile-checksum-mismatch")) ||
+ (Desc && !profileIsHashMismatched(*Desc, Samples));
}
};
@@ -144,6 +146,10 @@ public:
extern cl::opt<bool> SampleProfileUseProfi;
+static inline bool skipProfileForFunction(const Function &F) {
+ return F.isDeclaration() || !F.hasFnAttribute("use-sample-profile");
+}
+
template <typename FT> class SampleProfileLoaderBaseImpl {
public:
SampleProfileLoaderBaseImpl(std::string Name, std::string RemapName,
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 6139b5be85be..749374a3aa48 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -751,7 +751,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
const DataLayout &DL) {
APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
- return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
+ return ConstantFoldLoadFromConstPtr(C, Ty, std::move(Offset), DL);
}
Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index e55eaa55f8e9..c75460f44c1d 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -800,7 +800,7 @@ class InlineCostCallAnalyzer final : public CallAnalyzer {
return false;
} else {
// Otherwise, require instrumentation profile.
- if (!(PSI->hasInstrumentationProfile() || PSI->hasSampleProfile()))
+ if (!PSI->hasInstrumentationProfile())
return false;
}
diff --git a/llvm/lib/Analysis/InlineOrder.cpp b/llvm/lib/Analysis/InlineOrder.cpp
index 09fc4f9a00f4..f156daa2f126 100644
--- a/llvm/lib/Analysis/InlineOrder.cpp
+++ b/llvm/lib/Analysis/InlineOrder.cpp
@@ -114,7 +114,10 @@ public:
CostBenefitPriority(const CallBase *CB, FunctionAnalysisManager &FAM,
const InlineParams &Params) {
auto IC = getInlineCostWrapper(const_cast<CallBase &>(*CB), FAM, Params);
- Cost = IC.getCost();
+ if (IC.isVariable())
+ Cost = IC.getCost();
+ else
+ Cost = IC.isNever() ? INT_MAX : INT_MIN;
StaticBonusApplied = IC.getStaticBonusApplied();
CostBenefit = IC.getCostBenefit();
}
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 7a37ae86c7f3..9ff3faff7990 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6115,7 +6115,8 @@ static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
if (OffsetInt.srem(4) != 0)
return nullptr;
- Constant *Loaded = ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, OffsetInt, DL);
+ Constant *Loaded =
+ ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
if (!Loaded)
return nullptr;
@@ -6983,7 +6984,8 @@ Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
if (PtrOp == GV) {
// Index size may have changed due to address space casts.
Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
- return ConstantFoldLoadFromConstPtr(GV, LI->getType(), Offset, Q.DL);
+ return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
+ Q.DL);
}
return nullptr;
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 9ae31d165235..b8bc81197c95 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1069,14 +1069,14 @@ static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
// Handle range checking idiom produced by InstCombine. We will subtract the
// offset from the allowed range for RHS in this case.
const APInt *C;
- if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
+ if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
Offset = *C;
return true;
}
// Handle the symmetric case. This appears in saturation patterns like
// (x == 16) ? 16 : (x + 1).
- if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
+ if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
Offset = -*C;
return true;
}
diff --git a/llvm/lib/Analysis/ReplayInlineAdvisor.cpp b/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
index 0814483db343..2ca02eb17417 100644
--- a/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/ReplayInlineAdvisor.cpp
@@ -43,8 +43,8 @@ ReplayInlineAdvisor::ReplayInlineAdvisor(
// main:3:1.1;
// We use the callsite string after `at callsite` to replay inlining.
line_iterator LineIt(*BufferOrErr.get(), /*SkipBlanks=*/true);
- static const std::string PositiveRemark = "' inlined into '";
- static const std::string NegativeRemark = "' will not be inlined into '";
+ const std::string PositiveRemark = "' inlined into '";
+ const std::string NegativeRemark = "' will not be inlined into '";
for (; !LineIt.is_at_eof(); ++LineIt) {
StringRef Line = *LineIt;
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index c8195584ade3..9e17dcaa5592 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -1190,107 +1190,113 @@ void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) {
llvm::sort(ScalarDescs, compareByVectorFnName);
}
+static const VecDesc VecFuncs_Accelerate[] = {
+#define TLI_DEFINE_ACCELERATE_VECFUNCS
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+static const VecDesc VecFuncs_DarwinLibSystemM[] = {
+#define TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+static const VecDesc VecFuncs_LIBMVEC_X86[] = {
+#define TLI_DEFINE_LIBMVEC_X86_VECFUNCS
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+static const VecDesc VecFuncs_MASSV[] = {
+#define TLI_DEFINE_MASSV_VECFUNCS
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+static const VecDesc VecFuncs_SVML[] = {
+#define TLI_DEFINE_SVML_VECFUNCS
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+static const VecDesc VecFuncs_SLEEFGNUABI_VF2[] = {
+#define TLI_DEFINE_SLEEFGNUABI_VF2_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) \
+ {SCAL, VEC, VF, /* MASK = */ false, VABI_PREFIX},
+#include "llvm/Analysis/VecFuncs.def"
+};
+static const VecDesc VecFuncs_SLEEFGNUABI_VF4[] = {
+#define TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) \
+ {SCAL, VEC, VF, /* MASK = */ false, VABI_PREFIX},
+#include "llvm/Analysis/VecFuncs.def"
+};
+static const VecDesc VecFuncs_SLEEFGNUABI_VFScalable[] = {
+#define TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
+ {SCAL, VEC, VF, MASK, VABI_PREFIX},
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+static const VecDesc VecFuncs_ArmPL[] = {
+#define TLI_DEFINE_ARMPL_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
+ {SCAL, VEC, VF, MASK, VABI_PREFIX},
+#include "llvm/Analysis/VecFuncs.def"
+};
+
+const VecDesc VecFuncs_AMDLIBM[] = {
+#define TLI_DEFINE_AMDLIBM_VECFUNCS
+#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
+ {SCAL, VEC, VF, MASK, VABI_PREFIX},
+#include "llvm/Analysis/VecFuncs.def"
+};
+
void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
enum VectorLibrary VecLib, const llvm::Triple &TargetTriple) {
switch (VecLib) {
case Accelerate: {
- const VecDesc VecFuncs[] = {
- #define TLI_DEFINE_ACCELERATE_VECFUNCS
- #include "llvm/Analysis/VecFuncs.def"
- };
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_Accelerate);
break;
}
case DarwinLibSystemM: {
- const VecDesc VecFuncs[] = {
- #define TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
- #include "llvm/Analysis/VecFuncs.def"
- };
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_DarwinLibSystemM);
break;
}
case LIBMVEC_X86: {
- const VecDesc VecFuncs[] = {
- #define TLI_DEFINE_LIBMVEC_X86_VECFUNCS
- #include "llvm/Analysis/VecFuncs.def"
- };
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_LIBMVEC_X86);
break;
}
case MASSV: {
- const VecDesc VecFuncs[] = {
- #define TLI_DEFINE_MASSV_VECFUNCS
- #include "llvm/Analysis/VecFuncs.def"
- };
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_MASSV);
break;
}
case SVML: {
- const VecDesc VecFuncs[] = {
- #define TLI_DEFINE_SVML_VECFUNCS
- #include "llvm/Analysis/VecFuncs.def"
- };
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_SVML);
break;
}
case SLEEFGNUABI: {
- const VecDesc VecFuncs_VF2[] = {
-#define TLI_DEFINE_SLEEFGNUABI_VF2_VECFUNCS
-#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) \
- {SCAL, VEC, VF, /* MASK = */ false, VABI_PREFIX},
-#include "llvm/Analysis/VecFuncs.def"
- };
- const VecDesc VecFuncs_VF4[] = {
-#define TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS
-#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, VABI_PREFIX) \
- {SCAL, VEC, VF, /* MASK = */ false, VABI_PREFIX},
-#include "llvm/Analysis/VecFuncs.def"
- };
- const VecDesc VecFuncs_VFScalable[] = {
-#define TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
-#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
- {SCAL, VEC, VF, MASK, VABI_PREFIX},
-#include "llvm/Analysis/VecFuncs.def"
- };
-
switch (TargetTriple.getArch()) {
default:
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- addVectorizableFunctions(VecFuncs_VF2);
- addVectorizableFunctions(VecFuncs_VF4);
- addVectorizableFunctions(VecFuncs_VFScalable);
+ addVectorizableFunctions(VecFuncs_SLEEFGNUABI_VF2);
+ addVectorizableFunctions(VecFuncs_SLEEFGNUABI_VF4);
+ addVectorizableFunctions(VecFuncs_SLEEFGNUABI_VFScalable);
break;
}
break;
}
case ArmPL: {
- const VecDesc VecFuncs[] = {
-#define TLI_DEFINE_ARMPL_VECFUNCS
-#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
- {SCAL, VEC, VF, MASK, VABI_PREFIX},
-#include "llvm/Analysis/VecFuncs.def"
- };
-
switch (TargetTriple.getArch()) {
default:
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_ArmPL);
break;
}
break;
}
case AMDLIBM: {
- const VecDesc VecFuncs[] = {
-#define TLI_DEFINE_AMDLIBM_VECFUNCS
-#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, MASK, VABI_PREFIX) \
- {SCAL, VEC, VF, MASK, VABI_PREFIX},
-#include "llvm/Analysis/VecFuncs.def"
- };
- addVectorizableFunctions(VecFuncs);
+ addVectorizableFunctions(VecFuncs_AMDLIBM);
break;
}
case NoLibrary:
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 797665cf06c8..b5e8a1d22f26 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -699,7 +699,7 @@ static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred,
}
default:
const APInt *Offset = nullptr;
- if (match(LHS, m_CombineOr(m_V, m_Add(m_V, m_APInt(Offset)))) &&
+ if (match(LHS, m_CombineOr(m_V, m_AddLike(m_V, m_APInt(Offset)))) &&
match(RHS, m_APInt(C))) {
ConstantRange LHSRange = ConstantRange::makeAllowedICmpRegion(Pred, *C);
if (Offset)
@@ -9285,7 +9285,7 @@ void llvm::findValuesAffectedByCondition(
} else {
// Handle (A + C1) u< C2, which is the canonical form of
// A > C3 && A < C4.
- if (match(A, m_Add(m_Value(X), m_ConstantInt())) &&
+ if (match(A, m_AddLike(m_Value(X), m_ConstantInt())) &&
match(B, m_ConstantInt()))
AddAffected(X);
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index 02f64fcfac4f..2301a27731ea 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -640,6 +640,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(tailcc);
KEYWORD(m68k_rtdcc);
KEYWORD(graalcc);
+ KEYWORD(riscv_vector_cc);
KEYWORD(cc);
KEYWORD(c);
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index f0be021668af..fe49e52ae428 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -2143,6 +2143,7 @@ void LLParser::parseOptionalDLLStorageClass(unsigned &Res) {
/// ::= 'tailcc'
/// ::= 'm68k_rtdcc'
/// ::= 'graalcc'
+/// ::= 'riscv_vector_cc'
/// ::= 'cc' UINT
///
bool LLParser::parseOptionalCallingConv(unsigned &CC) {
@@ -2213,6 +2214,9 @@ bool LLParser::parseOptionalCallingConv(unsigned &CC) {
case lltok::kw_tailcc: CC = CallingConv::Tail; break;
case lltok::kw_m68k_rtdcc: CC = CallingConv::M68k_RTD; break;
case lltok::kw_graalcc: CC = CallingConv::GRAAL; break;
+ case lltok::kw_riscv_vector_cc:
+ CC = CallingConv::RISCV_VectorCall;
+ break;
case lltok::kw_cc: {
Lex.Lex();
return parseUInt32(CC);
@@ -6810,7 +6814,19 @@ int LLParser::parseInstruction(Instruction *&Inst, BasicBlock *BB,
Inst->setNonNeg();
return 0;
}
- case lltok::kw_trunc:
+ case lltok::kw_trunc: {
+ bool NUW = EatIfPresent(lltok::kw_nuw);
+ bool NSW = EatIfPresent(lltok::kw_nsw);
+ if (!NUW)
+ NUW = EatIfPresent(lltok::kw_nuw);
+ if (parseCast(Inst, PFS, KeywordVal))
+ return true;
+ if (NUW)
+ cast<TruncInst>(Inst)->setHasNoUnsignedWrap(true);
+ if (NSW)
+ cast<TruncInst>(Inst)->setHasNoSignedWrap(true);
+ return false;
+ }
case lltok::kw_sext:
case lltok::kw_fptrunc:
case lltok::kw_fpext:
@@ -8387,7 +8403,7 @@ int LLParser::parseInsertValue(Instruction *&Inst, PerFunctionState &PFS) {
/// parseMDNodeVector
/// ::= { Element (',' Element)* }
/// Element
-/// ::= 'null' | TypeAndValue
+/// ::= 'null' | Metadata
bool LLParser::parseMDNodeVector(SmallVectorImpl<Metadata *> &Elts) {
if (parseToken(lltok::lbrace, "expected '{' here"))
return true;
@@ -8397,7 +8413,6 @@ bool LLParser::parseMDNodeVector(SmallVectorImpl<Metadata *> &Elts) {
return false;
do {
- // Null is a special case since it is typeless.
if (EatIfPresent(lltok::kw_null)) {
Elts.push_back(nullptr);
continue;
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 3fc8141381c6..aa6c9c95ca24 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -104,7 +104,7 @@ static cl::opt<bool> ExpandConstantExprs(
/// of debug intrinsics). UNSET is treated as FALSE, so the default action
/// is to do nothing. Individual tools can override this to incrementally add
/// support for the RemoveDIs format.
-cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInforFormat(
+cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInfoFormat(
"load-bitcode-into-experimental-debuginfo-iterators", cl::Hidden,
cl::desc("Load bitcode directly into the new debug info format (regardless "
"of input format)"));
@@ -4300,11 +4300,11 @@ Error BitcodeReader::parseGlobalIndirectSymbolRecord(
Error BitcodeReader::parseModule(uint64_t ResumeBit,
bool ShouldLazyLoadMetadata,
ParserCallbacks Callbacks) {
- // Load directly into RemoveDIs format if LoadBitcodeIntoNewDbgInforFormat
+ // Load directly into RemoveDIs format if LoadBitcodeIntoNewDbgInfoFormat
// has been set to true (default action: load into the old debug format).
TheModule->IsNewDbgInfoFormat =
UseNewDbgInfoFormat &&
- LoadBitcodeIntoNewDbgInforFormat == cl::boolOrDefault::BOU_TRUE;
+ LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_TRUE;
this->ValueTypeCallback = std::move(Callbacks.ValueType);
if (ResumeBit) {
@@ -5022,9 +5022,19 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
return error("Invalid cast");
I = CastInst::Create(CastOp, Op, ResTy);
}
- if (OpNum < Record.size() && isa<PossiblyNonNegInst>(I) &&
- (Record[OpNum] & (1 << bitc::PNNI_NON_NEG)))
- I->setNonNeg(true);
+
+ if (OpNum < Record.size()) {
+ if (Opc == Instruction::ZExt) {
+ if (Record[OpNum] & (1 << bitc::PNNI_NON_NEG))
+ cast<PossiblyNonNegInst>(I)->setNonNeg(true);
+ } else if (Opc == Instruction::Trunc) {
+ if (Record[OpNum] & (1 << bitc::TIO_NO_UNSIGNED_WRAP))
+ cast<TruncInst>(I)->setHasNoUnsignedWrap(true);
+ if (Record[OpNum] & (1 << bitc::TIO_NO_SIGNED_WRAP))
+ cast<TruncInst>(I)->setHasNoSignedWrap(true);
+ }
+ }
+
InstructionList.push_back(I);
break;
}
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index a8b69f89e7de..221eeaae6e2b 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -1640,6 +1640,11 @@ static uint64_t getOptimizationFlags(const Value *V) {
} else if (const auto *NNI = dyn_cast<PossiblyNonNegInst>(V)) {
if (NNI->hasNonNeg())
Flags |= 1 << bitc::PNNI_NON_NEG;
+ } else if (const auto *TI = dyn_cast<TruncInst>(V)) {
+ if (TI->hasNoSignedWrap())
+ Flags |= 1 << bitc::TIO_NO_SIGNED_WRAP;
+ if (TI->hasNoUnsignedWrap())
+ Flags |= 1 << bitc::TIO_NO_UNSIGNED_WRAP;
}
return Flags;
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 894285a7eb25..d5db79df6862 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -135,10 +135,13 @@ public:
// IRBuilder to be used for replacement atomic instructions.
struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
// Preserves the DebugLoc from I, and preserves still valid metadata.
+ // Enable StrictFP builder mode when appropriate.
explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
: IRBuilder(I->getContext(), DL) {
SetInsertPoint(I);
this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
+ if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
+ this->setIsFPConstrained(true);
}
};
diff --git a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
index 308f13c19f75..4ec966e56d6e 100644
--- a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
+++ b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
@@ -175,9 +175,10 @@ static void expandFPToI(Instruction *FPToI) {
// if.end:
Builder.SetInsertPoint(IfEnd);
Value *Add1 = Builder.CreateAdd(
- And2, ConstantInt::getSigned(IntTy, -int64_t(ExponentBias + BitWidth)));
- Value *Cmp3 =
- Builder.CreateICmpULT(Add1, ConstantInt::getSigned(IntTy, -BitWidth));
+ And2, ConstantInt::getSigned(
+ IntTy, -static_cast<int64_t>(ExponentBias + BitWidth)));
+ Value *Cmp3 = Builder.CreateICmpULT(
+ Add1, ConstantInt::getSigned(IntTy, -static_cast<int64_t>(BitWidth)));
Builder.CreateCondBr(Cmp3, IfThen5, IfEnd9);
// if.then5:
@@ -203,8 +204,8 @@ static void expandFPToI(Instruction *FPToI) {
// if.else:
Builder.SetInsertPoint(IfElse);
Value *Sub15 = Builder.CreateAdd(
- And2,
- ConstantInt::getSigned(IntTy, -(ExponentBias + FPMantissaWidth)));
+ And2, ConstantInt::getSigned(
+ IntTy, -static_cast<int64_t>(ExponentBias + FPMantissaWidth)));
Value *Shl = Builder.CreateShl(Or, Sub15);
Value *Mul16 = Builder.CreateMul(Shl, Sign);
Builder.CreateBr(End);
diff --git a/llvm/lib/CodeGen/FinalizeISel.cpp b/llvm/lib/CodeGen/FinalizeISel.cpp
index 978355f8eb1b..bf967eac22f1 100644
--- a/llvm/lib/CodeGen/FinalizeISel.cpp
+++ b/llvm/lib/CodeGen/FinalizeISel.cpp
@@ -59,8 +59,7 @@ bool FinalizeISel::runOnMachineFunction(MachineFunction &MF) {
// Set AdjustsStack to true if the instruction selector emits a stack
// frame setup instruction or a stack aligning inlineasm.
- if (MI.getOpcode() == TII->getCallFrameSetupOpcode() ||
- MI.isStackAligningInlineAsm())
+ if (TII->isFrameInstr(MI) || MI.isStackAligningInlineAsm())
MF.getFrameInfo().setAdjustsStack(true);
// If MI is a pseudo, expand it.
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
index 1869e0d41a51..551ba1e6036c 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -174,6 +174,20 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
switch (Opc) {
default:
break;
+ case TargetOpcode::G_ICMP: {
+ assert(SrcOps.size() == 3 && "Invalid sources");
+ assert(DstOps.size() == 1 && "Invalid dsts");
+ LLT SrcTy = SrcOps[1].getLLTTy(*getMRI());
+
+ if (std::optional<SmallVector<APInt>> Cst =
+ ConstantFoldICmp(SrcOps[0].getPredicate(), SrcOps[1].getReg(),
+ SrcOps[2].getReg(), *getMRI())) {
+ if (SrcTy.isVector())
+ return buildBuildVectorConstant(DstOps[0], *Cst);
+ return buildConstant(DstOps[0], Cst->front());
+ }
+ break;
+ }
case TargetOpcode::G_ADD:
case TargetOpcode::G_PTR_ADD:
case TargetOpcode::G_AND:
@@ -256,10 +270,16 @@ MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
return buildFConstant(DstOps[0], *Cst);
break;
}
- case TargetOpcode::G_CTLZ: {
+ case TargetOpcode::G_CTLZ:
+ case TargetOpcode::G_CTTZ: {
assert(SrcOps.size() == 1 && "Expected one source");
assert(DstOps.size() == 1 && "Expected one dest");
- auto MaybeCsts = ConstantFoldCTLZ(SrcOps[0].getReg(), *getMRI());
+ std::function<unsigned(APInt)> CB;
+ if (Opc == TargetOpcode::G_CTLZ)
+ CB = [](APInt V) -> unsigned { return V.countl_zero(); };
+ else
+ CB = [](APInt V) -> unsigned { return V.countTrailingZeros(); };
+ auto MaybeCsts = ConstantFoldCountZeros(SrcOps[0].getReg(), *getMRI(), CB);
if (!MaybeCsts)
break;
if (MaybeCsts->size() == 1)
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index d3f86af1e290..98e7c73a801f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -872,7 +872,6 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
- Builder.setInstrAndDebugLoc(MI);
Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
MI.eraseFromParent();
}
@@ -1299,7 +1298,6 @@ bool CombinerHelper::matchCombineIndexedLoadStore(
void CombinerHelper::applyCombineIndexedLoadStore(
MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
- Builder.setInstrAndDebugLoc(MI);
unsigned Opcode = MI.getOpcode();
bool IsStore = Opcode == TargetOpcode::G_STORE;
unsigned NewOpcode = getIndexedOpc(Opcode);
@@ -1416,14 +1414,8 @@ void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
// deps by "moving" the instruction incorrectly. Also keep track of which
// instruction is first so we pick it's operands, avoiding use-before-def
// bugs.
- MachineInstr *FirstInst;
- if (dominates(MI, *OtherMI)) {
- Builder.setInstrAndDebugLoc(MI);
- FirstInst = &MI;
- } else {
- Builder.setInstrAndDebugLoc(*OtherMI);
- FirstInst = OtherMI;
- }
+ MachineInstr *FirstInst = dominates(MI, *OtherMI) ? &MI : OtherMI;
+ Builder.setInstrAndDebugLoc(*FirstInst);
Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
: TargetOpcode::G_UDIVREM,
@@ -1556,7 +1548,6 @@ static APFloat constantFoldFpUnary(const MachineInstr &MI,
void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
const ConstantFP *Cst) {
- Builder.setInstrAndDebugLoc(MI);
APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue());
const ConstantFP *NewCst = ConstantFP::get(Builder.getContext(), Folded);
Builder.buildFConstant(MI.getOperand(0), *NewCst);
@@ -1691,7 +1682,6 @@ void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
Opcode == TargetOpcode::G_USHLSAT) &&
"Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
- Builder.setInstrAndDebugLoc(MI);
LLT Ty = MRI.getType(MI.getOperand(1).getReg());
unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
auto Imm = MatchInfo.Imm;
@@ -1807,7 +1797,6 @@ void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
LLT DestType = MRI.getType(MI.getOperand(0).getReg());
- Builder.setInstrAndDebugLoc(MI);
Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
@@ -1943,7 +1932,6 @@ void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
int64_t ShiftAmtVal = MatchData.Imm;
LLT ExtSrcTy = MRI.getType(ExtSrcReg);
- Builder.setInstrAndDebugLoc(MI);
auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
auto NarrowShift =
Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
@@ -2013,7 +2001,6 @@ void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
LLT SrcTy = MRI.getType(Operands[0]);
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
bool CanReuseInputDirectly = DstTy == SrcTy;
- Builder.setInstrAndDebugLoc(MI);
for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
Register DstReg = MI.getOperand(Idx).getReg();
Register SrcReg = Operands[Idx];
@@ -2066,7 +2053,6 @@ void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
assert((MI.getNumOperands() - 1 == Csts.size()) &&
"Not enough operands to replace all defs");
unsigned NumElems = MI.getNumOperands() - 1;
- Builder.setInstrAndDebugLoc(MI);
for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
Register DstReg = MI.getOperand(Idx).getReg();
Builder.buildConstant(DstReg, Csts[Idx]);
@@ -2104,7 +2090,6 @@ bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
}
void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
- Builder.setInstrAndDebugLoc(MI);
Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
Register Dst0Reg = MI.getOperand(0).getReg();
Builder.buildTrunc(Dst0Reg, SrcReg);
@@ -2152,8 +2137,6 @@ void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
LLT Dst0Ty = MRI.getType(Dst0Reg);
LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
- Builder.setInstrAndDebugLoc(MI);
-
if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
Builder.buildZExt(Dst0Reg, ZExtSrcReg);
} else {
@@ -2207,7 +2190,6 @@ void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
LLT HalfTy = LLT::scalar(HalfSize);
- Builder.setInstr(MI);
auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
unsigned NarrowShiftAmt = ShiftVal - HalfSize;
@@ -2292,7 +2274,6 @@ bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
Register DstReg = MI.getOperand(0).getReg();
- Builder.setInstr(MI);
Builder.buildCopy(DstReg, Reg);
MI.eraseFromParent();
}
@@ -2300,7 +2281,6 @@ void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
Register DstReg = MI.getOperand(0).getReg();
- Builder.setInstr(MI);
Builder.buildZExtOrTrunc(DstReg, Reg);
MI.eraseFromParent();
}
@@ -2343,7 +2323,6 @@ void CombinerHelper::applyCombineAddP2IToPtrAdd(
LLT PtrTy = MRI.getType(LHS);
- Builder.setInstrAndDebugLoc(MI);
auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
Builder.buildPtrToInt(Dst, PtrAdd);
MI.eraseFromParent();
@@ -2375,7 +2354,6 @@ void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
auto &PtrAdd = cast<GPtrAdd>(MI);
Register Dst = PtrAdd.getReg(0);
- Builder.setInstrAndDebugLoc(MI);
Builder.buildConstant(Dst, NewCst);
PtrAdd.eraseFromParent();
}
@@ -2455,7 +2433,6 @@ void CombinerHelper::applyCombineExtOfExt(
(MI.getOpcode() == TargetOpcode::G_SEXT &&
SrcExtOp == TargetOpcode::G_ZEXT)) {
Register DstReg = MI.getOperand(0).getReg();
- Builder.setInstrAndDebugLoc(MI);
Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
MI.eraseFromParent();
}
@@ -2488,7 +2465,6 @@ void CombinerHelper::applyCombineTruncOfExt(
replaceRegWith(MRI, DstReg, SrcReg);
return;
}
- Builder.setInstrAndDebugLoc(MI);
if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
else
@@ -2576,8 +2552,6 @@ bool CombinerHelper::matchCombineTruncOfShift(
void CombinerHelper::applyCombineTruncOfShift(
MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
- Builder.setInstrAndDebugLoc(MI);
-
MachineInstr *ShiftMI = MatchInfo.first;
LLT NewShiftTy = MatchInfo.second;
@@ -2823,7 +2797,6 @@ void CombinerHelper::applyFunnelShiftConstantModulo(MachineInstr &MI) {
APInt NewConst = VRegAndVal->Value.urem(
APInt(ConstTy.getSizeInBits(), DstTy.getScalarSizeInBits()));
- Builder.setInstrAndDebugLoc(MI);
auto NewConstInstr = Builder.buildConstant(ConstTy, NewConst.getZExtValue());
Builder.buildInstr(
MI.getOpcode(), {MI.getOperand(0)},
@@ -2866,35 +2839,31 @@ bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
- Builder.setInstr(MI);
Builder.buildFConstant(MI.getOperand(0), C);
MI.eraseFromParent();
}
void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
- Builder.setInstr(MI);
Builder.buildConstant(MI.getOperand(0), C);
MI.eraseFromParent();
}
void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
- Builder.setInstr(MI);
Builder.buildConstant(MI.getOperand(0), C);
MI.eraseFromParent();
}
-void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, ConstantFP *CFP) {
+void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI,
+ ConstantFP *CFP) {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
- Builder.setInstr(MI);
Builder.buildFConstant(MI.getOperand(0), CFP->getValueAPF());
MI.eraseFromParent();
}
void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
- Builder.setInstr(MI);
Builder.buildUndef(MI.getOperand(0));
MI.eraseFromParent();
}
@@ -2962,7 +2931,6 @@ bool CombinerHelper::matchCombineInsertVecElts(
void CombinerHelper::applyCombineInsertVecElts(
MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
- Builder.setInstr(MI);
Register UndefReg;
auto GetUndef = [&]() {
if (UndefReg)
@@ -2981,7 +2949,6 @@ void CombinerHelper::applyCombineInsertVecElts(
void CombinerHelper::applySimplifyAddToSub(
MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
- Builder.setInstr(MI);
Register SubLHS, SubRHS;
std::tie(SubLHS, SubRHS) = MatchInfo;
Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
@@ -3084,7 +3051,6 @@ void CombinerHelper::applyBuildInstructionSteps(
MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
assert(MatchInfo.InstrsToBuild.size() &&
"Expected at least one instr to build?");
- Builder.setInstr(MI);
for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
assert(InstrToBuild.Opcode && "Expected a valid opcode?");
assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
@@ -3120,7 +3086,6 @@ void CombinerHelper::applyAshShlToSextInreg(
int64_t ShiftAmt;
std::tie(Src, ShiftAmt) = MatchInfo;
unsigned Size = MRI.getType(Src).getScalarSizeInBits();
- Builder.setInstrAndDebugLoc(MI);
Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
MI.eraseFromParent();
}
@@ -3399,7 +3364,6 @@ bool CombinerHelper::matchXorOfAndWithSameReg(
void CombinerHelper::applyXorOfAndWithSameReg(
MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
// Fold (xor (and x, y), y) -> (and (not x), y)
- Builder.setInstrAndDebugLoc(MI);
Register X, Y;
std::tie(X, Y) = MatchInfo;
auto Not = Builder.buildNot(MRI.getType(X), X);
@@ -3431,7 +3395,6 @@ bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
auto &PtrAdd = cast<GPtrAdd>(MI);
- Builder.setInstrAndDebugLoc(PtrAdd);
Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
PtrAdd.eraseFromParent();
}
@@ -3442,7 +3405,6 @@ void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
Register Src0 = MI.getOperand(1).getReg();
Register Pow2Src1 = MI.getOperand(2).getReg();
LLT Ty = MRI.getType(DstReg);
- Builder.setInstrAndDebugLoc(MI);
// Fold (urem x, pow2) -> (and x, pow2-1)
auto NegOne = Builder.buildConstant(Ty, -1);
@@ -3507,8 +3469,6 @@ bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
/// to fold.
void CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
const unsigned &SelectOperand) {
- Builder.setInstrAndDebugLoc(MI);
-
Register Dst = MI.getOperand(0).getReg();
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -4029,7 +3989,6 @@ void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
- Builder.setInstrAndDebugLoc(MI);
if (ScalarTy != DstTy) {
assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
Builder.buildTrunc(DstReg, Reg);
@@ -4095,14 +4054,12 @@ void CombinerHelper::applyExtractAllEltsFromBuildVector(
void CombinerHelper::applyBuildFn(
MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
- Builder.setInstrAndDebugLoc(MI);
- MatchInfo(Builder);
+ applyBuildFnNoErase(MI, MatchInfo);
MI.eraseFromParent();
}
void CombinerHelper::applyBuildFnNoErase(
MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
- Builder.setInstrAndDebugLoc(MI);
MatchInfo(Builder);
}
@@ -4204,7 +4161,6 @@ void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
MI.getOpcode() == TargetOpcode::G_ROTR);
unsigned Bitsize =
MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
- Builder.setInstrAndDebugLoc(MI);
Register Amt = MI.getOperand(2).getReg();
LLT AmtTy = MRI.getType(Amt);
auto Bits = Builder.buildConstant(AmtTy, Bitsize);
@@ -5027,7 +4983,6 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
auto &MIB = Builder;
- MIB.setInstrAndDebugLoc(MI);
bool UseNPQ = false;
SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
@@ -5213,7 +5168,6 @@ MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType();
auto &MIB = Builder;
- MIB.setInstrAndDebugLoc(MI);
bool UseSRA = false;
SmallVector<Register, 16> Shifts, Factors;
@@ -5270,6 +5224,93 @@ MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
return MIB.buildMul(Ty, Res, Factor);
}
+bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) {
+ assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
+ MI.getOpcode() == TargetOpcode::G_UDIV) &&
+ "Expected SDIV or UDIV");
+ auto &Div = cast<GenericMachineInstr>(MI);
+ Register RHS = Div.getReg(2);
+ auto MatchPow2 = [&](const Constant *C) {
+ auto *CI = dyn_cast<ConstantInt>(C);
+ return CI && (CI->getValue().isPowerOf2() ||
+ (IsSigned && CI->getValue().isNegatedPowerOf2()));
+ };
+ return matchUnaryPredicate(MRI, RHS, MatchPow2, /*AllowUndefs=*/false);
+}
+
+void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
+ auto &SDiv = cast<GenericMachineInstr>(MI);
+ Register Dst = SDiv.getReg(0);
+ Register LHS = SDiv.getReg(1);
+ Register RHS = SDiv.getReg(2);
+ LLT Ty = MRI.getType(Dst);
+ LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+ LLT CCVT =
+ Ty.isVector() ? LLT::vector(Ty.getElementCount(), 1) : LLT::scalar(1);
+
+ // Effectively we want to lower G_SDIV %lhs, %rhs, where %rhs is a power of 2,
+ // to the following version:
+ //
+ // %c1 = G_CTTZ %rhs
+ // %inexact = G_SUB $bitwidth, %c1
+  //  %sign = G_ASHR %lhs, $(bitwidth - 1)
+ // %lshr = G_LSHR %sign, %inexact
+ // %add = G_ADD %lhs, %lshr
+ // %ashr = G_ASHR %add, %c1
+  //  %ashr = G_SELECT %isoneorallones, %lhs, %ashr
+ // %zero = G_CONSTANT $0
+ // %neg = G_NEG %ashr
+ // %isneg = G_ICMP SLT %rhs, %zero
+ // %res = G_SELECT %isneg, %neg, %ashr
+
+ unsigned BitWidth = Ty.getScalarSizeInBits();
+ auto Zero = Builder.buildConstant(Ty, 0);
+
+ auto Bits = Builder.buildConstant(ShiftAmtTy, BitWidth);
+ auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
+ auto Inexact = Builder.buildSub(ShiftAmtTy, Bits, C1);
+ // Splat the sign bit into the register
+ auto Sign = Builder.buildAShr(
+ Ty, LHS, Builder.buildConstant(ShiftAmtTy, BitWidth - 1));
+
+ // Add (LHS < 0) ? abs2 - 1 : 0;
+ auto LSrl = Builder.buildLShr(Ty, Sign, Inexact);
+ auto Add = Builder.buildAdd(Ty, LHS, LSrl);
+ auto AShr = Builder.buildAShr(Ty, Add, C1);
+
+ // Special case: (sdiv X, 1) -> X
+  // Special case: (sdiv X, -1) -> 0-X
+ auto One = Builder.buildConstant(Ty, 1);
+ auto MinusOne = Builder.buildConstant(Ty, -1);
+ auto IsOne = Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, One);
+ auto IsMinusOne =
+ Builder.buildICmp(CmpInst::Predicate::ICMP_EQ, CCVT, RHS, MinusOne);
+ auto IsOneOrMinusOne = Builder.buildOr(CCVT, IsOne, IsMinusOne);
+ AShr = Builder.buildSelect(Ty, IsOneOrMinusOne, LHS, AShr);
+
+ // If divided by a positive value, we're done. Otherwise, the result must be
+ // negated.
+ auto Neg = Builder.buildNeg(Ty, AShr);
+ auto IsNeg = Builder.buildICmp(CmpInst::Predicate::ICMP_SLT, CCVT, RHS, Zero);
+ Builder.buildSelect(MI.getOperand(0).getReg(), IsNeg, Neg, AShr);
+ MI.eraseFromParent();
+}
+
+void CombinerHelper::applyUDivByPow2(MachineInstr &MI) {
+ assert(MI.getOpcode() == TargetOpcode::G_UDIV && "Expected UDIV");
+ auto &UDiv = cast<GenericMachineInstr>(MI);
+ Register Dst = UDiv.getReg(0);
+ Register LHS = UDiv.getReg(1);
+ Register RHS = UDiv.getReg(2);
+ LLT Ty = MRI.getType(Dst);
+ LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+
+ auto C1 = Builder.buildCTTZ(ShiftAmtTy, RHS);
+ Builder.buildLShr(MI.getOperand(0).getReg(), LHS, C1);
+ MI.eraseFromParent();
+}
+
bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
assert(MI.getOpcode() == TargetOpcode::G_UMULH);
Register RHS = MI.getOperand(2).getReg();
@@ -5294,7 +5335,6 @@ void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
unsigned NumEltBits = Ty.getScalarSizeInBits();
- Builder.setInstrAndDebugLoc(MI);
auto LogBase2 = buildLogBase2(RHS, Builder);
auto ShiftAmt =
Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2);
@@ -5374,7 +5414,6 @@ bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
}
void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
- Builder.setInstrAndDebugLoc(MI);
Register Dst = MI.getOperand(0).getReg();
Builder.buildFNeg(
Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
@@ -6945,10 +6984,6 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) {
LLT DstTy = MRI.getType(Dst);
LLT CarryTy = MRI.getType(Carry);
- // We want do fold the [u|s]addo.
- if (!MRI.hasOneNonDBGUse(Dst))
- return false;
-
// Fold addo, if the carry is dead -> add, undef.
if (MRI.use_nodbg_empty(Carry) &&
isLegalOrBeforeLegalizer({TargetOpcode::G_ADD, {DstTy}})) {
@@ -6959,10 +6994,6 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) {
return true;
}
- // We want do fold the [u|s]addo.
- if (!MRI.hasOneNonDBGUse(Carry))
- return false;
-
// Canonicalize constant to RHS.
if (isConstantOrConstantVectorI(LHS) && !isConstantOrConstantVectorI(RHS)) {
if (IsSigned) {
@@ -6994,7 +7025,7 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) {
return true;
}
- // Fold (addo x, 0) -> x, no borrow
+ // Fold (addo x, 0) -> x, no carry
if (MaybeRHS && *MaybeRHS == 0 && isConstantLegalOrBeforeLegalizer(CarryTy)) {
MatchInfo = [=](MachineIRBuilder &B) {
B.buildCopy(Dst, LHS);
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 2e2cc9a95bd9..51ab7b6262c6 100644
--- a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
@@ -405,18 +405,23 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
}
case TargetOpcode::G_LOAD: {
const MachineMemOperand *MMO = *MI.memoperands_begin();
- if (const MDNode *Ranges = MMO->getRanges()) {
- computeKnownBitsFromRangeMetadata(*Ranges, Known);
- }
-
+ KnownBits KnownRange(MMO->getMemoryType().getScalarSizeInBits());
+ if (const MDNode *Ranges = MMO->getRanges())
+ computeKnownBitsFromRangeMetadata(*Ranges, KnownRange);
+ Known = KnownRange.anyext(Known.getBitWidth());
break;
}
+ case TargetOpcode::G_SEXTLOAD:
case TargetOpcode::G_ZEXTLOAD: {
if (DstTy.isVector())
break;
- // Everything above the retrieved bits is zero
- Known.Zero.setBitsFrom(
- (*MI.memoperands_begin())->getSizeInBits().getValue());
+ const MachineMemOperand *MMO = *MI.memoperands_begin();
+ KnownBits KnownRange(MMO->getMemoryType().getScalarSizeInBits());
+ if (const MDNode *Ranges = MMO->getRanges())
+ computeKnownBitsFromRangeMetadata(*Ranges, KnownRange);
+ Known = Opcode == TargetOpcode::G_SEXTLOAD
+ ? KnownRange.sext(Known.getBitWidth())
+ : KnownRange.zext(Known.getBitWidth());
break;
}
case TargetOpcode::G_ASHR: {
@@ -589,6 +594,17 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
}
break;
}
+ case TargetOpcode::G_CTLZ:
+ case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
+ KnownBits SrcOpKnown;
+ computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
+ Depth + 1);
+ // If we have a known 1, its position is our upper bound.
+ unsigned PossibleLZ = SrcOpKnown.countMaxLeadingZeros();
+ unsigned LowBits = llvm::bit_width(PossibleLZ);
+ Known.Zero.setBitsFrom(LowBits);
+ break;
+ }
}
assert(!Known.hasConflict() && "Bits known to be one AND zero?");
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 0811c5653866..47e980e05281 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1562,9 +1562,14 @@ bool IRTranslator::translateCast(unsigned Opcode, const User &U,
if (U.getType()->getScalarType()->isBFloatTy() ||
U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
return false;
+
+ uint32_t Flags = 0;
+ if (const Instruction *I = dyn_cast<Instruction>(&U))
+ Flags = MachineInstr::copyFlagsFromInstruction(*I);
+
Register Op = getOrCreateVReg(*U.getOperand(0));
Register Res = getOrCreateVReg(U);
- MIRBuilder.buildInstr(Opcode, {Res}, {Op});
+ MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
return true;
}
@@ -1771,6 +1776,32 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
return true;
}
+bool IRTranslator::translateTrap(const CallInst &CI,
+ MachineIRBuilder &MIRBuilder,
+ unsigned Opcode) {
+ StringRef TrapFuncName =
+ CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
+ if (TrapFuncName.empty()) {
+ if (Opcode == TargetOpcode::G_UBSANTRAP) {
+ uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
+ MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
+ } else {
+ MIRBuilder.buildInstr(Opcode);
+ }
+ return true;
+ }
+
+ CallLowering::CallLoweringInfo Info;
+ if (Opcode == TargetOpcode::G_UBSANTRAP)
+ Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
+ CI.getArgOperand(0)->getType(), 0});
+
+ Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
+ Info.CB = &CI;
+ Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
+ return CLI->lowerCall(MIRBuilder, Info);
+}
+
bool IRTranslator::translateVectorInterleave2Intrinsic(
const CallInst &CI, MachineIRBuilder &MIRBuilder) {
assert(CI.getIntrinsicID() == Intrinsic::experimental_vector_interleave2 &&
@@ -2459,22 +2490,11 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
return true;
}
case Intrinsic::trap:
+ return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
case Intrinsic::debugtrap:
- case Intrinsic::ubsantrap: {
- StringRef TrapFuncName =
- CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
- if (TrapFuncName.empty())
- break; // Use the default handling.
- CallLowering::CallLoweringInfo Info;
- if (ID == Intrinsic::ubsantrap) {
- Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
- CI.getArgOperand(0)->getType(), 0});
- }
- Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
- Info.CB = &CI;
- Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
- return CLI->lowerCall(MIRBuilder, Info);
- }
+ return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
+ case Intrinsic::ubsantrap:
+ return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
case Intrinsic::allow_runtime_check:
case Intrinsic::allow_ubsan_check:
MIRBuilder.buildCopy(getOrCreateVReg(CI),
@@ -3052,7 +3072,7 @@ bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuil
}
}
- MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>());
+ MIRBuilder.buildTrap();
return true;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index abe23af00a78..797bbf7efe60 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1699,6 +1699,20 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
case TargetOpcode::G_FLDEXP:
case TargetOpcode::G_STRICT_FLDEXP:
return narrowScalarFLDEXP(MI, TypeIdx, NarrowTy);
+ case TargetOpcode::G_VSCALE: {
+ Register Dst = MI.getOperand(0).getReg();
+ LLT Ty = MRI.getType(Dst);
+
+ // Assume VSCALE(1) fits into a legal integer
+ const APInt One(NarrowTy.getSizeInBits(), 1);
+ auto VScaleBase = MIRBuilder.buildVScale(NarrowTy, One);
+ auto ZExt = MIRBuilder.buildZExt(Ty, VScaleBase);
+ auto C = MIRBuilder.buildConstant(Ty, *MI.getOperand(1).getCImm());
+ MIRBuilder.buildMul(Dst, ZExt, C);
+
+ MI.eraseFromParent();
+ return Legalized;
+ }
}
}
@@ -2966,7 +2980,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
case TargetOpcode::G_VECREDUCE_FMIN:
case TargetOpcode::G_VECREDUCE_FMAX:
case TargetOpcode::G_VECREDUCE_FMINIMUM:
- case TargetOpcode::G_VECREDUCE_FMAXIMUM:
+ case TargetOpcode::G_VECREDUCE_FMAXIMUM: {
if (TypeIdx != 0)
return UnableToLegalize;
Observer.changingInstr(MI);
@@ -2980,6 +2994,19 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Observer.changedInstr(MI);
return Legalized;
}
+ case TargetOpcode::G_VSCALE: {
+ MachineOperand &SrcMO = MI.getOperand(1);
+ LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
+ const APInt &SrcVal = SrcMO.getCImm()->getValue();
+ // The CImm is always a signed value
+ const APInt Val = SrcVal.sext(WideTy.getSizeInBits());
+ Observer.changingInstr(MI);
+ SrcMO.setCImm(ConstantInt::get(Ctx, Val));
+ widenScalarDst(MI, WideTy);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+ }
}
static void getUnmergePieces(SmallVectorImpl<Register> &Pieces,
@@ -3741,9 +3768,11 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
}
case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
auto [OldValRes, SuccessRes, Addr, CmpVal, NewVal] = MI.getFirst5Regs();
- MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
+ Register NewOldValRes = MRI.cloneVirtualRegister(OldValRes);
+ MIRBuilder.buildAtomicCmpXchg(NewOldValRes, Addr, CmpVal, NewVal,
**MI.memoperands_begin());
- MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
+ MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, NewOldValRes, CmpVal);
+ MIRBuilder.buildCopy(OldValRes, NewOldValRes);
MI.eraseFromParent();
return Legalized;
}
@@ -3762,8 +3791,12 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
case G_UADDO: {
auto [Res, CarryOut, LHS, RHS] = MI.getFirst4Regs();
- MIRBuilder.buildAdd(Res, LHS, RHS);
- MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS);
+ Register NewRes = MRI.cloneVirtualRegister(Res);
+
+ MIRBuilder.buildAdd(NewRes, LHS, RHS);
+ MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, NewRes, RHS);
+
+ MIRBuilder.buildCopy(Res, NewRes);
MI.eraseFromParent();
return Legalized;
@@ -3773,6 +3806,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
const LLT CondTy = MRI.getType(CarryOut);
const LLT Ty = MRI.getType(Res);
+ Register NewRes = MRI.cloneVirtualRegister(Res);
+
// Initial add of the two operands.
auto TmpRes = MIRBuilder.buildAdd(Ty, LHS, RHS);
@@ -3781,15 +3816,18 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT LowerHintTy) {
// Add the sum and the carry.
auto ZExtCarryIn = MIRBuilder.buildZExt(Ty, CarryIn);
- MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn);
+ MIRBuilder.buildAdd(NewRes, TmpRes, ZExtCarryIn);
// Second check for carry. We can only carry if the initial sum is all 1s
// and the carry is set, resulting in a new sum of 0.
auto Zero = MIRBuilder.buildConstant(Ty, 0);
- auto ResEqZero = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, Res, Zero);
+ auto ResEqZero =
+ MIRBuilder.buildICmp(CmpInst::ICMP_EQ, CondTy, NewRes, Zero);
auto Carry2 = MIRBuilder.buildAnd(CondTy, ResEqZero, CarryIn);
MIRBuilder.buildOr(CarryOut, Carry, Carry2);
+ MIRBuilder.buildCopy(Res, NewRes);
+
MI.eraseFromParent();
return Legalized;
}
@@ -5421,14 +5459,22 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
case TargetOpcode::G_FPTOUI:
case TargetOpcode::G_SITOFP:
case TargetOpcode::G_UITOFP: {
- if (TypeIdx != 0)
- return UnableToLegalize;
Observer.changingInstr(MI);
- LLT SrcTy = LLT::fixed_vector(
- MoreTy.getNumElements(),
- MRI.getType(MI.getOperand(1).getReg()).getElementType());
- moreElementsVectorSrc(MI, SrcTy, 1);
- moreElementsVectorDst(MI, MoreTy, 0);
+ LLT SrcExtTy;
+ LLT DstExtTy;
+ if (TypeIdx == 0) {
+ DstExtTy = MoreTy;
+ SrcExtTy = LLT::fixed_vector(
+ MoreTy.getNumElements(),
+ MRI.getType(MI.getOperand(1).getReg()).getElementType());
+ } else {
+ DstExtTy = LLT::fixed_vector(
+ MoreTy.getNumElements(),
+ MRI.getType(MI.getOperand(0).getReg()).getElementType());
+ SrcExtTy = MoreTy;
+ }
+ moreElementsVectorSrc(MI, SrcExtTy, 1);
+ moreElementsVectorDst(MI, DstExtTy, 0);
Observer.changedInstr(MI);
return Legalized;
}
@@ -6354,12 +6400,26 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI) {
// 8 bits can hold CTPOP result of 128 bit int or smaller. Mul with this
// bitmask will set 8 msb in ResTmp to sum of all B8Counts in 8 bit blocks.
auto MulMask = B.buildConstant(Ty, APInt::getSplat(Size, APInt(8, 0x01)));
- auto ResTmp = B.buildMul(Ty, B8Count, MulMask);
// Shift count result from 8 high bits to low bits.
auto C_SizeM8 = B.buildConstant(Ty, Size - 8);
- B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
+ auto IsMulSupported = [this](const LLT Ty) {
+ auto Action = LI.getAction({TargetOpcode::G_MUL, {Ty}}).Action;
+ return Action == Legal || Action == WidenScalar || Action == Custom;
+ };
+ if (IsMulSupported(Ty)) {
+ auto ResTmp = B.buildMul(Ty, B8Count, MulMask);
+ B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
+ } else {
+ auto ResTmp = B8Count;
+ for (unsigned Shift = 8; Shift < Size; Shift *= 2) {
+ auto ShiftC = B.buildConstant(Ty, Shift);
+ auto Shl = B.buildShl(Ty, ResTmp, ShiftC);
+ ResTmp = B.buildAdd(Ty, ResTmp, Shl);
+ }
+ B.buildLShr(MI.getOperand(0).getReg(), ResTmp, C_SizeM8);
+ }
MI.eraseFromParent();
return Legalized;
}
@@ -7622,10 +7682,12 @@ LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) {
LLT Ty = Dst0Ty;
LLT BoolTy = Dst1Ty;
+ Register NewDst0 = MRI.cloneVirtualRegister(Dst0);
+
if (IsAdd)
- MIRBuilder.buildAdd(Dst0, LHS, RHS);
+ MIRBuilder.buildAdd(NewDst0, LHS, RHS);
else
- MIRBuilder.buildSub(Dst0, LHS, RHS);
+ MIRBuilder.buildSub(NewDst0, LHS, RHS);
// TODO: If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
@@ -7638,12 +7700,15 @@ LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) {
// (LHS) if and only if the other operand (RHS) is (non-zero) positive,
// otherwise there will be overflow.
auto ResultLowerThanLHS =
- MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, Dst0, LHS);
+ MIRBuilder.buildICmp(CmpInst::ICMP_SLT, BoolTy, NewDst0, LHS);
auto ConditionRHS = MIRBuilder.buildICmp(
IsAdd ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGT, BoolTy, RHS, Zero);
MIRBuilder.buildXor(Dst1, ConditionRHS, ResultLowerThanLHS);
+
+ MIRBuilder.buildCopy(Dst0, NewDst0);
MI.eraseFromParent();
+
return Legalized;
}
@@ -7855,7 +7920,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerBswap(MachineInstr &MI) {
//{ (Src & Mask) >> N } | { (Src << N) & Mask }
static MachineInstrBuilder SwapN(unsigned N, DstOp Dst, MachineIRBuilder &B,
- MachineInstrBuilder Src, APInt Mask) {
+ MachineInstrBuilder Src, const APInt &Mask) {
const LLT Ty = Dst.getLLTTy(*B.getMRI());
MachineInstrBuilder C_N = B.buildConstant(Ty, N);
MachineInstrBuilder MaskLoNTo0 = B.buildConstant(Ty, Mask);
@@ -8215,9 +8280,22 @@ LegalizerHelper::lowerAbsToMaxNeg(MachineInstr &MI) {
// %res = G_SMAX %a, %v2
Register SrcReg = MI.getOperand(1).getReg();
LLT Ty = MRI.getType(SrcReg);
+ auto Zero = MIRBuilder.buildConstant(Ty, 0);
+ auto Sub = MIRBuilder.buildSub(Ty, Zero, SrcReg);
+ MIRBuilder.buildSMax(MI.getOperand(0), SrcReg, Sub);
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerAbsToCNeg(MachineInstr &MI) {
+ Register SrcReg = MI.getOperand(1).getReg();
+ Register DestReg = MI.getOperand(0).getReg();
+ LLT Ty = MRI.getType(SrcReg), IType = LLT::scalar(1);
auto Zero = MIRBuilder.buildConstant(Ty, 0).getReg(0);
auto Sub = MIRBuilder.buildSub(Ty, Zero, SrcReg).getReg(0);
- MIRBuilder.buildSMax(MI.getOperand(0), SrcReg, Sub);
+ auto ICmp = MIRBuilder.buildICmp(CmpInst::ICMP_SGT, IType, SrcReg, Zero);
+ MIRBuilder.buildSelect(DestReg, ICmp, SrcReg, Sub);
MI.eraseFromParent();
return Legalized;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 4ee1793d33d2..c9ee35373cd4 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -154,7 +154,8 @@ static bool mutationIsSane(const LegalizeRule &Rule,
case WidenScalar: {
if (OldTy.isVector()) {
// Number of elements should not change.
- if (!NewTy.isVector() || OldTy.getNumElements() != NewTy.getNumElements())
+ if (!NewTy.isVector() ||
+ OldTy.getElementCount() != NewTy.getElementCount())
return false;
} else {
// Both types must be vectors
diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
index 9fc8ecd60b03..fb9656c09ca3 100644
--- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -128,14 +128,14 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
// vector objects on the stack.
// BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
// following situations arise:
- if (PtrDiff >= 0 && Size1.hasValue()) {
+ if (PtrDiff >= 0 && Size1.hasValue() && !Size1.isScalable()) {
// [----BasePtr0----]
// [---BasePtr1--]
// ========PtrDiff========>
IsAlias = !((int64_t)Size1.getValue() <= PtrDiff);
return true;
}
- if (PtrDiff < 0 && Size2.hasValue()) {
+ if (PtrDiff < 0 && Size2.hasValue() && !Size2.isScalable()) {
// [----BasePtr0----]
// [---BasePtr1--]
// =====(-PtrDiff)====>
@@ -248,10 +248,20 @@ bool GISelAddressing::instMayAlias(const MachineInstr &MI,
return false;
}
+ // If NumBytes is scalable and offset is not 0, conservatively return may
+ // alias
+ if ((MUC0.NumBytes.isScalable() && MUC0.Offset != 0) ||
+ (MUC1.NumBytes.isScalable() && MUC1.Offset != 0))
+ return true;
+
+ const bool BothNotScalable =
+ !MUC0.NumBytes.isScalable() && !MUC1.NumBytes.isScalable();
+
// Try to prove that there is aliasing, or that there is no aliasing. Either
// way, we can return now. If nothing can be proved, proceed with more tests.
bool IsAlias;
- if (GISelAddressing::aliasIsKnownForLoadStore(MI, Other, IsAlias, MRI))
+ if (BothNotScalable &&
+ GISelAddressing::aliasIsKnownForLoadStore(MI, Other, IsAlias, MRI))
return IsAlias;
// The following all rely on MMO0 and MMO1 being valid.
@@ -267,12 +277,18 @@ bool GISelAddressing::instMayAlias(const MachineInstr &MI,
Size1.hasValue()) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
- int64_t Overlap0 = Size0.getValue() + SrcValOffset0 - MinOffset;
- int64_t Overlap1 = Size1.getValue() + SrcValOffset1 - MinOffset;
- if (AA->isNoAlias(MemoryLocation(MUC0.MMO->getValue(), Overlap0,
- MUC0.MMO->getAAInfo()),
- MemoryLocation(MUC1.MMO->getValue(), Overlap1,
- MUC1.MMO->getAAInfo())))
+ int64_t Overlap0 =
+ Size0.getValue().getKnownMinValue() + SrcValOffset0 - MinOffset;
+ int64_t Overlap1 =
+ Size1.getValue().getKnownMinValue() + SrcValOffset1 - MinOffset;
+ LocationSize Loc0 =
+ Size0.isScalable() ? Size0 : LocationSize::precise(Overlap0);
+ LocationSize Loc1 =
+ Size1.isScalable() ? Size1 : LocationSize::precise(Overlap1);
+
+ if (AA->isNoAlias(
+ MemoryLocation(MUC0.MMO->getValue(), Loc0, MUC0.MMO->getAAInfo()),
+ MemoryLocation(MUC1.MMO->getValue(), Loc1, MUC1.MMO->getAAInfo())))
return false;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index f7aaa0f02efc..b8ba782254c3 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -811,6 +811,13 @@ MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
return VScale;
}
+MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
+ const APInt &MinElts) {
+ ConstantInt *CI =
+ ConstantInt::get(getMF().getFunction().getContext(), MinElts);
+ return buildVScale(Res, *CI);
+}
+
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
if (HasSideEffects && IsConvergent)
return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
@@ -923,14 +930,14 @@ MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
}
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
- Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
- Register NewVal, MachineMemOperand &MMO) {
+ const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
+ const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
- LLT OldValResTy = getMRI()->getType(OldValRes);
- LLT SuccessResTy = getMRI()->getType(SuccessRes);
- LLT AddrTy = getMRI()->getType(Addr);
- LLT CmpValTy = getMRI()->getType(CmpVal);
- LLT NewValTy = getMRI()->getType(NewVal);
+ LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
+ LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
+ LLT AddrTy = Addr.getLLTTy(*getMRI());
+ LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
+ LLT NewValTy = NewVal.getLLTTy(*getMRI());
assert(OldValResTy.isScalar() && "invalid operand type");
assert(SuccessResTy.isScalar() && "invalid operand type");
assert(AddrTy.isPointer() && "invalid operand type");
@@ -940,24 +947,25 @@ MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
assert(OldValResTy == NewValTy && "type mismatch");
#endif
- return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
- .addDef(OldValRes)
- .addDef(SuccessRes)
- .addUse(Addr)
- .addUse(CmpVal)
- .addUse(NewVal)
- .addMemOperand(&MMO);
+ auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
+ OldValRes.addDefToMIB(*getMRI(), MIB);
+ SuccessRes.addDefToMIB(*getMRI(), MIB);
+ Addr.addSrcToMIB(MIB);
+ CmpVal.addSrcToMIB(MIB);
+ NewVal.addSrcToMIB(MIB);
+ MIB.addMemOperand(&MMO);
+ return MIB;
}
MachineInstrBuilder
-MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
- Register CmpVal, Register NewVal,
+MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
+ const SrcOp &CmpVal, const SrcOp &NewVal,
MachineMemOperand &MMO) {
#ifndef NDEBUG
- LLT OldValResTy = getMRI()->getType(OldValRes);
- LLT AddrTy = getMRI()->getType(Addr);
- LLT CmpValTy = getMRI()->getType(CmpVal);
- LLT NewValTy = getMRI()->getType(NewVal);
+ LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
+ LLT AddrTy = Addr.getLLTTy(*getMRI());
+ LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
+ LLT NewValTy = NewVal.getLLTTy(*getMRI());
assert(OldValResTy.isScalar() && "invalid operand type");
assert(AddrTy.isPointer() && "invalid operand type");
assert(CmpValTy.isValid() && "invalid operand type");
@@ -966,12 +974,13 @@ MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
assert(OldValResTy == NewValTy && "type mismatch");
#endif
- return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
- .addDef(OldValRes)
- .addUse(Addr)
- .addUse(CmpVal)
- .addUse(NewVal)
- .addMemOperand(&MMO);
+ auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
+ OldValRes.addDefToMIB(*getMRI(), MIB);
+ Addr.addSrcToMIB(MIB);
+ CmpVal.addSrcToMIB(MIB);
+ NewVal.addSrcToMIB(MIB);
+ MIB.addMemOperand(&MMO);
+ return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
@@ -1153,7 +1162,7 @@ void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
else
assert((TstTy.isScalar() ||
(TstTy.isVector() &&
- TstTy.getNumElements() == Op0Ty.getNumElements())) &&
+ TstTy.getElementCount() == Op0Ty.getElementCount())) &&
"type mismatch");
#endif
}
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index a9fa73b60a09..c3bc3203b636 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -966,14 +966,15 @@ llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
}
std::optional<SmallVector<unsigned>>
-llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
+llvm::ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
+ std::function<unsigned(APInt)> CB) {
LLT Ty = MRI.getType(Src);
SmallVector<unsigned> FoldedCTLZs;
auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
auto MaybeCst = getIConstantVRegVal(R, MRI);
if (!MaybeCst)
return std::nullopt;
- return MaybeCst->countl_zero();
+ return CB(*MaybeCst);
};
if (Ty.isVector()) {
// Try to constant fold each element.
@@ -996,6 +997,74 @@ llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
return std::nullopt;
}
+std::optional<SmallVector<APInt>>
+llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
+ const MachineRegisterInfo &MRI) {
+ LLT Ty = MRI.getType(Op1);
+ if (Ty != MRI.getType(Op2))
+ return std::nullopt;
+
+ auto TryFoldScalar = [&MRI, Pred](Register LHS,
+ Register RHS) -> std::optional<APInt> {
+ auto LHSCst = getIConstantVRegVal(LHS, MRI);
+ auto RHSCst = getIConstantVRegVal(RHS, MRI);
+ if (!LHSCst || !RHSCst)
+ return std::nullopt;
+
+ switch (Pred) {
+ case CmpInst::Predicate::ICMP_EQ:
+ return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
+ case CmpInst::Predicate::ICMP_NE:
+ return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
+ case CmpInst::Predicate::ICMP_UGT:
+ return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
+ case CmpInst::Predicate::ICMP_UGE:
+ return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
+ case CmpInst::Predicate::ICMP_ULT:
+ return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
+ case CmpInst::Predicate::ICMP_ULE:
+ return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
+ case CmpInst::Predicate::ICMP_SGT:
+ return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
+ case CmpInst::Predicate::ICMP_SGE:
+ return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
+ case CmpInst::Predicate::ICMP_SLT:
+ return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
+ case CmpInst::Predicate::ICMP_SLE:
+ return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
+ default:
+ return std::nullopt;
+ }
+ };
+
+ SmallVector<APInt> FoldedICmps;
+
+ if (Ty.isVector()) {
+ // Try to constant fold each element.
+ auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
+ auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
+ if (!BV1 || !BV2)
+ return std::nullopt;
+ assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
+ for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
+ if (auto MaybeFold =
+ TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
+ FoldedICmps.emplace_back(*MaybeFold);
+ continue;
+ }
+ return std::nullopt;
+ }
+ return FoldedICmps;
+ }
+
+ if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
+ FoldedICmps.emplace_back(*MaybeCst);
+ return FoldedICmps;
+ }
+
+ return std::nullopt;
+}
+
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
GISelKnownBits *KB) {
std::optional<DefinitionAndSourceRegister> DefSrcReg =
diff --git a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
index 5caf20add2a1..1602cd99c383 100644
--- a/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
+++ b/llvm/lib/CodeGen/LowLevelTypeUtils.cpp
@@ -51,7 +51,7 @@ MVT llvm::getMVTForLLT(LLT Ty) {
return MVT::getVectorVT(
MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
- Ty.getNumElements());
+ Ty.getElementCount());
}
EVT llvm::getApproximateEVTForLLT(LLT Ty, const DataLayout &DL,
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
index 870611248466..7bb216553204 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
@@ -213,6 +213,8 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
.Case("nuw", MIToken::kw_nuw)
.Case("nsw", MIToken::kw_nsw)
.Case("exact", MIToken::kw_exact)
+ .Case("nneg", MIToken::kw_nneg)
+ .Case("disjoint", MIToken::kw_disjoint)
.Case("nofpexcept", MIToken::kw_nofpexcept)
.Case("unpredictable", MIToken::kw_unpredictable)
.Case("debug-location", MIToken::kw_debug_location)
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.h b/llvm/lib/CodeGen/MIRParser/MILexer.h
index 0f344da52182..6617ec68e941 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.h
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.h
@@ -74,6 +74,8 @@ struct MIToken {
kw_exact,
kw_nofpexcept,
kw_unpredictable,
+ kw_nneg,
+ kw_disjoint,
kw_debug_location,
kw_debug_instr_number,
kw_dbg_instr_ref,
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 691c60d22724..95924f056628 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1471,7 +1471,9 @@ bool MIParser::parseInstruction(unsigned &OpCode, unsigned &Flags) {
Token.is(MIToken::kw_exact) ||
Token.is(MIToken::kw_nofpexcept) ||
Token.is(MIToken::kw_noconvergent) ||
- Token.is(MIToken::kw_unpredictable)) {
+ Token.is(MIToken::kw_unpredictable) ||
+ Token.is(MIToken::kw_nneg) ||
+ Token.is(MIToken::kw_disjoint)) {
// clang-format on
// Mine frame and fast math flags
if (Token.is(MIToken::kw_frame_setup))
@@ -1504,6 +1506,10 @@ bool MIParser::parseInstruction(unsigned &OpCode, unsigned &Flags) {
Flags |= MachineInstr::Unpredictable;
if (Token.is(MIToken::kw_noconvergent))
Flags |= MachineInstr::NoConvergent;
+ if (Token.is(MIToken::kw_nneg))
+ Flags |= MachineInstr::NonNeg;
+ if (Token.is(MIToken::kw_disjoint))
+ Flags |= MachineInstr::Disjoint;
lex();
}
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 8efe67a9a72b..4cf3074ea3ff 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -806,6 +806,10 @@ void MIPrinter::print(const MachineInstr &MI) {
OS << "unpredictable ";
if (MI.getFlag(MachineInstr::NoConvergent))
OS << "noconvergent ";
+ if (MI.getFlag(MachineInstr::NonNeg))
+ OS << "nneg ";
+ if (MI.getFlag(MachineInstr::Disjoint))
+ OS << "disjoint ";
OS << TII->getName(MI.getOpcode());
if (I < E)
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 9a0ab300b21b..8dc6781fcb01 100644
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -411,6 +411,7 @@ private:
typedef enum { DebugUse = false, RegularUse = true } DebugType;
void ReadRegister(MCRegister Reg, MachineInstr &Reader, DebugType DT);
+ void readSuccessorLiveIns(const MachineBasicBlock &MBB);
void ForwardCopyPropagateBlock(MachineBasicBlock &MBB);
void BackwardCopyPropagateBlock(MachineBasicBlock &MBB);
void EliminateSpillageCopies(MachineBasicBlock &MBB);
@@ -463,6 +464,22 @@ void MachineCopyPropagation::ReadRegister(MCRegister Reg, MachineInstr &Reader,
}
}
+void MachineCopyPropagation::readSuccessorLiveIns(
+ const MachineBasicBlock &MBB) {
+ if (MaybeDeadCopies.empty())
+ return;
+
+ // If a copy result is livein to a successor, it is not dead.
+ for (const MachineBasicBlock *Succ : MBB.successors()) {
+ for (const auto &LI : Succ->liveins()) {
+ for (MCRegUnit Unit : TRI->regunits(LI.PhysReg)) {
+ if (MachineInstr *Copy = Tracker.findCopyForUnit(Unit, *TRI))
+ MaybeDeadCopies.remove(Copy);
+ }
+ }
+ }
+}
+
/// Return true if \p PreviousCopy did copy register \p Src to register \p Def.
/// This fact may have been obscured by sub register usage or may not be true at
/// all even though Src and Def are subregisters of the registers used in
@@ -640,7 +657,7 @@ bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
/// The umull instruction is unpredictable unless RdHi and RdLo are different.
bool MachineCopyPropagation::hasOverlappingMultipleDef(
const MachineInstr &MI, const MachineOperand &MODef, Register Def) {
- for (const MachineOperand &MIDef : MI.defs()) {
+ for (const MachineOperand &MIDef : MI.all_defs()) {
if ((&MIDef != &MODef) && MIDef.isReg() &&
TRI->regsOverlap(Def, MIDef.getReg()))
return true;
@@ -914,10 +931,17 @@ void MachineCopyPropagation::ForwardCopyPropagateBlock(MachineBasicBlock &MBB) {
Tracker.clobberRegister(Reg, *TRI, *TII, UseCopyInstr);
}
- // If MBB doesn't have successors, delete the copies whose defs are not used.
- // If MBB does have successors, then conservative assume the defs are live-out
- // since we don't want to trust live-in lists.
- if (MBB.succ_empty()) {
+ bool TracksLiveness = MRI->tracksLiveness();
+
+ // If liveness is tracked, we can use the live-in lists to know which
+ // copies aren't dead.
+ if (TracksLiveness)
+ readSuccessorLiveIns(MBB);
+
+  // If MBB doesn't have successors, delete copies whose defs are not used.
+ // If MBB does have successors, we can only delete copies if we are able to
+ // use liveness information from successors to confirm they are really dead.
+ if (MBB.succ_empty() || TracksLiveness) {
for (MachineInstr *MaybeDead : MaybeDeadCopies) {
LLVM_DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
MaybeDead->dump());
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index fe2f9ccd33a3..83604003a038 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -39,6 +39,7 @@
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
@@ -553,6 +554,22 @@ uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
MIFlags |= MachineInstr::MIFlag::NoSWrap;
if (OB->hasNoUnsignedWrap())
MIFlags |= MachineInstr::MIFlag::NoUWrap;
+ } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
+ if (TI->hasNoSignedWrap())
+ MIFlags |= MachineInstr::MIFlag::NoSWrap;
+ if (TI->hasNoUnsignedWrap())
+ MIFlags |= MachineInstr::MIFlag::NoUWrap;
+ }
+
+ // Copy the nonneg flag.
+ if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
+ if (PNI->hasNonNeg())
+ MIFlags |= MachineInstr::MIFlag::NonNeg;
+ // Copy the disjoint flag.
+ } else if (const PossiblyDisjointInst *PD =
+ dyn_cast<PossiblyDisjointInst>(&I)) {
+ if (PD->isDisjoint())
+ MIFlags |= MachineInstr::MIFlag::Disjoint;
}
// Copy the exact flag.
@@ -1306,6 +1323,7 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
LocationSize WidthB = MMOb->getSize();
bool KnownWidthA = WidthA.hasValue();
bool KnownWidthB = WidthB.hasValue();
+ bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
const Value *ValA = MMOa->getValue();
const Value *ValB = MMOb->getValue();
@@ -1321,12 +1339,14 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
SameVal = true;
}
- if (SameVal) {
+ if (SameVal && BothMMONonScalable) {
if (!KnownWidthA || !KnownWidthB)
return true;
int64_t MaxOffset = std::max(OffsetA, OffsetB);
- LocationSize LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;
- return (MinOffset + (int)LowWidth.getValue() > MaxOffset);
+ int64_t LowWidth = (MinOffset == OffsetA)
+ ? WidthA.getValue().getKnownMinValue()
+ : WidthB.getValue().getKnownMinValue();
+ return (MinOffset + LowWidth > MaxOffset);
}
if (!AA)
@@ -1338,15 +1358,29 @@ static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, AAResults *AA,
assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
- int64_t OverlapA = KnownWidthA ? WidthA.getValue() + OffsetA - MinOffset
- : MemoryLocation::UnknownSize;
- int64_t OverlapB = KnownWidthB ? WidthB.getValue() + OffsetB - MinOffset
- : MemoryLocation::UnknownSize;
+  // If a scalable LocationSize has a non-zero offset, computing Width + Offset
+  // does not currently work.
+ if ((WidthA.isScalable() && OffsetA > 0) ||
+ (WidthB.isScalable() && OffsetB > 0))
+ return true;
+
+ int64_t OverlapA =
+ KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
+ : MemoryLocation::UnknownSize;
+ int64_t OverlapB =
+ KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
+ : MemoryLocation::UnknownSize;
+
+ LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
+ ? WidthA
+ : LocationSize::precise(OverlapA);
+ LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
+ ? WidthB
+ : LocationSize::precise(OverlapB);
return !AA->isNoAlias(
- MemoryLocation(ValA, OverlapA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
- MemoryLocation(ValB, OverlapB,
- UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
+ MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+ MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
}
bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
@@ -1689,6 +1723,10 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "nofpexcept ";
if (getFlag(MachineInstr::NoMerge))
OS << "nomerge ";
+ if (getFlag(MachineInstr::NonNeg))
+ OS << "nneg ";
+ if (getFlag(MachineInstr::Disjoint))
+ OS << "disjoint ";
// Print the opcode name.
if (TII)
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 937ca539513a..ace05902d5df 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1107,12 +1107,13 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags F,
const MDNode *Ranges, SyncScope::ID SSID,
AtomicOrdering Ordering,
AtomicOrdering FailureOrdering)
- : MachineMemOperand(ptrinfo, F,
- !TS.hasValue() || TS.isScalable()
- ? LLT()
- : LLT::scalar(8 * TS.getValue().getKnownMinValue()),
- BaseAlignment, AAInfo, Ranges, SSID, Ordering,
- FailureOrdering) {}
+ : MachineMemOperand(
+ ptrinfo, F,
+ !TS.hasValue() ? LLT()
+ : TS.isScalable()
+ ? LLT::scalable_vector(1, 8 * TS.getValue().getKnownMinValue())
+ : LLT::scalar(8 * TS.getValue().getKnownMinValue()),
+ BaseAlignment, AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
// The Value and Offset may differ due to CSE. But the flags and size
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 0d5bf3299387..cbeb02a72552 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -85,6 +85,7 @@ namespace MISchedPostRASched {
enum Direction {
TopDown,
BottomUp,
+ Bidirectional,
};
} // end namespace MISchedPostRASched
cl::opt<MISchedPostRASched::Direction> PostRADirection(
@@ -93,10 +94,13 @@ cl::opt<MISchedPostRASched::Direction> PostRADirection(
// Default to top-down because it was implemented first and existing targets
// expect that behavior by default.
cl::init(MISchedPostRASched::TopDown),
- cl::values(clEnumValN(MISchedPostRASched::TopDown, "topdown",
- "Force top-down post reg-alloc list scheduling"),
- clEnumValN(MISchedPostRASched::BottomUp, "bottomup",
- "Force bottom-up post reg-alloc list scheduling")));
+ cl::values(
+ clEnumValN(MISchedPostRASched::TopDown, "topdown",
+ "Force top-down post reg-alloc list scheduling"),
+ clEnumValN(MISchedPostRASched::BottomUp, "bottomup",
+ "Force bottom-up post reg-alloc list scheduling"),
+ clEnumValN(MISchedPostRASched::Bidirectional, "bidirectional",
+ "Force bidirectional post reg-alloc list scheduling")));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
cl::desc("Print critical path length to stdout"));
@@ -500,8 +504,10 @@ bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
ScheduleDAGMI::DumpDirection D;
if (PostRADirection == MISchedPostRASched::TopDown)
D = ScheduleDAGMI::DumpDirection::TopDown;
- else
+ else if (PostRADirection == MISchedPostRASched::BottomUp)
D = ScheduleDAGMI::DumpDirection::BottomUp;
+ else
+ D = ScheduleDAGMI::DumpDirection::Bidirectional;
Scheduler->setDumpDirection(D);
scheduleRegions(*Scheduler, true);
@@ -3715,7 +3721,7 @@ SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
TCand.reset(CandPolicy());
pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
assert(TCand.SU == TopCand.SU &&
- "Last pick result should correspond to re-picking right now");
+ "Last pick result should correspond to re-picking right now");
}
#endif
}
@@ -3891,6 +3897,9 @@ void PostGenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
} else if (PostRADirection == MISchedPostRASched::BottomUp) {
RegionPolicy.OnlyTopDown = false;
RegionPolicy.OnlyBottomUp = true;
+ } else if (PostRADirection == MISchedPostRASched::Bidirectional) {
+ RegionPolicy.OnlyBottomUp = false;
+ RegionPolicy.OnlyTopDown = false;
}
}
@@ -3970,6 +3979,87 @@ void PostGenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
}
}
+/// Pick the best candidate node from either the top or bottom queue.
+SUnit *PostGenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
+  // FIXME: This is similar to GenericScheduler::pickNodeBidirectional. Factor
+  // out common parts.
+
+  // Schedule as far as possible in the direction of no choice. This is most
+  // efficient, but also provides the best heuristics for CriticalPSets.
+  if (SUnit *SU = Bot.pickOnlyChoice()) {
+    IsTopNode = false;
+    tracePick(Only1, false);
+    return SU;
+  }
+  if (SUnit *SU = Top.pickOnlyChoice()) {
+    IsTopNode = true;
+    tracePick(Only1, true);
+    return SU;
+  }
+  // Set the bottom-up policy based on the state of the current bottom zone and
+  // the instructions outside the zone, including the top zone.
+  CandPolicy BotPolicy;
+  setPolicy(BotPolicy, /*IsPostRA=*/true, Bot, &Top);
+  // Set the top-down policy based on the state of the current top zone and
+  // the instructions outside the zone, including the bottom zone.
+  CandPolicy TopPolicy;
+  setPolicy(TopPolicy, /*IsPostRA=*/true, Top, &Bot);
+
+  // See if BotCand is still valid (because we previously scheduled from Top).
+  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
+  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
+      BotCand.Policy != BotPolicy) {
+    BotCand.reset(CandPolicy());
+    pickNodeFromQueue(Bot, BotCand);
+    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
+  } else {
+    LLVM_DEBUG(traceCandidate(BotCand));
+#ifndef NDEBUG
+    if (VerifyScheduling) {
+      SchedCandidate TCand;
+      TCand.reset(CandPolicy());
+      // Re-pick into the scratch candidate so the cached BotCand is verified,
+      // not overwritten.
+      pickNodeFromQueue(Bot, TCand);
+      assert(TCand.SU == BotCand.SU &&
+             "Last pick result should correspond to re-picking right now");
+    }
+#endif
+  }
+
+  // Check if the top Q has a better candidate.
+  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
+  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
+      TopCand.Policy != TopPolicy) {
+    TopCand.reset(CandPolicy());
+    pickNodeFromQueue(Top, TopCand);
+    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
+  } else {
+    LLVM_DEBUG(traceCandidate(TopCand));
+#ifndef NDEBUG
+    if (VerifyScheduling) {
+      SchedCandidate TCand;
+      TCand.reset(CandPolicy());
+      // Re-pick into the scratch candidate so the cached TopCand is verified,
+      // not overwritten.
+      pickNodeFromQueue(Top, TCand);
+      assert(TCand.SU == TopCand.SU &&
+             "Last pick result should correspond to re-picking right now");
+    }
+#endif
+  }
+
+  // Pick best from BotCand and TopCand.
+  assert(BotCand.isValid());
+  assert(TopCand.isValid());
+  SchedCandidate Cand = BotCand;
+  TopCand.Reason = NoCand;
+  if (tryCandidate(Cand, TopCand)) {
+    Cand.setBest(TopCand);
+    LLVM_DEBUG(traceCandidate(Cand));
+  }
+
+  IsTopNode = Cand.AtTop;
+  tracePick(Cand);
+  return Cand.SU;
+}
+
/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
if (DAG->top() == DAG->bottom()) {
@@ -3980,13 +4070,12 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
SUnit *SU;
do {
if (RegionPolicy.OnlyBottomUp) {
- assert(!RegionPolicy.OnlyTopDown);
SU = Bot.pickOnlyChoice();
if (SU) {
tracePick(Only1, true);
} else {
CandPolicy NoPolicy;
- SchedCandidate BotCand(NoPolicy);
+ BotCand.reset(NoPolicy);
// Set the bottom-up policy based on the state of the current bottom
// zone and the instructions outside the zone, including the top zone.
setPolicy(BotCand.Policy, /*IsPostRA=*/true, Bot, nullptr);
@@ -3996,15 +4085,13 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
SU = BotCand.SU;
}
IsTopNode = false;
- } else {
-
- assert(RegionPolicy.OnlyTopDown);
+ } else if (RegionPolicy.OnlyTopDown) {
SU = Top.pickOnlyChoice();
if (SU) {
tracePick(Only1, true);
} else {
CandPolicy NoPolicy;
- SchedCandidate TopCand(NoPolicy);
+ TopCand.reset(NoPolicy);
// Set the top-down policy based on the state of the current top zone
// and the instructions outside the zone, including the bottom zone.
setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
@@ -4014,6 +4101,8 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
SU = TopCand.SU;
}
IsTopNode = true;
+ } else {
+ SU = pickNodeBidirectional(IsTopNode);
}
} while (SU->isScheduled);
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index c69d36fc7fdd..e4e05ce9278c 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1506,7 +1506,8 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
if ((DstTy.isVector() != SrcTy.isVector()) ||
- (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
+ (DstTy.isVector() &&
+ DstTy.getElementCount() != SrcTy.getElementCount()))
report("Generic vector icmp/fcmp must preserve number of lanes", MI);
break;
@@ -1867,6 +1868,17 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
+ case TargetOpcode::G_UBSANTRAP: {
+ const MachineOperand &KindOp = MI->getOperand(0);
+ if (!MI->getOperand(0).isImm()) {
+ report("Crash kind must be an immediate", &KindOp, 0);
+ break;
+ }
+ int64_t Kind = MI->getOperand(0).getImm();
+ if (!isInt<8>(Kind))
+ report("Crash kind must be 8 bit wide", &KindOp, 0);
+ break;
+ }
case TargetOpcode::G_VECREDUCE_SEQ_FADD:
case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
@@ -3697,6 +3709,9 @@ void MachineVerifier::verifyStackFrame() {
if (I.getOpcode() == FrameSetupOpcode) {
if (BBState.ExitIsSetup)
report("FrameSetup is after another FrameSetup", &I);
+ if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
+ report("AdjustsStack not set in presence of a frame pseudo "
+ "instruction.", &I);
BBState.ExitValue -= TII->getFrameTotalSize(I);
BBState.ExitIsSetup = true;
}
@@ -3712,6 +3727,9 @@ void MachineVerifier::verifyStackFrame() {
errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
<< AbsSPAdj << ">.\n";
}
+ if (!MRI->isSSA() && !MF->getFrameInfo().adjustsStack())
+ report("AdjustsStack not set in presence of a frame pseudo "
+ "instruction.", &I);
BBState.ExitValue += Size;
BBState.ExitIsSetup = false;
}
diff --git a/llvm/lib/CodeGen/RegisterBankInfo.cpp b/llvm/lib/CodeGen/RegisterBankInfo.cpp
index 5548430d1b0a..72b07eb1902d 100644
--- a/llvm/lib/CodeGen/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/RegisterBankInfo.cpp
@@ -484,9 +484,10 @@ void RegisterBankInfo::applyDefaultMapping(const OperandsMapper &OpdMapper) {
// the storage. However, right now we don't necessarily bump all
// the types to storage size. For instance, we can consider
// s16 G_AND legal whereas the storage size is going to be 32.
- assert(OrigTy.getSizeInBits() <= NewTy.getSizeInBits() &&
- "Types with difference size cannot be handled by the default "
- "mapping");
+ assert(
+ TypeSize::isKnownLE(OrigTy.getSizeInBits(), NewTy.getSizeInBits()) &&
+ "Types with difference size cannot be handled by the default "
+ "mapping");
LLVM_DEBUG(dbgs() << "\nChange type of new opd from " << NewTy << " to "
<< OrigTy);
MRI.setType(NewReg, OrigTy);
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f199625bf67a..2f46b23a97c6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -609,6 +609,9 @@ namespace {
SDValue &CC, bool MatchStrict = false) const;
bool isOneUseSetCC(SDValue N) const;
+ SDValue foldAddToAvg(SDNode *N, const SDLoc &DL);
+ SDValue foldSubToAvg(SDNode *N, const SDLoc &DL);
+
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
@@ -2490,7 +2493,8 @@ SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
return SelectOp;
}
-static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG) {
+static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, const SDLoc &DL,
+ SelectionDAG &DAG) {
assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Expecting add or sub");
@@ -2522,33 +2526,37 @@ static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG) {
// add (zext i1 (seteq (X & 1), 0)), C --> sub C+1, (zext (X & 1))
// sub C, (zext i1 (seteq (X & 1), 0)) --> add C-1, (zext (X & 1))
EVT VT = C.getValueType();
- SDLoc DL(N);
SDValue LowBit = DAG.getZExtOrTrunc(SetCC.getOperand(0), DL, VT);
SDValue C1 = IsAdd ? DAG.getConstant(CN->getAPIntValue() + 1, DL, VT) :
DAG.getConstant(CN->getAPIntValue() - 1, DL, VT);
return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, C1, LowBit);
}
-// Attempt to form avgceilu(A, B) from (A | B) - ((A ^ B) >> 1)
-static SDValue combineFixedwidthToAVGCEILU(SDNode *N, SelectionDAG &DAG) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+// Attempt to form avgceil(A, B) from (A | B) - ((A ^ B) >> 1)
+SDValue DAGCombiner::foldSubToAvg(SDNode *N, const SDLoc &DL) {
SDValue N0 = N->getOperand(0);
EVT VT = N0.getValueType();
- SDLoc DL(N);
- if (TLI.isOperationLegal(ISD::AVGCEILU, VT)) {
- SDValue A, B;
- if (sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)),
- m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)),
- m_SpecificInt(1))))) {
- return DAG.getNode(ISD::AVGCEILU, DL, VT, A, B);
- }
+ SDValue A, B;
+
+ if (hasOperation(ISD::AVGCEILU, VT) &&
+ sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)),
+ m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)),
+ m_SpecificInt(1))))) {
+ return DAG.getNode(ISD::AVGCEILU, DL, VT, A, B);
+ }
+ if (hasOperation(ISD::AVGCEILS, VT) &&
+ sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)),
+ m_Sra(m_Xor(m_Deferred(A), m_Deferred(B)),
+ m_SpecificInt(1))))) {
+ return DAG.getNode(ISD::AVGCEILS, DL, VT, A, B);
}
return SDValue();
}
/// Try to fold a 'not' shifted sign-bit with add/sub with constant operand into
/// a shift and add with a different constant.
-static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
+static SDValue foldAddSubOfSignBit(SDNode *N, const SDLoc &DL,
+ SelectionDAG &DAG) {
assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Expecting add or sub");
@@ -2576,7 +2584,6 @@ static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
// Eliminate the 'not' by adjusting the shift and add/sub constant:
// add (srl (not X), 31), C --> add (sra X, 31), (C + 1)
// sub C, (srl (not X), 31) --> add (srl X, 31), (C - 1)
- SDLoc DL(N);
if (SDValue NewC = DAG.FoldConstantArithmetic(
IsAdd ? ISD::ADD : ISD::SUB, DL, VT,
{ConstantOp, DAG.getConstant(1, DL, VT)})) {
@@ -2837,20 +2844,25 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
return SDValue();
}
-// Attempt to form avgflooru(A, B) from (A & B) + ((A ^ B) >> 1)
-static SDValue combineFixedwidthToAVGFLOORU(SDNode *N, SelectionDAG &DAG) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+// Attempt to form avgfloor(A, B) from (A & B) + ((A ^ B) >> 1)
+SDValue DAGCombiner::foldAddToAvg(SDNode *N, const SDLoc &DL) {
SDValue N0 = N->getOperand(0);
EVT VT = N0.getValueType();
- SDLoc DL(N);
- if (TLI.isOperationLegal(ISD::AVGFLOORU, VT)) {
- SDValue A, B;
- if (sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)),
- m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)),
- m_SpecificInt(1))))) {
- return DAG.getNode(ISD::AVGFLOORU, DL, VT, A, B);
- }
+ SDValue A, B;
+
+ if (hasOperation(ISD::AVGFLOORU, VT) &&
+ sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)),
+ m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)),
+ m_SpecificInt(1))))) {
+ return DAG.getNode(ISD::AVGFLOORU, DL, VT, A, B);
}
+ if (hasOperation(ISD::AVGFLOORS, VT) &&
+ sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)),
+ m_Sra(m_Xor(m_Deferred(A), m_Deferred(B)),
+ m_SpecificInt(1))))) {
+ return DAG.getNode(ISD::AVGFLOORS, DL, VT, A, B);
+ }
+
return SDValue();
}
@@ -2863,20 +2875,23 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (SDValue Combined = visitADDLike(N))
return Combined;
- if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
+ if (SDValue V = foldAddSubBoolOfMaskedVal(N, DL, DAG))
return V;
- if (SDValue V = foldAddSubOfSignBit(N, DAG))
+ if (SDValue V = foldAddSubOfSignBit(N, DL, DAG))
return V;
- // Try to match AVGFLOORU fixedwidth pattern
- if (SDValue V = combineFixedwidthToAVGFLOORU(N, DAG))
+ // Try to match AVGFLOOR fixedwidth pattern
+ if (SDValue V = foldAddToAvg(N, DL))
return V;
// fold (a+b) -> (a|b) iff a and b share no bits.
if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) &&
- DAG.haveNoCommonBitsSet(N0, N1))
- return DAG.getNode(ISD::OR, DL, VT, N0, N1);
+ DAG.haveNoCommonBitsSet(N0, N1)) {
+ SDNodeFlags Flags;
+ Flags.setDisjoint(true);
+ return DAG.getNode(ISD::OR, DL, VT, N0, N1, Flags);
+ }
// Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
if (N0.getOpcode() == ISD::VSCALE && N1.getOpcode() == ISD::VSCALE) {
@@ -3473,6 +3488,11 @@ static SDValue combineCarryDiamond(SelectionDAG &DAG, const TargetLowering &TLI,
return SDValue();
if (Opcode != ISD::UADDO && Opcode != ISD::USUBO)
return SDValue();
+ // Guarantee identical type of CarryOut
+ EVT CarryOutType = N->getValueType(0);
+ if (CarryOutType != Carry0.getValue(1).getValueType() ||
+ CarryOutType != Carry1.getValue(1).getValueType())
+ return SDValue();
// Canonicalize the add/sub of A and B (the top node in the above ASCII art)
// as Carry0 and the add/sub of the carry in as Carry1 (the middle node).
@@ -3520,7 +3540,7 @@ static SDValue combineCarryDiamond(SelectionDAG &DAG, const TargetLowering &TLI,
// TODO: match other operations that can merge flags (ADD, etc)
DAG.ReplaceAllUsesOfValueWith(Carry1.getValue(0), Merged.getValue(0));
if (N->getOpcode() == ISD::AND)
- return DAG.getConstant(0, DL, MVT::i1);
+ return DAG.getConstant(0, DL, CarryOutType);
return Merged.getValue(1);
}
@@ -3695,6 +3715,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
+ unsigned BitWidth = VT.getScalarSizeInBits();
SDLoc DL(N);
auto PeekThroughFreeze = [](SDValue N) {
@@ -3725,16 +3746,12 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
- ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
-
// fold (sub x, c) -> (add x, -c)
- if (N1C) {
+ if (ConstantSDNode *N1C = getAsNonOpaqueConstant(N1))
return DAG.getNode(ISD::ADD, DL, VT, N0,
DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
- }
if (isNullOrNullSplat(N0)) {
- unsigned BitWidth = VT.getScalarSizeInBits();
// Right-shifting everything out but the sign bit followed by negation is
// the same as flipping arithmetic/logical shift type without the negation:
// -(X >>u 31) -> (X >>s 31)
@@ -3860,17 +3877,17 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (N1.isUndef())
return N1;
- if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
+ if (SDValue V = foldAddSubBoolOfMaskedVal(N, DL, DAG))
return V;
- if (SDValue V = foldAddSubOfSignBit(N, DAG))
+ if (SDValue V = foldAddSubOfSignBit(N, DL, DAG))
return V;
- // Try to match AVGCEILU fixedwidth pattern
- if (SDValue V = combineFixedwidthToAVGCEILU(N, DAG))
+ // Try to match AVGCEIL fixedwidth pattern
+ if (SDValue V = foldSubToAvg(N, DL))
return V;
- if (SDValue V = foldAddSubMasked1(false, N0, N1, DAG, SDLoc(N)))
+ if (SDValue V = foldAddSubMasked1(false, N0, N1, DAG, DL))
return V;
if (SDValue V = foldSubToUSubSat(VT, N, DL))
@@ -3934,8 +3951,8 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue S0 = N1.getOperand(0);
if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0))
if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1)))
- if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
- return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
+ if (C->getAPIntValue() == (BitWidth - 1))
+ return DAG.getNode(ISD::ABS, DL, VT, S0);
}
}
@@ -3977,8 +3994,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (!LegalOperations && N1.getOpcode() == ISD::SRL && N1.hasOneUse()) {
SDValue ShAmt = N1.getOperand(1);
ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
- if (ShAmtC &&
- ShAmtC->getAPIntValue() == (N1.getScalarValueSizeInBits() - 1)) {
+ if (ShAmtC && ShAmtC->getAPIntValue() == (BitWidth - 1)) {
SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0), ShAmt);
return DAG.getNode(ISD::ADD, DL, VT, N0, SRA);
}
@@ -3989,7 +4005,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// N0 - (X << BW-1) --> N0 + (X << BW-1)
if (N1.getOpcode() == ISD::SHL) {
ConstantSDNode *ShlC = isConstOrConstSplat(N1.getOperand(1));
- if (ShlC && ShlC->getAPIntValue() == VT.getScalarSizeInBits() - 1)
+ if (ShlC && ShlC->getAPIntValue() == (BitWidth - 1))
return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
}
@@ -4022,23 +4038,17 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
}
}
- // max(a,b) - min(a,b) --> abd(a,b)
- auto MatchSubMaxMin = [&](unsigned Max, unsigned Min, unsigned Abd) {
- if (N0.getOpcode() != Max || N1.getOpcode() != Min)
- return SDValue();
- if ((N0.getOperand(0) != N1.getOperand(0) ||
- N0.getOperand(1) != N1.getOperand(1)) &&
- (N0.getOperand(0) != N1.getOperand(1) ||
- N0.getOperand(1) != N1.getOperand(0)))
- return SDValue();
- if (!hasOperation(Abd, VT))
- return SDValue();
- return DAG.getNode(Abd, DL, VT, N0.getOperand(0), N0.getOperand(1));
- };
- if (SDValue R = MatchSubMaxMin(ISD::SMAX, ISD::SMIN, ISD::ABDS))
- return R;
- if (SDValue R = MatchSubMaxMin(ISD::UMAX, ISD::UMIN, ISD::ABDU))
- return R;
+ // smax(a,b) - smin(a,b) --> abds(a,b)
+ if (hasOperation(ISD::ABDS, VT) &&
+ sd_match(N0, m_SMax(m_Value(A), m_Value(B))) &&
+ sd_match(N1, m_SMin(m_Specific(A), m_Specific(B))))
+ return DAG.getNode(ISD::ABDS, DL, VT, A, B);
+
+ // umax(a,b) - umin(a,b) --> abdu(a,b)
+ if (hasOperation(ISD::ABDU, VT) &&
+ sd_match(N0, m_UMax(m_Value(A), m_Value(B))) &&
+ sd_match(N1, m_UMin(m_Specific(A), m_Specific(B))))
+ return DAG.getNode(ISD::ABDU, DL, VT, A, B);
return SDValue();
}
@@ -4131,13 +4141,11 @@ SDValue DAGCombiner::visitSUBO(SDNode *N) {
return CombineTo(N, DAG.getConstant(0, DL, VT),
DAG.getConstant(0, DL, CarryVT));
- ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
-
// fold (subox, c) -> (addo x, -c)
- if (IsSigned && N1C && !N1C->isMinSignedValue()) {
- return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0,
- DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
- }
+ if (ConstantSDNode *N1C = getAsNonOpaqueConstant(N1))
+ if (IsSigned && !N1C->isMinSignedValue())
+ return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0,
+ DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
// fold (subo x, 0) -> x + no borrow
if (isNullOrNullSplat(N1))
@@ -9284,8 +9292,11 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
// fold (a^b) -> (a|b) iff a and b share no bits.
if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) &&
- DAG.haveNoCommonBitsSet(N0, N1))
- return DAG.getNode(ISD::OR, DL, VT, N0, N1);
+ DAG.haveNoCommonBitsSet(N0, N1)) {
+ SDNodeFlags Flags;
+ Flags.setDisjoint(true);
+ return DAG.getNode(ISD::OR, DL, VT, N0, N1, Flags);
+ }
// look for 'add-like' folds:
// XOR(N0,MIN_SIGNED_VALUE) == ADD(N0,MIN_SIGNED_VALUE)
@@ -13138,6 +13149,37 @@ tryToFoldExtOfMaskedLoad(SelectionDAG &DAG, const TargetLowering &TLI, EVT VT,
return NewLoad;
}
+// fold ([s|z]ext (atomic_load)) -> ([s|z]ext (truncate ([s|z]ext atomic_load)))
+static SDValue tryToFoldExtOfAtomicLoad(SelectionDAG &DAG,
+ const TargetLowering &TLI, EVT VT,
+ SDValue N0,
+ ISD::LoadExtType ExtLoadType) {
+ auto *ALoad = dyn_cast<AtomicSDNode>(N0);
+ if (!ALoad || ALoad->getOpcode() != ISD::ATOMIC_LOAD)
+ return {};
+ EVT MemoryVT = ALoad->getMemoryVT();
+ if (!TLI.isAtomicLoadExtLegal(ExtLoadType, VT, MemoryVT))
+ return {};
+ // Can't fold into ALoad if it is already extending differently.
+ ISD::LoadExtType ALoadExtTy = ALoad->getExtensionType();
+ if ((ALoadExtTy == ISD::ZEXTLOAD && ExtLoadType == ISD::SEXTLOAD) ||
+ (ALoadExtTy == ISD::SEXTLOAD && ExtLoadType == ISD::ZEXTLOAD))
+ return {};
+
+ EVT OrigVT = ALoad->getValueType(0);
+ assert(OrigVT.getSizeInBits() < VT.getSizeInBits() && "VT should be wider.");
+ auto *NewALoad = cast<AtomicSDNode>(DAG.getAtomic(
+ ISD::ATOMIC_LOAD, SDLoc(ALoad), MemoryVT, VT, ALoad->getChain(),
+ ALoad->getBasePtr(), ALoad->getMemOperand()));
+ NewALoad->setExtensionType(ExtLoadType);
+ DAG.ReplaceAllUsesOfValueWith(
+ SDValue(ALoad, 0),
+ DAG.getNode(ISD::TRUNCATE, SDLoc(ALoad), OrigVT, SDValue(NewALoad, 0)));
+ // Update the chain uses.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(ALoad, 1), SDValue(NewALoad, 1));
+ return SDValue(NewALoad, 0);
+}
+
static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG,
bool LegalOperations) {
assert((N->getOpcode() == ISD::SIGN_EXTEND ||
@@ -13409,6 +13451,11 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::SEXTLOAD))
return foldedExt;
+ // Try to simplify (sext (atomic_load x)).
+ if (SDValue foldedExt =
+ tryToFoldExtOfAtomicLoad(DAG, TLI, VT, N0, ISD::SEXTLOAD))
+ return foldedExt;
+
// fold (sext (and/or/xor (load x), cst)) ->
// (and/or/xor (sextload x), (sext cst))
if (ISD::isBitwiseLogicOp(N0.getOpcode()) &&
@@ -13720,6 +13767,11 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (SDValue ExtLoad = CombineExtLoad(N))
return ExtLoad;
+ // Try to simplify (zext (atomic_load x)).
+ if (SDValue foldedExt =
+ tryToFoldExtOfAtomicLoad(DAG, TLI, VT, N0, ISD::ZEXTLOAD))
+ return foldedExt;
+
// fold (zext (and/or/xor (load x), cst)) ->
// (and/or/xor (zextload x), (zext cst))
// Unless (and (load x) cst) will match as a zextload already and has
@@ -13844,11 +13896,20 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::SHL) {
// If the original shl may be shifting out bits, do not perform this
// transformation.
- // TODO: Add MaskedValueIsZero check.
unsigned KnownZeroBits = ShVal.getValueSizeInBits() -
ShVal.getOperand(0).getValueSizeInBits();
- if (ShAmtC->getAPIntValue().ugt(KnownZeroBits))
- return SDValue();
+ if (ShAmtC->getAPIntValue().ugt(KnownZeroBits)) {
+ // If the shift is too large, then see if we can deduce that the
+ // shift is safe anyway.
+ // Create a mask that has ones for the bits being shifted out.
+ APInt ShiftOutMask =
+ APInt::getHighBitsSet(ShVal.getValueSizeInBits(),
+ ShAmtC->getAPIntValue().getZExtValue());
+
+ // Check if the bits being shifted out are known to be zero.
+ if (!DAG.MaskedValueIsZero(ShVal, ShiftOutMask))
+ return SDValue();
+ }
}
// Ensure that the shift amount is wide enough for the shifted value.
@@ -23450,9 +23511,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
SmallVector<SDValue, 8> Ops;
-
EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
- SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
// Keep track of what we encounter.
bool AnyInteger = false;
@@ -23462,7 +23521,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
!Op.getOperand(0).getValueType().isVector())
Ops.push_back(Op.getOperand(0));
else if (ISD::UNDEF == Op.getOpcode())
- Ops.push_back(ScalarUndef);
+ Ops.push_back(DAG.getNode(ISD::UNDEF, DL, SVT));
else
return SDValue();
@@ -23482,13 +23541,12 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
// Replace UNDEFs by another scalar UNDEF node, of the final desired type.
if (AnyFP) {
SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
- ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
if (AnyInteger) {
for (SDValue &Op : Ops) {
if (Op.getValueType() == SVT)
continue;
if (Op.isUndef())
- Op = ScalarUndef;
+ Op = DAG.getNode(ISD::UNDEF, DL, SVT);
else
Op = DAG.getBitcast(SVT, Op);
}
@@ -24201,7 +24259,7 @@ static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
// TODO: Use "BaseIndexOffset" to make this more effective.
SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), Offset, DL);
- LocationSize StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
+ LocationSize StoreSize = LocationSize::precise(VT.getStoreSize());
MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO;
if (Offset.isScalable()) {
@@ -26121,6 +26179,13 @@ SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
}
}
+ // Handle case where we've ended up inserting back into the source vector
+ // we extracted the subvector from.
+ // insert_subvector(N0, extract_subvector(N0, N2), N2) --> N0
+ if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && N1.getOperand(0) == N0 &&
+ N1.getOperand(1) == N2)
+ return N0;
+
// Simplify scalar inserts into an undef vector:
// insert_subvector undef, (splat X), N2 -> splat X
if (N0.isUndef() && N1.getOpcode() == ISD::SPLAT_VECTOR)
@@ -27846,14 +27911,10 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
: (LSN->getAddressingMode() == ISD::PRE_DEC)
? -1 * C->getSExtValue()
: 0;
- LocationSize Size =
- MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize());
- return {LSN->isVolatile(),
- LSN->isAtomic(),
- LSN->getBasePtr(),
- Offset /*base offset*/,
- Size,
- LSN->getMemOperand()};
+ TypeSize Size = LSN->getMemoryVT().getStoreSize();
+ return {LSN->isVolatile(), LSN->isAtomic(),
+ LSN->getBasePtr(), Offset /*base offset*/,
+ LocationSize::precise(Size), LSN->getMemOperand()};
}
if (const auto *LN = cast<LifetimeSDNode>(N))
return {false /*isVolatile*/,
@@ -27895,6 +27956,13 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
return false;
}
+ // If NumBytes is scalable and offset is not 0, conservatively return may
+ // alias
+ if ((MUC0.NumBytes.hasValue() && MUC0.NumBytes.isScalable() &&
+ MUC0.Offset != 0) ||
+ (MUC1.NumBytes.hasValue() && MUC1.NumBytes.isScalable() &&
+ MUC1.Offset != 0))
+ return true;
// Try to prove that there is aliasing, or that there is no aliasing. Either
// way, we can return now. If nothing can be proved, proceed with more tests.
bool IsAlias;
@@ -27925,18 +27993,22 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
Align OrigAlignment1 = MUC1.MMO->getBaseAlign();
LocationSize Size0 = MUC0.NumBytes;
LocationSize Size1 = MUC1.NumBytes;
+
if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
- Size0.hasValue() && Size1.hasValue() && Size0 == Size1 &&
- OrigAlignment0 > Size0.getValue() &&
- SrcValOffset0 % Size0.getValue() == 0 &&
- SrcValOffset1 % Size1.getValue() == 0) {
+ Size0.hasValue() && Size1.hasValue() && !Size0.isScalable() &&
+ !Size1.isScalable() && Size0 == Size1 &&
+ OrigAlignment0 > Size0.getValue().getKnownMinValue() &&
+ SrcValOffset0 % Size0.getValue().getKnownMinValue() == 0 &&
+ SrcValOffset1 % Size1.getValue().getKnownMinValue() == 0) {
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();
// There is no overlap between these relatively aligned accesses of
// similar size. Return no alias.
- if ((OffAlign0 + (int64_t)Size0.getValue()) <= OffAlign1 ||
- (OffAlign1 + (int64_t)Size1.getValue()) <= OffAlign0)
+ if ((OffAlign0 + static_cast<int64_t>(
+ Size0.getValue().getKnownMinValue())) <= OffAlign1 ||
+ (OffAlign1 + static_cast<int64_t>(
+ Size1.getValue().getKnownMinValue())) <= OffAlign0)
return false;
}
@@ -27953,12 +28025,18 @@ bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {
Size0.hasValue() && Size1.hasValue()) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
- int64_t Overlap0 = Size0.getValue() + SrcValOffset0 - MinOffset;
- int64_t Overlap1 = Size1.getValue() + SrcValOffset1 - MinOffset;
+ int64_t Overlap0 =
+ Size0.getValue().getKnownMinValue() + SrcValOffset0 - MinOffset;
+ int64_t Overlap1 =
+ Size1.getValue().getKnownMinValue() + SrcValOffset1 - MinOffset;
+ LocationSize Loc0 =
+ Size0.isScalable() ? Size0 : LocationSize::precise(Overlap0);
+ LocationSize Loc1 =
+ Size1.isScalable() ? Size1 : LocationSize::precise(Overlap1);
if (AA->isNoAlias(
- MemoryLocation(MUC0.MMO->getValue(), Overlap0,
+ MemoryLocation(MUC0.MMO->getValue(), Loc0,
UseTBAA ? MUC0.MMO->getAAInfo() : AAMDNodes()),
- MemoryLocation(MUC1.MMO->getValue(), Overlap1,
+ MemoryLocation(MUC1.MMO->getValue(), Loc1,
UseTBAA ? MUC1.MMO->getAAInfo() : AAMDNodes())))
return false;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index ce5a7ea09d47..dae827883b29 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -752,17 +752,25 @@ FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
}
bool FastISel::selectPatchpoint(const CallInst *I) {
- // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
- // i32 <numBytes>,
- // i8* <target>,
- // i32 <numArgs>,
- // [Args...],
- // [live variables...])
+ // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
+ // i32 <numBytes>,
+ // i8* <target>,
+ // i32 <numArgs>,
+ // [Args...],
+ // [live variables...])
CallingConv::ID CC = I->getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
bool HasDef = !I->getType()->isVoidTy();
Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
+ // Check if we can lower the return type when using anyregcc.
+ MVT ValueType;
+ if (IsAnyRegCC && HasDef) {
+ ValueType = TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
+ if (ValueType == MVT::Other)
+ return false;
+ }
+
// Get the real number of arguments participating in the call <numArgs>
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
"Expected a constant integer.");
@@ -790,7 +798,8 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
// Add an explicit result reg if we use the anyreg calling convention.
if (IsAnyRegCC && HasDef) {
assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
- CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
+ assert(ValueType.isValid());
+ CLI.ResultReg = createResultReg(TLI.getRegClassFor(ValueType));
CLI.NumResultRegs = 1;
Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
}
@@ -1468,7 +1477,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
case Intrinsic::experimental_stackmap:
return selectStackmap(II);
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
return selectPatchpoint(II);
case Intrinsic::xray_customevent:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 808e3c622033..24f69ea1b742 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -118,14 +118,7 @@ private:
void LegalizeLoadOps(SDNode *Node);
void LegalizeStoreOps(SDNode *Node);
- /// Some targets cannot handle a variable
- /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
- /// is necessary to spill the vector being inserted into to memory, perform
- /// the insert there, and then read the result back.
- SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
- const SDLoc &dl);
- SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx,
- const SDLoc &dl);
+ SDValue ExpandINSERT_VECTOR_ELT(SDValue Op);
/// Return a vector shuffle operation which
/// performs the same shuffe in terms of order or result bytes, but on a type
@@ -378,49 +371,12 @@ SDValue SelectionDAGLegalize::ExpandConstant(ConstantSDNode *CP) {
return Result;
}
-/// Some target cannot handle a variable insertion index for the
-/// INSERT_VECTOR_ELT instruction. In this case, it
-/// is necessary to spill the vector being inserted into to memory, perform
-/// the insert there, and then read the result back.
-SDValue SelectionDAGLegalize::PerformInsertVectorEltInMemory(SDValue Vec,
- SDValue Val,
- SDValue Idx,
- const SDLoc &dl) {
- SDValue Tmp1 = Vec;
- SDValue Tmp2 = Val;
- SDValue Tmp3 = Idx;
-
- // If the target doesn't support this, we have to spill the input vector
- // to a temporary stack slot, update the element, then reload it. This is
- // badness. We could also load the value into a vector register (either
- // with a "move to register" or "extload into register" instruction, then
- // permute it into place, if the idx is a constant and if the idx is
- // supported by the target.
- EVT VT = Tmp1.getValueType();
- EVT EltVT = VT.getVectorElementType();
- SDValue StackPtr = DAG.CreateStackTemporary(VT);
-
- int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
-
- // Store the vector.
- SDValue Ch = DAG.getStore(
- DAG.getEntryNode(), dl, Tmp1, StackPtr,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI));
-
- SDValue StackPtr2 = TLI.getVectorElementPointer(DAG, StackPtr, VT, Tmp3);
-
- // Store the scalar value.
- Ch = DAG.getTruncStore(
- Ch, dl, Tmp2, StackPtr2,
- MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), EltVT);
- // Load the updated vector.
- return DAG.getLoad(VT, dl, Ch, StackPtr, MachinePointerInfo::getFixedStack(
- DAG.getMachineFunction(), SPFI));
-}
+SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Op) {
+ SDValue Vec = Op.getOperand(0);
+ SDValue Val = Op.getOperand(1);
+ SDValue Idx = Op.getOperand(2);
+ SDLoc dl(Op);
-SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
- SDValue Idx,
- const SDLoc &dl) {
if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
// SCALAR_TO_VECTOR requires that the type of the value being inserted
// match the element type of the vector being created, except for
@@ -442,7 +398,7 @@ SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec, ShufOps);
}
}
- return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
+ return ExpandInsertToVectorThroughStack(Op);
}
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
@@ -1490,7 +1446,7 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
// Store the value to a temporary stack slot, then LOAD the returned part.
EVT VecVT = Vec.getValueType();
- EVT SubVecVT = Part.getValueType();
+ EVT PartVT = Part.getValueType();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo =
@@ -1499,14 +1455,28 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
// First store the whole vector.
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo);
+ // Freeze the index so we don't poison the clamping code we're about to emit.
+ Idx = DAG.getFreeze(Idx);
+
// Then store the inserted part.
- SDValue SubStackPtr =
- TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, SubVecVT, Idx);
+ if (PartVT.isVector()) {
+ SDValue SubStackPtr =
+ TLI.getVectorSubVecPointer(DAG, StackPtr, VecVT, PartVT, Idx);
+
+ // Store the subvector.
+ Ch = DAG.getStore(
+ Ch, dl, Part, SubStackPtr,
+ MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
+ } else {
+ SDValue SubStackPtr =
+ TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
- // Store the subvector.
- Ch = DAG.getStore(
- Ch, dl, Part, SubStackPtr,
- MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
+ // Store the scalar value.
+ Ch = DAG.getTruncStore(
+ Ch, dl, Part, SubStackPtr,
+ MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
+ VecVT.getVectorElementType());
+ }
// Finally, load the updated vector.
return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo);
@@ -3420,9 +3390,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
break;
case ISD::INSERT_VECTOR_ELT:
- Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
- Node->getOperand(1),
- Node->getOperand(2), dl));
+ Results.push_back(ExpandINSERT_VECTOR_ELT(SDValue(Node, 0)));
break;
case ISD::VECTOR_SHUFFLE: {
SmallVector<int, 32> NewMask;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9d73a42df2a4..e8d1ac1d3a91 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2031,7 +2031,8 @@ SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) {
return getStepVector(DL, ResVT, One);
}
-SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) {
+SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT,
+ const APInt &StepVal) {
assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
if (ResVT.isScalableVector())
return getNode(
@@ -5042,8 +5043,9 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
// If Op can't create undef/poison and none of its operands are undef/poison
// then Op is never undef/poison.
- // NOTE: TargetNodes should handle this in themselves in
- // isGuaranteedNotToBeUndefOrPoisonForTargetNode.
+ // NOTE: TargetNodes can handle this in themselves in
+ // isGuaranteedNotToBeUndefOrPoisonForTargetNode or let
+ // TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode handle it.
return !canCreateUndefOrPoison(Op, PoisonOnly, /*ConsiderFlags*/ true,
Depth) &&
all_of(Op->ops(), [&](SDValue V) {
@@ -5360,10 +5362,44 @@ bool SelectionDAG::isKnownNeverZero(SDValue Op, unsigned Depth) const {
return isKnownNeverZero(Op.getOperand(1), Depth + 1) ||
isKnownNeverZero(Op.getOperand(0), Depth + 1);
- // TODO for smin/smax: If either operand is known negative/positive
+ // For smin/smax: If either operand is known negative/positive
// respectively we don't need the other to be known at all.
- case ISD::SMAX:
- case ISD::SMIN:
+ case ISD::SMAX: {
+ KnownBits Op1 = computeKnownBits(Op.getOperand(1), Depth + 1);
+ if (Op1.isStrictlyPositive())
+ return true;
+
+ KnownBits Op0 = computeKnownBits(Op.getOperand(0), Depth + 1);
+ if (Op0.isStrictlyPositive())
+ return true;
+
+ if (Op1.isNonZero() && Op0.isNonZero())
+ return true;
+
+ if (KnownBits::smax(Op0, Op1).isNonZero())
+ return true;
+
+ return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
+ isKnownNeverZero(Op.getOperand(0), Depth + 1);
+ }
+ case ISD::SMIN: {
+ KnownBits Op1 = computeKnownBits(Op.getOperand(1), Depth + 1);
+ if (Op1.isNegative())
+ return true;
+
+ KnownBits Op0 = computeKnownBits(Op.getOperand(0), Depth + 1);
+ if (Op0.isNegative())
+ return true;
+
+ if (Op1.isNonZero() && Op0.isNonZero())
+ return true;
+
+ if (KnownBits::smin(Op0, Op1).isNonZero())
+ return true;
+
+ return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
+ isKnownNeverZero(Op.getOperand(0), Depth + 1);
+ }
case ISD::UMIN:
return isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
isKnownNeverZero(Op.getOperand(0), Depth + 1);
@@ -8404,9 +8440,7 @@ SDValue SelectionDAG::getMemIntrinsicNode(
EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
MachineMemOperand::Flags Flags, LocationSize Size,
const AAMDNodes &AAInfo) {
- if (Size.hasValue() && MemVT.isScalableVector())
- Size = LocationSize::beforeOrAfterPointer();
- else if (Size.hasValue() && !Size.getValue())
+ if (Size.hasValue() && !Size.getValue())
Size = LocationSize::precise(MemVT.getStoreSize());
MachineFunction &MF = getMachineFunction();
@@ -8569,7 +8603,7 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
- LocationSize Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
+ LocationSize Size = LocationSize::precise(MemVT.getStoreSize());
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
Alignment, AAInfo, Ranges);
@@ -8690,8 +8724,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
MachineFunction &MF = getMachineFunction();
- LocationSize Size =
- MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
+ LocationSize Size = LocationSize::precise(Val.getValueType().getStoreSize());
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
return getStore(Chain, dl, Val, Ptr, MMO);
@@ -8744,8 +8777,8 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
- PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
- Alignment, AAInfo);
+ PtrInfo, MMOFlags, LocationSize::precise(SVT.getStoreSize()), Alignment,
+ AAInfo);
return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}
@@ -8839,7 +8872,7 @@ SDValue SelectionDAG::getLoadVP(
if (PtrInfo.V.isNull())
PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
- LocationSize Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
+ LocationSize Size = LocationSize::precise(MemVT.getStoreSize());
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
Alignment, AAInfo, Ranges);
@@ -8992,8 +9025,8 @@ SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MF.getMachineMemOperand(
- PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
- Alignment, AAInfo);
+ PtrInfo, MMOFlags, LocationSize::precise(SVT.getStoreSize()), Alignment,
+ AAInfo);
return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO,
IsCompressing);
}
@@ -11732,10 +11765,9 @@ MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
// We check here that the size of the memory operand fits within the size of
// the MMO. This is because the MMO might indicate only a possible address
// range instead of specifying the affected memory addresses precisely.
- // TODO: Make MachineMemOperands aware of scalable vectors.
assert(
(!MMO->getType().isValid() ||
- memvt.getStoreSize().getKnownMinValue() <= MMO->getSize().getValue()) &&
+ TypeSize::isKnownLE(memvt.getStoreSize(), MMO->getSize().getValue())) &&
"Size mismatch!");
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
index 9670c3ac8430..f2ab88851b78 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
@@ -106,8 +106,6 @@ bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
int64_t PtrDiff;
if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff)) {
// If the size of memory access is unknown, do not use it to analysis.
- // One example of unknown size memory access is to load/store scalable
- // vector objects on the stack.
// BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
// following situations arise:
if (PtrDiff >= 0 && NumBytes0.hasValue() && !NumBytes0.isScalable()) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 51c9af6d62d9..618bdee7f405 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3331,7 +3331,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
EHPadMBB->setMachineBlockAddressTaken();
break;
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
visitPatchpoint(I, EHPadBB);
break;
case Intrinsic::experimental_gc_statepoint:
@@ -4962,7 +4962,8 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(AS), MachineMemOperand::MOLoad,
- LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(), Ranges);
+ LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(),
+ Ranges);
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
@@ -6006,12 +6007,15 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
// incorrect hoisting of the DBG_VALUE to the function entry).
// Notice that we allow one dbg.value per IR level argument, to accommodate
// for the situation with fragments above.
+ // If there is no node for the value being handled, we return true to skip
+ // the normal generation of debug info, as it would kill existing debug
+ // info for the parameter in case of duplicates.
if (VariableIsFunctionInputArg) {
unsigned ArgNo = Arg->getArgNo();
if (ArgNo >= FuncInfo.DescribedArgs.size())
FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
- return false;
+ return !NodeMap[V].getNode();
FuncInfo.DescribedArgs.set(ArgNo);
}
}
@@ -7449,7 +7453,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
visitStackmap(I);
return;
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
visitPatchpoint(I);
return;
case Intrinsic::experimental_gc_statepoint:
@@ -8608,8 +8612,6 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
auto *Token = Bundle->Inputs[0].get();
ConvControlToken = getValue(Token);
- } else {
- ConvControlToken = DAG.getUNDEF(MVT::Untyped);
}
TargetLowering::CallLoweringInfo CLI(DAG);
@@ -10261,12 +10263,12 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
/// Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
const BasicBlock *EHPadBB) {
- // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
- // i32 <numBytes>,
- // i8* <target>,
- // i32 <numArgs>,
- // [Args...],
- // [live variables...])
+ // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
+ // i32 <numBytes>,
+ // i8* <target>,
+ // i32 <numArgs>,
+ // [Args...],
+ // [live variables...])
CallingConv::ID CC = CB.getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
@@ -10505,14 +10507,14 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
CLI.Ins.clear();
Type *OrigRetTy = CLI.RetTy;
SmallVector<EVT, 4> RetTys;
- SmallVector<uint64_t, 4> Offsets;
+ SmallVector<TypeSize, 4> Offsets;
auto &DL = CLI.DAG.getDataLayout();
- ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
+ ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
if (CLI.IsPostTypeLegalization) {
// If we are lowering a libcall after legalization, split the return type.
SmallVector<EVT, 4> OldRetTys;
- SmallVector<uint64_t, 4> OldOffsets;
+ SmallVector<TypeSize, 4> OldOffsets;
RetTys.swap(OldRetTys);
Offsets.swap(OldOffsets);
@@ -10524,7 +10526,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
RetTys.append(NumRegs, RegisterVT);
for (unsigned j = 0; j != NumRegs; ++j)
- Offsets.push_back(Offset + j * RegisterVTByteSZ);
+ Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 57f8fc409de4..962f0d98e3be 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3786,7 +3786,15 @@ bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op"
" is a target node!");
- return false;
+
+ // If Op can't create undef/poison and none of its operands are undef/poison
+ // then Op is never undef/poison.
+ return !canCreateUndefOrPoisonForTargetNode(Op, DemandedElts, DAG, PoisonOnly,
+ /*ConsiderFlags*/ true, Depth) &&
+ all_of(Op->ops(), [&](SDValue V) {
+ return DAG.isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly,
+ Depth + 1);
+ });
}
bool TargetLowering::canCreateUndefOrPoisonForTargetNode(
@@ -6908,6 +6916,11 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
// Q = floor((2 * A) / (2^K))
APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
+ assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
+ "We are expecting that A is always less than all-ones for SVT");
+ assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
+ "We are expecting that K is always less than all-ones for ShSVT");
+
// If D was a power of two, apply the alternate constant derivation.
if (D0.isOne()) {
// A = 2^(W-1)
@@ -6916,11 +6929,6 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
Q = APInt::getAllOnes(W - K).zext(W);
}
- assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
- "We are expecting that A is always less than all-ones for SVT");
- assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
- "We are expecting that K is always less than all-ones for ShSVT");
-
// If the divisor is 1 the result can be constant-folded. Likewise, we
// don't care about INT_MIN lanes, those can be set to undef if appropriate.
if (D.isOne()) {
@@ -8702,11 +8710,21 @@ SDValue TargetLowering::expandCTPOP(SDNode *Node, SelectionDAG &DAG) const {
}
// v = (v * 0x01010101...) >> (Len - 8)
- SDValue Mask01 =
- DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
- return DAG.getNode(ISD::SRL, dl, VT,
- DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
- DAG.getConstant(Len - 8, dl, ShVT));
+ SDValue V;
+ if (isOperationLegalOrCustomOrPromote(
+ ISD::MUL, getTypeToTransformTo(*DAG.getContext(), VT))) {
+ SDValue Mask01 =
+ DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
+ V = DAG.getNode(ISD::MUL, dl, VT, Op, Mask01);
+ } else {
+ V = Op;
+ for (unsigned Shift = 8; Shift < Len; Shift *= 2) {
+ SDValue ShiftC = DAG.getShiftAmountConstant(Shift, VT, dl);
+ V = DAG.getNode(ISD::ADD, dl, VT, V,
+ DAG.getNode(ISD::SHL, dl, VT, V, ShiftC));
+ }
+ }
+ return DAG.getNode(ISD::SRL, dl, VT, V, DAG.getConstant(Len - 8, dl, ShVT));
}
SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
@@ -8759,10 +8777,22 @@ SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
return Op;
// v = (v * 0x01010101...) >> (Len - 8)
- SDValue Mask01 =
- DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
- return DAG.getNode(ISD::VP_LSHR, dl, VT,
- DAG.getNode(ISD::VP_MUL, dl, VT, Op, Mask01, Mask, VL),
+ SDValue V;
+ if (isOperationLegalOrCustomOrPromote(
+ ISD::VP_MUL, getTypeToTransformTo(*DAG.getContext(), VT))) {
+ SDValue Mask01 =
+ DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
+ V = DAG.getNode(ISD::VP_MUL, dl, VT, Op, Mask01, Mask, VL);
+ } else {
+ V = Op;
+ for (unsigned Shift = 8; Shift < Len; Shift *= 2) {
+ SDValue ShiftC = DAG.getShiftAmountConstant(Shift, VT, dl);
+ V = DAG.getNode(ISD::VP_ADD, dl, VT, V,
+ DAG.getNode(ISD::VP_SHL, dl, VT, V, ShiftC, Mask, VL),
+ Mask, VL);
+ }
+ }
+ return DAG.getNode(ISD::VP_LSHR, dl, VT, V,
DAG.getConstant(Len - 8, dl, ShVT), Mask, VL);
}
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 9990556f89ed..f64ded4f2cf9 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -823,6 +823,12 @@ void TargetLoweringBase::initActions() {
std::fill(std::begin(TargetDAGCombineArray),
std::end(TargetDAGCombineArray), 0);
+ // Let extending atomic loads be unsupported by default.
+ for (MVT ValVT : MVT::all_valuetypes())
+ for (MVT MemVT : MVT::all_valuetypes())
+ setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
+ Expand);
+
// We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
// remove this and targets should individually set these types if not legal.
for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
@@ -2073,7 +2079,8 @@ void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
// FreeBSD has "__stack_chk_guard" defined externally on libc.so
if (M.getDirectAccessExternalData() &&
!TM.getTargetTriple().isWindowsGNUEnvironment() &&
- !TM.getTargetTriple().isOSFreeBSD() &&
+ !(TM.getTargetTriple().isPPC64() &&
+ TM.getTargetTriple().isOSFreeBSD()) &&
(!TM.getTargetTriple().isOSDarwin() ||
TM.getRelocationModel() == Reloc::Static))
GV->setDSOLocal(true);
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index c9503fcb77bb..4e06393f4cc1 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -50,20 +50,16 @@ static cl::opt<unsigned>
"high compile time cost in global splitting."),
cl::init(5000));
-TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
- regclass_iterator RCB, regclass_iterator RCE,
- const char *const *SRINames,
- const LaneBitmask *SRILaneMasks,
- LaneBitmask SRICoveringLanes,
- const RegClassInfo *const RCIs,
- const MVT::SimpleValueType *const RCVTLists,
- unsigned Mode)
- : InfoDesc(ID), SubRegIndexNames(SRINames),
- SubRegIndexLaneMasks(SRILaneMasks),
- RegClassBegin(RCB), RegClassEnd(RCE),
- CoveringLanes(SRICoveringLanes),
- RCInfos(RCIs), RCVTLists(RCVTLists), HwMode(Mode) {
-}
+TargetRegisterInfo::TargetRegisterInfo(
+ const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
+ regclass_iterator RCE, const char *const *SRINames,
+ const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
+ LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
+ const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
+ : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
+ SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
+ CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
+ HwMode(Mode) {}
TargetRegisterInfo::~TargetRegisterInfo() = default;
@@ -596,6 +592,18 @@ bool TargetRegisterInfo::getCoveringSubRegIndexes(
return BestIdx;
}
+unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
+ assert(Idx && Idx < getNumSubRegIndices() &&
+ "This is not a subregister index");
+ return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
+}
+
+unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
+ assert(Idx && Idx < getNumSubRegIndices() &&
+ "This is not a subregister index");
+ return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
+}
+
Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
const MachineRegisterInfo *MRI) const {
diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
index b0830308908d..89aea3a29161 100644
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -643,7 +643,7 @@ void IRPromoter::ConvertTruncs() {
ConstantInt *Mask =
ConstantInt::get(SrcTy, APInt::getMaxValue(NumBits).getZExtValue());
Value *Masked = Builder.CreateAnd(Trunc->getOperand(0), Mask);
- if (SrcTy != ExtTy)
+ if (SrcTy->getBitWidth() > ExtTy->getBitWidth())
Masked = Builder.CreateTrunc(Masked, ExtTy);
if (auto *I = dyn_cast<Instruction>(Masked))
diff --git a/llvm/lib/DebugInfo/LogicalView/Readers/LVDWARFReader.cpp b/llvm/lib/DebugInfo/LogicalView/Readers/LVDWARFReader.cpp
index 91e5a037054d..6a97bed9e3a8 100644
--- a/llvm/lib/DebugInfo/LogicalView/Readers/LVDWARFReader.cpp
+++ b/llvm/lib/DebugInfo/LogicalView/Readers/LVDWARFReader.cpp
@@ -878,7 +878,7 @@ Error LVDWARFReader::createScopes() {
// Additional discussions here:
// https://www.mail-archive.com/dwarf-discuss@lists.dwarfstd.org/msg00883.html
- // The ELF Reader is expecting the files are 1-indexed, so using
+ // The DWARF reader is expecting the files are 1-indexed, so using
// the .debug_line header information decide if the indexed require
// an internal adjustment.
@@ -918,7 +918,7 @@ Error LVDWARFReader::createScopes() {
// DWARF-5 -> Increment index.
return true;
};
- // The ELF reader expects the indexes as 1-indexed.
+ // The DWARF reader expects the indexes as 1-indexed.
IncrementFileIndex = DeduceIncrementFileIndex();
DWARFDie UnitDie = CU->getUnitDIE();
diff --git a/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h b/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
index 159880e4b152..4a492ee2f7d0 100644
--- a/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
+++ b/llvm/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
@@ -108,6 +108,48 @@ createDefineExternalSectionStartAndEndSymbolsPass(
std::forward<SymbolIdentifierFunction>(F));
}
+/// ELF section start/end symbol detection.
+inline SectionRangeSymbolDesc
+identifyELFSectionStartAndEndSymbols(LinkGraph &G, Symbol &Sym) {
+ constexpr StringRef StartSymbolPrefix = "__start_";
+ constexpr StringRef EndSymbolPrefix = "__stop_";
+
+ auto SymName = Sym.getName();
+ if (SymName.starts_with(StartSymbolPrefix)) {
+ if (auto *Sec =
+ G.findSectionByName(SymName.drop_front(StartSymbolPrefix.size())))
+ return {*Sec, true};
+ } else if (SymName.starts_with(EndSymbolPrefix)) {
+ if (auto *Sec =
+ G.findSectionByName(SymName.drop_front(EndSymbolPrefix.size())))
+ return {*Sec, false};
+ }
+ return {};
+}
+
+/// MachO section start/end symbol detection.
+inline SectionRangeSymbolDesc
+identifyMachOSectionStartAndEndSymbols(LinkGraph &G, Symbol &Sym) {
+ constexpr StringRef StartSymbolPrefix = "section$start$";
+ constexpr StringRef EndSymbolPrefix = "section$end$";
+
+ auto SymName = Sym.getName();
+ if (SymName.starts_with(StartSymbolPrefix)) {
+ auto [SegName, SecName] =
+ SymName.drop_front(StartSymbolPrefix.size()).split('$');
+ std::string SectionName = (SegName + "," + SecName).str();
+ if (auto *Sec = G.findSectionByName(SectionName))
+ return {*Sec, true};
+ } else if (SymName.starts_with(EndSymbolPrefix)) {
+ auto [SegName, SecName] =
+ SymName.drop_front(EndSymbolPrefix.size()).split('$');
+ std::string SectionName = (SegName + "," + SecName).str();
+ if (auto *Sec = G.findSectionByName(SectionName))
+ return {*Sec, false};
+ }
+ return {};
+}
+
} // end namespace jitlink
} // end namespace llvm
diff --git a/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h b/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
index e1b11dfcfc21..5dae60062939 100644
--- a/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
+++ b/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
@@ -193,7 +193,7 @@ ELFLinkGraphBuilder<ELFT>::ELFLinkGraphBuilder(
StringRef FileName, LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
: ELFLinkGraphBuilderBase(std::make_unique<LinkGraph>(
FileName.str(), Triple(std::move(TT)), std::move(Features),
- ELFT::Is64Bits ? 8 : 4, llvm::endianness(ELFT::TargetEndianness),
+ ELFT::Is64Bits ? 8 : 4, llvm::endianness(ELFT::Endianness),
std::move(GetEdgeKindName))),
Obj(Obj) {
LLVM_DEBUG(
diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
index f17b2c626ac2..6b03bb3c90b2 100644
--- a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
@@ -11,15 +11,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/JITLink/ELF_aarch64.h"
-#include "EHFrameSupportImpl.h"
-#include "ELFLinkGraphBuilder.h"
-#include "JITLinkGeneric.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
#include "llvm/ExecutionEngine/JITLink/aarch64.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/Endian.h"
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
#define DEBUG_TYPE "jitlink"
using namespace llvm;
@@ -611,6 +613,11 @@ void link_ELF_aarch64(std::unique_ptr<LinkGraph> G,
else
Config.PrePrunePasses.push_back(markAllSymbolsLive);
+ // Resolve any external section start / end symbols.
+ Config.PostAllocationPasses.push_back(
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ identifyELFSectionStartAndEndSymbols));
+
// Add an in-place GOT/TLS/Stubs build pass.
Config.PostPrunePasses.push_back(buildTables_ELF_aarch64);
}
diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
index a1fe9c5fcd73..52dd83d9040f 100644
--- a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -343,24 +343,6 @@ createLinkGraphFromELFObject_x86_64(MemoryBufferRef ObjectBuffer) {
.buildGraph();
}
-static SectionRangeSymbolDesc
-identifyELFSectionStartAndEndSymbols(LinkGraph &G, Symbol &Sym) {
- constexpr StringRef StartSymbolPrefix = "__start";
- constexpr StringRef EndSymbolPrefix = "__end";
-
- auto SymName = Sym.getName();
- if (SymName.starts_with(StartSymbolPrefix)) {
- if (auto *Sec =
- G.findSectionByName(SymName.drop_front(StartSymbolPrefix.size())))
- return {*Sec, true};
- } else if (SymName.starts_with(EndSymbolPrefix)) {
- if (auto *Sec =
- G.findSectionByName(SymName.drop_front(EndSymbolPrefix.size())))
- return {*Sec, false};
- }
- return {};
-}
-
void link_ELF_x86_64(std::unique_ptr<LinkGraph> G,
std::unique_ptr<JITLinkContext> Ctx) {
PassConfiguration Config;
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
index 7f743dba60a9..b103a9ca98e1 100644
--- a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -338,7 +338,8 @@ void LinkGraph::dump(raw_ostream &OS) {
OS << "\nExternal symbols:\n";
if (!external_symbols().empty()) {
for (auto *Sym : external_symbols())
- OS << " " << Sym->getAddress() << ": " << *Sym << "\n";
+ OS << " " << Sym->getAddress() << ": " << *Sym
+ << (Sym->isWeaklyReferenced() ? " (weakly referenced)" : "") << "\n";
} else
OS << " none\n";
}
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
index 556031baaaed..8733306bab6b 100644
--- a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -14,6 +14,7 @@
#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+#include "DefineExternalSectionStartAndEndSymbols.h"
#include "MachOLinkGraphBuilder.h"
#define DEBUG_TYPE "jitlink"
@@ -593,6 +594,11 @@ void link_MachO_arm64(std::unique_ptr<LinkGraph> G,
Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_arm64());
Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_arm64());
+ // Resolve any external section start / end symbols.
+ Config.PostAllocationPasses.push_back(
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ identifyMachOSectionStartAndEndSymbols));
+
// Add an in-place GOT/Stubs pass.
Config.PostPrunePasses.push_back(buildTables_MachO_arm64);
}
diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
index eeca27771ad6..2c69d61316a8 100644
--- a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
@@ -14,6 +14,7 @@
#include "llvm/ExecutionEngine/JITLink/DWARFRecordSectionSplitter.h"
#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "DefineExternalSectionStartAndEndSymbols.h"
#include "MachOLinkGraphBuilder.h"
#define DEBUG_TYPE "jitlink"
@@ -516,6 +517,11 @@ void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
else
Config.PrePrunePasses.push_back(markAllSymbolsLive);
+ // Resolve any external section start / end symbols.
+ Config.PostAllocationPasses.push_back(
+ createDefineExternalSectionStartAndEndSymbolsPass(
+ identifyMachOSectionStartAndEndSymbols));
+
// Add an in-place GOT/Stubs pass.
Config.PostPrunePasses.push_back(buildGOTAndStubs_MachO_x86_64);
diff --git a/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp b/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
index 88cc3b04fb64..ec2187bad0f2 100644
--- a/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
@@ -7,8 +7,11 @@
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
#include "llvm/Support/Error.h"
+#define DEBUG_TYPE "orc"
+
namespace llvm {
namespace orc {
@@ -31,6 +34,11 @@ Error EPCDynamicLibrarySearchGenerator::tryToGenerate(
if (Symbols.empty())
return Error::success();
+ LLVM_DEBUG({
+ dbgs() << "EPCDynamicLibrarySearchGenerator trying to generate "
+ << Symbols << "\n";
+ });
+
SymbolLookupSet LookupSymbols;
for (auto &KV : Symbols) {
@@ -44,8 +52,12 @@ Error EPCDynamicLibrarySearchGenerator::tryToGenerate(
// Copy-capture LookupSymbols, since LookupRequest keeps a reference.
EPC.lookupSymbolsAsync(Request, [this, &JD, LS = std::move(LS),
LookupSymbols](auto Result) mutable {
- if (!Result)
+ if (!Result) {
+ LLVM_DEBUG({
+ dbgs() << "EPCDynamicLibrarySearchGenerator lookup failed due to error";
+ });
return LS.continueLookup(Result.takeError());
+ }
assert(Result->size() == 1 && "Results for more than one library returned");
assert(Result->front().size() == LookupSymbols.size() &&
@@ -59,6 +71,11 @@ Error EPCDynamicLibrarySearchGenerator::tryToGenerate(
++ResultI;
}
+ LLVM_DEBUG({
+ dbgs() << "EPCDynamicLibrarySearchGenerator lookup returned "
+ << NewSymbols << "\n";
+ });
+
// If there were no resolved symbols bail out.
if (NewSymbols.empty())
return LS.continueLookup(Error::success());
diff --git a/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
index 3952445bb1aa..670c8cf996fd 100644
--- a/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -545,7 +545,7 @@ DLLImportDefinitionGenerator::getTargetPointerSize(const Triple &TT) {
}
Expected<llvm::endianness>
-DLLImportDefinitionGenerator::getTargetEndianness(const Triple &TT) {
+DLLImportDefinitionGenerator::getEndianness(const Triple &TT) {
switch (TT.getArch()) {
case Triple::x86_64:
return llvm::endianness::little;
@@ -562,7 +562,7 @@ DLLImportDefinitionGenerator::createStubsGraph(const SymbolMap &Resolved) {
auto PointerSize = getTargetPointerSize(TT);
if (!PointerSize)
return PointerSize.takeError();
- auto Endianness = getTargetEndianness(TT);
+ auto Endianness = getEndianness(TT);
if (!Endianness)
return Endianness.takeError();
diff --git a/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp b/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
index 8a4145a6b02a..7529d9cef67e 100644
--- a/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
@@ -26,11 +26,13 @@ extern "C" {
// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
// debugger checks the version before we can set it during runtime.
+LLVM_ATTRIBUTE_VISIBILITY_DEFAULT
struct jit_descriptor __jit_debug_descriptor = {JitDescriptorVersion, 0,
nullptr, nullptr};
// Debuggers that implement the GDB JIT interface put a special breakpoint in
// this function.
+LLVM_ATTRIBUTE_VISIBILITY_DEFAULT
LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
// The noinline and the asm prevent calls to this function from being
// optimized out.
diff --git a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp
index fec1bdbe9d8c..7241d15ed1c6 100644
--- a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp
+++ b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp
@@ -186,57 +186,62 @@ GlobalVariable *createBinDesc(Module &M, ArrayRef<ArrayRef<char>> Bufs,
".omp_offloading.descriptor" + Suffix);
}
-void createRegisterFunction(Module &M, GlobalVariable *BinDesc,
- StringRef Suffix) {
+Function *createUnregisterFunction(Module &M, GlobalVariable *BinDesc,
+ StringRef Suffix) {
LLVMContext &C = M.getContext();
auto *FuncTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg*/ false);
- auto *Func = Function::Create(FuncTy, GlobalValue::InternalLinkage,
- ".omp_offloading.descriptor_reg" + Suffix, &M);
+ auto *Func =
+ Function::Create(FuncTy, GlobalValue::InternalLinkage,
+ ".omp_offloading.descriptor_unreg" + Suffix, &M);
Func->setSection(".text.startup");
- // Get __tgt_register_lib function declaration.
- auto *RegFuncTy = FunctionType::get(Type::getVoidTy(C), getBinDescPtrTy(M),
- /*isVarArg*/ false);
- FunctionCallee RegFuncC =
- M.getOrInsertFunction("__tgt_register_lib", RegFuncTy);
+ // Get __tgt_unregister_lib function declaration.
+ auto *UnRegFuncTy = FunctionType::get(Type::getVoidTy(C), getBinDescPtrTy(M),
+ /*isVarArg*/ false);
+ FunctionCallee UnRegFuncC =
+ M.getOrInsertFunction("__tgt_unregister_lib", UnRegFuncTy);
// Construct function body
IRBuilder<> Builder(BasicBlock::Create(C, "entry", Func));
- Builder.CreateCall(RegFuncC, BinDesc);
+ Builder.CreateCall(UnRegFuncC, BinDesc);
Builder.CreateRetVoid();
- // Add this function to constructors.
- // Set priority to 1 so that __tgt_register_lib is executed AFTER
- // __tgt_register_requires (we want to know what requirements have been
- // asked for before we load a libomptarget plugin so that by the time the
- // plugin is loaded it can report how many devices there are which can
- // satisfy these requirements).
- appendToGlobalCtors(M, Func, /*Priority*/ 1);
+ return Func;
}
-void createUnregisterFunction(Module &M, GlobalVariable *BinDesc,
- StringRef Suffix) {
+void createRegisterFunction(Module &M, GlobalVariable *BinDesc,
+ StringRef Suffix) {
LLVMContext &C = M.getContext();
auto *FuncTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg*/ false);
- auto *Func =
- Function::Create(FuncTy, GlobalValue::InternalLinkage,
- ".omp_offloading.descriptor_unreg" + Suffix, &M);
+ auto *Func = Function::Create(FuncTy, GlobalValue::InternalLinkage,
+ ".omp_offloading.descriptor_reg" + Suffix, &M);
Func->setSection(".text.startup");
- // Get __tgt_unregister_lib function declaration.
- auto *UnRegFuncTy = FunctionType::get(Type::getVoidTy(C), getBinDescPtrTy(M),
- /*isVarArg*/ false);
- FunctionCallee UnRegFuncC =
- M.getOrInsertFunction("__tgt_unregister_lib", UnRegFuncTy);
+ // Get __tgt_register_lib function declaration.
+ auto *RegFuncTy = FunctionType::get(Type::getVoidTy(C), getBinDescPtrTy(M),
+ /*isVarArg*/ false);
+ FunctionCallee RegFuncC =
+ M.getOrInsertFunction("__tgt_register_lib", RegFuncTy);
+
+ auto *AtExitTy = FunctionType::get(
+ Type::getInt32Ty(C), PointerType::getUnqual(C), /*isVarArg=*/false);
+ FunctionCallee AtExit = M.getOrInsertFunction("atexit", AtExitTy);
+
+ Function *UnregFunc = createUnregisterFunction(M, BinDesc, Suffix);
// Construct function body
IRBuilder<> Builder(BasicBlock::Create(C, "entry", Func));
- Builder.CreateCall(UnRegFuncC, BinDesc);
+
+ // Register the destructors with 'atexit'. This is expected by the CUDA
+ // runtime and ensures that we clean up before dynamic objects are destroyed.
+ // This needs to be done before the runtime is called and registers its own.
+ Builder.CreateCall(AtExit, UnregFunc);
+
+ Builder.CreateCall(RegFuncC, BinDesc);
Builder.CreateRetVoid();
- // Add this function to global destructors.
- // Match priority of __tgt_register_lib
- appendToGlobalDtors(M, Func, /*Priority*/ 1);
+ // Add this function to constructors.
+ appendToGlobalCtors(M, Func, /*Priority=*/101);
}
// struct fatbin_wrapper {
@@ -578,7 +583,7 @@ void createRegisterFatbinFunction(Module &M, GlobalVariable *FatbinDesc,
DtorBuilder.CreateRetVoid();
// Add this function to constructors.
- appendToGlobalCtors(M, CtorFunc, /*Priority*/ 1);
+ appendToGlobalCtors(M, CtorFunc, /*Priority=*/101);
}
} // namespace
@@ -591,7 +596,6 @@ Error offloading::wrapOpenMPBinaries(Module &M, ArrayRef<ArrayRef<char>> Images,
return createStringError(inconvertibleErrorCode(),
"No binary descriptors created.");
createRegisterFunction(M, Desc, Suffix);
- createUnregisterFunction(M, Desc, Suffix);
return Error::success();
}
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 38c191a2dec6..b778a14158ef 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -363,6 +363,9 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::AMDGPU_KERNEL: Out << "amdgpu_kernel"; break;
case CallingConv::AMDGPU_Gfx: Out << "amdgpu_gfx"; break;
case CallingConv::M68k_RTD: Out << "m68k_rtdcc"; break;
+ case CallingConv::RISCV_VectorCall:
+ Out << "riscv_vector_cc";
+ break;
}
}
@@ -1421,6 +1424,11 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
} else if (const auto *NNI = dyn_cast<PossiblyNonNegInst>(U)) {
if (NNI->hasNonNeg())
Out << " nneg";
+ } else if (const auto *TI = dyn_cast<TruncInst>(U)) {
+ if (TI->hasNoUnsignedWrap())
+ Out << " nuw";
+ if (TI->hasNoSignedWrap())
+ Out << " nsw";
}
}
diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp
index f088c7a2cc4e..ae99267f5ba8 100644
--- a/llvm/lib/IR/BasicBlock.cpp
+++ b/llvm/lib/IR/BasicBlock.cpp
@@ -39,7 +39,7 @@ cl::opt<bool>
bool WriteNewDbgInfoFormatToBitcode /*set default value in cl::init() below*/;
cl::opt<bool, true> WriteNewDbgInfoFormatToBitcode2(
"write-experimental-debuginfo-iterators-to-bitcode", cl::Hidden,
- cl::location(WriteNewDbgInfoFormatToBitcode), cl::init(false));
+ cl::location(WriteNewDbgInfoFormatToBitcode), cl::init(true));
DbgMarker *BasicBlock::createMarker(Instruction *I) {
assert(IsNewDbgInfoFormat &&
diff --git a/llvm/lib/IR/ConstantRange.cpp b/llvm/lib/IR/ConstantRange.cpp
index 3394a1ec8dc4..59e7a9f5eb11 100644
--- a/llvm/lib/IR/ConstantRange.cpp
+++ b/llvm/lib/IR/ConstantRange.cpp
@@ -746,7 +746,7 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
Min = Min.zext(ResultBitWidth);
Max = Max.zext(ResultBitWidth);
}
- return ConstantRange(std::move(Min), std::move(Max));
+ return getNonEmpty(std::move(Min), std::move(Max) + 1);
}
case Instruction::SIToFP: {
// TODO: use input range if available
@@ -757,7 +757,7 @@ ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
SMin = SMin.sext(ResultBitWidth);
SMax = SMax.sext(ResultBitWidth);
}
- return ConstantRange(std::move(SMin), std::move(SMax));
+ return getNonEmpty(std::move(SMin), std::move(SMax) + 1);
}
case Instruction::FPTrunc:
case Instruction::FPExt:
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index c17419b529ac..a5fb497f54ed 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -2520,10 +2520,10 @@ Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
return pImpl->ExprConstants.getOrCreate(ShufTy, Key);
}
-Constant *ConstantExpr::getNeg(Constant *C, bool HasNUW, bool HasNSW) {
+Constant *ConstantExpr::getNeg(Constant *C, bool HasNSW) {
assert(C->getType()->isIntOrIntVectorTy() &&
"Cannot NEG a nonintegral value!");
- return getSub(ConstantInt::get(C->getType(), 0), C, HasNUW, HasNSW);
+ return getSub(ConstantInt::get(C->getType(), 0), C, /*HasNUW=*/false, HasNSW);
}
Constant *ConstantExpr::getNot(Constant *C) {
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 023cabc46911..8ce9c5ca63be 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -990,6 +990,20 @@ char* LLVMPrintValueToString(LLVMValueRef Val) {
return strdup(buf.c_str());
}
+char *LLVMPrintDbgRecordToString(LLVMDbgRecordRef Record) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ if (unwrap(Record))
+ unwrap(Record)->print(os);
+ else
+ os << "Printing <null> DbgRecord";
+
+ os.flush();
+
+ return strdup(buf.c_str());
+}
+
void LLVMReplaceAllUsesWith(LLVMValueRef OldVal, LLVMValueRef NewVal) {
unwrap(OldVal)->replaceAllUsesWith(unwrap(NewVal));
}
@@ -1648,7 +1662,7 @@ LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal) {
}
LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal) {
- return wrap(ConstantExpr::getNUWNeg(unwrap<Constant>(ConstantVal)));
+ return wrap(ConstantExpr::getNeg(unwrap<Constant>(ConstantVal)));
}
@@ -3557,7 +3571,10 @@ LLVMValueRef LLVMBuildNSWNeg(LLVMBuilderRef B, LLVMValueRef V,
LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B, LLVMValueRef V,
const char *Name) {
- return wrap(unwrap(B)->CreateNUWNeg(unwrap(V), Name));
+ Value *Neg = unwrap(B)->CreateNeg(unwrap(V), Name);
+ if (auto *I = dyn_cast<BinaryOperator>(Neg))
+ I->setHasNoUnsignedWrap();
+ return wrap(Neg);
}
LLVMValueRef LLVMBuildFNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index 09bce9df1f33..4206162d1768 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -1665,12 +1665,12 @@ LLVMMetadataRef LLVMDIBuilderCreateTempGlobalVariableFwdDecl(
unwrapDI<MDNode>(Decl), nullptr, AlignInBits));
}
-LLVMValueRef
+LLVMDbgRecordRef
LLVMDIBuilderInsertDeclareBefore(LLVMDIBuilderRef Builder, LLVMValueRef Storage,
LLVMMetadataRef VarInfo, LLVMMetadataRef Expr,
LLVMMetadataRef DL, LLVMValueRef Instr) {
- return LLVMDIBuilderInsertDeclareIntrinsicBefore(Builder, Storage, VarInfo,
- Expr, DL, Instr);
+ return LLVMDIBuilderInsertDeclareRecordBefore(Builder, Storage, VarInfo, Expr,
+ DL, Instr);
}
LLVMValueRef LLVMDIBuilderInsertDeclareIntrinsicBefore(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
@@ -1679,27 +1679,38 @@ LLVMValueRef LLVMDIBuilderInsertDeclareIntrinsicBefore(
unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
unwrap<DIExpression>(Expr), unwrap<DILocation>(DL),
unwrap<Instruction>(Instr));
+ // This assert will fail if the module is in the new debug info format.
+ // This function should only be called if the module is in the old
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
assert(isa<Instruction *>(DbgInst) &&
- "Inserted a DbgRecord into function using old debug info mode");
+ "Function unexpectedly in new debug info format");
return wrap(cast<Instruction *>(DbgInst));
}
LLVMDbgRecordRef LLVMDIBuilderInsertDeclareRecordBefore(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DL, LLVMValueRef Instr) {
- return wrap(
- unwrap(Builder)
- ->insertDeclare(unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
- unwrap<DIExpression>(Expr), unwrap<DILocation>(DL),
- unwrap<Instruction>(Instr))
- .get<DbgRecord *>());
+ DbgInstPtr DbgInst = unwrap(Builder)->insertDeclare(
+ unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
+ unwrap<DIExpression>(Expr), unwrap<DILocation>(DL),
+ unwrap<Instruction>(Instr));
+ // This assert will fail if the module is in the old debug info format.
+ // This function should only be called if the module is in the new
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
+ assert(isa<DbgRecord *>(DbgInst) &&
+ "Function unexpectedly in old debug info format");
+ return wrap(cast<DbgRecord *>(DbgInst));
}
-LLVMValueRef
+LLVMDbgRecordRef
LLVMDIBuilderInsertDeclareAtEnd(LLVMDIBuilderRef Builder, LLVMValueRef Storage,
LLVMMetadataRef VarInfo, LLVMMetadataRef Expr,
LLVMMetadataRef DL, LLVMBasicBlockRef Block) {
- return LLVMDIBuilderInsertDeclareIntrinsicAtEnd(Builder, Storage, VarInfo,
- Expr, DL, Block);
+ return LLVMDIBuilderInsertDeclareRecordAtEnd(Builder, Storage, VarInfo, Expr,
+ DL, Block);
}
LLVMValueRef LLVMDIBuilderInsertDeclareIntrinsicAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
@@ -1707,26 +1718,36 @@ LLVMValueRef LLVMDIBuilderInsertDeclareIntrinsicAtEnd(
DbgInstPtr DbgInst = unwrap(Builder)->insertDeclare(
unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
unwrap<DIExpression>(Expr), unwrap<DILocation>(DL), unwrap(Block));
+ // This assert will fail if the module is in the new debug info format.
+ // This function should only be called if the module is in the old
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
assert(isa<Instruction *>(DbgInst) &&
- "Inserted a DbgRecord into function using old debug info mode");
+ "Function unexpectedly in new debug info format");
return wrap(cast<Instruction *>(DbgInst));
}
LLVMDbgRecordRef LLVMDIBuilderInsertDeclareRecordAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DL, LLVMBasicBlockRef Block) {
- return wrap(unwrap(Builder)
- ->insertDeclare(unwrap(Storage),
- unwrap<DILocalVariable>(VarInfo),
- unwrap<DIExpression>(Expr),
- unwrap<DILocation>(DL), unwrap(Block))
- .get<DbgRecord *>());
+ DbgInstPtr DbgInst = unwrap(Builder)->insertDeclare(
+ unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
+ unwrap<DIExpression>(Expr), unwrap<DILocation>(DL), unwrap(Block));
+ // This assert will fail if the module is in the old debug info format.
+ // This function should only be called if the module is in the new
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
+ assert(isa<DbgRecord *>(DbgInst) &&
+ "Function unexpectedly in old debug info format");
+ return wrap(cast<DbgRecord *>(DbgInst));
}
-LLVMValueRef LLVMDIBuilderInsertDbgValueBefore(
+LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueBefore(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr) {
- return LLVMDIBuilderInsertDbgValueIntrinsicBefore(Builder, Val, VarInfo, Expr,
- DebugLoc, Instr);
+ return LLVMDIBuilderInsertDbgValueRecordBefore(Builder, Val, VarInfo, Expr,
+ DebugLoc, Instr);
}
LLVMValueRef LLVMDIBuilderInsertDbgValueIntrinsicBefore(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
@@ -1734,26 +1755,36 @@ LLVMValueRef LLVMDIBuilderInsertDbgValueIntrinsicBefore(
DbgInstPtr DbgInst = unwrap(Builder)->insertDbgValueIntrinsic(
unwrap(Val), unwrap<DILocalVariable>(VarInfo), unwrap<DIExpression>(Expr),
unwrap<DILocation>(DebugLoc), unwrap<Instruction>(Instr));
+ // This assert will fail if the module is in the new debug info format.
+ // This function should only be called if the module is in the old
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
assert(isa<Instruction *>(DbgInst) &&
- "Inserted a DbgRecord into function using old debug info mode");
+ "Function unexpectedly in new debug info format");
return wrap(cast<Instruction *>(DbgInst));
}
LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueRecordBefore(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr) {
- return wrap(unwrap(Builder)
- ->insertDbgValueIntrinsic(
- unwrap(Val), unwrap<DILocalVariable>(VarInfo),
- unwrap<DIExpression>(Expr), unwrap<DILocation>(DebugLoc),
- unwrap<Instruction>(Instr))
- .get<DbgRecord *>());
+ DbgInstPtr DbgInst = unwrap(Builder)->insertDbgValueIntrinsic(
+ unwrap(Val), unwrap<DILocalVariable>(VarInfo), unwrap<DIExpression>(Expr),
+ unwrap<DILocation>(DebugLoc), unwrap<Instruction>(Instr));
+ // This assert will fail if the module is in the old debug info format.
+ // This function should only be called if the module is in the new
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
+ assert(isa<DbgRecord *>(DbgInst) &&
+ "Function unexpectedly in old debug info format");
+ return wrap(cast<DbgRecord *>(DbgInst));
}
-LLVMValueRef LLVMDIBuilderInsertDbgValueAtEnd(
+LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block) {
- return LLVMDIBuilderInsertDbgValueIntrinsicAtEnd(Builder, Val, VarInfo, Expr,
- DebugLoc, Block);
+ return LLVMDIBuilderInsertDbgValueRecordAtEnd(Builder, Val, VarInfo, Expr,
+ DebugLoc, Block);
}
LLVMValueRef LLVMDIBuilderInsertDbgValueIntrinsicAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
@@ -1761,19 +1792,29 @@ LLVMValueRef LLVMDIBuilderInsertDbgValueIntrinsicAtEnd(
DbgInstPtr DbgInst = unwrap(Builder)->insertDbgValueIntrinsic(
unwrap(Val), unwrap<DILocalVariable>(VarInfo), unwrap<DIExpression>(Expr),
unwrap<DILocation>(DebugLoc), unwrap(Block));
+ // This assert will fail if the module is in the new debug info format.
+ // This function should only be called if the module is in the old
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
assert(isa<Instruction *>(DbgInst) &&
- "Inserted a DbgRecord into function using old debug info mode");
+ "Function unexpectedly in new debug info format");
return wrap(cast<Instruction *>(DbgInst));
}
LLVMDbgRecordRef LLVMDIBuilderInsertDbgValueRecordAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Val, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block) {
- return wrap(unwrap(Builder)
- ->insertDbgValueIntrinsic(
- unwrap(Val), unwrap<DILocalVariable>(VarInfo),
- unwrap<DIExpression>(Expr), unwrap<DILocation>(DebugLoc),
- unwrap(Block))
- .get<DbgRecord *>());
+ DbgInstPtr DbgInst = unwrap(Builder)->insertDbgValueIntrinsic(
+ unwrap(Val), unwrap<DILocalVariable>(VarInfo), unwrap<DIExpression>(Expr),
+ unwrap<DILocation>(DebugLoc), unwrap(Block));
+ // This assert will fail if the module is in the old debug info format.
+ // This function should only be called if the module is in the new
+ // debug info format.
+ // See https://llvm.org/docs/RemoveDIsDebugInfo.html#c-api-changes,
+ // LLVMIsNewDbgInfoFormat, and LLVMSetIsNewDbgInfoFormat for more info.
+ assert(isa<DbgRecord *>(DbgInst) &&
+ "Function unexpectedly in old debug info format");
+ return wrap(cast<DbgRecord *>(DbgInst));
}
LLVMMetadataRef LLVMDIBuilderCreateAutoVariable(
diff --git a/llvm/lib/IR/Globals.cpp b/llvm/lib/IR/Globals.cpp
index 481a1d802e66..40f854a2c906 100644
--- a/llvm/lib/IR/Globals.cpp
+++ b/llvm/lib/IR/Globals.cpp
@@ -243,6 +243,13 @@ void GlobalValue::removeSanitizerMetadata() {
HasSanitizerMetadata = false;
}
+void GlobalValue::setNoSanitizeMetadata() {
+ SanitizerMetadata Meta;
+ Meta.NoAddress = true;
+ Meta.NoHWAddress = true;
+ setSanitizerMetadata(Meta);
+}
+
StringRef GlobalObject::getSectionImpl() const {
assert(hasSection());
return getContext().pImpl->GlobalObjectSections[this];
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 47a7f2c9de79..0602a55b9fe7 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -370,11 +370,17 @@ bool Instruction::isOnlyUserOfAnyOperand() {
}
void Instruction::setHasNoUnsignedWrap(bool b) {
- cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
+ if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
+ Inst->setHasNoUnsignedWrap(b);
+ else
+ cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
}
void Instruction::setHasNoSignedWrap(bool b) {
- cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
+ if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
+ Inst->setHasNoSignedWrap(b);
+ else
+ cast<TruncInst>(this)->setHasNoSignedWrap(b);
}
void Instruction::setIsExact(bool b) {
@@ -388,11 +394,17 @@ void Instruction::setNonNeg(bool b) {
}
bool Instruction::hasNoUnsignedWrap() const {
- return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
+ if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
+ return Inst->hasNoUnsignedWrap();
+
+ return cast<TruncInst>(this)->hasNoUnsignedWrap();
}
bool Instruction::hasNoSignedWrap() const {
- return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
+ if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
+ return Inst->hasNoSignedWrap();
+
+ return cast<TruncInst>(this)->hasNoSignedWrap();
}
bool Instruction::hasNonNeg() const {
@@ -432,6 +444,11 @@ void Instruction::dropPoisonGeneratingFlags() {
case Instruction::ZExt:
setNonNeg(false);
break;
+
+ case Instruction::Trunc:
+ cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
+ cast<TruncInst>(this)->setHasNoSignedWrap(false);
+ break;
}
if (isa<FPMathOperator>(this)) {
@@ -626,6 +643,13 @@ void Instruction::andIRFlags(const Value *V) {
}
}
+ if (auto *TI = dyn_cast<TruncInst>(V)) {
+ if (isa<TruncInst>(this)) {
+ setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
+ setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
+ }
+ }
+
if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
if (isa<PossiblyExactOperator>(this))
setIsExact(isExact() && PE->isExact());
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 494d50f89e37..cec02e2ce633 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -833,15 +833,6 @@ CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
// of S/T. The meaning of "branch_weights" metadata for call instruction is
// transferred to represent call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
- auto *ProfileData = getMetadata(LLVMContext::MD_prof);
- if (ProfileData == nullptr)
- return;
-
- auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
- if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
- !ProfDataName->getString().equals("VP")))
- return;
-
if (T == 0) {
LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
"div by 0. Ignoring. Likely the function "
@@ -850,42 +841,7 @@ void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
"with non-zero prof info.");
return;
}
-
- MDBuilder MDB(getContext());
- SmallVector<Metadata *, 3> Vals;
- Vals.push_back(ProfileData->getOperand(0));
- APInt APS(128, S), APT(128, T);
- if (ProfDataName->getString().equals("branch_weights") &&
- ProfileData->getNumOperands() > 0) {
- // Using APInt::div may be expensive, but most cases should fit 64 bits.
- APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
- ->getValue()
- .getZExtValue());
- Val *= APS;
- Vals.push_back(MDB.createConstant(
- ConstantInt::get(Type::getInt32Ty(getContext()),
- Val.udiv(APT).getLimitedValue(UINT32_MAX))));
- } else if (ProfDataName->getString().equals("VP"))
- for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
- // The first value is the key of the value profile, which will not change.
- Vals.push_back(ProfileData->getOperand(i));
- uint64_t Count =
- mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
- ->getValue()
- .getZExtValue();
- // Don't scale the magic number.
- if (Count == NOMORE_ICP_MAGICNUM) {
- Vals.push_back(ProfileData->getOperand(i + 1));
- continue;
- }
- // Using APInt::div may be expensive, but most cases should fit 64 bits.
- APInt Val(128, Count);
- Val *= APS;
- Vals.push_back(MDB.createConstant(
- ConstantInt::get(Type::getInt64Ty(getContext()),
- Val.udiv(APT).getLimitedValue())));
- }
- setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
+ scaleProfData(*this, S, T);
}
//===----------------------------------------------------------------------===//
@@ -3372,18 +3328,6 @@ BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
}
-BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
- Instruction *InsertBefore) {
- Value *Zero = ConstantInt::get(Op->getType(), 0);
- return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
-}
-
-BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
- BasicBlock *InsertAtEnd) {
- Value *Zero = ConstantInt::get(Op->getType(), 0);
- return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
-}
-
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
BasicBlock::iterator InsertBefore) {
Constant *C = Constant::getAllOnesValue(Op->getType());
@@ -4623,6 +4567,16 @@ CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
S1, S2, Name);
}
+CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
+ Value *S2,
+ const Instruction *FlagsSource,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
+ Inst->copyIRFlags(FlagsSource);
+ return Inst;
+}
+
void CmpInst::swapOperands() {
if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
IC->swapOperands();
diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h
index 58e0f21244f7..7c67e191348e 100644
--- a/llvm/lib/IR/LLVMContextImpl.h
+++ b/llvm/lib/IR/LLVMContextImpl.h
@@ -441,7 +441,7 @@ template <> struct MDNodeKeyImpl<DIEnumerator> {
bool IsUnsigned;
MDNodeKeyImpl(APInt Value, bool IsUnsigned, MDString *Name)
- : Value(Value), Name(Name), IsUnsigned(IsUnsigned) {}
+ : Value(std::move(Value)), Name(Name), IsUnsigned(IsUnsigned) {}
MDNodeKeyImpl(int64_t Value, bool IsUnsigned, MDString *Name)
: Value(APInt(64, Value, !IsUnsigned)), Name(Name),
IsUnsigned(IsUnsigned) {}
diff --git a/llvm/lib/IR/Mangler.cpp b/llvm/lib/IR/Mangler.cpp
index 3acac2c3e3db..72e2bc1f24ac 100644
--- a/llvm/lib/IR/Mangler.cpp
+++ b/llvm/lib/IR/Mangler.cpp
@@ -20,6 +20,7 @@
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
+
using namespace llvm;
namespace {
@@ -192,7 +193,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
// Check if the name needs quotes to be safe for the linker to interpret.
static bool canBeUnquotedInDirective(char C) {
- return isAlnum(C) || C == '_' || C == '@';
+ return isAlnum(C) || C == '_' || C == '@' || C == '#';
}
static bool canBeUnquotedInDirective(StringRef Name) {
@@ -233,6 +234,16 @@ void llvm::emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
} else {
Mangler.getNameWithPrefix(OS, GV, false);
}
+ if (TT.isWindowsArm64EC()) {
+ // Use EXPORTAS for mangled ARM64EC symbols.
+ // FIXME: During LTO, we're invoked prior to the EC lowering pass,
+ // so symbols are not yet mangled. Emitting the unmangled name
+ // typically functions correctly; the linker can resolve the export
+ // with the demangled alias.
+ if (std::optional<std::string> demangledName =
+ getArm64ECDemangledFunctionName(GV->getName()))
+ OS << ",EXPORTAS," << *demangledName;
+ }
if (NeedQuotes)
OS << "\"";
@@ -279,3 +290,42 @@ void llvm::emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
OS << "\"";
}
+std::optional<std::string> llvm::getArm64ECMangledFunctionName(StringRef Name) {
+ bool IsCppFn = Name[0] == '?';
+ if (IsCppFn && Name.find("$$h") != std::string::npos)
+ return std::nullopt;
+ if (!IsCppFn && Name[0] == '#')
+ return std::nullopt;
+
+ StringRef Prefix = "$$h";
+ size_t InsertIdx = 0;
+ if (IsCppFn) {
+ InsertIdx = Name.find("@@");
+ size_t ThreeAtSignsIdx = Name.find("@@@");
+ if (InsertIdx != std::string::npos && InsertIdx != ThreeAtSignsIdx) {
+ InsertIdx += 2;
+ } else {
+ InsertIdx = Name.find("@");
+ if (InsertIdx != std::string::npos)
+ InsertIdx++;
+ }
+ } else {
+ Prefix = "#";
+ }
+
+ return std::optional<std::string>(
+ (Name.substr(0, InsertIdx) + Prefix + Name.substr(InsertIdx)).str());
+}
+
+std::optional<std::string>
+llvm::getArm64ECDemangledFunctionName(StringRef Name) {
+ if (Name[0] == '#')
+ return std::optional<std::string>(Name.substr(1));
+ if (Name[0] != '?')
+ return std::nullopt;
+
+ std::pair<StringRef, StringRef> Pair = Name.split("$$h");
+ if (Pair.second.empty())
+ return std::nullopt;
+ return std::optional<std::string>((Pair.first + Pair.second).str());
+}
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
index b9cd219d94dc..495769279e33 100644
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -27,6 +27,10 @@ bool Operator::hasPoisonGeneratingFlags() const {
auto *OBO = cast<OverflowingBinaryOperator>(this);
return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
}
+ case Instruction::Trunc: {
+ auto *TI = dyn_cast<TruncInst>(this);
+ return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
+ }
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::AShr:
diff --git a/llvm/lib/IR/ProfDataUtils.cpp b/llvm/lib/IR/ProfDataUtils.cpp
index b1a10d0ce5a5..dc86f4204b1a 100644
--- a/llvm/lib/IR/ProfDataUtils.cpp
+++ b/llvm/lib/IR/ProfDataUtils.cpp
@@ -190,4 +190,52 @@ void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights) {
I.setMetadata(LLVMContext::MD_prof, BranchWeights);
}
+void scaleProfData(Instruction &I, uint64_t S, uint64_t T) {
+ assert(T != 0 && "Caller should guarantee");
+ auto *ProfileData = I.getMetadata(LLVMContext::MD_prof);
+ if (ProfileData == nullptr)
+ return;
+
+ auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
+ if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
+ !ProfDataName->getString().equals("VP")))
+ return;
+
+ LLVMContext &C = I.getContext();
+
+ MDBuilder MDB(C);
+ SmallVector<Metadata *, 3> Vals;
+ Vals.push_back(ProfileData->getOperand(0));
+ APInt APS(128, S), APT(128, T);
+ if (ProfDataName->getString().equals("branch_weights") &&
+ ProfileData->getNumOperands() > 0) {
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(ConstantInt::get(
+ Type::getInt32Ty(C), Val.udiv(APT).getLimitedValue(UINT32_MAX))));
+ } else if (ProfDataName->getString().equals("VP"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
+ // The first value is the key of the value profile, which will not change.
+ Vals.push_back(ProfileData->getOperand(i));
+ uint64_t Count =
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
+ ->getValue()
+ .getZExtValue();
+ // Don't scale the magic number.
+ if (Count == NOMORE_ICP_MAGICNUM) {
+ Vals.push_back(ProfileData->getOperand(i + 1));
+ continue;
+ }
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128, Count);
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(ConstantInt::get(
+ Type::getInt64Ty(C), Val.udiv(APT).getLimitedValue())));
+ }
+ I.setMetadata(LLVMContext::MD_prof, MDNode::get(C, Vals));
+}
+
} // namespace llvm
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 819722566831..33f358440a31 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5016,7 +5016,7 @@ void Verifier::visitInstruction(Instruction &I) {
F->getIntrinsicID() == Intrinsic::coro_await_suspend_handle ||
F->getIntrinsicID() ==
Intrinsic::experimental_patchpoint_void ||
- F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
+ F->getIntrinsicID() == Intrinsic::experimental_patchpoint ||
F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
IsAttachedCallOperand(F, CBI, i),
@@ -5661,6 +5661,13 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
break;
}
+ case Intrinsic::experimental_patchpoint: {
+ if (Call.getCallingConv() == CallingConv::AnyReg) {
+ Check(Call.getType()->isSingleValueType(),
+ "patchpoint: invalid return type used with anyregcc", Call);
+ }
+ break;
+ }
case Intrinsic::eh_exceptioncode:
case Intrinsic::eh_exceptionpointer: {
Check(isa<CatchPadInst>(Call.getArgOperand(0)),
diff --git a/llvm/lib/InterfaceStub/ELFObjHandler.cpp b/llvm/lib/InterfaceStub/ELFObjHandler.cpp
index c1256563d0d6..9c81a8832c0f 100644
--- a/llvm/lib/InterfaceStub/ELFObjHandler.cpp
+++ b/llvm/lib/InterfaceStub/ELFObjHandler.cpp
@@ -57,7 +57,7 @@ static void initELFHeader(typename ELFT::Ehdr &ElfHeader, uint16_t Machine) {
ElfHeader.e_ident[EI_MAG2] = ElfMagic[EI_MAG2];
ElfHeader.e_ident[EI_MAG3] = ElfMagic[EI_MAG3];
ElfHeader.e_ident[EI_CLASS] = ELFT::Is64Bits ? ELFCLASS64 : ELFCLASS32;
- bool IsLittleEndian = ELFT::TargetEndianness == llvm::endianness::little;
+ bool IsLittleEndian = ELFT::Endianness == llvm::endianness::little;
ElfHeader.e_ident[EI_DATA] = IsLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
ElfHeader.e_ident[EI_VERSION] = EV_CURRENT;
ElfHeader.e_ident[EI_OSABI] = ELFOSABI_NONE;
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index b58418c64a11..53060df7f503 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -579,7 +579,9 @@ LTO::RegularLTOState::RegularLTOState(unsigned ParallelCodeGenParallelismLevel,
const Config &Conf)
: ParallelCodeGenParallelismLevel(ParallelCodeGenParallelismLevel),
Ctx(Conf), CombinedModule(std::make_unique<Module>("ld-temp.o", Ctx)),
- Mover(std::make_unique<IRMover>(*CombinedModule)) {}
+ Mover(std::make_unique<IRMover>(*CombinedModule)) {
+ CombinedModule->IsNewDbgInfoFormat = UseNewDbgInfoFormat;
+}
LTO::ThinLTOState::ThinLTOState(ThinBackend Backend)
: Backend(Backend), CombinedIndex(/*HaveGVs*/ false) {
diff --git a/llvm/lib/MC/ConstantPools.cpp b/llvm/lib/MC/ConstantPools.cpp
index f895cc6413d7..824d2463f30f 100644
--- a/llvm/lib/MC/ConstantPools.cpp
+++ b/llvm/lib/MC/ConstantPools.cpp
@@ -43,14 +43,15 @@ const MCExpr *ConstantPool::addEntry(const MCExpr *Value, MCContext &Context,
// Check if there is existing entry for the same constant. If so, reuse it.
if (C) {
- auto CItr = CachedConstantEntries.find(C->getValue());
+ auto CItr = CachedConstantEntries.find(std::make_pair(C->getValue(), Size));
if (CItr != CachedConstantEntries.end())
return CItr->second;
}
// Check if there is existing entry for the same symbol. If so, reuse it.
if (S) {
- auto SItr = CachedSymbolEntries.find(&(S->getSymbol()));
+ auto SItr =
+ CachedSymbolEntries.find(std::make_pair(&(S->getSymbol()), Size));
if (SItr != CachedSymbolEntries.end())
return SItr->second;
}
@@ -60,9 +61,9 @@ const MCExpr *ConstantPool::addEntry(const MCExpr *Value, MCContext &Context,
Entries.push_back(ConstantPoolEntry(CPEntryLabel, Value, Size, Loc));
const auto SymRef = MCSymbolRefExpr::create(CPEntryLabel, Context);
if (C)
- CachedConstantEntries[C->getValue()] = SymRef;
+ CachedConstantEntries[std::make_pair(C->getValue(), Size)] = SymRef;
if (S)
- CachedSymbolEntries[&(S->getSymbol())] = SymRef;
+ CachedSymbolEntries[std::make_pair(&(S->getSymbol()), Size)] = SymRef;
return SymRef;
}
diff --git a/llvm/lib/MC/DXContainerPSVInfo.cpp b/llvm/lib/MC/DXContainerPSVInfo.cpp
index 48182fcd31df..aeff69380139 100644
--- a/llvm/lib/MC/DXContainerPSVInfo.cpp
+++ b/llvm/lib/MC/DXContainerPSVInfo.cpp
@@ -81,13 +81,18 @@ void PSVRuntimeInfo::write(raw_ostream &OS, uint32_t Version) const {
BindingSize = sizeof(dxbc::PSV::v0::ResourceBindInfo);
break;
case 2:
- default:
InfoSize = sizeof(dxbc::PSV::v2::RuntimeInfo);
BindingSize = sizeof(dxbc::PSV::v2::ResourceBindInfo);
+ break;
+ case 3:
+ default:
+ InfoSize = sizeof(dxbc::PSV::v3::RuntimeInfo);
+ BindingSize = sizeof(dxbc::PSV::v2::ResourceBindInfo);
}
- // Write the size of the info.
+ // Write the size of the info.
support::endian::write(OS, InfoSize, llvm::endianness::little);
+
// Write the info itself.
OS.write(reinterpret_cast<const char *>(&BaseData), InfoSize);
@@ -104,32 +109,12 @@ void PSVRuntimeInfo::write(raw_ostream &OS, uint32_t Version) const {
if (Version == 0)
return;
- StringTableBuilder StrTabBuilder((StringTableBuilder::DXContainer));
- SmallVector<uint32_t, 64> IndexBuffer;
- SmallVector<v0::SignatureElement, 32> SignatureElements;
- SmallVector<StringRef, 32> SemanticNames;
-
- ProcessElementList(StrTabBuilder, IndexBuffer, SignatureElements,
- SemanticNames, InputElements);
- ProcessElementList(StrTabBuilder, IndexBuffer, SignatureElements,
- SemanticNames, OutputElements);
- ProcessElementList(StrTabBuilder, IndexBuffer, SignatureElements,
- SemanticNames, PatchOrPrimElements);
-
- StrTabBuilder.finalize();
- for (auto ElAndName : zip(SignatureElements, SemanticNames)) {
- v0::SignatureElement &El = std::get<0>(ElAndName);
- StringRef Name = std::get<1>(ElAndName);
- El.NameOffset = static_cast<uint32_t>(StrTabBuilder.getOffset(Name));
- if (sys::IsBigEndianHost)
- El.swapBytes();
- }
-
- support::endian::write(OS, static_cast<uint32_t>(StrTabBuilder.getSize()),
+ support::endian::write(OS,
+ static_cast<uint32_t>(DXConStrTabBuilder.getSize()),
llvm::endianness::little);
// Write the string table.
- StrTabBuilder.write(OS);
+ DXConStrTabBuilder.write(OS);
// Write the index table size, then table.
support::endian::write(OS, static_cast<uint32_t>(IndexBuffer.size()),
@@ -162,6 +147,46 @@ void PSVRuntimeInfo::write(raw_ostream &OS, uint32_t Version) const {
llvm::endianness::little);
}
+void PSVRuntimeInfo::finalize(Triple::EnvironmentType Stage) {
+ IsFinalized = true;
+ BaseData.SigInputElements = static_cast<uint32_t>(InputElements.size());
+ BaseData.SigOutputElements = static_cast<uint32_t>(OutputElements.size());
+ BaseData.SigPatchOrPrimElements =
+ static_cast<uint32_t>(PatchOrPrimElements.size());
+
+ SmallVector<StringRef, 32> SemanticNames;
+
+ // Build a string table and set associated offsets to be written when
+ // write() is called
+ ProcessElementList(DXConStrTabBuilder, IndexBuffer, SignatureElements,
+ SemanticNames, InputElements);
+ ProcessElementList(DXConStrTabBuilder, IndexBuffer, SignatureElements,
+ SemanticNames, OutputElements);
+ ProcessElementList(DXConStrTabBuilder, IndexBuffer, SignatureElements,
+ SemanticNames, PatchOrPrimElements);
+
+ DXConStrTabBuilder.add(EntryName);
+
+ DXConStrTabBuilder.finalize();
+ for (auto ElAndName : zip(SignatureElements, SemanticNames)) {
+ llvm::dxbc::PSV::v0::SignatureElement &El = std::get<0>(ElAndName);
+ StringRef Name = std::get<1>(ElAndName);
+ El.NameOffset = static_cast<uint32_t>(DXConStrTabBuilder.getOffset(Name));
+ if (sys::IsBigEndianHost)
+ El.swapBytes();
+ }
+
+ BaseData.EntryNameOffset =
+ static_cast<uint32_t>(DXConStrTabBuilder.getOffset(EntryName));
+
+ if (!sys::IsBigEndianHost)
+ return;
+ BaseData.swapBytes();
+ BaseData.swapBytes(Stage);
+ for (auto &Res : Resources)
+ Res.swapBytes();
+}
+
void Signature::write(raw_ostream &OS) {
SmallVector<dxbc::ProgramSignatureElement> SigParams;
SigParams.reserve(Params.size());
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index 3c4d3ab9a508..005521bad6e0 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -141,7 +141,6 @@ struct ELFWriter {
// TargetObjectWriter wrappers.
bool is64Bit() const;
- bool usesRela(const MCSectionELF &Sec) const;
uint64_t align(Align Alignment);
@@ -260,6 +259,7 @@ public:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, uint64_t &FixedValue) override;
+ bool usesRela(const MCSectionELF &Sec) const;
void executePostLayoutBinding(MCAssembler &Asm,
const MCAsmLayout &Layout) override;
@@ -394,11 +394,6 @@ bool ELFWriter::is64Bit() const {
return OWriter.TargetObjectWriter->is64Bit();
}
-bool ELFWriter::usesRela(const MCSectionELF &Sec) const {
- return OWriter.hasRelocationAddend() &&
- Sec.getType() != ELF::SHT_LLVM_CALL_GRAPH_PROFILE;
-}
-
// Emit the ELF header.
void ELFWriter::writeHeader(const MCAssembler &Asm) {
// ELF Header
@@ -825,24 +820,22 @@ MCSectionELF *ELFWriter::createRelocationSection(MCContext &Ctx,
if (OWriter.Relocations[&Sec].empty())
return nullptr;
- const StringRef SectionName = Sec.getName();
- bool Rela = usesRela(Sec);
- std::string RelaSectionName = Rela ? ".rela" : ".rel";
- RelaSectionName += SectionName;
+ unsigned Flags = ELF::SHF_INFO_LINK;
+ if (Sec.getFlags() & ELF::SHF_GROUP)
+ Flags = ELF::SHF_GROUP;
+ const StringRef SectionName = Sec.getName();
+ const bool Rela = OWriter.usesRela(Sec);
unsigned EntrySize;
if (Rela)
EntrySize = is64Bit() ? sizeof(ELF::Elf64_Rela) : sizeof(ELF::Elf32_Rela);
else
EntrySize = is64Bit() ? sizeof(ELF::Elf64_Rel) : sizeof(ELF::Elf32_Rel);
- unsigned Flags = ELF::SHF_INFO_LINK;
- if (Sec.getFlags() & ELF::SHF_GROUP)
- Flags = ELF::SHF_GROUP;
-
- MCSectionELF *RelaSection = Ctx.createELFRelSection(
- RelaSectionName, Rela ? ELF::SHT_RELA : ELF::SHT_REL, Flags, EntrySize,
- Sec.getGroup(), &Sec);
+ MCSectionELF *RelaSection =
+ Ctx.createELFRelSection(((Rela ? ".rela" : ".rel") + SectionName),
+ Rela ? ELF::SHT_RELA : ELF::SHT_REL, Flags,
+ EntrySize, Sec.getGroup(), &Sec);
RelaSection->setAlignment(is64Bit() ? Align(8) : Align(4));
return RelaSection;
}
@@ -938,52 +931,33 @@ void ELFWriter::WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
void ELFWriter::writeRelocations(const MCAssembler &Asm,
const MCSectionELF &Sec) {
std::vector<ELFRelocationEntry> &Relocs = OWriter.Relocations[&Sec];
-
- // We record relocations by pushing to the end of a vector. Reverse the vector
- // to get the relocations in the order they were created.
- // In most cases that is not important, but it can be for special sections
- // (.eh_frame) or specific relocations (TLS optimizations on SystemZ).
- std::reverse(Relocs.begin(), Relocs.end());
+ const bool Rela = OWriter.usesRela(Sec);
// Sort the relocation entries. MIPS needs this.
OWriter.TargetObjectWriter->sortRelocs(Asm, Relocs);
- const bool Rela = usesRela(Sec);
- for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
- const ELFRelocationEntry &Entry = Relocs[e - i - 1];
- unsigned Index = Entry.Symbol ? Entry.Symbol->getIndex() : 0;
-
- if (is64Bit()) {
- write(Entry.Offset);
- if (OWriter.TargetObjectWriter->getEMachine() == ELF::EM_MIPS) {
- write(uint32_t(Index));
-
+ if (OWriter.TargetObjectWriter->getEMachine() == ELF::EM_MIPS) {
+ for (const ELFRelocationEntry &Entry : Relocs) {
+ uint32_t Symidx = Entry.Symbol ? Entry.Symbol->getIndex() : 0;
+ if (is64Bit()) {
+ write(Entry.Offset);
+ write(uint32_t(Symidx));
write(OWriter.TargetObjectWriter->getRSsym(Entry.Type));
write(OWriter.TargetObjectWriter->getRType3(Entry.Type));
write(OWriter.TargetObjectWriter->getRType2(Entry.Type));
write(OWriter.TargetObjectWriter->getRType(Entry.Type));
+ if (Rela)
+ write(Entry.Addend);
} else {
- struct ELF::Elf64_Rela ERE64;
- ERE64.setSymbolAndType(Index, Entry.Type);
- write(ERE64.r_info);
- }
- if (Rela)
- write(Entry.Addend);
- } else {
- write(uint32_t(Entry.Offset));
-
- struct ELF::Elf32_Rela ERE32;
- ERE32.setSymbolAndType(Index, Entry.Type);
- write(ERE32.r_info);
-
- if (Rela)
- write(uint32_t(Entry.Addend));
-
- if (OWriter.TargetObjectWriter->getEMachine() == ELF::EM_MIPS) {
+ write(uint32_t(Entry.Offset));
+ ELF::Elf32_Rela ERE32;
+ ERE32.setSymbolAndType(Symidx, Entry.Type);
+ write(ERE32.r_info);
+ if (Rela)
+ write(uint32_t(Entry.Addend));
if (uint32_t RType =
OWriter.TargetObjectWriter->getRType2(Entry.Type)) {
write(uint32_t(Entry.Offset));
-
ERE32.setSymbolAndType(0, RType);
write(ERE32.r_info);
write(uint32_t(0));
@@ -991,13 +965,31 @@ void ELFWriter::writeRelocations(const MCAssembler &Asm,
if (uint32_t RType =
OWriter.TargetObjectWriter->getRType3(Entry.Type)) {
write(uint32_t(Entry.Offset));
-
ERE32.setSymbolAndType(0, RType);
write(ERE32.r_info);
write(uint32_t(0));
}
}
}
+ return;
+ }
+ for (const ELFRelocationEntry &Entry : Relocs) {
+ uint32_t Symidx = Entry.Symbol ? Entry.Symbol->getIndex() : 0;
+ if (is64Bit()) {
+ write(Entry.Offset);
+ ELF::Elf64_Rela ERE;
+ ERE.setSymbolAndType(Symidx, Entry.Type);
+ write(ERE.r_info);
+ if (Rela)
+ write(Entry.Addend);
+ } else {
+ write(uint32_t(Entry.Offset));
+ ELF::Elf32_Rela ERE;
+ ERE.setSymbolAndType(Symidx, Entry.Type);
+ write(ERE.r_info);
+ if (Rela)
+ write(uint32_t(Entry.Addend));
+ }
}
}
@@ -1500,7 +1492,7 @@ void ELFObjectWriter::recordRelocation(MCAssembler &Asm,
FixedValue = !RelocateWithSymbol && SymA && !SymA->isUndefined()
? C + Layout.getSymbolOffset(*SymA)
: C;
- if (hasRelocationAddend()) {
+ if (usesRela(FixupSection)) {
Addend = FixedValue;
FixedValue = 0;
}
@@ -1529,6 +1521,11 @@ void ELFObjectWriter::recordRelocation(MCAssembler &Asm,
Relocations[&FixupSection].push_back(Rec);
}
+bool ELFObjectWriter::usesRela(const MCSectionELF &Sec) const {
+ return hasRelocationAddend() &&
+ Sec.getType() != ELF::SHT_LLVM_CALL_GRAPH_PROFILE;
+}
+
bool ELFObjectWriter::isSymbolRefDifferenceFullyResolvedImpl(
const MCAssembler &Asm, const MCSymbol &SA, const MCFragment &FB,
bool InSet, bool IsPCRel) const {
diff --git a/llvm/lib/MC/MCDwarf.cpp b/llvm/lib/MC/MCDwarf.cpp
index d0face9140de..2ee0c3eb27b9 100644
--- a/llvm/lib/MC/MCDwarf.cpp
+++ b/llvm/lib/MC/MCDwarf.cpp
@@ -360,7 +360,12 @@ void MCDwarfLineStr::emitRef(MCStreamer *MCOS, StringRef Path) {
size_t Offset = addString(Path);
if (UseRelocs) {
MCContext &Ctx = MCOS->getContext();
- MCOS->emitValue(makeStartPlusIntExpr(Ctx, *LineStrLabel, Offset), RefSize);
+ if (Ctx.getAsmInfo()->needsDwarfSectionOffsetDirective()) {
+ MCOS->emitCOFFSecRel32(LineStrLabel, Offset);
+ } else {
+ MCOS->emitValue(makeStartPlusIntExpr(Ctx, *LineStrLabel, Offset),
+ RefSize);
+ }
} else
MCOS->emitIntValue(Offset, RefSize);
}
diff --git a/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp b/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp
index 0db5fb36f795..665d92eb9a21 100644
--- a/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp
+++ b/llvm/lib/MC/MCParser/MCTargetAsmParser.cpp
@@ -8,6 +8,7 @@
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCRegister.h"
using namespace llvm;
@@ -48,3 +49,8 @@ ParseStatus MCTargetAsmParser::parseDirective(AsmToken DirectiveID) {
return ParseStatus::Failure;
return ParseStatus::NoMatch;
}
+
+bool MCTargetAsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
+ const MCParsedAsmOperand &Op2) const {
+ return Op1.isReg() && Op2.isReg() && Op1.getReg() == Op2.getReg();
+}
diff --git a/llvm/lib/MC/MCRegisterInfo.cpp b/llvm/lib/MC/MCRegisterInfo.cpp
index a2c1737e2964..334655616d8d 100644
--- a/llvm/lib/MC/MCRegisterInfo.cpp
+++ b/llvm/lib/MC/MCRegisterInfo.cpp
@@ -57,18 +57,6 @@ unsigned MCRegisterInfo::getSubRegIndex(MCRegister Reg,
return 0;
}
-unsigned MCRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
- assert(Idx && Idx < getNumSubRegIndices() &&
- "This is not a subregister index");
- return SubRegIdxRanges[Idx].Size;
-}
-
-unsigned MCRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
- assert(Idx && Idx < getNumSubRegIndices() &&
- "This is not a subregister index");
- return SubRegIdxRanges[Idx].Offset;
-}
-
int MCRegisterInfo::getDwarfRegNum(MCRegister RegNum, bool isEH) const {
const DwarfLLVMRegPair *M = isEH ? EHL2DwarfRegs : L2DwarfRegs;
unsigned Size = isEH ? EHL2DwarfRegsSize : L2DwarfRegsSize;
diff --git a/llvm/lib/MC/MCStreamer.cpp b/llvm/lib/MC/MCStreamer.cpp
index d0395770ae8b..176d55aa890b 100644
--- a/llvm/lib/MC/MCStreamer.cpp
+++ b/llvm/lib/MC/MCStreamer.cpp
@@ -141,7 +141,7 @@ void MCStreamer::emitIntValue(uint64_t Value, unsigned Size) {
unsigned Index = IsLittleEndian ? 0 : 8 - Size;
emitBytes(StringRef(reinterpret_cast<char *>(&Swapped) + Index, Size));
}
-void MCStreamer::emitIntValue(APInt Value) {
+void MCStreamer::emitIntValue(const APInt &Value) {
if (Value.getNumWords() == 1) {
emitIntValue(Value.getLimitedValue(), Value.getBitWidth() / 8);
return;
diff --git a/llvm/lib/MC/WinCOFFObjectWriter.cpp b/llvm/lib/MC/WinCOFFObjectWriter.cpp
index f265fafa59e7..3c9ff71b6b06 100644
--- a/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -266,7 +266,7 @@ WinCOFFWriter::WinCOFFWriter(WinCOFFObjectWriter &OWriter,
// limited range for the immediate offset (+/- 1 MB); create extra offset
// label symbols with regular intervals to allow referencing a
// non-temporary symbol that is close enough.
- UseOffsetLabels = Header.Machine == COFF::IMAGE_FILE_MACHINE_ARM64;
+ UseOffsetLabels = COFF::isAnyArm64(Header.Machine);
}
COFFSymbol *WinCOFFWriter::createSymbol(StringRef Name) {
@@ -954,7 +954,7 @@ void WinCOFFWriter::recordRelocation(MCAssembler &Asm,
Reloc.Data.Type == COFF::IMAGE_REL_I386_REL32) ||
(Header.Machine == COFF::IMAGE_FILE_MACHINE_ARMNT &&
Reloc.Data.Type == COFF::IMAGE_REL_ARM_REL32) ||
- (Header.Machine == COFF::IMAGE_FILE_MACHINE_ARM64 &&
+ (COFF::isAnyArm64(Header.Machine) &&
Reloc.Data.Type == COFF::IMAGE_REL_ARM64_REL32))
FixedValue += 4;
diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp
index d46bbaf75765..a7c3818d598b 100644
--- a/llvm/lib/MC/XCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/XCOFFObjectWriter.cpp
@@ -736,11 +736,26 @@ void XCOFFObjectWriter::recordRelocation(MCAssembler &Asm,
} else {
// The FixedValue should be the TOC entry offset from the TOC-base plus
// any constant offset value.
- const int64_t TOCEntryOffset = SectionMap[SymASec]->Address -
- TOCCsects.front().Address +
- Target.getConstant();
+ int64_t TOCEntryOffset = SectionMap[SymASec]->Address -
+ TOCCsects.front().Address + Target.getConstant();
+ // For small code model, if the TOCEntryOffset overflows the 16-bit value,
+ // we truncate it back down to 16 bits. The linker will be able to insert
+ // fix-up code when needed.
+ // For non toc-data symbols, we already did the truncation in
+ // PPCAsmPrinter.cpp through setting Target.getConstant() in the
+ // expression above by calling getTOCEntryLoadingExprForXCOFF for the
+ // various TOC PseudoOps.
+ // For toc-data symbols, we were not able to calculate the offset from
+ // the TOC in PPCAsmPrinter.cpp since the TOC has not been finalized at
+ // that point, so we are adjusting it here through
+ // llvm::SignExtend64<16>(TOCEntryOffset);
+ // TODO: Since the time that the handling for offsets over 16-bits was
+ // added in PPCAsmPrinter.cpp using getTOCEntryLoadingExprForXCOFF, the
+ // system assembler and linker have been updated to be able to handle the
+ // overflowing offsets, so we no longer need to keep
+ // getTOCEntryLoadingExprForXCOFF.
if (Type == XCOFF::RelocationType::R_TOC && !isInt<16>(TOCEntryOffset))
- report_fatal_error("TOCEntryOffset overflows in small code model mode");
+ TOCEntryOffset = llvm::SignExtend64<16>(TOCEntryOffset);
FixedValue = TOCEntryOffset;
}
diff --git a/llvm/lib/ObjCopy/ConfigManager.cpp b/llvm/lib/ObjCopy/ConfigManager.cpp
index e46b595a56dc..6442f1b958fb 100644
--- a/llvm/lib/ObjCopy/ConfigManager.cpp
+++ b/llvm/lib/ObjCopy/ConfigManager.cpp
@@ -15,7 +15,7 @@ namespace objcopy {
Expected<const COFFConfig &> ConfigManager::getCOFFConfig() const {
if (!Common.SplitDWO.empty() || !Common.SymbolsPrefix.empty() ||
- !Common.SymbolsPrefixRemove.empty() ||
+ !Common.SymbolsPrefixRemove.empty() || !Common.SymbolsToSkip.empty() ||
!Common.AllocSectionsPrefix.empty() || !Common.KeepSection.empty() ||
!Common.SymbolsToGlobalize.empty() || !Common.SymbolsToKeep.empty() ||
!Common.SymbolsToLocalize.empty() || !Common.SymbolsToWeaken.empty() ||
@@ -34,7 +34,7 @@ Expected<const COFFConfig &> ConfigManager::getCOFFConfig() const {
Expected<const MachOConfig &> ConfigManager::getMachOConfig() const {
if (!Common.SplitDWO.empty() || !Common.SymbolsPrefix.empty() ||
- !Common.SymbolsPrefixRemove.empty() ||
+ !Common.SymbolsPrefixRemove.empty() || !Common.SymbolsToSkip.empty() ||
!Common.AllocSectionsPrefix.empty() || !Common.KeepSection.empty() ||
!Common.SymbolsToGlobalize.empty() || !Common.SymbolsToKeep.empty() ||
!Common.SymbolsToLocalize.empty() ||
@@ -56,7 +56,7 @@ Expected<const MachOConfig &> ConfigManager::getMachOConfig() const {
Expected<const WasmConfig &> ConfigManager::getWasmConfig() const {
if (!Common.AddGnuDebugLink.empty() || Common.ExtractPartition ||
!Common.SplitDWO.empty() || !Common.SymbolsPrefix.empty() ||
- !Common.SymbolsPrefixRemove.empty() ||
+ !Common.SymbolsPrefixRemove.empty() || !Common.SymbolsToSkip.empty() ||
!Common.AllocSectionsPrefix.empty() ||
Common.DiscardMode != DiscardType::None || !Common.SymbolsToAdd.empty() ||
!Common.SymbolsToGlobalize.empty() || !Common.SymbolsToLocalize.empty() ||
@@ -77,7 +77,7 @@ Expected<const WasmConfig &> ConfigManager::getWasmConfig() const {
Expected<const XCOFFConfig &> ConfigManager::getXCOFFConfig() const {
if (!Common.AddGnuDebugLink.empty() || Common.ExtractPartition ||
!Common.SplitDWO.empty() || !Common.SymbolsPrefix.empty() ||
- !Common.SymbolsPrefixRemove.empty() ||
+ !Common.SymbolsPrefixRemove.empty() || !Common.SymbolsToSkip.empty() ||
!Common.AllocSectionsPrefix.empty() ||
Common.DiscardMode != DiscardType::None || !Common.AddSection.empty() ||
!Common.DumpSection.empty() || !Common.SymbolsToAdd.empty() ||
diff --git a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
index e4d6e02f3aa6..205bc1ef5b1a 100644
--- a/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
+++ b/llvm/lib/ObjCopy/ELF/ELFObjcopy.cpp
@@ -291,6 +291,9 @@ static Error updateAndRemoveSymbols(const CommonConfig &Config,
return Error::success();
Obj.SymbolTable->updateSymbols([&](Symbol &Sym) {
+ if (Config.SymbolsToSkip.matches(Sym.Name))
+ return;
+
// Common and undefined symbols don't make sense as local symbols, and can
// even cause crashes if we localize those, so skip them.
if (!Sym.isCommon() && Sym.getShndx() != SHN_UNDEF &&
diff --git a/llvm/lib/ObjCopy/ELF/ELFObject.cpp b/llvm/lib/ObjCopy/ELF/ELFObject.cpp
index 9547cc10d2a0..8b6a0035dae3 100644
--- a/llvm/lib/ObjCopy/ELF/ELFObject.cpp
+++ b/llvm/lib/ObjCopy/ELF/ELFObject.cpp
@@ -33,6 +33,7 @@ using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::objcopy::elf;
using namespace llvm::object;
+using namespace llvm::support;
template <class ELFT> void ELFWriter<ELFT>::writePhdr(const Segment &Seg) {
uint8_t *B = reinterpret_cast<uint8_t *>(Buf->getBufferStart()) +
@@ -1175,9 +1176,9 @@ template <class ELFT>
Error ELFSectionWriter<ELFT>::visit(const GroupSection &Sec) {
ELF::Elf32_Word *Buf =
reinterpret_cast<ELF::Elf32_Word *>(Out.getBufferStart() + Sec.Offset);
- support::endian::write32<ELFT::TargetEndianness>(Buf++, Sec.FlagWord);
+ endian::write32<ELFT::Endianness>(Buf++, Sec.FlagWord);
for (SectionBase *S : Sec.GroupMembers)
- support::endian::write32<ELFT::TargetEndianness>(Buf++, S->Index);
+ endian::write32<ELFT::Endianness>(Buf++, S->Index);
return Error::success();
}
@@ -1522,10 +1523,9 @@ Error ELFBuilder<ELFT>::initGroupSection(GroupSection *GroupSec) {
reinterpret_cast<const ELF::Elf32_Word *>(GroupSec->Contents.data());
const ELF::Elf32_Word *End =
Word + GroupSec->Contents.size() / sizeof(ELF::Elf32_Word);
- GroupSec->setFlagWord(
- support::endian::read32<ELFT::TargetEndianness>(Word++));
+ GroupSec->setFlagWord(endian::read32<ELFT::Endianness>(Word++));
for (; Word != End; ++Word) {
- uint32_t Index = support::endian::read32<ELFT::TargetEndianness>(Word);
+ uint32_t Index = support::endian::read32<ELFT::Endianness>(Word);
Expected<SectionBase *> Sec = SecTable.getSection(
Index, "group member index " + Twine(Index) + " in section '" +
GroupSec->Name + "' is invalid");
@@ -1993,9 +1993,8 @@ template <class ELFT> void ELFWriter<ELFT>::writeEhdr() {
Ehdr.e_ident[EI_MAG2] = 'L';
Ehdr.e_ident[EI_MAG3] = 'F';
Ehdr.e_ident[EI_CLASS] = ELFT::Is64Bits ? ELFCLASS64 : ELFCLASS32;
- Ehdr.e_ident[EI_DATA] = ELFT::TargetEndianness == llvm::endianness::big
- ? ELFDATA2MSB
- : ELFDATA2LSB;
+ Ehdr.e_ident[EI_DATA] =
+ ELFT::Endianness == llvm::endianness::big ? ELFDATA2MSB : ELFDATA2LSB;
Ehdr.e_ident[EI_VERSION] = EV_CURRENT;
Ehdr.e_ident[EI_OSABI] = Obj.OSABI;
Ehdr.e_ident[EI_ABIVERSION] = Obj.ABIVersion;
diff --git a/llvm/lib/Object/COFFImportFile.cpp b/llvm/lib/Object/COFFImportFile.cpp
index 46c8e702581e..48c3ea0ed8f4 100644
--- a/llvm/lib/Object/COFFImportFile.cpp
+++ b/llvm/lib/Object/COFFImportFile.cpp
@@ -84,6 +84,27 @@ StringRef COFFImportFile::getExportName() const {
return name;
}
+Error COFFImportFile::printSymbolName(raw_ostream &OS, DataRefImpl Symb) const {
+ switch (Symb.p) {
+ case ImpSymbol:
+ OS << "__imp_";
+ break;
+ case ECAuxSymbol:
+ OS << "__imp_aux_";
+ break;
+ }
+ const char *Name = Data.getBufferStart() + sizeof(coff_import_header);
+ if (Symb.p != ECThunkSymbol && COFF::isArm64EC(getMachine())) {
+ if (std::optional<std::string> DemangledName =
+ getArm64ECDemangledFunctionName(Name)) {
+ OS << StringRef(*DemangledName);
+ return Error::success();
+ }
+ }
+ OS << StringRef(Name);
+ return Error::success();
+}
+
static uint16_t getImgRelRelocation(MachineTypes Machine) {
switch (Machine) {
default:
@@ -626,8 +647,11 @@ Error writeImportLibrary(StringRef ImportName, StringRef Path,
MachineTypes Machine, bool MinGW,
ArrayRef<COFFShortExport> NativeExports) {
- MachineTypes NativeMachine =
- isArm64EC(Machine) ? IMAGE_FILE_MACHINE_ARM64 : Machine;
+ MachineTypes NativeMachine = Machine;
+ if (isArm64EC(Machine)) {
+ NativeMachine = IMAGE_FILE_MACHINE_ARM64;
+ Machine = IMAGE_FILE_MACHINE_ARM64EC;
+ }
std::vector<NewArchiveMember> Members;
ObjectFactory OF(llvm::sys::path::filename(ImportName), NativeMachine);
@@ -687,12 +711,12 @@ Error writeImportLibrary(StringRef ImportName, StringRef Path,
if (ImportType == IMPORT_CODE && isArm64EC(M)) {
if (std::optional<std::string> MangledName =
getArm64ECMangledFunctionName(Name)) {
- if (ExportName.empty()) {
+ if (!E.Noname && ExportName.empty()) {
NameType = IMPORT_NAME_EXPORTAS;
ExportName.swap(Name);
}
Name = std::move(*MangledName);
- } else if (ExportName.empty()) {
+ } else if (!E.Noname && ExportName.empty()) {
NameType = IMPORT_NAME_EXPORTAS;
ExportName = std::move(*getArm64ECDemangledFunctionName(Name));
}
diff --git a/llvm/lib/Object/DXContainer.cpp b/llvm/lib/Object/DXContainer.cpp
index 935749afe338..3b1a6203a1f8 100644
--- a/llvm/lib/Object/DXContainer.cpp
+++ b/llvm/lib/Object/DXContainer.cpp
@@ -247,7 +247,14 @@ Error DirectX::PSVRuntimeInfo::parse(uint16_t ShaderKind) {
const uint32_t PSVVersion = getVersion();
// Detect the PSVVersion by looking at the size field.
- if (PSVVersion == 2) {
+ if (PSVVersion == 3) {
+ v3::RuntimeInfo Info;
+ if (Error Err = readStruct(PSVInfoData, Current, Info))
+ return Err;
+ if (sys::IsBigEndianHost)
+ Info.swapBytes(ShaderStage);
+ BasicInfo = Info;
+ } else if (PSVVersion == 2) {
v2::RuntimeInfo Info;
if (Error Err = readStruct(PSVInfoData, Current, Info))
return Err;
@@ -425,6 +432,8 @@ Error DirectX::PSVRuntimeInfo::parse(uint16_t ShaderKind) {
}
uint8_t DirectX::PSVRuntimeInfo::getSigInputCount() const {
+ if (const auto *P = std::get_if<dxbc::PSV::v3::RuntimeInfo>(&BasicInfo))
+ return P->SigInputElements;
if (const auto *P = std::get_if<dxbc::PSV::v2::RuntimeInfo>(&BasicInfo))
return P->SigInputElements;
if (const auto *P = std::get_if<dxbc::PSV::v1::RuntimeInfo>(&BasicInfo))
@@ -433,6 +442,8 @@ uint8_t DirectX::PSVRuntimeInfo::getSigInputCount() const {
}
uint8_t DirectX::PSVRuntimeInfo::getSigOutputCount() const {
+ if (const auto *P = std::get_if<dxbc::PSV::v3::RuntimeInfo>(&BasicInfo))
+ return P->SigOutputElements;
if (const auto *P = std::get_if<dxbc::PSV::v2::RuntimeInfo>(&BasicInfo))
return P->SigOutputElements;
if (const auto *P = std::get_if<dxbc::PSV::v1::RuntimeInfo>(&BasicInfo))
@@ -441,6 +452,8 @@ uint8_t DirectX::PSVRuntimeInfo::getSigOutputCount() const {
}
uint8_t DirectX::PSVRuntimeInfo::getSigPatchOrPrimCount() const {
+ if (const auto *P = std::get_if<dxbc::PSV::v3::RuntimeInfo>(&BasicInfo))
+ return P->SigPatchOrPrimElements;
if (const auto *P = std::get_if<dxbc::PSV::v2::RuntimeInfo>(&BasicInfo))
return P->SigPatchOrPrimElements;
if (const auto *P = std::get_if<dxbc::PSV::v1::RuntimeInfo>(&BasicInfo))
diff --git a/llvm/lib/Object/ELF.cpp b/llvm/lib/Object/ELF.cpp
index 55dd0c8e06c0..0ac4e7a57759 100644
--- a/llvm/lib/Object/ELF.cpp
+++ b/llvm/lib/Object/ELF.cpp
@@ -560,7 +560,11 @@ Expected<typename ELFT::DynRange> ELFFile<ELFT>::dynamicEntries() const {
for (const Elf_Phdr &Phdr : *ProgramHeadersOrError) {
if (Phdr.p_type == ELF::PT_DYNAMIC) {
- Dyn = ArrayRef(reinterpret_cast<const Elf_Dyn *>(base() + Phdr.p_offset),
+ const uint8_t *DynOffset = base() + Phdr.p_offset;
+ if (DynOffset > end())
+ return createError(
+ "dynamic section offset past file size: corrupted ELF");
+ Dyn = ArrayRef(reinterpret_cast<const Elf_Dyn *>(DynOffset),
Phdr.p_filesz / sizeof(Elf_Dyn));
break;
}
diff --git a/llvm/lib/Object/OffloadBinary.cpp b/llvm/lib/Object/OffloadBinary.cpp
index 4ab6536dc90b..6e9f8bed513c 100644
--- a/llvm/lib/Object/OffloadBinary.cpp
+++ b/llvm/lib/Object/OffloadBinary.cpp
@@ -189,7 +189,10 @@ OffloadBinary::create(MemoryBufferRef Buf) {
return errorCodeToError(object_error::parse_failed);
if (TheHeader->Size > Buf.getBufferSize() ||
- TheHeader->EntryOffset > TheHeader->Size - sizeof(Entry) ||
+ TheHeader->Size < sizeof(Entry) || TheHeader->Size < sizeof(Header))
+ return errorCodeToError(object_error::unexpected_eof);
+
+ if (TheHeader->EntryOffset > TheHeader->Size - sizeof(Entry) ||
TheHeader->EntrySize > TheHeader->Size - sizeof(Header))
return errorCodeToError(object_error::unexpected_eof);
diff --git a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp
index 09a5e41c7123..f3a518df3175 100644
--- a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp
+++ b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp
@@ -198,8 +198,9 @@ void DXContainerWriter::writeParts(raw_ostream &OS) {
if (!P.Info.has_value())
continue;
mcdxbc::PSVRuntimeInfo PSV;
- memcpy(&PSV.BaseData, &P.Info->Info, sizeof(dxbc::PSV::v2::RuntimeInfo));
+ memcpy(&PSV.BaseData, &P.Info->Info, sizeof(dxbc::PSV::v3::RuntimeInfo));
PSV.Resources = P.Info->Resources;
+ PSV.EntryName = P.Info->EntryName;
for (auto El : P.Info->SigInputElements)
PSV.InputElements.push_back(mcdxbc::PSVSignatureElement{
diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp
index a6871e7855e4..38063670aee6 100644
--- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp
+++ b/llvm/lib/ObjectYAML/DXContainerYAML.cpp
@@ -74,6 +74,16 @@ DXContainerYAML::PSVInfo::PSVInfo(const dxbc::PSV::v2::RuntimeInfo *P)
memcpy(&Info, P, sizeof(dxbc::PSV::v2::RuntimeInfo));
}
+DXContainerYAML::PSVInfo::PSVInfo(const dxbc::PSV::v3::RuntimeInfo *P,
+ StringRef StringTable)
+ : Version(3),
+ EntryName(StringTable.substr(P->EntryNameOffset,
+ StringTable.find('\0', P->EntryNameOffset) -
+ P->EntryNameOffset)) {
+ memset(&Info, 0, sizeof(Info));
+ memcpy(&Info, P, sizeof(dxbc::PSV::v3::RuntimeInfo));
+}
+
namespace yaml {
void MappingTraits<DXContainerYAML::VersionTuple>::mapping(
@@ -348,6 +358,11 @@ void DXContainerYAML::PSVInfo::mapInfoForVersion(yaml::IO &IO) {
IO.mapRequired("NumThreadsX", Info.NumThreadsX);
IO.mapRequired("NumThreadsY", Info.NumThreadsY);
IO.mapRequired("NumThreadsZ", Info.NumThreadsZ);
+
+ if (Version == 2)
+ return;
+
+ IO.mapRequired("EntryName", EntryName);
}
} // namespace llvm
diff --git a/llvm/lib/ObjectYAML/ELFEmitter.cpp b/llvm/lib/ObjectYAML/ELFEmitter.cpp
index 58a725f8d877..b7118a543fae 100644
--- a/llvm/lib/ObjectYAML/ELFEmitter.cpp
+++ b/llvm/lib/ObjectYAML/ELFEmitter.cpp
@@ -1314,7 +1314,7 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
if (!ELFT::Is64Bits && E > UINT32_MAX)
reportError(Section.Name + ": the value is too large for 32-bits: 0x" +
Twine::utohexstr(E));
- CBA.write<uintX_t>(E, ELFT::TargetEndianness);
+ CBA.write<uintX_t>(E, ELFT::Endianness);
}
SHeader.sh_size = sizeof(uintX_t) * Section.Entries->size();
@@ -1333,7 +1333,7 @@ void ELFState<ELFT>::writeSectionContent(
return;
for (uint32_t E : *Shndx.Entries)
- CBA.write<uint32_t>(E, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(E, ELFT::Endianness);
SHeader.sh_size = Shndx.Entries->size() * SHeader.sh_entsize;
}
@@ -1357,7 +1357,7 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
SectionIndex = llvm::ELF::GRP_COMDAT;
else
SectionIndex = toSectionIndex(Member.sectionNameOrType, Section.Name);
- CBA.write<uint32_t>(SectionIndex, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(SectionIndex, ELFT::Endianness);
}
SHeader.sh_size = SHeader.sh_entsize * Section.Members->size();
}
@@ -1370,7 +1370,7 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
return;
for (uint16_t Version : *Section.Entries)
- CBA.write<uint16_t>(Version, ELFT::TargetEndianness);
+ CBA.write<uint16_t>(Version, ELFT::Endianness);
SHeader.sh_size = Section.Entries->size() * SHeader.sh_entsize;
}
@@ -1382,7 +1382,7 @@ void ELFState<ELFT>::writeSectionContent(
return;
for (const ELFYAML::StackSizeEntry &E : *Section.Entries) {
- CBA.write<uintX_t>(E.Address, ELFT::TargetEndianness);
+ CBA.write<uintX_t>(E.Address, ELFT::Endianness);
SHeader.sh_size += sizeof(uintX_t) + CBA.writeULEB128(E.Size);
}
}
@@ -1444,7 +1444,7 @@ void ELFState<ELFT>::writeSectionContent(
uint64_t TotalNumBlocks = 0;
for (const ELFYAML::BBAddrMapEntry::BBRangeEntry &BBR : *E.BBRanges) {
// Write the base address of the range.
- CBA.write<uintX_t>(BBR.BaseAddress, ELFT::TargetEndianness);
+ CBA.write<uintX_t>(BBR.BaseAddress, ELFT::Endianness);
// Write number of BBEntries (number of basic blocks in this basic block
// range). This is overridden by the 'NumBlocks' YAML field when
// specified.
@@ -1558,7 +1558,7 @@ void ELFState<ELFT>::writeSectionContent(
return;
for (const ELFYAML::CallGraphEntryWeight &E : *Section.Entries) {
- CBA.write<uint64_t>(E.Weight, ELFT::TargetEndianness);
+ CBA.write<uint64_t>(E.Weight, ELFT::Endianness);
SHeader.sh_size += sizeof(object::Elf_CGProfile_Impl<ELFT>);
}
}
@@ -1572,15 +1572,15 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
CBA.write<uint32_t>(
Section.NBucket.value_or(llvm::yaml::Hex64(Section.Bucket->size())),
- ELFT::TargetEndianness);
+ ELFT::Endianness);
CBA.write<uint32_t>(
Section.NChain.value_or(llvm::yaml::Hex64(Section.Chain->size())),
- ELFT::TargetEndianness);
+ ELFT::Endianness);
for (uint32_t Val : *Section.Bucket)
- CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Val, ELFT::Endianness);
for (uint32_t Val : *Section.Chain)
- CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Val, ELFT::Endianness);
SHeader.sh_size = (2 + Section.Bucket->size() + Section.Chain->size()) * 4;
}
@@ -1687,8 +1687,8 @@ void ELFState<ELFT>::writeSectionContent(
return;
for (const ELFYAML::ARMIndexTableEntry &E : *Section.Entries) {
- CBA.write<uint32_t>(E.Offset, ELFT::TargetEndianness);
- CBA.write<uint32_t>(E.Value, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(E.Offset, ELFT::Endianness);
+ CBA.write<uint32_t>(E.Value, ELFT::Endianness);
}
SHeader.sh_size = Section.Entries->size() * 8;
}
@@ -1729,8 +1729,8 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
return;
for (const ELFYAML::DynamicEntry &DE : *Section.Entries) {
- CBA.write<uintX_t>(DE.Tag, ELFT::TargetEndianness);
- CBA.write<uintX_t>(DE.Val, ELFT::TargetEndianness);
+ CBA.write<uintX_t>(DE.Tag, ELFT::Endianness);
+ CBA.write<uintX_t>(DE.Val, ELFT::Endianness);
}
SHeader.sh_size = 2 * sizeof(uintX_t) * Section.Entries->size();
}
@@ -1758,18 +1758,18 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
for (const ELFYAML::NoteEntry &NE : *Section.Notes) {
// Write name size.
if (NE.Name.empty())
- CBA.write<uint32_t>(0, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(0, ELFT::Endianness);
else
- CBA.write<uint32_t>(NE.Name.size() + 1, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(NE.Name.size() + 1, ELFT::Endianness);
// Write description size.
if (NE.Desc.binary_size() == 0)
- CBA.write<uint32_t>(0, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(0, ELFT::Endianness);
else
- CBA.write<uint32_t>(NE.Desc.binary_size(), ELFT::TargetEndianness);
+ CBA.write<uint32_t>(NE.Desc.binary_size(), ELFT::Endianness);
// Write type.
- CBA.write<uint32_t>(NE.Type, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(NE.Type, ELFT::Endianness);
// Write name, null terminator and padding.
if (!NE.Name.empty()) {
@@ -1803,35 +1803,35 @@ void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
// be used to override this field, which is useful for producing broken
// objects.
if (Section.Header->NBuckets)
- CBA.write<uint32_t>(*Section.Header->NBuckets, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(*Section.Header->NBuckets, ELFT::Endianness);
else
- CBA.write<uint32_t>(Section.HashBuckets->size(), ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Section.HashBuckets->size(), ELFT::Endianness);
// Write the index of the first symbol in the dynamic symbol table accessible
// via the hash table.
- CBA.write<uint32_t>(Section.Header->SymNdx, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Section.Header->SymNdx, ELFT::Endianness);
// Write the number of words in the Bloom filter. As above, the "MaskWords"
// property can be used to set this field to any value.
if (Section.Header->MaskWords)
- CBA.write<uint32_t>(*Section.Header->MaskWords, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(*Section.Header->MaskWords, ELFT::Endianness);
else
- CBA.write<uint32_t>(Section.BloomFilter->size(), ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Section.BloomFilter->size(), ELFT::Endianness);
// Write the shift constant used by the Bloom filter.
- CBA.write<uint32_t>(Section.Header->Shift2, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Section.Header->Shift2, ELFT::Endianness);
// We've finished writing the header. Now write the Bloom filter.
for (llvm::yaml::Hex64 Val : *Section.BloomFilter)
- CBA.write<uintX_t>(Val, ELFT::TargetEndianness);
+ CBA.write<uintX_t>(Val, ELFT::Endianness);
// Write an array of hash buckets.
for (llvm::yaml::Hex32 Val : *Section.HashBuckets)
- CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Val, ELFT::Endianness);
// Write an array of hash values.
for (llvm::yaml::Hex32 Val : *Section.HashValues)
- CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(Val, ELFT::Endianness);
SHeader.sh_size = 16 /*Header size*/ +
Section.BloomFilter->size() * sizeof(typename ELFT::uint) +
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 9d98ae7dde52..57975e34d426 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -365,6 +365,33 @@ public:
static StringRef name() { return "TriggerVerifierErrorPass"; }
};
+// A pass requires all MachineFunctionProperties.
+// DO NOT USE THIS EXCEPT FOR TESTING!
+class RequireAllMachineFunctionPropertiesPass
+ : public MachinePassInfoMixin<RequireAllMachineFunctionPropertiesPass> {
+public:
+ PreservedAnalyses run(MachineFunction &, MachineFunctionAnalysisManager &) {
+ return PreservedAnalyses::none();
+ }
+
+ static MachineFunctionProperties getRequiredProperties() {
+ MachineFunctionProperties MFProps;
+ MFProps.set(MachineFunctionProperties::Property::FailedISel);
+ MFProps.set(MachineFunctionProperties::Property::FailsVerification);
+ MFProps.set(MachineFunctionProperties::Property::IsSSA);
+ MFProps.set(MachineFunctionProperties::Property::Legalized);
+ MFProps.set(MachineFunctionProperties::Property::NoPHIs);
+ MFProps.set(MachineFunctionProperties::Property::NoVRegs);
+ MFProps.set(MachineFunctionProperties::Property::RegBankSelected);
+ MFProps.set(MachineFunctionProperties::Property::Selected);
+ MFProps.set(MachineFunctionProperties::Property::TiedOpsRewritten);
+ MFProps.set(MachineFunctionProperties::Property::TracksDebugUserValues);
+ MFProps.set(MachineFunctionProperties::Property::TracksLiveness);
+ return MFProps;
+ }
+ static StringRef name() { return "RequireAllMachineFunctionPropertiesPass"; }
+};
+
} // namespace
PassBuilder::PassBuilder(TargetMachine *TM, PipelineTuningOptions PTO,
@@ -514,6 +541,26 @@ static std::optional<OptimizationLevel> parseOptLevel(StringRef S) {
.Default(std::nullopt);
}
+Expected<bool> PassBuilder::parseSinglePassOption(StringRef Params,
+ StringRef OptionName,
+ StringRef PassName) {
+ bool Result = false;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ if (ParamName == OptionName) {
+ Result = true;
+ } else {
+ return make_error<StringError>(
+ formatv("invalid {1} pass parameter '{0}' ", ParamName, PassName)
+ .str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
+
namespace {
/// Parser of parameters for HardwareLoops pass.
@@ -600,44 +647,29 @@ Expected<LoopUnrollOptions> parseLoopUnrollOptions(StringRef Params) {
return UnrollOpts;
}
-Expected<bool> parseSinglePassOption(StringRef Params, StringRef OptionName,
- StringRef PassName) {
- bool Result = false;
- while (!Params.empty()) {
- StringRef ParamName;
- std::tie(ParamName, Params) = Params.split(';');
-
- if (ParamName == OptionName) {
- Result = true;
- } else {
- return make_error<StringError>(
- formatv("invalid {1} pass parameter '{0}' ", ParamName, PassName)
- .str(),
- inconvertibleErrorCode());
- }
- }
- return Result;
-}
-
Expected<bool> parseGlobalDCEPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "vfe-linkage-unit-visibility", "GlobalDCE");
+ return PassBuilder::parseSinglePassOption(
+ Params, "vfe-linkage-unit-visibility", "GlobalDCE");
}
Expected<bool> parseCGProfilePassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "in-lto-post-link", "CGProfile");
+ return PassBuilder::parseSinglePassOption(Params, "in-lto-post-link",
+ "CGProfile");
}
Expected<bool> parseInlinerPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "only-mandatory", "InlinerPass");
+ return PassBuilder::parseSinglePassOption(Params, "only-mandatory",
+ "InlinerPass");
}
Expected<bool> parseCoroSplitPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "reuse-storage", "CoroSplitPass");
+ return PassBuilder::parseSinglePassOption(Params, "reuse-storage",
+ "CoroSplitPass");
}
Expected<bool> parsePostOrderFunctionAttrsPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "skip-non-recursive-function-attrs",
- "PostOrderFunctionAttrs");
+ return PassBuilder::parseSinglePassOption(
+ Params, "skip-non-recursive-function-attrs", "PostOrderFunctionAttrs");
}
Expected<CFGuardPass::Mechanism> parseCFGuardPassOptions(StringRef Params) {
@@ -661,19 +693,21 @@ Expected<CFGuardPass::Mechanism> parseCFGuardPassOptions(StringRef Params) {
}
Expected<bool> parseEarlyCSEPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "memssa", "EarlyCSE");
+ return PassBuilder::parseSinglePassOption(Params, "memssa", "EarlyCSE");
}
Expected<bool> parseEntryExitInstrumenterPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "post-inline", "EntryExitInstrumenter");
+ return PassBuilder::parseSinglePassOption(Params, "post-inline",
+ "EntryExitInstrumenter");
}
Expected<bool> parseLoopExtractorPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "single", "LoopExtractor");
+ return PassBuilder::parseSinglePassOption(Params, "single", "LoopExtractor");
}
Expected<bool> parseLowerMatrixIntrinsicsPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "minimal", "LowerMatrixIntrinsics");
+ return PassBuilder::parseSinglePassOption(Params, "minimal",
+ "LowerMatrixIntrinsics");
}
Expected<AddressSanitizerOptions> parseASanPassOptions(StringRef Params) {
@@ -1013,13 +1047,13 @@ parseStackLifetimeOptions(StringRef Params) {
}
Expected<bool> parseDependenceAnalysisPrinterOptions(StringRef Params) {
- return parseSinglePassOption(Params, "normalized-results",
- "DependenceAnalysisPrinter");
+ return PassBuilder::parseSinglePassOption(Params, "normalized-results",
+ "DependenceAnalysisPrinter");
}
Expected<bool> parseSeparateConstOffsetFromGEPPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "lower-gep",
- "SeparateConstOffsetFromGEP");
+ return PassBuilder::parseSinglePassOption(Params, "lower-gep",
+ "SeparateConstOffsetFromGEP");
}
Expected<OptimizationLevel>
@@ -1035,13 +1069,13 @@ parseFunctionSimplificationPipelineOptions(StringRef Params) {
}
Expected<bool> parseMemorySSAPrinterPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "no-ensure-optimized-uses",
- "MemorySSAPrinterPass");
+ return PassBuilder::parseSinglePassOption(Params, "no-ensure-optimized-uses",
+ "MemorySSAPrinterPass");
}
Expected<bool> parseSpeculativeExecutionPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "only-if-divergent-target",
- "SpeculativeExecutionPass");
+ return PassBuilder::parseSinglePassOption(Params, "only-if-divergent-target",
+ "SpeculativeExecutionPass");
}
Expected<std::string> parseMemProfUsePassOptions(StringRef Params) {
@@ -1062,13 +1096,13 @@ Expected<std::string> parseMemProfUsePassOptions(StringRef Params) {
}
Expected<bool> parseStructuralHashPrinterPassOptions(StringRef Params) {
- return parseSinglePassOption(Params, "detailed",
- "StructuralHashPrinterPass");
+ return PassBuilder::parseSinglePassOption(Params, "detailed",
+ "StructuralHashPrinterPass");
}
Expected<bool> parseWinEHPrepareOptions(StringRef Params) {
- return parseSinglePassOption(Params, "demote-catchswitch-only",
- "WinEHPreparePass");
+ return PassBuilder::parseSinglePassOption(Params, "demote-catchswitch-only",
+ "WinEHPreparePass");
}
Expected<GlobalMergeOptions> parseGlobalMergeOptions(StringRef Params) {
diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
index d32846051083..445b48067a97 100644
--- a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
+++ b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
@@ -894,31 +894,34 @@ static Error readCoverageMappingData(
Expected<std::unique_ptr<BinaryCoverageReader>>
BinaryCoverageReader::createCoverageReaderFromBuffer(
StringRef Coverage, FuncRecordsStorage &&FuncRecords,
- InstrProfSymtab &&ProfileNames, uint8_t BytesInAddress,
+ std::unique_ptr<InstrProfSymtab> ProfileNamesPtr, uint8_t BytesInAddress,
llvm::endianness Endian, StringRef CompilationDir) {
- std::unique_ptr<BinaryCoverageReader> Reader(
- new BinaryCoverageReader(std::move(FuncRecords)));
- Reader->ProfileNames = std::move(ProfileNames);
+ if (ProfileNamesPtr == nullptr)
+ return make_error<CoverageMapError>(coveragemap_error::malformed,
+ "Caller must provide ProfileNames");
+ std::unique_ptr<BinaryCoverageReader> Reader(new BinaryCoverageReader(
+ std::move(ProfileNamesPtr), std::move(FuncRecords)));
+ InstrProfSymtab &ProfileNames = *Reader->ProfileNames;
StringRef FuncRecordsRef = Reader->FuncRecords->getBuffer();
if (BytesInAddress == 4 && Endian == llvm::endianness::little) {
if (Error E = readCoverageMappingData<uint32_t, llvm::endianness::little>(
- Reader->ProfileNames, Coverage, FuncRecordsRef,
- Reader->MappingRecords, CompilationDir, Reader->Filenames))
+ ProfileNames, Coverage, FuncRecordsRef, Reader->MappingRecords,
+ CompilationDir, Reader->Filenames))
return std::move(E);
} else if (BytesInAddress == 4 && Endian == llvm::endianness::big) {
if (Error E = readCoverageMappingData<uint32_t, llvm::endianness::big>(
- Reader->ProfileNames, Coverage, FuncRecordsRef,
- Reader->MappingRecords, CompilationDir, Reader->Filenames))
+ ProfileNames, Coverage, FuncRecordsRef, Reader->MappingRecords,
+ CompilationDir, Reader->Filenames))
return std::move(E);
} else if (BytesInAddress == 8 && Endian == llvm::endianness::little) {
if (Error E = readCoverageMappingData<uint64_t, llvm::endianness::little>(
- Reader->ProfileNames, Coverage, FuncRecordsRef,
- Reader->MappingRecords, CompilationDir, Reader->Filenames))
+ ProfileNames, Coverage, FuncRecordsRef, Reader->MappingRecords,
+ CompilationDir, Reader->Filenames))
return std::move(E);
} else if (BytesInAddress == 8 && Endian == llvm::endianness::big) {
if (Error E = readCoverageMappingData<uint64_t, llvm::endianness::big>(
- Reader->ProfileNames, Coverage, FuncRecordsRef,
- Reader->MappingRecords, CompilationDir, Reader->Filenames))
+ ProfileNames, Coverage, FuncRecordsRef, Reader->MappingRecords,
+ CompilationDir, Reader->Filenames))
return std::move(E);
} else
return make_error<CoverageMapError>(
@@ -963,8 +966,8 @@ loadTestingFormat(StringRef Data, StringRef CompilationDir) {
if (Data.size() < ProfileNamesSize)
return make_error<CoverageMapError>(coveragemap_error::malformed,
"the size of ProfileNames is too big");
- InstrProfSymtab ProfileNames;
- if (Error E = ProfileNames.create(Data.substr(0, ProfileNamesSize), Address))
+ auto ProfileNames = std::make_unique<InstrProfSymtab>();
+ if (Error E = ProfileNames->create(Data.substr(0, ProfileNamesSize), Address))
return std::move(E);
Data = Data.substr(ProfileNamesSize);
@@ -1099,7 +1102,7 @@ loadBinaryFormat(std::unique_ptr<Binary> Bin, StringRef Arch,
OF->isLittleEndian() ? llvm::endianness::little : llvm::endianness::big;
// Look for the sections that we are interested in.
- InstrProfSymtab ProfileNames;
+ auto ProfileNames = std::make_unique<InstrProfSymtab>();
std::vector<SectionRef> NamesSectionRefs;
// If IPSK_name is not found, fallback to search for IPK_covname, which is
// used when binary correlation is enabled.
@@ -1116,7 +1119,7 @@ loadBinaryFormat(std::unique_ptr<Binary> Bin, StringRef Arch,
return make_error<CoverageMapError>(
coveragemap_error::malformed,
"the size of coverage mapping section is not one");
- if (Error E = ProfileNames.create(NamesSectionRefs.back()))
+ if (Error E = ProfileNames->create(NamesSectionRefs.back()))
return std::move(E);
auto CoverageSection = lookupSections(*OF, IPSK_covmap);
diff --git a/llvm/lib/ProfileData/InstrProfReader.cpp b/llvm/lib/ProfileData/InstrProfReader.cpp
index 31b742bca14d..a275d4852c15 100644
--- a/llvm/lib/ProfileData/InstrProfReader.cpp
+++ b/llvm/lib/ProfileData/InstrProfReader.cpp
@@ -24,6 +24,7 @@
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/VirtualFileSystem.h"
@@ -1230,10 +1231,39 @@ Error IndexedInstrProfReader::readHeader() {
Header->MemProfOffset);
const unsigned char *Ptr = Start + MemProfOffset;
- // The value returned from RecordTableGenerator.Emit.
- const uint64_t RecordTableOffset =
+
+ // Read the first 64-bit word, which may be RecordTableOffset in
+ // memprof::MemProfVersion0 or the MemProf version number in
+ // memprof::MemProfVersion1.
+ const uint64_t FirstWord =
support::endian::readNext<uint64_t, llvm::endianness::little,
unaligned>(Ptr);
+
+ memprof::IndexedVersion Version = memprof::Version0;
+ if (FirstWord == memprof::Version1) {
+ // Everything is good. We can proceed to deserialize the rest.
+ Version = memprof::Version1;
+ } else if (FirstWord >= 24) {
+ // This is a heuristic/hack to detect memprof::MemProfVersion0,
+ // which does not have a version field in the header.
+ // In memprof::MemProfVersion0, FirstWord will be RecordTableOffset,
+ // which should be at least 24 because of the MemProf header size.
+ Version = memprof::Version0;
+ } else {
+ return make_error<InstrProfError>(
+ instrprof_error::unsupported_version,
+ formatv("MemProf version {} not supported; "
+ "requires version between {} and {}, inclusive",
+ FirstWord, memprof::MinimumSupportedVersion,
+ memprof::MaximumSupportedVersion));
+ }
+
+ // The value returned from RecordTableGenerator.Emit.
+ const uint64_t RecordTableOffset =
+ Version == memprof::Version0
+ ? FirstWord
+ : support::endian::readNext<uint64_t, llvm::endianness::little,
+ unaligned>(Ptr);
// The offset in the stream right before invoking
// FrameTableGenerator.Emit.
const uint64_t FramePayloadOffset =
@@ -1261,6 +1291,14 @@ Error IndexedInstrProfReader::readHeader() {
/*Buckets=*/Start + FrameTableOffset,
/*Payload=*/Start + FramePayloadOffset,
/*Base=*/Start, memprof::FrameLookupTrait()));
+
+#ifdef EXPENSIVE_CHECKS
+ // Go through all the records and verify that CSId has been correctly
+ // populated. Do this only under EXPENSIVE_CHECKS. Otherwise, we
+ // would defeat the purpose of OnDiskIterableChainedHashTable.
+ for (const auto &Record : MemProfRecordTable->data())
+ verifyIndexedMemProfRecord(Record);
+#endif
}
// BinaryIdOffset field in the header is only valid when the format version
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index d9fe88a00bdf..8f067f8d05e2 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -22,6 +22,7 @@
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
@@ -179,14 +180,15 @@ public:
} // end namespace llvm
-InstrProfWriter::InstrProfWriter(bool Sparse,
- uint64_t TemporalProfTraceReservoirSize,
- uint64_t MaxTemporalProfTraceLength,
- bool WritePrevVersion)
+InstrProfWriter::InstrProfWriter(
+ bool Sparse, uint64_t TemporalProfTraceReservoirSize,
+ uint64_t MaxTemporalProfTraceLength, bool WritePrevVersion,
+ memprof::IndexedVersion MemProfVersionRequested)
: Sparse(Sparse), MaxTemporalProfTraceLength(MaxTemporalProfTraceLength),
TemporalProfTraceReservoirSize(TemporalProfTraceReservoirSize),
InfoObj(new InstrProfRecordWriterTrait()),
- WritePrevVersion(WritePrevVersion) {}
+ WritePrevVersion(WritePrevVersion),
+ MemProfVersionRequested(MemProfVersionRequested) {}
InstrProfWriter::~InstrProfWriter() { delete InfoObj; }
@@ -516,6 +518,7 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
// Write the MemProf profile data if we have it. This includes a simple schema
// with the format described below followed by the hashtable:
+ // uint64_t Version
// uint64_t RecordTableOffset = RecordTableGenerator.Emit
// uint64_t FramePayloadOffset = Stream offset before emitting the frame table
// uint64_t FrameTableOffset = FrameTableGenerator.Emit
@@ -528,7 +531,21 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
// OnDiskChainedHashTable MemProfFrameData
uint64_t MemProfSectionStart = 0;
if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
+ if (MemProfVersionRequested < memprof::MinimumSupportedVersion ||
+ MemProfVersionRequested > memprof::MaximumSupportedVersion) {
+ return make_error<InstrProfError>(
+ instrprof_error::unsupported_version,
+ formatv("MemProf version {} not supported; "
+ "requires version between {} and {}, inclusive",
+ MemProfVersionRequested, memprof::MinimumSupportedVersion,
+ memprof::MaximumSupportedVersion));
+ }
+
MemProfSectionStart = OS.tell();
+
+ if (MemProfVersionRequested >= memprof::Version1)
+ OS.write(MemProfVersionRequested);
+
OS.write(0ULL); // Reserve space for the memprof record table offset.
OS.write(0ULL); // Reserve space for the memprof frame payload offset.
OS.write(0ULL); // Reserve space for the memprof frame table offset.
@@ -570,12 +587,13 @@ Error InstrProfWriter::writeImpl(ProfOStream &OS) {
uint64_t FrameTableOffset = FrameTableGenerator.Emit(OS.OS, *FrameWriter);
- PatchItem PatchItems[] = {
- {MemProfSectionStart, &RecordTableOffset, 1},
- {MemProfSectionStart + sizeof(uint64_t), &FramePayloadOffset, 1},
- {MemProfSectionStart + 2 * sizeof(uint64_t), &FrameTableOffset, 1},
- };
- OS.patch(PatchItems);
+ uint64_t Header[] = {RecordTableOffset, FramePayloadOffset,
+ FrameTableOffset};
+ uint64_t HeaderUpdatePos = MemProfSectionStart;
+ if (MemProfVersionRequested >= memprof::Version1)
+ // The updates go just after the version field.
+ HeaderUpdatePos += sizeof(uint64_t);
+ OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
}
// BinaryIdSection has two parts:
diff --git a/llvm/lib/ProfileData/MemProf.cpp b/llvm/lib/ProfileData/MemProf.cpp
index 0461f0e9f840..6c419811d59e 100644
--- a/llvm/lib/ProfileData/MemProf.cpp
+++ b/llvm/lib/ProfileData/MemProf.cpp
@@ -3,8 +3,10 @@
#include "llvm/IR/Function.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Support/BLAKE3.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/HashBuilder.h"
namespace llvm {
namespace memprof {
@@ -51,6 +53,7 @@ IndexedMemProfRecord::deserialize(const MemProfSchema &Schema,
endian::readNext<FrameId, llvm::endianness::little, unaligned>(Ptr);
Node.CallStack.push_back(Id);
}
+ Node.CSId = hashCallStack(Node.CallStack);
Node.Info.deserialize(Schema, Ptr);
Ptr += PortableMemInfoBlock::serializedSize();
Record.AllocSites.push_back(Node);
@@ -117,5 +120,32 @@ Expected<MemProfSchema> readMemProfSchema(const unsigned char *&Buffer) {
return Result;
}
+CallStackId hashCallStack(ArrayRef<FrameId> CS) {
+ llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
+ HashBuilder;
+ for (FrameId F : CS)
+ HashBuilder.add(F);
+ llvm::BLAKE3Result<8> Hash = HashBuilder.final();
+ CallStackId CSId;
+ std::memcpy(&CSId, Hash.data(), sizeof(Hash));
+ return CSId;
+}
+
+void verifyIndexedMemProfRecord(const IndexedMemProfRecord &Record) {
+ for (const auto &AS : Record.AllocSites) {
+ assert(AS.CSId == hashCallStack(AS.CallStack));
+ (void)AS;
+ }
+}
+
+void verifyFunctionProfileData(
+ const llvm::MapVector<GlobalValue::GUID, IndexedMemProfRecord>
+ &FunctionProfileData) {
+ for (const auto &[GUID, Record] : FunctionProfileData) {
+ (void)GUID;
+ verifyIndexedMemProfRecord(Record);
+ }
+}
+
} // namespace memprof
} // namespace llvm
diff --git a/llvm/lib/ProfileData/RawMemProfReader.cpp b/llvm/lib/ProfileData/RawMemProfReader.cpp
index 0e2b8668bab7..5dc1ff897815 100644
--- a/llvm/lib/ProfileData/RawMemProfReader.cpp
+++ b/llvm/lib/ProfileData/RawMemProfReader.cpp
@@ -127,6 +127,7 @@ CallStackMap readStackInfo(const char *Ptr) {
endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
SmallVector<uint64_t> CallStack;
+ CallStack.reserve(NumPCs);
for (uint64_t J = 0; J < NumPCs; J++) {
CallStack.push_back(
endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr));
@@ -445,6 +446,8 @@ Error RawMemProfReader::mapRawProfileToRecords() {
Callstack.append(Frames.begin(), Frames.end());
}
+ CallStackId CSId = hashCallStack(Callstack);
+
// We attach the memprof record to each function bottom-up including the
// first non-inline frame.
for (size_t I = 0; /*Break out using the condition below*/; I++) {
@@ -452,7 +455,7 @@ Error RawMemProfReader::mapRawProfileToRecords() {
auto Result =
FunctionProfileData.insert({F.Function, IndexedMemProfRecord()});
IndexedMemProfRecord &Record = Result.first->second;
- Record.AllocSites.emplace_back(Callstack, Entry.second);
+ Record.AllocSites.emplace_back(Callstack, CSId, Entry.second);
if (!F.IsInlineFrame)
break;
@@ -470,6 +473,8 @@ Error RawMemProfReader::mapRawProfileToRecords() {
}
}
+ verifyFunctionProfileData(FunctionProfileData);
+
return Error::success();
}
diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp
index f4254b50d26c..141f0034a23f 100644
--- a/llvm/lib/Support/BalancedPartitioning.cpp
+++ b/llvm/lib/Support/BalancedPartitioning.cpp
@@ -136,7 +136,7 @@ void BalancedPartitioning::bisect(const FunctionNodeRange Nodes,
// Split into two and assign to the left and right buckets
split(Nodes, LeftBucket);
- runIterations(Nodes, RecDepth, LeftBucket, RightBucket, RNG);
+ runIterations(Nodes, LeftBucket, RightBucket, RNG);
// Split nodes wrt the resulting buckets
auto NodesMid =
@@ -163,7 +163,7 @@ void BalancedPartitioning::bisect(const FunctionNodeRange Nodes,
}
void BalancedPartitioning::runIterations(const FunctionNodeRange Nodes,
- unsigned RecDepth, unsigned LeftBucket,
+ unsigned LeftBucket,
unsigned RightBucket,
std::mt19937 &RNG) const {
unsigned NumNodes = std::distance(Nodes.begin(), Nodes.end());
diff --git a/llvm/lib/Support/FormattedStream.cpp b/llvm/lib/Support/FormattedStream.cpp
index c0d284350995..c50530e76efc 100644
--- a/llvm/lib/Support/FormattedStream.cpp
+++ b/llvm/lib/Support/FormattedStream.cpp
@@ -94,6 +94,9 @@ void formatted_raw_ostream::UpdatePosition(const char *Ptr, size_t Size) {
/// ComputePosition - Examine the current output and update line and column
/// counts.
void formatted_raw_ostream::ComputePosition(const char *Ptr, size_t Size) {
+ if (DisableScan)
+ return;
+
// If our previous scan pointer is inside the buffer, assume we already
// scanned those bytes. This depends on raw_ostream to not change our buffer
// in unexpected ways.
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index 39235ace4724..67e6e5b962b1 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -36,6 +36,11 @@ struct RISCVSupportedExtension {
}
};
+struct RISCVProfile {
+ StringLiteral Name;
+ StringLiteral MArch;
+};
+
} // end anonymous namespace
static constexpr StringLiteral AllStdExts = "mafdqlcbkjtpvnh";
@@ -244,6 +249,42 @@ static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
};
// clang-format on
+static constexpr RISCVProfile SupportedProfiles[] = {
+ {"rvi20u32", "rv32i"},
+ {"rvi20u64", "rv64i"},
+ {"rva20u64", "rv64imafdc_ziccamoa_ziccif_zicclsm_ziccrse_zicntr_za128rs"},
+ {"rva20s64", "rv64imafdc_ziccamoa_ziccif_zicclsm_ziccrse_zicntr_zifencei_"
+ "za128rs_ssccptr_sstvala_sstvecd_svade_svbare"},
+ {"rva22u64",
+ "rv64imafdc_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_ziccrse_"
+ "zicntr_zihintpause_zihpm_za64rs_zfhmin_zba_zbb_zbs_zkt"},
+ {"rva22s64",
+ "rv64imafdc_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_ziccrse_"
+ "zicntr_zifencei_zihintpause_zihpm_za64rs_zfhmin_zba_zbb_zbs_zkt_ssccptr_"
+ "sscounterenw_sstvala_sstvecd_svade_svbare_svinval_svpbmt"},
+ {"rva23u64",
+ "rv64imafdcv_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_ziccrse_"
+ "zicntr_zicond_zihintntl_zihintpause_zihpm_zimop0p1_za64rs_zawrs_zfa_"
+ "zfhmin_zcb_zcmop0p2_zba_zbb_zbs_zkt_zvbb_zvfhmin_zvkt"},
+ {"rva23s64",
+ "rv64imafdcvh_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_ziccrse_"
+ "zicntr_zicond_zifencei_zihintntl_zihintpause_zihpm_zimop0p1_za64rs_zawrs_"
+ "zfa_zfhmin_zcb_zcmop0p2_zba_zbb_zbs_zkt_zvbb_zvfhmin_zvkt_shcounterenw_"
+ "shgatpa_shtvala_shvsatpa_shvstvala_shvstvecd_ssccptr_sscofpmf_"
+ "sscounterenw_ssnpm0p8_ssstateen_sstc_sstvala_sstvecd_ssu64xl_svade_"
+ "svbare_svinval_svnapot_svpbmt"},
+ {"rvb23u64", "rv64imafdc_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_"
+ "zicclsm_ziccrse_zicntr_zicond_zihintntl_zihintpause_zihpm_"
+ "zimop0p1_za64rs_zawrs_zfa_zcb_zcmop0p2_zba_zbb_zbs_zkt"},
+ {"rvb23s64",
+ "rv64imafdc_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_ziccrse_"
+ "zicntr_zicond_zifencei_zihintntl_zihintpause_zihpm_zimop0p1_za64rs_zawrs_"
+ "zfa_zcb_zcmop0p2_zba_zbb_zbs_zkt_ssccptr_sscofpmf_sscounterenw_sstc_"
+ "sstvala_sstvecd_ssu64xl_svade_svbare_svinval_svnapot_svpbmt"},
+ {"rvm23u32", "rv32im_zicbop_zicond_zicsr_zihintntl_zihintpause_zimop0p1_"
+ "zca_zcb_zce_zcmop0p2_zcmp_zcmt_zba_zbb_zbs"},
+};
+
static void verifyTables() {
#ifndef NDEBUG
static std::atomic<bool> TableChecked(false);
@@ -857,6 +898,29 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
"string must be lowercase");
}
+ if (Arch.starts_with("rvi") || Arch.starts_with("rva") ||
+ Arch.starts_with("rvb") || Arch.starts_with("rvm")) {
+ const auto *FoundProfile =
+ llvm::find_if(SupportedProfiles, [Arch](const RISCVProfile &Profile) {
+ return Arch.starts_with(Profile.Name);
+ });
+
+ if (FoundProfile == std::end(SupportedProfiles))
+ return createStringError(errc::invalid_argument, "unsupported profile");
+
+ std::string NewArch = FoundProfile->MArch.str();
+ StringRef ArchWithoutProfile = Arch.substr(FoundProfile->Name.size());
+ if (!ArchWithoutProfile.empty()) {
+ if (!ArchWithoutProfile.starts_with("_"))
+ return createStringError(
+ errc::invalid_argument,
+ "additional extensions must be after separator '_'");
+ NewArch += ArchWithoutProfile.str();
+ }
+ return parseArchString(NewArch, EnableExperimentalExtension,
+ ExperimentalExtensionVersionCheck, IgnoreUnknown);
+ }
+
bool HasRV64 = Arch.starts_with("rv64");
// ISA string must begin with rv32 or rv64.
if (!(Arch.starts_with("rv32") || HasRV64) || (Arch.size() < 5)) {
diff --git a/llvm/lib/Support/Windows/Path.inc b/llvm/lib/Support/Windows/Path.inc
index 3e4c1f74161c..854d531ab371 100644
--- a/llvm/lib/Support/Windows/Path.inc
+++ b/llvm/lib/Support/Windows/Path.inc
@@ -623,6 +623,10 @@ std::error_code access(const Twine &Path, AccessMode Mode) {
DWORD Attributes = ::GetFileAttributesW(PathUtf16.begin());
if (Attributes == INVALID_FILE_ATTRIBUTES) {
+ // Avoid returning unexpected error codes when querying for existence.
+ if (Mode == AccessMode::Exist)
+ return errc::no_such_file_or_directory;
+
// See if the file didn't actually exist.
DWORD LastError = ::GetLastError();
if (LastError != ERROR_FILE_NOT_FOUND && LastError != ERROR_PATH_NOT_FOUND)
diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index f147ded2ab70..3bf6283b79e9 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -23,6 +23,7 @@
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/InitializePasses.h"
#include "llvm/Object/COFF.h"
#include "llvm/Pass.h"
@@ -30,7 +31,7 @@
#include "llvm/TargetParser/Triple.h"
using namespace llvm;
-using namespace llvm::object;
+using namespace llvm::COFF;
using OperandBundleDef = OperandBundleDefT<Value *>;
@@ -45,8 +46,6 @@ static cl::opt<bool> GenerateThunks("arm64ec-generate-thunks", cl::Hidden,
namespace {
-enum class ThunkType { GuestExit, Entry, Exit };
-
class AArch64Arm64ECCallLowering : public ModulePass {
public:
static char ID;
@@ -73,15 +72,15 @@ private:
Type *I64Ty;
Type *VoidTy;
- void getThunkType(FunctionType *FT, AttributeList AttrList, ThunkType TT,
- raw_ostream &Out, FunctionType *&Arm64Ty,
- FunctionType *&X64Ty);
+ void getThunkType(FunctionType *FT, AttributeList AttrList,
+ Arm64ECThunkType TT, raw_ostream &Out,
+ FunctionType *&Arm64Ty, FunctionType *&X64Ty);
void getThunkRetType(FunctionType *FT, AttributeList AttrList,
raw_ostream &Out, Type *&Arm64RetTy, Type *&X64RetTy,
SmallVectorImpl<Type *> &Arm64ArgTypes,
SmallVectorImpl<Type *> &X64ArgTypes, bool &HasSretPtr);
- void getThunkArgTypes(FunctionType *FT, AttributeList AttrList, ThunkType TT,
- raw_ostream &Out,
+ void getThunkArgTypes(FunctionType *FT, AttributeList AttrList,
+ Arm64ECThunkType TT, raw_ostream &Out,
SmallVectorImpl<Type *> &Arm64ArgTypes,
SmallVectorImpl<Type *> &X64ArgTypes, bool HasSretPtr);
void canonicalizeThunkType(Type *T, Align Alignment, bool Ret,
@@ -91,13 +90,11 @@ private:
} // end anonymous namespace
-void AArch64Arm64ECCallLowering::getThunkType(FunctionType *FT,
- AttributeList AttrList,
- ThunkType TT, raw_ostream &Out,
- FunctionType *&Arm64Ty,
- FunctionType *&X64Ty) {
- Out << (TT == ThunkType::Entry ? "$ientry_thunk$cdecl$"
- : "$iexit_thunk$cdecl$");
+void AArch64Arm64ECCallLowering::getThunkType(
+ FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
+ raw_ostream &Out, FunctionType *&Arm64Ty, FunctionType *&X64Ty) {
+ Out << (TT == Arm64ECThunkType::Entry ? "$ientry_thunk$cdecl$"
+ : "$iexit_thunk$cdecl$");
Type *Arm64RetTy;
Type *X64RetTy;
@@ -108,7 +105,7 @@ void AArch64Arm64ECCallLowering::getThunkType(FunctionType *FT,
// The first argument to a thunk is the called function, stored in x9.
// For exit thunks, we pass the called function down to the emulator;
// for entry/guest exit thunks, we just call the Arm64 function directly.
- if (TT == ThunkType::Exit)
+ if (TT == Arm64ECThunkType::Exit)
Arm64ArgTypes.push_back(PtrTy);
X64ArgTypes.push_back(PtrTy);
@@ -125,8 +122,8 @@ void AArch64Arm64ECCallLowering::getThunkType(FunctionType *FT,
}
void AArch64Arm64ECCallLowering::getThunkArgTypes(
- FunctionType *FT, AttributeList AttrList, ThunkType TT, raw_ostream &Out,
- SmallVectorImpl<Type *> &Arm64ArgTypes,
+ FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
+ raw_ostream &Out, SmallVectorImpl<Type *> &Arm64ArgTypes,
SmallVectorImpl<Type *> &X64ArgTypes, bool HasSretPtr) {
Out << "$";
@@ -163,7 +160,7 @@ void AArch64Arm64ECCallLowering::getThunkArgTypes(
X64ArgTypes.push_back(PtrTy);
// x5
Arm64ArgTypes.push_back(I64Ty);
- if (TT != ThunkType::Entry) {
+ if (TT != Arm64ECThunkType::Entry) {
// FIXME: x5 isn't actually used by the x64 side; revisit once we
// have proper isel for varargs
X64ArgTypes.push_back(I64Ty);
@@ -348,7 +345,8 @@ Function *AArch64Arm64ECCallLowering::buildExitThunk(FunctionType *FT,
SmallString<256> ExitThunkName;
llvm::raw_svector_ostream ExitThunkStream(ExitThunkName);
FunctionType *Arm64Ty, *X64Ty;
- getThunkType(FT, Attrs, ThunkType::Exit, ExitThunkStream, Arm64Ty, X64Ty);
+ getThunkType(FT, Attrs, Arm64ECThunkType::Exit, ExitThunkStream, Arm64Ty,
+ X64Ty);
if (Function *F = M->getFunction(ExitThunkName))
return F;
@@ -451,8 +449,8 @@ Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
SmallString<256> EntryThunkName;
llvm::raw_svector_ostream EntryThunkStream(EntryThunkName);
FunctionType *Arm64Ty, *X64Ty;
- getThunkType(F->getFunctionType(), F->getAttributes(), ThunkType::Entry,
- EntryThunkStream, Arm64Ty, X64Ty);
+ getThunkType(F->getFunctionType(), F->getAttributes(),
+ Arm64ECThunkType::Entry, EntryThunkStream, Arm64Ty, X64Ty);
if (Function *F = M->getFunction(EntryThunkName))
return F;
@@ -543,8 +541,8 @@ Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
Function *AArch64Arm64ECCallLowering::buildGuestExitThunk(Function *F) {
llvm::raw_null_ostream NullThunkName;
FunctionType *Arm64Ty, *X64Ty;
- getThunkType(F->getFunctionType(), F->getAttributes(), ThunkType::GuestExit,
- NullThunkName, Arm64Ty, X64Ty);
+ getThunkType(F->getFunctionType(), F->getAttributes(),
+ Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty);
auto MangledName = getArm64ECMangledFunctionName(F->getName().str());
assert(MangledName && "Can't guest exit to function that's already native");
std::string ThunkName = *MangledName;
@@ -679,7 +677,7 @@ bool AArch64Arm64ECCallLowering::runOnModule(Module &Mod) {
struct ThunkInfo {
Constant *Src;
Constant *Dst;
- unsigned Kind;
+ Arm64ECThunkType Kind;
};
SmallVector<ThunkInfo> ThunkMapping;
for (Function &F : Mod) {
@@ -688,14 +686,17 @@ bool AArch64Arm64ECCallLowering::runOnModule(Module &Mod) {
F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64) {
if (!F.hasComdat())
F.setComdat(Mod.getOrInsertComdat(F.getName()));
- ThunkMapping.push_back({&F, buildEntryThunk(&F), 1});
+ ThunkMapping.push_back(
+ {&F, buildEntryThunk(&F), Arm64ECThunkType::Entry});
}
}
for (Function *F : DirectCalledFns) {
ThunkMapping.push_back(
- {F, buildExitThunk(F->getFunctionType(), F->getAttributes()), 4});
+ {F, buildExitThunk(F->getFunctionType(), F->getAttributes()),
+ Arm64ECThunkType::Exit});
if (!F->hasDLLImportStorageClass())
- ThunkMapping.push_back({buildGuestExitThunk(F), F, 0});
+ ThunkMapping.push_back(
+ {buildGuestExitThunk(F), F, Arm64ECThunkType::GuestExit});
}
if (!ThunkMapping.empty()) {
@@ -704,7 +705,7 @@ bool AArch64Arm64ECCallLowering::runOnModule(Module &Mod) {
ThunkMappingArrayElems.push_back(ConstantStruct::getAnon(
{ConstantExpr::getBitCast(Thunk.Src, PtrTy),
ConstantExpr::getBitCast(Thunk.Dst, PtrTy),
- ConstantInt::get(M->getContext(), APInt(32, Thunk.Kind))}));
+ ConstantInt::get(M->getContext(), APInt(32, uint8_t(Thunk.Kind)))}));
}
Constant *ThunkMappingArray = ConstantArray::get(
llvm::ArrayType::get(ThunkMappingArrayElems[0]->getType(),
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7fab274ab957..f552f9192920 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6895,6 +6895,11 @@ AArch64TargetLowering::allocateLazySaveBuffer(SDValue &Chain, const SDLoc &DL,
return TPIDR2Obj;
}
+static bool isPassedInFPR(EVT VT) {
+ return VT.isFixedLengthVector() ||
+ (VT.isFloatingPoint() && !VT.isScalableVector());
+}
+
SDValue AArch64TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
@@ -7031,6 +7036,13 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// This will be the new Chain/Root node.
ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT, Glue);
Glue = ArgValue.getValue(2);
+ if (isPassedInFPR(ArgValue.getValueType())) {
+ ArgValue =
+ DAG.getNode(AArch64ISD::COALESCER_BARRIER, DL,
+ DAG.getVTList(ArgValue.getValueType(), MVT::Glue),
+ {ArgValue, Glue});
+ Glue = ArgValue.getValue(1);
+ }
} else
ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
@@ -7402,11 +7414,6 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
}
}
-static bool isPassedInFPR(EVT VT) {
- return VT.isFixedLengthVector() ||
- (VT.isFloatingPoint() && !VT.isScalableVector());
-}
-
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue AArch64TargetLowering::LowerCallResult(
@@ -8632,6 +8639,10 @@ AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SmallVector<SDValue, 4> RetOps(1, Chain);
for (auto &RetVal : RetVals) {
+ if (FuncAttrs.hasStreamingBody() && !FuncAttrs.hasStreamingInterface() &&
+ isPassedInFPR(RetVal.second.getValueType()))
+ RetVal.second = DAG.getNode(AArch64ISD::COALESCER_BARRIER, DL,
+ RetVal.second.getValueType(), RetVal.second);
Chain = DAG.getCopyToReg(Chain, DL, RetVal.first, RetVal.second, Glue);
Glue = Chain.getValue(1);
RetOps.push_back(
@@ -18629,12 +18640,16 @@ static SDValue performConcatVectorsCombine(SDNode *N,
}
}
- if (N->getOperand(0).getValueType() == MVT::v4i8) {
+ if (N->getOperand(0).getValueType() == MVT::v4i8 ||
+ N->getOperand(0).getValueType() == MVT::v2i16 ||
+ N->getOperand(0).getValueType() == MVT::v2i8) {
+ EVT SrcVT = N->getOperand(0).getValueType();
// If we have a concat of v4i8 loads, convert them to a buildvector of f32
// loads to prevent having to go through the v4i8 load legalization that
// needs to extend each element into a larger type.
- if (N->getNumOperands() % 2 == 0 && all_of(N->op_values(), [](SDValue V) {
- if (V.getValueType() != MVT::v4i8)
+ if (N->getNumOperands() % 2 == 0 &&
+ all_of(N->op_values(), [SrcVT](SDValue V) {
+ if (V.getValueType() != SrcVT)
return false;
if (V.isUndef())
return true;
@@ -18642,19 +18657,18 @@ static SDValue performConcatVectorsCombine(SDNode *N,
return LD && V.hasOneUse() && LD->isSimple() && !LD->isIndexed() &&
LD->getExtensionType() == ISD::NON_EXTLOAD;
})) {
- EVT NVT =
- EVT::getVectorVT(*DAG.getContext(), MVT::f32, N->getNumOperands());
+ EVT FVT = SrcVT == MVT::v2i8 ? MVT::f16 : MVT::f32;
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), FVT, N->getNumOperands());
SmallVector<SDValue> Ops;
for (unsigned i = 0; i < N->getNumOperands(); i++) {
SDValue V = N->getOperand(i);
if (V.isUndef())
- Ops.push_back(DAG.getUNDEF(MVT::f32));
+ Ops.push_back(DAG.getUNDEF(FVT));
else {
LoadSDNode *LD = cast<LoadSDNode>(V);
- SDValue NewLoad =
- DAG.getLoad(MVT::f32, dl, LD->getChain(), LD->getBasePtr(),
- LD->getMemOperand());
+ SDValue NewLoad = DAG.getLoad(FVT, dl, LD->getChain(),
+ LD->getBasePtr(), LD->getMemOperand());
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
Ops.push_back(NewLoad);
}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 02943b8a4ab1..d0c5e6b99e9e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2687,10 +2687,7 @@ bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
return false;
// The maximum vscale is 16 under AArch64, return the maximal extent for the
// vector.
- Width = WidthN.isScalable()
- ? WidthN.getKnownMinValue() * AArch64::SVEMaxBitsPerVector /
- AArch64::SVEBitsPerBlock
- : WidthN.getKnownMinValue();
+ Width = LocationSize::precise(WidthN);
BaseOps.push_back(BaseOp);
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index b4b975cce007..b1f514f75207 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -8436,6 +8436,9 @@ def ubsan_trap_xform : SDNodeXForm<timm, [{
return CurDAG->getTargetConstant(N->getZExtValue() | ('U' << 8), SDLoc(N), MVT::i32);
}]>;
+def gi_ubsan_trap_xform : GICustomOperandRenderer<"renderUbsanTrap">,
+ GISDNodeXFormEquiv<ubsan_trap_xform>;
+
def ubsan_trap_imm : TImmLeaf<i32, [{
return isUInt<8>(Imm);
}], ubsan_trap_xform>;
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 6865850cf04f..22da7ddef98a 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -680,9 +680,11 @@ bool AArch64MIPeepholeOpt::visitFMOVDr(MachineInstr &MI) {
// Let's remove MIs for high 64-bits.
Register OldDef = MI.getOperand(0).getReg();
Register NewDef = MI.getOperand(1).getReg();
+ LLVM_DEBUG(dbgs() << "Removing: " << MI << "\n");
+ MRI->clearKillFlags(OldDef);
+ MRI->clearKillFlags(NewDef);
MRI->constrainRegClass(NewDef, MRI->getRegClass(OldDef));
MRI->replaceRegWith(OldDef, NewDef);
- LLVM_DEBUG(dbgs() << "Removed: " << MI << "\n");
MI.eraseFromParent();
return true;
diff --git a/bolt/lib/Profile/ProfileReaderBase.cpp b/llvm/lib/Target/AArch64/AArch64PassRegistry.def
index ee6166b1da36..ca944579f93a 100644
--- a/bolt/lib/Profile/ProfileReaderBase.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PassRegistry.def
@@ -1,4 +1,4 @@
-//===- bolt/Profile/ProfileReaderBase.cpp ---------------------------------===//
+//===- AArch64PassRegistry.def - Registry of AArch64 passes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,18 +6,15 @@
//
//===----------------------------------------------------------------------===//
//
-// Interface to be implemented by all profile readers.
+// This file is used as the registry of passes that are part of the
+// AArch64 backend.
//
//===----------------------------------------------------------------------===//
-#include "bolt/Profile/ProfileReaderBase.h"
+// NOTE: NO INCLUDE GUARD DESIRED!
-namespace llvm {
-namespace bolt {
-
-bool ProfileReaderBase::mayHaveProfileData(const BinaryFunction &BF) {
- return true;
-}
-
-} // namespace bolt
-} // namespace llvm
+#ifndef LOOP_PASS
+#define LOOP_PASS(NAME, CREATE_PASS)
+#endif
+LOOP_PASS("aarch64-lit", AArch64LoopIdiomTransformPass())
+#undef LOOP_PASS
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index b65c67e70a4e..2db0fa253434 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -29,7 +29,7 @@ def AArch64_save_zt : SDNode<"AArch64ISD::SAVE_ZT", SDTypeProfile<0, 2,
[SDTCisInt<0>, SDTCisPtrTy<1>]>,
[SDNPHasChain, SDNPSideEffect, SDNPMayStore]>;
def AArch64CoalescerBarrier
- : SDNode<"AArch64ISD::COALESCER_BARRIER", SDTypeProfile<1, 1, []>, []>;
+ : SDNode<"AArch64ISD::COALESCER_BARRIER", SDTypeProfile<1, 1, []>, [SDNPOptInGlue, SDNPOutGlue]>;
//===----------------------------------------------------------------------===//
// Instruction naming conventions.
diff --git a/llvm/lib/Target/AArch64/AArch64SchedAmpere1.td b/llvm/lib/Target/AArch64/AArch64SchedAmpere1.td
index cf9f50c2784b..269f4ec5e5fb 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedAmpere1.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedAmpere1.td
@@ -18,7 +18,7 @@
def Ampere1Model : SchedMachineModel {
let IssueWidth = 4; // 4-way decode and dispatch
- let MicroOpBufferSize = 174; // micro-op re-order buffer size
+ let MicroOpBufferSize = 192; // re-order buffer size
let LoadLatency = 4; // Optimistic load latency
let MispredictPenalty = 10; // Branch mispredict penalty
let LoopMicroOpBufferSize = 32; // Instruction queue size
diff --git a/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td b/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td
index 9c4f000cf351..67f8593f1577 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedAmpere1B.td
@@ -18,7 +18,7 @@
def Ampere1BModel : SchedMachineModel {
let IssueWidth = 12; // Maximum micro-ops dispatch rate.
- let MicroOpBufferSize = 192; // micro-op re-order buffer size
+ let MicroOpBufferSize = 208; // micro-op re-order buffer size
let LoadLatency = 3; // Optimistic load latency
let MispredictPenalty = 10; // Branch mispredict penalty
let LoopMicroOpBufferSize = 32; // Instruction queue size
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
index c7dfd64b2fb2..f7e6545f0dd3 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
@@ -1446,7 +1446,7 @@ def : InstRW<[V1Write_3c_1V01], (instregex "^[SU]DOT_ZZZI?_S$")>;
def : InstRW<[V1Write_3c_1V], (instrs SUDOT_ZZZI, USDOT_ZZZ, USDOT_ZZZI)>;
// Dot product, 16 bit
-def : InstRW<[V1Write_4c_1V01], (instregex "^[SU]DOT_ZZZI?_D$")>;
+def : InstRW<[V1Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>;
// Duplicate, immediate and indexed form
def : InstRW<[V1Write_2c_1V01], (instregex "^DUP_ZI_[BHSD]$",
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index e5e60459e814..7d4a57d792a1 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -547,6 +547,10 @@ public:
void AArch64TargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
+
+#define GET_PASS_REGISTRY "AArch64PassRegistry.def"
+#include "llvm/Passes/TargetPassRegistry.inc"
+
PB.registerLateLoopOptimizationsEPCallback(
[=](LoopPassManager &LPM, OptimizationLevel Level) {
LPM.addPass(AArch64LoopIdiomTransformPass());
@@ -599,7 +603,7 @@ void AArch64PassConfig::addIRPasses() {
addPass(createFalkorMarkStridedAccessesPass());
}
- if (TM->getOptLevel() == CodeGenOptLevel::Aggressive && EnableGEPOpt) {
+ if (EnableGEPOpt) {
// Call SeparateConstOffsetFromGEP pass to extract constants within indices
// and lower a GEP with multiple indices to either arithmetic operations or
// multiple GEPs with single index.
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 7a86c5c60881..ee7137b92445 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -479,7 +479,7 @@ AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
return TTI::TCC_Free;
break;
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
return TTI::TCC_Free;
break;
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index b807aaf76fdb..21643ebb4138 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -654,7 +654,7 @@ public:
return Barrier.HasnXSModifier;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == k_Register && "Invalid access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
index 92db89cc0915..80fe4bcb8b58 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -147,6 +147,12 @@ void AArch64GISelUtils::changeFCMPPredToAArch64CC(
case CmpInst::FCMP_UNE:
CondCode = AArch64CC::NE;
break;
+ case CmpInst::FCMP_TRUE:
+ CondCode = AArch64CC::AL;
+ break;
+ case CmpInst::FCMP_FALSE:
+ CondCode = AArch64CC::NV;
+ break;
}
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 677dd0b502b9..a8f2c45279e6 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -479,6 +479,8 @@ private:
int OpIdx = -1) const;
void renderLogicalImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
int OpIdx = -1) const;
+ void renderUbsanTrap(MachineInstrBuilder &MIB, const MachineInstr &MI,
+ int OpIdx) const;
void renderFPImm16(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx = -1) const;
void renderFPImm32(MachineInstrBuilder &MIB, const MachineInstr &MI,
@@ -6159,16 +6161,6 @@ bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
break;
}
- case Intrinsic::trap:
- MIB.buildInstr(AArch64::BRK, {}, {}).addImm(1);
- break;
- case Intrinsic::debugtrap:
- MIB.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000);
- break;
- case Intrinsic::ubsantrap:
- MIB.buildInstr(AArch64::BRK, {}, {})
- .addImm(I.getOperand(1).getImm() | ('U' << 8));
- break;
case Intrinsic::aarch64_neon_ld1x2: {
LLT Ty = MRI.getType(I.getOperand(0).getReg());
unsigned Opc = 0;
@@ -7663,6 +7655,14 @@ void AArch64InstructionSelector::renderLogicalImm64(
MIB.addImm(Enc);
}
+void AArch64InstructionSelector::renderUbsanTrap(MachineInstrBuilder &MIB,
+ const MachineInstr &MI,
+ int OpIdx) const {
+ assert(MI.getOpcode() == TargetOpcode::G_UBSANTRAP && OpIdx == 0 &&
+ "Expected G_UBSANTRAP");
+ MIB.addImm(MI.getOperand(0).getImm() | ('U' << 8));
+}
+
void AArch64InstructionSelector::renderFPImm16(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 996abe8e4739..33dba6a5c61e 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -611,7 +611,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
Query.Types[0].isVector() &&
(Query.Types[1].getScalarSizeInBits() == 8 ||
Query.Types[1].getScalarSizeInBits() == 16);
- });
+ })
+ .clampMinNumElements(1, s8, 8)
+ .clampMinNumElements(1, s16, 4);
getActionDefinitionsBuilder(G_TRUNC)
.legalFor({{v2s32, v2s64}, {v4s16, v4s32}, {v8s8, v8s16}})
@@ -630,7 +632,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
})
.clampMinNumElements(0, s8, 8)
.clampMinNumElements(0, s16, 4)
- .clampMinNumElements(0, s32, 2)
.alwaysLegal();
getActionDefinitionsBuilder(G_SEXT_INREG)
@@ -1012,6 +1013,11 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
ABSActions
.legalFor({s32, s64});
ABSActions.legalFor(PackedVectorAllTypeList)
+ .customIf([=](const LegalityQuery &Q) {
+ // TODO: Fix suboptimal codegen for 128+ bit types.
+ LLT SrcTy = Q.Types[0];
+ return SrcTy.isScalar() && SrcTy.getSizeInBits() < 128;
+ })
.widenScalarIf(
[=](const LegalityQuery &Query) { return Query.Types[0] == v4s8; },
[=](const LegalityQuery &Query) { return std::make_pair(0, v4s16); })
@@ -1264,6 +1270,8 @@ bool AArch64LegalizerInfo::legalizeCustom(
return legalizeDynStackAlloc(MI, Helper);
case TargetOpcode::G_PREFETCH:
return legalizePrefetch(MI, Helper);
+ case TargetOpcode::G_ABS:
+ return Helper.lowerAbsToCNeg(MI);
}
llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index c877658cd38e..37dcfef3b2a3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -831,10 +831,12 @@ def FeatureNoDataDepHazard : SubtargetFeature<"no-data-dep-hazard",
"Does not need SW waitstates"
>;
-def FeatureGFX11FullVGPRs : SubtargetFeature<"gfx11-full-vgprs",
- "HasGFX11FullVGPRs",
+// Allocate 1536 VGPRs for wave32 and 768 VGPRs for wave64
+// with allocation granularity 24 for wave32 and 12 for wave64
+def Feature1_5xVGPRs : SubtargetFeature<"allocate1_5xvgprs",
+ "Has1_5xVGPRs",
"true",
- "GFX11 with 50% more physical VGPRs and 50% larger allocation granule than GFX10"
+ "Has 50% more physical VGPRs and 50% larger allocation granule"
>;
@@ -1491,12 +1493,12 @@ def FeatureISAVersion11_0_Common : FeatureSet<
def FeatureISAVersion11_0_0 : FeatureSet<
!listconcat(FeatureISAVersion11_0_Common.Features,
- [FeatureGFX11FullVGPRs,
+ [Feature1_5xVGPRs,
FeatureUserSGPRInit16Bug])>;
def FeatureISAVersion11_0_1 : FeatureSet<
!listconcat(FeatureISAVersion11_0_Common.Features,
- [FeatureGFX11FullVGPRs])>;
+ [Feature1_5xVGPRs])>;
def FeatureISAVersion11_0_2 : FeatureSet<
!listconcat(FeatureISAVersion11_0_Common.Features,
@@ -1517,7 +1519,7 @@ def FeatureISAVersion11_5_1 : FeatureSet<
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
FeatureVGPRSingleUseHintInsts,
- FeatureGFX11FullVGPRs])>;
+ Feature1_5xVGPRs])>;
def FeatureISAVersion12 : FeatureSet<
[FeatureGFX12,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 72e8b59e0a40..052b231d62a3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -22,6 +22,7 @@
#include "AMDKernelCodeT.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
+#include "MCTargetDesc/AMDGPUMCKernelDescriptor.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "R600AsmPrinter.h"
#include "SIMachineFunctionInfo.h"
@@ -428,38 +429,43 @@ uint16_t AMDGPUAsmPrinter::getAmdhsaKernelCodeProperties(
return KernelCodeProperties;
}
-amdhsa::kernel_descriptor_t AMDGPUAsmPrinter::getAmdhsaKernelDescriptor(
- const MachineFunction &MF,
- const SIProgramInfo &PI) const {
+MCKernelDescriptor
+AMDGPUAsmPrinter::getAmdhsaKernelDescriptor(const MachineFunction &MF,
+ const SIProgramInfo &PI) const {
const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
const Function &F = MF.getFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+ MCContext &Ctx = MF.getContext();
- amdhsa::kernel_descriptor_t KernelDescriptor;
- memset(&KernelDescriptor, 0x0, sizeof(KernelDescriptor));
+ MCKernelDescriptor KernelDescriptor;
assert(isUInt<32>(PI.ScratchSize));
assert(isUInt<32>(PI.getComputePGMRSrc1(STM)));
assert(isUInt<32>(PI.getComputePGMRSrc2()));
- KernelDescriptor.group_segment_fixed_size = PI.LDSSize;
- KernelDescriptor.private_segment_fixed_size = PI.ScratchSize;
+ KernelDescriptor.group_segment_fixed_size =
+ MCConstantExpr::create(PI.LDSSize, Ctx);
+ KernelDescriptor.private_segment_fixed_size =
+ MCConstantExpr::create(PI.ScratchSize, Ctx);
Align MaxKernArgAlign;
- KernelDescriptor.kernarg_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
+ KernelDescriptor.kernarg_size = MCConstantExpr::create(
+ STM.getKernArgSegmentSize(F, MaxKernArgAlign), Ctx);
- KernelDescriptor.compute_pgm_rsrc1 = PI.getComputePGMRSrc1(STM);
- KernelDescriptor.compute_pgm_rsrc2 = PI.getComputePGMRSrc2();
- KernelDescriptor.kernel_code_properties = getAmdhsaKernelCodeProperties(MF);
+ KernelDescriptor.compute_pgm_rsrc1 =
+ MCConstantExpr::create(PI.getComputePGMRSrc1(STM), Ctx);
+ KernelDescriptor.compute_pgm_rsrc2 =
+ MCConstantExpr::create(PI.getComputePGMRSrc2(), Ctx);
+ KernelDescriptor.kernel_code_properties =
+ MCConstantExpr::create(getAmdhsaKernelCodeProperties(MF), Ctx);
assert(STM.hasGFX90AInsts() || CurrentProgramInfo.ComputePGMRSrc3GFX90A == 0);
- if (STM.hasGFX90AInsts())
- KernelDescriptor.compute_pgm_rsrc3 =
- CurrentProgramInfo.ComputePGMRSrc3GFX90A;
+ KernelDescriptor.compute_pgm_rsrc3 = MCConstantExpr::create(
+ STM.hasGFX90AInsts() ? CurrentProgramInfo.ComputePGMRSrc3GFX90A : 0, Ctx);
- if (AMDGPU::hasKernargPreload(STM))
- KernelDescriptor.kernarg_preload =
- static_cast<uint16_t>(Info->getNumKernargPreloadedSGPRs());
+ KernelDescriptor.kernarg_preload = MCConstantExpr::create(
+ AMDGPU::hasKernargPreload(STM) ? Info->getNumKernargPreloadedSGPRs() : 0,
+ Ctx);
return KernelDescriptor;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
index 79326cd3d328..b8b2718d293e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -28,15 +28,12 @@ class MCCodeEmitter;
class MCOperand;
namespace AMDGPU {
+struct MCKernelDescriptor;
namespace HSAMD {
class MetadataStreamer;
}
} // namespace AMDGPU
-namespace amdhsa {
-struct kernel_descriptor_t;
-}
-
class AMDGPUAsmPrinter final : public AsmPrinter {
private:
unsigned CodeObjectVersion;
@@ -75,9 +72,9 @@ private:
uint16_t getAmdhsaKernelCodeProperties(
const MachineFunction &MF) const;
- amdhsa::kernel_descriptor_t getAmdhsaKernelDescriptor(
- const MachineFunction &MF,
- const SIProgramInfo &PI) const;
+ AMDGPU::MCKernelDescriptor
+ getAmdhsaKernelDescriptor(const MachineFunction &MF,
+ const SIProgramInfo &PI) const;
void initTargetStreamer(Module &M);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 9ba74a23e8af..dbb3de76b4dd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -209,8 +209,9 @@ void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
break;
}
- // Only 32-bit floating point atomic ops are supported.
- if (AtomicRMWInst::isFPOperation(Op) && !I.getType()->isFloatTy()) {
+ // Only 32 and 64 bit floating point atomic ops are supported.
+ if (AtomicRMWInst::isFPOperation(Op) &&
+ !(I.getType()->isFloatTy() || I.getType()->isDoubleTy())) {
return;
}
@@ -920,8 +921,10 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
Value *BroadcastI = nullptr;
if (TyBitWidth == 64) {
- Value *const ExtractLo = B.CreateTrunc(PHI, Int32Ty);
- Value *const ExtractHi = B.CreateTrunc(B.CreateLShr(PHI, 32), Int32Ty);
+ Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
+ Value *const ExtractLo = B.CreateTrunc(CastedPhi, Int32Ty);
+ Value *const ExtractHi =
+ B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
CallInst *const ReadFirstLaneLo =
B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
CallInst *const ReadFirstLaneHi =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index d7f5110427ec..9bd30458bc0a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -918,6 +918,96 @@ AAAMDWavesPerEU &AAAMDWavesPerEU::createForPosition(const IRPosition &IRP,
llvm_unreachable("AAAMDWavesPerEU is only valid for function position");
}
+static bool inlineAsmUsesAGPRs(const InlineAsm *IA) {
+ for (const auto &CI : IA->ParseConstraints()) {
+ for (StringRef Code : CI.Codes) {
+ Code.consume_front("{");
+ if (Code.starts_with("a"))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+struct AAAMDGPUNoAGPR
+ : public IRAttribute<Attribute::NoUnwind,
+ StateWrapper<BooleanState, AbstractAttribute>,
+ AAAMDGPUNoAGPR> {
+ AAAMDGPUNoAGPR(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}
+
+ static AAAMDGPUNoAGPR &createForPosition(const IRPosition &IRP,
+ Attributor &A) {
+ if (IRP.getPositionKind() == IRPosition::IRP_FUNCTION)
+ return *new (A.Allocator) AAAMDGPUNoAGPR(IRP, A);
+ llvm_unreachable("AAAMDGPUNoAGPR is only valid for function position");
+ }
+
+ void initialize(Attributor &A) override {
+ Function *F = getAssociatedFunction();
+ if (F->hasFnAttribute("amdgpu-no-agpr"))
+ indicateOptimisticFixpoint();
+ }
+
+ const std::string getAsStr(Attributor *A) const override {
+ return getAssumed() ? "amdgpu-no-agpr" : "amdgpu-maybe-agpr";
+ }
+
+ void trackStatistics() const override {}
+
+ ChangeStatus updateImpl(Attributor &A) override {
+ // TODO: Use AACallEdges, but then we need a way to inspect asm edges.
+
+ auto CheckForNoAGPRs = [&](Instruction &I) {
+ const auto &CB = cast<CallBase>(I);
+ const Value *CalleeOp = CB.getCalledOperand();
+ const Function *Callee = dyn_cast<Function>(CalleeOp);
+ if (!Callee) {
+ if (const InlineAsm *IA = dyn_cast<InlineAsm>(CalleeOp))
+ return !inlineAsmUsesAGPRs(IA);
+ return false;
+ }
+
+ // Some intrinsics may use AGPRs, but if we have a choice, we are not
+ // required to use AGPRs.
+ if (Callee->isIntrinsic())
+ return true;
+
+ // TODO: Handle callsite attributes
+ const auto *CalleeInfo = A.getAAFor<AAAMDGPUNoAGPR>(
+ *this, IRPosition::function(*Callee), DepClassTy::REQUIRED);
+ return CalleeInfo && CalleeInfo->getAssumed();
+ };
+
+ bool UsedAssumedInformation = false;
+ if (!A.checkForAllCallLikeInstructions(CheckForNoAGPRs, *this,
+ UsedAssumedInformation))
+ return indicatePessimisticFixpoint();
+ return ChangeStatus::UNCHANGED;
+ }
+
+ ChangeStatus manifest(Attributor &A) override {
+ if (!getAssumed())
+ return ChangeStatus::UNCHANGED;
+ LLVMContext &Ctx = getAssociatedFunction()->getContext();
+ return A.manifestAttrs(getIRPosition(),
+ {Attribute::get(Ctx, "amdgpu-no-agpr")});
+ }
+
+ const std::string getName() const override { return "AAAMDGPUNoAGPR"; }
+ const char *getIdAddr() const override { return &ID; }
+
+ /// This function should return true if the type of the \p AA is
+ /// AAAMDGPUNoAGPRs
+ static bool classof(const AbstractAttribute *AA) {
+ return (AA->getIdAddr() == &ID);
+ }
+
+ static const char ID;
+};
+
+const char AAAMDGPUNoAGPR::ID = 0;
+
static void addPreloadKernArgHint(Function &F, TargetMachine &TM) {
const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
for (unsigned I = 0;
@@ -946,8 +1036,9 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) {
DenseSet<const char *> Allowed(
{&AAAMDAttributes::ID, &AAUniformWorkGroupSize::ID,
&AAPotentialValues::ID, &AAAMDFlatWorkGroupSize::ID,
- &AAAMDWavesPerEU::ID, &AACallEdges::ID, &AAPointerInfo::ID,
- &AAPotentialConstantValues::ID, &AAUnderlyingObjects::ID});
+ &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID, &AACallEdges::ID,
+ &AAPointerInfo::ID, &AAPotentialConstantValues::ID,
+ &AAUnderlyingObjects::ID});
AttributorConfig AC(CGUpdater);
AC.Allowed = &Allowed;
@@ -963,6 +1054,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM) {
if (!F.isIntrinsic()) {
A.getOrCreateAAFor<AAAMDAttributes>(IRPosition::function(F));
A.getOrCreateAAFor<AAUniformWorkGroupSize>(IRPosition::function(F));
+ A.getOrCreateAAFor<AAAMDGPUNoAGPR>(IRPosition::function(F));
CallingConv::ID CC = F.getCallingConv();
if (!AMDGPU::isEntryFunctionCC(CC)) {
A.getOrCreateAAFor<AAAMDFlatWorkGroupSize>(IRPosition::function(F));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index bddf3d958a1a..6e7d34f5adaa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1594,6 +1594,9 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
}
}
+ if (auto *NewEltI = dyn_cast<Instruction>(NewElt))
+ NewEltI->copyIRFlags(&I);
+
NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
}
} else {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index bee43b6c18c8..f283af6fa07d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -851,12 +851,7 @@ bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
return true;
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntrID = N->getConstantOperandVal(0);
- switch (IntrID) {
- case Intrinsic::amdgcn_readfirstlane:
- case Intrinsic::amdgcn_readlane:
- return true;
- }
- return false;
+ return AMDGPU::isIntrinsicAlwaysUniform(IntrID);
}
case ISD::LOAD:
if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 94cc1d90e0ca..e13c13913d4e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2045,38 +2045,6 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
if (BaseOpcode->HasD16)
MIB.addImm(IsD16 ? -1 : 0);
- if (IsTexFail) {
- // An image load instruction with TFE/LWE only conditionally writes to its
- // result registers. Initialize them to zero so that we always get well
- // defined result values.
- assert(VDataOut && !VDataIn);
- Register Tied = MRI->cloneVirtualRegister(VDataOut);
- Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
- .addImm(0);
- auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
- if (STI.usePRTStrictNull()) {
- // With enable-prt-strict-null enabled, initialize all result registers to
- // zero.
- auto RegSeq =
- BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
- for (auto Sub : Parts)
- RegSeq.addReg(Zero).addImm(Sub);
- } else {
- // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
- // result register.
- Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
- BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
- auto RegSeq =
- BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
- for (auto Sub : Parts.drop_back(1))
- RegSeq.addReg(Undef).addImm(Sub);
- RegSeq.addReg(Zero).addImm(Parts.back());
- }
- MIB.addReg(Tied, RegState::Implicit);
- MIB->tieOperands(0, MIB->getNumOperands() - 1);
- }
-
MI.eraseFromParent();
constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 90872516dd6d..e55d1de01b4f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2030,6 +2030,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
getActionDefinitionsBuilder({G_MEMCPY, G_MEMCPY_INLINE, G_MEMMOVE, G_MEMSET})
.lower();
+ getActionDefinitionsBuilder({G_TRAP, G_DEBUGTRAP}).custom();
+
getActionDefinitionsBuilder({G_VASTART, G_VAARG, G_BRJT, G_JUMP_TABLE,
G_INDEXED_LOAD, G_INDEXED_SEXTLOAD,
G_INDEXED_ZEXTLOAD, G_INDEXED_STORE})
@@ -2134,6 +2136,10 @@ bool AMDGPULegalizerInfo::legalizeCustom(
return legalizeGetFPEnv(MI, MRI, B);
case TargetOpcode::G_SET_FPENV:
return legalizeSetFPEnv(MI, MRI, B);
+ case TargetOpcode::G_TRAP:
+ return legalizeTrap(MI, MRI, B);
+ case TargetOpcode::G_DEBUGTRAP:
+ return legalizeDebugTrap(MI, MRI, B);
default:
return false;
}
@@ -2925,7 +2931,7 @@ bool AMDGPULegalizerInfo::legalizeGlobalValue(
// functions that use local objects. However, if these dead functions are
// not eliminated, we don't want a compile time error. Just emit a warning
// and a trap, since there should be no callable path here.
- B.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>());
+ B.buildTrap();
B.buildUndef(DstReg);
MI.eraseFromParent();
return true;
@@ -6618,9 +6624,9 @@ bool AMDGPULegalizerInfo::legalizeSBufferLoad(LegalizerHelper &Helper,
}
// TODO: Move to selection
-bool AMDGPULegalizerInfo::legalizeTrapIntrinsic(MachineInstr &MI,
- MachineRegisterInfo &MRI,
- MachineIRBuilder &B) const {
+bool AMDGPULegalizerInfo::legalizeTrap(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
if (!ST.isTrapHandlerEnabled() ||
ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
return legalizeTrapEndpgm(MI, MRI, B);
@@ -6726,8 +6732,9 @@ bool AMDGPULegalizerInfo::legalizeTrapHsa(
return true;
}
-bool AMDGPULegalizerInfo::legalizeDebugTrapIntrinsic(
- MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
+bool AMDGPULegalizerInfo::legalizeDebugTrap(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
// Is non-HSA path or trap-handler disabled? Then, report a warning
// accordingly
if (!ST.isTrapHandlerEnabled() ||
@@ -7270,10 +7277,6 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
case Intrinsic::amdgcn_struct_buffer_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_struct_ptr_buffer_atomic_fadd_v2bf16:
return legalizeBufferAtomic(MI, B, IntrID);
- case Intrinsic::trap:
- return legalizeTrapIntrinsic(MI, MRI, B);
- case Intrinsic::debugtrap:
- return legalizeDebugTrapIntrinsic(MI, MRI, B);
case Intrinsic::amdgcn_rsq_clamp:
return legalizeRsqClampIntrinsic(MI, MRI, B);
case Intrinsic::amdgcn_ds_fadd:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 9661646fffc9..e5ba84a74a0f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -226,16 +226,16 @@ public:
bool legalizeSBufferLoad(LegalizerHelper &Helper, MachineInstr &MI) const;
- bool legalizeTrapIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &B) const;
+ bool legalizeTrap(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const;
bool legalizeTrapEndpgm(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
bool legalizeTrapHsaQueuePtr(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
bool legalizeTrapHsa(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const;
- bool legalizeDebugTrapIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &B) const;
+ bool legalizeDebugTrap(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const;
bool legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 84b4ccc1ae7b..5aa35becd842 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -657,6 +657,8 @@ bool AMDGPULibCalls::fold(CallInst *CI) {
return true;
IRBuilder<> B(CI);
+ if (CI->isStrictFP())
+ B.setIsFPConstrained(true);
if (FPMathOperator *FPOp = dyn_cast<FPMathOperator>(CI)) {
// Under unsafe-math, evaluate calls if possible.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index b85cb26fdc95..595f09664c55 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -340,26 +340,11 @@ public:
// Get uses from the current function, excluding uses by called functions
// Two output variables to avoid walking the globals list twice
- std::optional<bool> HasAbsoluteGVs;
for (auto &GV : M.globals()) {
if (!AMDGPU::isLDSVariableToLower(GV)) {
continue;
}
- // Check if the module is consistent: either all GVs are absolute (happens
- // when we run the pass more than once), or none are.
- const bool IsAbsolute = GV.isAbsoluteSymbolRef();
- if (HasAbsoluteGVs.has_value()) {
- if (*HasAbsoluteGVs != IsAbsolute) {
- report_fatal_error(
- "Module cannot mix absolute and non-absolute LDS GVs");
- }
- } else
- HasAbsoluteGVs = IsAbsolute;
-
- if (IsAbsolute)
- continue;
-
for (User *V : GV.users()) {
if (auto *I = dyn_cast<Instruction>(V)) {
Function *F = I->getFunction();
@@ -469,6 +454,31 @@ public:
}
}
+ // Verify that we fall into one of 2 cases:
+ // - All variables are absolute: this is a re-run of the pass
+ // so we don't have anything to do.
+ // - No variables are absolute.
+ std::optional<bool> HasAbsoluteGVs;
+ for (auto &Map : {direct_map_kernel, indirect_map_kernel}) {
+ for (auto &[Fn, GVs] : Map) {
+ for (auto *GV : GVs) {
+ bool IsAbsolute = GV->isAbsoluteSymbolRef();
+ if (HasAbsoluteGVs.has_value()) {
+ if (*HasAbsoluteGVs != IsAbsolute) {
+ report_fatal_error(
+ "Module cannot mix absolute and non-absolute LDS GVs");
+ }
+ } else
+ HasAbsoluteGVs = IsAbsolute;
+ }
+ }
+ }
+
+ // If we only had absolute GVs, we have nothing to do, return an empty
+ // result.
+ if (HasAbsoluteGVs && *HasAbsoluteGVs)
+ return {FunctionVariableMap(), FunctionVariableMap()};
+
return {std::move(direct_map_kernel), std::move(indirect_map_kernel)};
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
new file mode 100644
index 000000000000..90f36fadf359
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -0,0 +1,73 @@
+//===- AMDGPUPassRegistry.def - Registry of AMDGPU passes -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the
+// AMDGPU backend.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef MODULE_PASS
+#define MODULE_PASS(NAME, CREATE_PASS)
+#endif
+MODULE_PASS("amdgpu-always-inline", AMDGPUAlwaysInlinePass())
+MODULE_PASS("amdgpu-attributor", AMDGPUAttributorPass(*this))
+MODULE_PASS("amdgpu-lower-buffer-fat-pointers",
+ AMDGPULowerBufferFatPointersPass(*this))
+MODULE_PASS("amdgpu-lower-ctor-dtor", AMDGPUCtorDtorLoweringPass())
+MODULE_PASS("amdgpu-lower-module-lds", AMDGPULowerModuleLDSPass(*this))
+MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass())
+MODULE_PASS("amdgpu-unify-metadata", AMDGPUUnifyMetadataPass())
+#undef MODULE_PASS
+
+#ifndef FUNCTION_PASS
+#define FUNCTION_PASS(NAME, CREATE_PASS)
+#endif
+FUNCTION_PASS("amdgpu-codegenprepare", AMDGPUCodeGenPreparePass(*this))
+FUNCTION_PASS("amdgpu-image-intrinsic-opt",
+ AMDGPUImageIntrinsicOptimizerPass(*this))
+FUNCTION_PASS("amdgpu-lower-kernel-arguments",
+ AMDGPULowerKernelArgumentsPass(*this))
+FUNCTION_PASS("amdgpu-lower-kernel-attributes",
+ AMDGPULowerKernelAttributesPass())
+FUNCTION_PASS("amdgpu-simplifylib", AMDGPUSimplifyLibCallsPass())
+FUNCTION_PASS("amdgpu-promote-alloca", AMDGPUPromoteAllocaPass(*this))
+FUNCTION_PASS("amdgpu-promote-alloca-to-vector",
+ AMDGPUPromoteAllocaToVectorPass(*this))
+FUNCTION_PASS("amdgpu-promote-kernel-arguments",
+ AMDGPUPromoteKernelArgumentsPass())
+FUNCTION_PASS("amdgpu-rewrite-undef-for-phi", AMDGPURewriteUndefForPHIPass())
+FUNCTION_PASS("amdgpu-unify-divergent-exit-nodes",
+ AMDGPUUnifyDivergentExitNodesPass())
+FUNCTION_PASS("amdgpu-usenative", AMDGPUUseNativeCallsPass())
+#undef FUNCTION_PASS
+
+#ifndef FUNCTION_ANALYSIS
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS)
+#endif
+
+#ifndef FUNCTION_ALIAS_ANALYSIS
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ FUNCTION_ANALYSIS(NAME, CREATE_PASS)
+#endif
+FUNCTION_ALIAS_ANALYSIS("amdgpu-aa", AMDGPUAA())
+#undef FUNCTION_ALIAS_ANALYSIS
+#undef FUNCTION_ANALYSIS
+
+#ifndef FUNCTION_PASS_WITH_PARAMS
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS)
+#endif
+FUNCTION_PASS_WITH_PARAMS(
+ "amdgpu-atomic-optimizer",
+ "AMDGPUAtomicOptimizerPass",
+ [=](ScanOptions Strategy) {
+ return AMDGPUAtomicOptimizerPass(*this, Strategy);
+ },
+ parseAMDGPUAtomicOptimizerStrategy, "strategy=dpp|iterative|none")
+#undef FUNCTION_PASS_WITH_PARAMS
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 0037825ce089..aa4ec785bf02 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3135,6 +3135,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
case Intrinsic::amdgcn_interp_inreg_p2:
case Intrinsic::amdgcn_interp_inreg_p10_f16:
case Intrinsic::amdgcn_interp_inreg_p2_f16:
+ case Intrinsic::amdgcn_interp_p10_rtz_f16:
+ case Intrinsic::amdgcn_interp_p2_rtz_f16:
applyDefaultMapping(OpdMapper);
return;
case Intrinsic::amdgcn_permlane16:
@@ -4778,7 +4780,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_interp_inreg_p10:
case Intrinsic::amdgcn_interp_inreg_p2:
case Intrinsic::amdgcn_interp_inreg_p10_f16:
- case Intrinsic::amdgcn_interp_inreg_p2_f16: {
+ case Intrinsic::amdgcn_interp_inreg_p2_f16:
+ case Intrinsic::amdgcn_interp_p10_rtz_f16:
+ case Intrinsic::amdgcn_interp_p2_rtz_f16: {
unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize);
OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32);
@@ -4889,7 +4893,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16:
case Intrinsic::amdgcn_atomic_cond_sub_u32:
case Intrinsic::amdgcn_global_atomic_ordered_add_b64:
- case Intrinsic::amdgcn_global_load_tr:
+ case Intrinsic::amdgcn_global_load_tr_b64:
+ case Intrinsic::amdgcn_global_load_tr_b128:
return getDefaultMappingAllVGPR(MI);
case Intrinsic::amdgcn_ds_ordered_add:
case Intrinsic::amdgcn_ds_ordered_swap:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
index bb1c6b733729..410dc83d45c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
@@ -354,83 +354,19 @@ def : SourceOfDivergence<int_amdgcn_mov_dpp8>;
def : SourceOfDivergence<int_amdgcn_update_dpp>;
def : SourceOfDivergence<int_amdgcn_writelane>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_4x4x1f32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_4x4x1f32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_4x4x4f16>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_4x4x4i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_4x4x2bf16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x1f32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x4f32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x4f16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x16f16>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_16x16x4i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_16x16x16i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x2bf16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x8bf16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x1f32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x2f32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x4f16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x8f16>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_32x32x4i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_32x32x8i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x2bf16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x4bf16>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x4bf16_1k>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x4bf16_1k>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_4x4x4bf16_1k>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x8bf16_1k>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x16bf16_1k>;
-def : SourceOfDivergence<int_amdgcn_mfma_f64_16x16x4f64>;
-def : SourceOfDivergence<int_amdgcn_mfma_f64_4x4x4f64>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_16x16x32_i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_i32_32x32x16_i8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x8_xf32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x4_xf32>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x32_bf8_bf8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x32_bf8_fp8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x32_fp8_bf8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_16x16x32_fp8_fp8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x16_bf8_bf8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x16_bf8_fp8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x16_fp8_bf8>;
-def : SourceOfDivergence<int_amdgcn_mfma_f32_32x32x16_fp8_fp8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_16x16x32_f16>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_32x32x16_f16>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_16x16x32_bf16>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_32x32x16_bf16>;
-def : SourceOfDivergence<int_amdgcn_smfmac_i32_16x16x64_i8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_i32_32x32x32_i8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_16x16x64_bf8_bf8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_16x16x64_bf8_fp8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_16x16x64_fp8_bf8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_16x16x64_fp8_fp8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_32x32x32_bf8_bf8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_32x32x32_bf8_fp8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_32x32x32_fp8_bf8>;
-def : SourceOfDivergence<int_amdgcn_smfmac_f32_32x32x32_fp8_fp8>;
-def : SourceOfDivergence<int_amdgcn_wmma_f32_16x16x16_f16>;
-def : SourceOfDivergence<int_amdgcn_wmma_f32_16x16x16_bf16>;
-def : SourceOfDivergence<int_amdgcn_wmma_f16_16x16x16_f16>;
-def : SourceOfDivergence<int_amdgcn_wmma_bf16_16x16x16_bf16>;
-def : SourceOfDivergence<int_amdgcn_wmma_i32_16x16x16_iu8>;
-def : SourceOfDivergence<int_amdgcn_wmma_i32_16x16x16_iu4>;
-def : SourceOfDivergence<int_amdgcn_wmma_f32_16x16x16_fp8_fp8>;
-def : SourceOfDivergence<int_amdgcn_wmma_f32_16x16x16_fp8_bf8>;
-def : SourceOfDivergence<int_amdgcn_wmma_f32_16x16x16_bf8_fp8>;
-def : SourceOfDivergence<int_amdgcn_wmma_f32_16x16x16_bf8_bf8>;
-def : SourceOfDivergence<int_amdgcn_wmma_i32_16x16x32_iu4>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f32_16x16x32_f16>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f32_16x16x32_bf16>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f16_16x16x32_f16>;
-def : SourceOfDivergence<int_amdgcn_swmmac_bf16_16x16x32_bf16>;
-def : SourceOfDivergence<int_amdgcn_swmmac_i32_16x16x32_iu8>;
-def : SourceOfDivergence<int_amdgcn_swmmac_i32_16x16x32_iu4>;
-def : SourceOfDivergence<int_amdgcn_swmmac_i32_16x16x64_iu4>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f32_16x16x32_fp8_fp8>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f32_16x16x32_fp8_bf8>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f32_16x16x32_bf8_fp8>;
-def : SourceOfDivergence<int_amdgcn_swmmac_f32_16x16x32_bf8_bf8>;
-def : SourceOfDivergence<int_amdgcn_global_load_tr>;
+foreach intr = AMDGPUMFMAIntrinsics908 in
+def : SourceOfDivergence<intr>;
+foreach intr = AMDGPUMFMAIntrinsics90A in
+def : SourceOfDivergence<intr>;
+foreach intr = AMDGPUMFMAIntrinsics940 in
+def : SourceOfDivergence<intr>;
+foreach intr = AMDGPUWMMAIntrinsicsGFX11 in
+def : SourceOfDivergence<intr>;
+foreach intr = AMDGPUWMMAIntrinsicsGFX12 in
+def : SourceOfDivergence<intr>;
+
+def : SourceOfDivergence<int_amdgcn_global_load_tr_b64>;
+def : SourceOfDivergence<int_amdgcn_global_load_tr_b128>;
// The dummy boolean output is divergent from the IR's perspective,
// but the mask results are uniform. These produce a divergent and
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 2b457fe519d9..f7e552177d6f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -631,107 +631,26 @@ void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
AAM.registerFunctionAnalysis<AMDGPUAA>();
}
+static Expected<ScanOptions>
+parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
+ if (Params.empty())
+ return ScanOptions::Iterative;
+ Params.consume_front("strategy=");
+ auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
+ .Case("dpp", ScanOptions::DPP)
+ .Cases("iterative", "", ScanOptions::Iterative)
+ .Case("none", ScanOptions::None)
+ .Default(std::nullopt);
+ if (Result)
+ return *Result;
+ return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
+}
+
void AMDGPUTargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
- PB.registerPipelineParsingCallback(
- [this](StringRef PassName, ModulePassManager &PM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (PassName == "amdgpu-attributor") {
- PM.addPass(AMDGPUAttributorPass(*this));
- return true;
- }
- if (PassName == "amdgpu-unify-metadata") {
- PM.addPass(AMDGPUUnifyMetadataPass());
- return true;
- }
- if (PassName == "amdgpu-printf-runtime-binding") {
- PM.addPass(AMDGPUPrintfRuntimeBindingPass());
- return true;
- }
- if (PassName == "amdgpu-always-inline") {
- PM.addPass(AMDGPUAlwaysInlinePass());
- return true;
- }
- if (PassName == "amdgpu-lower-module-lds") {
- PM.addPass(AMDGPULowerModuleLDSPass(*this));
- return true;
- }
- if (PassName == "amdgpu-lower-buffer-fat-pointers") {
- PM.addPass(AMDGPULowerBufferFatPointersPass(*this));
- return true;
- }
- if (PassName == "amdgpu-lower-ctor-dtor") {
- PM.addPass(AMDGPUCtorDtorLoweringPass());
- return true;
- }
- return false;
- });
- PB.registerPipelineParsingCallback(
- [this](StringRef PassName, FunctionPassManager &PM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (PassName == "amdgpu-simplifylib") {
- PM.addPass(AMDGPUSimplifyLibCallsPass());
- return true;
- }
- if (PassName == "amdgpu-image-intrinsic-opt") {
- PM.addPass(AMDGPUImageIntrinsicOptimizerPass(*this));
- return true;
- }
- if (PassName == "amdgpu-usenative") {
- PM.addPass(AMDGPUUseNativeCallsPass());
- return true;
- }
- if (PassName == "amdgpu-promote-alloca") {
- PM.addPass(AMDGPUPromoteAllocaPass(*this));
- return true;
- }
- if (PassName == "amdgpu-promote-alloca-to-vector") {
- PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
- return true;
- }
- if (PassName == "amdgpu-lower-kernel-attributes") {
- PM.addPass(AMDGPULowerKernelAttributesPass());
- return true;
- }
- if (PassName == "amdgpu-promote-kernel-arguments") {
- PM.addPass(AMDGPUPromoteKernelArgumentsPass());
- return true;
- }
- if (PassName == "amdgpu-unify-divergent-exit-nodes") {
- PM.addPass(AMDGPUUnifyDivergentExitNodesPass());
- return true;
- }
- if (PassName == "amdgpu-atomic-optimizer") {
- PM.addPass(
- AMDGPUAtomicOptimizerPass(*this, AMDGPUAtomicOptimizerStrategy));
- return true;
- }
- if (PassName == "amdgpu-codegenprepare") {
- PM.addPass(AMDGPUCodeGenPreparePass(*this));
- return true;
- }
- if (PassName == "amdgpu-lower-kernel-arguments") {
- PM.addPass(AMDGPULowerKernelArgumentsPass(*this));
- return true;
- }
- if (PassName == "amdgpu-rewrite-undef-for-phi") {
- PM.addPass(AMDGPURewriteUndefForPHIPass());
- return true;
- }
- return false;
- });
-
- PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
- FAM.registerPass([&] { return AMDGPUAA(); });
- });
- PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) {
- if (AAName == "amdgpu-aa") {
- AAM.registerFunctionAnalysis<AMDGPUAA>();
- return true;
- }
- return false;
- });
+#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
+#include "llvm/Passes/TargetPassRegistry.inc"
PB.registerPipelineStartEPCallback(
[](ModulePassManager &PM, OptimizationLevel Level) {
@@ -793,6 +712,15 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(
PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
});
+
+ PB.registerFullLinkTimeOptimizationLastEPCallback(
+ [this](ModulePassManager &PM, OptimizationLevel Level) {
+ // We want to support the -lto-partitions=N option as "best effort".
+ // For that, we need to lower LDS earlier in the pipeline before the
+ // module is partitioned for codegen.
+ if (EnableLowerModuleLDS)
+ PM.addPass(AMDGPULowerModuleLDSPass(*this));
+ });
}
int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 529705479646..294fc683fe92 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -8,6 +8,7 @@
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCExpr.h"
+#include "MCTargetDesc/AMDGPUMCKernelDescriptor.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
@@ -981,7 +982,7 @@ public:
return Imm.Type;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(isRegKind());
return Reg.RegNo;
}
@@ -5417,7 +5418,9 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
if (getParser().parseIdentifier(KernelName))
return true;
- kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor(&getSTI());
+ AMDGPU::MCKernelDescriptor KD =
+ AMDGPU::MCKernelDescriptor::getDefaultAmdhsaKernelDescriptor(
+ &getSTI(), getContext());
StringSet<> Seen;
@@ -5457,89 +5460,111 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
return TokError(".amdhsa_ directives cannot be repeated");
SMLoc ValStart = getLoc();
- int64_t IVal;
- if (getParser().parseAbsoluteExpression(IVal))
+ const MCExpr *ExprVal;
+ if (getParser().parseExpression(ExprVal))
return true;
SMLoc ValEnd = getLoc();
SMRange ValRange = SMRange(ValStart, ValEnd);
- if (IVal < 0)
- return OutOfRangeError(ValRange);
-
+ int64_t IVal = 0;
uint64_t Val = IVal;
+ bool EvaluatableExpr;
+ if ((EvaluatableExpr = ExprVal->evaluateAsAbsolute(IVal))) {
+ if (IVal < 0)
+ return OutOfRangeError(ValRange);
+ Val = IVal;
+ }
#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \
- if (!isUInt<ENTRY##_WIDTH>(VALUE)) \
+ if (!isUInt<ENTRY##_WIDTH>(Val)) \
return OutOfRangeError(RANGE); \
- AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);
+ AMDGPU::MCKernelDescriptor::bits_set(FIELD, VALUE, ENTRY##_SHIFT, ENTRY, \
+ getContext());
+
+// Some fields use the parsed value immediately which requires the expression to
+// be solvable.
+#define EXPR_RESOLVE_OR_ERROR(RESOLVED) \
+ if (!(RESOLVED)) \
+ return Error(IDRange.Start, "directive should have resolvable expression", \
+ IDRange);
if (ID == ".amdhsa_group_segment_fixed_size") {
- if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
+ if (!isUInt<sizeof(kernel_descriptor_t::group_segment_fixed_size) *
+ CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
- KD.group_segment_fixed_size = Val;
+ KD.group_segment_fixed_size = ExprVal;
} else if (ID == ".amdhsa_private_segment_fixed_size") {
- if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
+ if (!isUInt<sizeof(kernel_descriptor_t::private_segment_fixed_size) *
+ CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
- KD.private_segment_fixed_size = Val;
+ KD.private_segment_fixed_size = ExprVal;
} else if (ID == ".amdhsa_kernarg_size") {
- if (!isUInt<sizeof(KD.kernarg_size) * CHAR_BIT>(Val))
+ if (!isUInt<sizeof(kernel_descriptor_t::kernarg_size) * CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
- KD.kernarg_size = Val;
+ KD.kernarg_size = ExprVal;
} else if (ID == ".amdhsa_user_sgpr_count") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
ExplicitUserSGPRCount = Val;
} else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
- Val, ValRange);
+ ExprVal, ValRange);
if (Val)
ImpliedUserSGPRCount += 4;
} else if (ID == ".amdhsa_user_sgpr_kernarg_preload_length") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (!hasKernargPreload())
return Error(IDRange.Start, "directive requires gfx90a+", IDRange);
if (Val > getMaxNumUserSGPRs())
return OutOfRangeError(ValRange);
- PARSE_BITS_ENTRY(KD.kernarg_preload, KERNARG_PRELOAD_SPEC_LENGTH, Val,
+ PARSE_BITS_ENTRY(KD.kernarg_preload, KERNARG_PRELOAD_SPEC_LENGTH, ExprVal,
ValRange);
if (Val) {
ImpliedUserSGPRCount += Val;
PreloadLength = Val;
}
} else if (ID == ".amdhsa_user_sgpr_kernarg_preload_offset") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (!hasKernargPreload())
return Error(IDRange.Start, "directive requires gfx90a+", IDRange);
if (Val >= 1024)
return OutOfRangeError(ValRange);
- PARSE_BITS_ENTRY(KD.kernarg_preload, KERNARG_PRELOAD_SPEC_OFFSET, Val,
+ PARSE_BITS_ENTRY(KD.kernarg_preload, KERNARG_PRELOAD_SPEC_OFFSET, ExprVal,
ValRange);
if (Val)
PreloadOffset = Val;
} else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
- KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
+ KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, ExprVal,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
- KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
+ KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, ExprVal,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
- Val, ValRange);
+ ExprVal, ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
- KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
+ KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, ExprVal,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
@@ -5548,34 +5573,39 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
- KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
- ValRange);
+ KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT,
+ ExprVal, ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
- Val, ValRange);
+ ExprVal, ValRange);
if (Val)
ImpliedUserSGPRCount += 1;
} else if (ID == ".amdhsa_wavefront_size32") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
EnableWavefrontSize32 = Val;
PARSE_BITS_ENTRY(KD.kernel_code_properties,
- KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
- Val, ValRange);
+ KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_uses_dynamic_stack") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
- KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK, Val, ValRange);
+ KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, Val, ValRange);
+ COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_enable_private_segment") {
if (!hasArchitectedFlatScratch())
return Error(
@@ -5583,42 +5613,48 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
"directive is not supported without architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, Val, ValRange);
+ COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
+ COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, ExprVal,
ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
+ COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, ExprVal,
ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
+ COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, ExprVal,
ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
+ COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, ExprVal,
ValRange);
} else if (ID == ".amdhsa_system_vgpr_workitem_id") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
+ COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, ExprVal,
ValRange);
} else if (ID == ".amdhsa_next_free_vgpr") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
VGPRRange = ValRange;
NextFreeVGPR = Val;
} else if (ID == ".amdhsa_next_free_sgpr") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
SGPRRange = ValRange;
NextFreeSGPR = Val;
} else if (ID == ".amdhsa_accum_offset") {
if (!isGFX90A())
return Error(IDRange.Start, "directive requires gfx90a+", IDRange);
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
AccumOffset = Val;
} else if (ID == ".amdhsa_reserve_vcc") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (!isUInt<1>(Val))
return OutOfRangeError(ValRange);
ReserveVCC = Val;
} else if (ID == ".amdhsa_reserve_flat_scratch") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (IVersion.Major < 7)
return Error(IDRange.Start, "directive requires gfx7+", IDRange);
if (hasArchitectedFlatScratch())
@@ -5638,97 +5674,105 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
IDRange);
} else if (ID == ".amdhsa_float_round_mode_32") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
+ COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_float_round_mode_16_64") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
+ COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_float_denorm_mode_32") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
+ COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, ExprVal,
+ ValRange);
} else if (ID == ".amdhsa_float_denorm_mode_16_64") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
+ COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, ExprVal,
ValRange);
} else if (ID == ".amdhsa_dx10_clamp") {
if (IVersion.Major >= 12)
return Error(IDRange.Start, "directive unsupported on gfx12+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP, Val,
+ COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP, ExprVal,
ValRange);
} else if (ID == ".amdhsa_ieee_mode") {
if (IVersion.Major >= 12)
return Error(IDRange.Start, "directive unsupported on gfx12+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE, Val,
+ COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE, ExprVal,
ValRange);
} else if (ID == ".amdhsa_fp16_overflow") {
if (IVersion.Major < 9)
return Error(IDRange.Start, "directive requires gfx9+", IDRange);
- PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL, Val,
+ PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
+ COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL, ExprVal,
ValRange);
} else if (ID == ".amdhsa_tg_split") {
if (!isGFX90A())
return Error(IDRange.Start, "directive requires gfx90a+", IDRange);
- PARSE_BITS_ENTRY(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, Val,
- ValRange);
+ PARSE_BITS_ENTRY(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_workgroup_processor_mode") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
- PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE, Val,
+ PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
+ COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE, ExprVal,
ValRange);
} else if (ID == ".amdhsa_memory_ordered") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
- PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED, Val,
+ PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
+ COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED, ExprVal,
ValRange);
} else if (ID == ".amdhsa_forward_progress") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
- PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS, Val,
+ PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
+ COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS, ExprVal,
ValRange);
} else if (ID == ".amdhsa_shared_vgpr_count") {
+ EXPR_RESOLVE_OR_ERROR(EvaluatableExpr);
if (IVersion.Major < 10 || IVersion.Major >= 12)
return Error(IDRange.Start, "directive requires gfx10 or gfx11",
IDRange);
SharedVGPRCount = Val;
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc3,
- COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT, Val,
+ COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT, ExprVal,
ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
PARSE_BITS_ENTRY(
KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
- ValRange);
+ COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION,
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_exception_fp_denorm_src") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
- Val, ValRange);
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
PARSE_BITS_ENTRY(
KD.compute_pgm_rsrc2,
- COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
- ValRange);
+ COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO,
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
- Val, ValRange);
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
- Val, ValRange);
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
- Val, ValRange);
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_exception_int_div_zero") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
- Val, ValRange);
+ ExprVal, ValRange);
} else if (ID == ".amdhsa_round_robin_scheduling") {
if (IVersion.Major < 12)
return Error(IDRange.Start, "directive requires gfx12+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN, Val,
+ COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN, ExprVal,
ValRange);
} else {
return Error(IDRange.Start, "unknown .amdhsa_kernel directive", IDRange);
@@ -5755,15 +5799,18 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
VGPRBlocks))
return OutOfRangeError(VGPRRange);
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);
+ AMDGPU::MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1, MCConstantExpr::create(VGPRBlocks, getContext()),
+ COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT,
+ COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, getContext());
if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
SGPRBlocks))
return OutOfRangeError(SGPRRange);
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
- SGPRBlocks);
+ AMDGPU::MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1, MCConstantExpr::create(SGPRBlocks, getContext()),
+ COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT,
+ COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT, getContext());
if (ExplicitUserSGPRCount && ImpliedUserSGPRCount > *ExplicitUserSGPRCount)
return TokError("amdgpu_user_sgpr_count smaller than than implied by "
@@ -5774,11 +5821,17 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
return TokError("too many user SGPRs enabled");
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
- UserSGPRCount);
-
- if (PreloadLength && KD.kernarg_size &&
- (PreloadLength * 4 + PreloadOffset * 4 > KD.kernarg_size))
+ AMDGPU::MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc2, MCConstantExpr::create(UserSGPRCount, getContext()),
+ COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_SHIFT,
+ COMPUTE_PGM_RSRC2_USER_SGPR_COUNT, getContext());
+
+ int64_t IVal = 0;
+ if (!KD.kernarg_size->evaluateAsAbsolute(IVal))
+ return TokError("Kernarg size should be resolvable");
+ uint64_t kernarg_size = IVal;
+ if (PreloadLength && kernarg_size &&
+ (PreloadLength * 4 + PreloadOffset * 4 > kernarg_size))
return TokError("Kernarg preload length + offset is larger than the "
"kernarg segment size");
@@ -5790,8 +5843,11 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
"increments of 4");
if (AccumOffset > alignTo(std::max((uint64_t)1, NextFreeVGPR), 4))
return TokError("accum_offset exceeds total VGPR allocation");
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET,
- (AccumOffset / 4 - 1));
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc3,
+ MCConstantExpr::create(AccumOffset / 4 - 1, getContext()),
+ COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET_SHIFT,
+ COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET, getContext());
}
if (IVersion.Major >= 10 && IVersion.Major < 12) {
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 4ae514ffcf78..273f92abf354 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -86,7 +86,7 @@ class BUF_Pseudo <string opName, dag outs, dag ins,
bits<1> has_soffset = 1;
bits<1> has_offset = 1;
bits<1> has_slc = 1;
- bits<1> tfe = ?;
+ bits<1> tfe = 0;
bits<4> elements = 0;
bits<1> has_sccb = 1;
bits<1> sccb_value = 0;
@@ -323,6 +323,7 @@ class MUBUF_Pseudo <string opName, dag outs, dag ins,
Instruction BaseOpcode = !cast<Instruction>(MUBUFGetBaseOpcode<NAME>.ret);
let MUBUF = 1;
let AsmMatchConverter = "cvtMubuf";
+ let usesCustomInserter = 1;
}
class MUBUF_Real <MUBUF_Pseudo ps, string real_name = ps.Mnemonic> :
@@ -3369,7 +3370,7 @@ def MUBUFInfoTable : GenericTable {
let CppTypeName = "MUBUFInfo";
let Fields = [
"Opcode", "BaseOpcode", "elements", "has_vaddr", "has_srsrc", "has_soffset",
- "IsBufferInv"
+ "IsBufferInv", "tfe"
];
let PrimaryKey = ["Opcode"];
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index db1f8c187265..d017ec4a7415 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -1593,16 +1593,12 @@ let OtherPredicates = [isGFX12Plus] in {
defm : GlobalFLATAtomicPatsRtn <"GLOBAL_ATOMIC_ORDERED_ADD_B64", "int_amdgcn_global_atomic_ordered_add_b64", i64, i64, /* isIntr */ 1>;
let WaveSizePredicate = isWave32 in {
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B64_w32, int_amdgcn_global_load_tr, v2i32>;
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w32, int_amdgcn_global_load_tr, v8i16>;
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w32, int_amdgcn_global_load_tr, v8f16>;
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w32, int_amdgcn_global_load_tr, v8bf16>;
+ defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B64_w32, int_amdgcn_global_load_tr_b64, v2i32>;
+ defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w32, int_amdgcn_global_load_tr_b128, v8i16>;
}
let WaveSizePredicate = isWave64 in {
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B64_w64, int_amdgcn_global_load_tr, i32>;
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w64, int_amdgcn_global_load_tr, v4i16>;
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w64, int_amdgcn_global_load_tr, v4f16>;
- defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w64, int_amdgcn_global_load_tr, v4bf16>;
+ defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B64_w64, int_amdgcn_global_load_tr_b64, i32>;
+ defm : GlobalFLATLoadPats <GLOBAL_LOAD_TR_B128_w64, int_amdgcn_global_load_tr_b128, v4i16>;
}
}
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index e515b729e7d7..75766b11ca82 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -2357,6 +2357,9 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
break;
}
+ // Pad neighboring MFMA with noops for better inter-wave performance.
+ WaitStatesNeeded = std::max(WaitStatesNeeded, checkMFMAPadding(MI));
+
return WaitStatesNeeded;
}
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 9f419a7fbf68..94d93390d091 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -973,12 +973,17 @@ void GCNSchedStage::checkScheduling() {
LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
<< DAG.MinOccupancy << ".\n");
}
-
+ // The maximum number of arch VGPR on non-unified register file, or the
+ // maximum VGPR + AGPR in the unified register file case.
unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
+ // The maximum number of arch VGPR for both unified and non-unified register
+ // file.
+ unsigned MaxArchVGPRs = std::min(MaxVGPRs, ST.getAddressableNumArchVGPRs());
unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
- if (PressureAfter.getVGPRNum(false) > MaxVGPRs ||
- PressureAfter.getAGPRNum() > MaxVGPRs ||
+ if (PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) > MaxVGPRs ||
+ PressureAfter.getVGPRNum(false) > MaxArchVGPRs ||
+ PressureAfter.getAGPRNum() > MaxArchVGPRs ||
PressureAfter.getSGPRNum() > MaxSGPRs) {
DAG.RescheduleRegions[RegionIdx] = true;
DAG.RegionsWithHighRP[RegionIdx] = true;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index ca51da659c33..4da10beabe31 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -223,7 +223,7 @@ protected:
bool HasImageStoreD16Bug = false;
bool HasImageGather4D16Bug = false;
bool HasMSAALoadDstSelBug = false;
- bool HasGFX11FullVGPRs = false;
+ bool Has1_5xVGPRs = false;
bool HasMADIntraFwdBug = false;
bool HasVOPDInsts = false;
bool HasVALUTransUseHazard = false;
@@ -1202,7 +1202,7 @@ public:
/// target.
bool hasNullExportTarget() const { return !GFX11Insts; }
- bool hasGFX11FullVGPRs() const { return HasGFX11FullVGPRs; }
+ bool has1_5xVGPRs() const { return Has1_5xVGPRs; }
bool hasVOPDInsts() const { return HasVOPDInsts; }
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.cpp
new file mode 100644
index 000000000000..77e7e30ff528
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.cpp
@@ -0,0 +1,98 @@
+//===--- AMDGPUMCKernelDescriptor.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUMCKernelDescriptor.h"
+#include "AMDGPUMCTargetDesc.h"
+#include "Utils/AMDGPUBaseInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/TargetParser/TargetParser.h"
+
+using namespace llvm;
+using namespace llvm::AMDGPU;
+
+MCKernelDescriptor
+MCKernelDescriptor::getDefaultAmdhsaKernelDescriptor(const MCSubtargetInfo *STI,
+ MCContext &Ctx) {
+ IsaVersion Version = getIsaVersion(STI->getCPU());
+
+ MCKernelDescriptor KD;
+ const MCExpr *ZeroMCExpr = MCConstantExpr::create(0, Ctx);
+ const MCExpr *OneMCExpr = MCConstantExpr::create(1, Ctx);
+
+ KD.group_segment_fixed_size = ZeroMCExpr;
+ KD.private_segment_fixed_size = ZeroMCExpr;
+ KD.compute_pgm_rsrc1 = ZeroMCExpr;
+ KD.compute_pgm_rsrc2 = ZeroMCExpr;
+ KD.compute_pgm_rsrc3 = ZeroMCExpr;
+ KD.kernarg_size = ZeroMCExpr;
+ KD.kernel_code_properties = ZeroMCExpr;
+ KD.kernarg_preload = ZeroMCExpr;
+
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1,
+ MCConstantExpr::create(amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE, Ctx),
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Ctx);
+ if (Version.Major < 12) {
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1, OneMCExpr,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP, Ctx);
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1, OneMCExpr,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE, Ctx);
+ }
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc2, OneMCExpr,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Ctx);
+ if (Version.Major >= 10) {
+ if (STI->getFeatureBits().test(FeatureWavefrontSize32))
+ MCKernelDescriptor::bits_set(
+ KD.kernel_code_properties, OneMCExpr,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32, Ctx);
+ if (!STI->getFeatureBits().test(FeatureCuMode))
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1, OneMCExpr,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE, Ctx);
+
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc1, OneMCExpr,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED, Ctx);
+ }
+ if (AMDGPU::isGFX90A(*STI) && STI->getFeatureBits().test(FeatureTgSplit))
+ MCKernelDescriptor::bits_set(
+ KD.compute_pgm_rsrc3, OneMCExpr,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, Ctx);
+ return KD;
+}
+
+void MCKernelDescriptor::bits_set(const MCExpr *&Dst, const MCExpr *Value,
+ uint32_t Shift, uint32_t Mask,
+ MCContext &Ctx) {
+ auto Sft = MCConstantExpr::create(Shift, Ctx);
+ auto Msk = MCConstantExpr::create(Mask, Ctx);
+ Dst = MCBinaryExpr::createAnd(Dst, MCUnaryExpr::createNot(Msk, Ctx), Ctx);
+ Dst = MCBinaryExpr::createOr(Dst, MCBinaryExpr::createShl(Value, Sft, Ctx),
+ Ctx);
+}
+
+const MCExpr *MCKernelDescriptor::bits_get(const MCExpr *Src, uint32_t Shift,
+ uint32_t Mask, MCContext &Ctx) {
+ auto Sft = MCConstantExpr::create(Shift, Ctx);
+ auto Msk = MCConstantExpr::create(Mask, Ctx);
+ return MCBinaryExpr::createLShr(MCBinaryExpr::createAnd(Src, Msk, Ctx), Sft,
+ Ctx);
+}
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.h
new file mode 100644
index 000000000000..26958ac8b9ee
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.h
@@ -0,0 +1,54 @@
+//===--- AMDGPUMCKernelDescriptor.h ---------------------------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// AMDHSA kernel descriptor MCExpr struct for use in MC layer. Uses
+/// AMDHSAKernelDescriptor.h for sizes and constants.
+///
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCKERNELDESCRIPTOR_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCKERNELDESCRIPTOR_H
+
+#include "llvm/Support/AMDHSAKernelDescriptor.h"
+
+namespace llvm {
+class MCExpr;
+class MCContext;
+class MCSubtargetInfo;
+namespace AMDGPU {
+
+struct MCKernelDescriptor {
+ const MCExpr *group_segment_fixed_size = nullptr;
+ const MCExpr *private_segment_fixed_size = nullptr;
+ const MCExpr *kernarg_size = nullptr;
+ const MCExpr *compute_pgm_rsrc3 = nullptr;
+ const MCExpr *compute_pgm_rsrc1 = nullptr;
+ const MCExpr *compute_pgm_rsrc2 = nullptr;
+ const MCExpr *kernel_code_properties = nullptr;
+ const MCExpr *kernarg_preload = nullptr;
+
+ static MCKernelDescriptor
+ getDefaultAmdhsaKernelDescriptor(const MCSubtargetInfo *STI, MCContext &Ctx);
+ // MCExpr for:
+ // Dst = Dst & ~Mask
+ // Dst = Dst | (Value << Shift)
+ static void bits_set(const MCExpr *&Dst, const MCExpr *Value, uint32_t Shift,
+ uint32_t Mask, MCContext &Ctx);
+
+ // MCExpr for:
+ // return (Src & Mask) >> Shift
+ static const MCExpr *bits_get(const MCExpr *Src, uint32_t Shift,
+ uint32_t Mask, MCContext &Ctx);
+};
+
+} // end namespace AMDGPU
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCKERNELDESCRIPTOR_H
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index 4742b0b3e52e..02fe7be06280 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUTargetStreamer.h"
+#include "AMDGPUMCKernelDescriptor.h"
#include "AMDGPUPTNote.h"
#include "AMDKernelCodeT.h"
#include "Utils/AMDGPUBaseInfo.h"
@@ -283,6 +284,15 @@ bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
return true;
}
+bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
+ const MCSubtargetInfo &STI, bool TrapEnabled) {
+ OS << (TrapEnabled ? "\ts_trap 2" : "\ts_endpgm")
+ << " ; Kernarg preload header. Trap with incompatible firmware that "
+ "doesn't support preloading kernel arguments.\n";
+ OS << "\t.fill 63, 4, 0xbf800000 ; s_nop 0\n";
+ return true;
+}
+
bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
const uint32_t Encoded_s_code_end = 0xbf9f0000;
const uint32_t Encoded_s_nop = 0xbf800000;
@@ -307,94 +317,142 @@ bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
const MCSubtargetInfo &STI, StringRef KernelName,
- const amdhsa::kernel_descriptor_t &KD, uint64_t NextVGPR, uint64_t NextSGPR,
+ const MCKernelDescriptor &KD, uint64_t NextVGPR, uint64_t NextSGPR,
bool ReserveVCC, bool ReserveFlatScr) {
IsaVersion IVersion = getIsaVersion(STI.getCPU());
+ const MCAsmInfo *MAI = getContext().getAsmInfo();
OS << "\t.amdhsa_kernel " << KernelName << '\n';
-#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME) \
- STREAM << "\t\t" << DIRECTIVE << " " \
- << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';
-
- OS << "\t\t.amdhsa_group_segment_fixed_size " << KD.group_segment_fixed_size
- << '\n';
- OS << "\t\t.amdhsa_private_segment_fixed_size "
- << KD.private_segment_fixed_size << '\n';
- OS << "\t\t.amdhsa_kernarg_size " << KD.kernarg_size << '\n';
-
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_count", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT);
+ auto PrintField = [&](const MCExpr *Expr, uint32_t Shift, uint32_t Mask,
+ StringRef Directive) {
+ int64_t IVal;
+ OS << "\t\t" << Directive << ' ';
+ const MCExpr *pgm_rsrc1_bits =
+ MCKernelDescriptor::bits_get(Expr, Shift, Mask, getContext());
+ if (pgm_rsrc1_bits->evaluateAsAbsolute(IVal))
+ OS << static_cast<uint64_t>(IVal);
+ else
+ pgm_rsrc1_bits->print(OS, MAI);
+ OS << '\n';
+ };
+
+ OS << "\t\t.amdhsa_group_segment_fixed_size ";
+ KD.group_segment_fixed_size->print(OS, MAI);
+ OS << '\n';
+
+ OS << "\t\t.amdhsa_private_segment_fixed_size ";
+ KD.private_segment_fixed_size->print(OS, MAI);
+ OS << '\n';
+
+ OS << "\t\t.amdhsa_kernarg_size ";
+ KD.kernarg_size->print(OS, MAI);
+ OS << '\n';
+
+ PrintField(
+ KD.compute_pgm_rsrc2, amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_USER_SGPR_COUNT, ".amdhsa_user_sgpr_count");
if (!hasArchitectedFlatScratch(STI))
- PRINT_FIELD(
- OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_queue_ptr", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
+ PrintField(
+ KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
+ ".amdhsa_user_sgpr_private_segment_buffer");
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR,
+ ".amdhsa_user_sgpr_dispatch_ptr");
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR,
+ ".amdhsa_user_sgpr_queue_ptr");
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
+ ".amdhsa_user_sgpr_kernarg_segment_ptr");
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID,
+ ".amdhsa_user_sgpr_dispatch_id");
if (!hasArchitectedFlatScratch(STI))
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT,
+ ".amdhsa_user_sgpr_flat_scratch_init");
if (hasKernargPreload(STI)) {
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_preload_length ", KD,
- kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_LENGTH);
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_preload_offset ", KD,
- kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_OFFSET);
+ PrintField(KD.kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_LENGTH_SHIFT,
+ amdhsa::KERNARG_PRELOAD_SPEC_LENGTH,
+ ".amdhsa_user_sgpr_kernarg_preload_length");
+ PrintField(KD.kernarg_preload, amdhsa::KERNARG_PRELOAD_SPEC_OFFSET_SHIFT,
+ amdhsa::KERNARG_PRELOAD_SPEC_OFFSET,
+ ".amdhsa_user_sgpr_kernarg_preload_offset");
}
- PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
+ PrintField(
+ KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
+ ".amdhsa_user_sgpr_private_segment_size");
if (IVersion.Major >= 10)
- PRINT_FIELD(OS, ".amdhsa_wavefront_size32", KD,
- kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
+ ".amdhsa_wavefront_size32");
if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
- PRINT_FIELD(OS, ".amdhsa_uses_dynamic_stack", KD, kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
- PRINT_FIELD(OS,
- (hasArchitectedFlatScratch(STI)
- ? ".amdhsa_enable_private_segment"
- : ".amdhsa_system_sgpr_private_segment_wavefront_offset"),
- KD, compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
- PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
- PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
- PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
- PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
- PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
+ PrintField(KD.kernel_code_properties,
+ amdhsa::KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK_SHIFT,
+ amdhsa::KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK,
+ ".amdhsa_uses_dynamic_stack");
+ PrintField(KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT,
+ (hasArchitectedFlatScratch(STI)
+ ? ".amdhsa_enable_private_segment"
+ : ".amdhsa_system_sgpr_private_segment_wavefront_offset"));
+ PrintField(KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X,
+ ".amdhsa_system_sgpr_workgroup_id_x");
+ PrintField(KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y,
+ ".amdhsa_system_sgpr_workgroup_id_y");
+ PrintField(KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z,
+ ".amdhsa_system_sgpr_workgroup_id_z");
+ PrintField(KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO,
+ ".amdhsa_system_sgpr_workgroup_info");
+ PrintField(KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID,
+ ".amdhsa_system_vgpr_workitem_id");
// These directives are required.
OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';
- if (AMDGPU::isGFX90A(STI))
- OS << "\t\t.amdhsa_accum_offset " <<
- (AMDHSA_BITS_GET(KD.compute_pgm_rsrc3,
- amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
- << '\n';
+ if (AMDGPU::isGFX90A(STI)) {
+ // MCExpr equivalent of taking the (accum_offset + 1) * 4.
+ const MCExpr *accum_bits = MCKernelDescriptor::bits_get(
+ KD.compute_pgm_rsrc3,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET, getContext());
+ accum_bits = MCBinaryExpr::createAdd(
+ accum_bits, MCConstantExpr::create(1, getContext()), getContext());
+ accum_bits = MCBinaryExpr::createMul(
+ accum_bits, MCConstantExpr::create(4, getContext()), getContext());
+ OS << "\t\t.amdhsa_accum_offset ";
+ int64_t IVal;
+ if (accum_bits->evaluateAsAbsolute(IVal)) {
+ OS << static_cast<uint64_t>(IVal);
+ } else {
+ accum_bits->print(OS, MAI);
+ }
+ OS << '\n';
+ }
if (!ReserveVCC)
OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
@@ -411,74 +469,105 @@ void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
break;
}
- PRINT_FIELD(OS, ".amdhsa_float_round_mode_32", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
- PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
- PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
- PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32,
+ ".amdhsa_float_round_mode_32");
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64,
+ ".amdhsa_float_round_mode_16_64");
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32,
+ ".amdhsa_float_denorm_mode_32");
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
+ ".amdhsa_float_denorm_mode_16_64");
if (IVersion.Major < 12) {
- PRINT_FIELD(OS, ".amdhsa_dx10_clamp", KD, compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
- PRINT_FIELD(OS, ".amdhsa_ieee_mode", KD, compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP,
+ ".amdhsa_dx10_clamp");
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE,
+ ".amdhsa_ieee_mode");
+ }
+ if (IVersion.Major >= 9) {
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL,
+ ".amdhsa_fp16_overflow");
}
- if (IVersion.Major >= 9)
- PRINT_FIELD(OS, ".amdhsa_fp16_overflow", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
if (AMDGPU::isGFX90A(STI))
- PRINT_FIELD(OS, ".amdhsa_tg_split", KD,
- compute_pgm_rsrc3,
- amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
+ PrintField(KD.compute_pgm_rsrc3,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, ".amdhsa_tg_split");
if (IVersion.Major >= 10) {
- PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
- PRINT_FIELD(OS, ".amdhsa_memory_ordered", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
- PRINT_FIELD(OS, ".amdhsa_forward_progress", KD,
- compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE,
+ ".amdhsa_workgroup_processor_mode");
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED,
+ ".amdhsa_memory_ordered");
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS,
+ ".amdhsa_forward_progress");
}
if (IVersion.Major >= 10 && IVersion.Major < 12) {
- PRINT_FIELD(OS, ".amdhsa_shared_vgpr_count", KD, compute_pgm_rsrc3,
- amdhsa::COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
+ PrintField(KD.compute_pgm_rsrc3,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT,
+ ".amdhsa_shared_vgpr_count");
+ }
+ if (IVersion.Major >= 12) {
+ PrintField(KD.compute_pgm_rsrc1,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN,
+ ".amdhsa_round_robin_scheduling");
}
- if (IVersion.Major >= 12)
- PRINT_FIELD(OS, ".amdhsa_round_robin_scheduling", KD, compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
- PRINT_FIELD(
- OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
- PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
- PRINT_FIELD(
- OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
- PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
- PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
- PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
- PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
- compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
-#undef PRINT_FIELD
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::
+ COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION,
+ ".amdhsa_exception_fp_ieee_invalid_op");
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
+ ".amdhsa_exception_fp_denorm_src");
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::
+ COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO,
+ ".amdhsa_exception_fp_ieee_div_zero");
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
+ ".amdhsa_exception_fp_ieee_overflow");
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
+ ".amdhsa_exception_fp_ieee_underflow");
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
+ ".amdhsa_exception_fp_ieee_inexact");
+ PrintField(
+ KD.compute_pgm_rsrc2,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO_SHIFT,
+ amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
+ ".amdhsa_exception_int_div_zero");
OS << "\t.end_amdhsa_kernel\n";
}
@@ -781,18 +870,6 @@ bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
return true;
}
-bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
- const MCSubtargetInfo &STI, bool TrapEnabled) {
- const char *TrapInstr = TrapEnabled ? "\ts_trap 2" : "\ts_endpgm";
- OS << TrapInstr
- << " ; Trap with incompatible firmware that doesn't "
- "support preloading kernel arguments.\n";
- for (int i = 0; i < 63; ++i) {
- OS << "\ts_nop 0\n";
- }
- return true;
-}
-
bool AMDGPUTargetELFStreamer::EmitKernargPreloadHeader(
const MCSubtargetInfo &STI, bool TrapEnabled) {
const uint32_t Encoded_s_nop = 0xbf800000;
@@ -835,7 +912,7 @@ bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
const MCSubtargetInfo &STI, StringRef KernelName,
- const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
+ const MCKernelDescriptor &KernelDescriptor, uint64_t NextVGPR,
uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr) {
auto &Streamer = getStreamer();
auto &Context = Streamer.getContext();
@@ -853,7 +930,7 @@ void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
// Kernel descriptor symbol's type and size are fixed.
KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
KernelDescriptorSymbol->setSize(
- MCConstantExpr::create(sizeof(KernelDescriptor), Context));
+ MCConstantExpr::create(sizeof(amdhsa::kernel_descriptor_t), Context));
// The visibility of the kernel code symbol must be protected or less to allow
// static relocations from the kernel descriptor to be used.
@@ -861,31 +938,43 @@ void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);
Streamer.emitLabel(KernelDescriptorSymbol);
- Streamer.emitInt32(KernelDescriptor.group_segment_fixed_size);
- Streamer.emitInt32(KernelDescriptor.private_segment_fixed_size);
- Streamer.emitInt32(KernelDescriptor.kernarg_size);
-
- for (uint8_t Res : KernelDescriptor.reserved0)
- Streamer.emitInt8(Res);
+ Streamer.emitValue(
+ KernelDescriptor.group_segment_fixed_size,
+ sizeof(amdhsa::kernel_descriptor_t::group_segment_fixed_size));
+ Streamer.emitValue(
+ KernelDescriptor.private_segment_fixed_size,
+ sizeof(amdhsa::kernel_descriptor_t::private_segment_fixed_size));
+ Streamer.emitValue(KernelDescriptor.kernarg_size,
+ sizeof(amdhsa::kernel_descriptor_t::kernarg_size));
+
+ for (uint32_t i = 0; i < sizeof(amdhsa::kernel_descriptor_t::reserved0); ++i)
+ Streamer.emitInt8(0u);
// FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
// expression being created is:
// (start of kernel code) - (start of kernel descriptor)
// It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
- Streamer.emitValue(MCBinaryExpr::createSub(
- MCSymbolRefExpr::create(
- KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
- MCSymbolRefExpr::create(
- KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
- Context),
- sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
- for (uint8_t Res : KernelDescriptor.reserved1)
- Streamer.emitInt8(Res);
- Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc3);
- Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc1);
- Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc2);
- Streamer.emitInt16(KernelDescriptor.kernel_code_properties);
- Streamer.emitInt16(KernelDescriptor.kernarg_preload);
- for (uint8_t Res : KernelDescriptor.reserved3)
- Streamer.emitInt8(Res);
+ Streamer.emitValue(
+ MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(KernelCodeSymbol,
+ MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
+ MCSymbolRefExpr::create(KernelDescriptorSymbol,
+ MCSymbolRefExpr::VK_None, Context),
+ Context),
+ sizeof(amdhsa::kernel_descriptor_t::kernel_code_entry_byte_offset));
+ for (uint32_t i = 0; i < sizeof(amdhsa::kernel_descriptor_t::reserved1); ++i)
+ Streamer.emitInt8(0u);
+ Streamer.emitValue(KernelDescriptor.compute_pgm_rsrc3,
+ sizeof(amdhsa::kernel_descriptor_t::compute_pgm_rsrc3));
+ Streamer.emitValue(KernelDescriptor.compute_pgm_rsrc1,
+ sizeof(amdhsa::kernel_descriptor_t::compute_pgm_rsrc1));
+ Streamer.emitValue(KernelDescriptor.compute_pgm_rsrc2,
+ sizeof(amdhsa::kernel_descriptor_t::compute_pgm_rsrc2));
+ Streamer.emitValue(
+ KernelDescriptor.kernel_code_properties,
+ sizeof(amdhsa::kernel_descriptor_t::kernel_code_properties));
+ Streamer.emitValue(KernelDescriptor.kernarg_preload,
+ sizeof(amdhsa::kernel_descriptor_t::kernarg_preload));
+ for (uint32_t i = 0; i < sizeof(amdhsa::kernel_descriptor_t::reserved3); ++i)
+ Streamer.emitInt8(0u);
}
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
index 5aa80ff578c6..706897a5dc1f 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -22,15 +22,13 @@ class MCSymbol;
class formatted_raw_ostream;
namespace AMDGPU {
+
+struct MCKernelDescriptor;
namespace HSAMD {
struct Metadata;
}
} // namespace AMDGPU
-namespace amdhsa {
-struct kernel_descriptor_t;
-}
-
class AMDGPUTargetStreamer : public MCTargetStreamer {
AMDGPUPALMetadata PALMetadata;
@@ -94,10 +92,11 @@ public:
return true;
}
- virtual void EmitAmdhsaKernelDescriptor(
- const MCSubtargetInfo &STI, StringRef KernelName,
- const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
- uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr) {}
+ virtual void
+ EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
+ const AMDGPU::MCKernelDescriptor &KernelDescriptor,
+ uint64_t NextVGPR, uint64_t NextSGPR,
+ bool ReserveVCC, bool ReserveFlatScr) {}
static StringRef getArchNameFromElfMach(unsigned ElfMach);
static unsigned getElfMach(StringRef GPU);
@@ -150,10 +149,11 @@ public:
bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
bool TrapEnabled) override;
- void EmitAmdhsaKernelDescriptor(
- const MCSubtargetInfo &STI, StringRef KernelName,
- const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
- uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr) override;
+ void
+ EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
+ const AMDGPU::MCKernelDescriptor &KernelDescriptor,
+ uint64_t NextVGPR, uint64_t NextSGPR,
+ bool ReserveVCC, bool ReserveFlatScr) override;
};
class AMDGPUTargetELFStreamer final : public AMDGPUTargetStreamer {
@@ -205,10 +205,11 @@ public:
bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
bool TrapEnabled) override;
- void EmitAmdhsaKernelDescriptor(
- const MCSubtargetInfo &STI, StringRef KernelName,
- const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
- uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr) override;
+ void
+ EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
+ const AMDGPU::MCKernelDescriptor &KernelDescriptor,
+ uint64_t NextVGPR, uint64_t NextSGPR,
+ bool ReserveVCC, bool ReserveFlatScr) override;
};
}
#endif
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt b/llvm/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
index 0842a58f794b..14a02b6d8e36 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
@@ -8,6 +8,7 @@ add_llvm_component_library(LLVMAMDGPUDesc
AMDGPUMCExpr.cpp
AMDGPUMCTargetDesc.cpp
AMDGPUTargetStreamer.cpp
+ AMDGPUMCKernelDescriptor.cpp
R600InstPrinter.cpp
R600MCCodeEmitter.cpp
R600MCTargetDesc.cpp
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index 595ef39ce03e..23e8be0d5e45 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -210,6 +210,7 @@ class MIMG <dag outs, string dns = "">
: MIMG_Base <outs, dns> {
let hasPostISelHook = 1;
+ let usesCustomInserter = 1;
Instruction Opcode = !cast<Instruction>(NAME);
MIMGBaseOpcode BaseOpcode;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 5ccf21f76015..14948ef9ea8d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1355,7 +1355,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
MachineMemOperand::MOVolatile;
return true;
}
- case Intrinsic::amdgcn_global_load_tr: {
+ case Intrinsic::amdgcn_global_load_tr_b64:
+ case Intrinsic::amdgcn_global_load_tr_b128: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::getVT(CI.getType());
Info.ptrVal = CI.getOperand(0);
@@ -1462,7 +1463,8 @@ bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
case Intrinsic::amdgcn_global_atomic_fmin:
case Intrinsic::amdgcn_global_atomic_fmin_num:
case Intrinsic::amdgcn_global_atomic_ordered_add_b64:
- case Intrinsic::amdgcn_global_load_tr:
+ case Intrinsic::amdgcn_global_load_tr_b64:
+ case Intrinsic::amdgcn_global_load_tr_b128:
Ptr = II->getArgOperand(0);
break;
case Intrinsic::amdgcn_global_load_lds:
@@ -3831,9 +3833,6 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
}
- if (!IsTailCall)
- Ops.push_back(CLI.ConvergenceControlToken);
-
if (IsTailCall) {
// Each tail call may have to adjust the stack by a different amount, so
// this information must travel along with the operation for eventual
@@ -3860,6 +3859,17 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
if (InGlue.getNode())
Ops.push_back(InGlue);
+ // NOTE: This potentially results in *two* glue operands, and the wrong one
+ // might possibly show up where the other was intended. In particular,
+ // Emitter::EmitMachineNode() expects only the glued convergence token if it
+ // exists. Similarly, the selection of the call expects to match only the
+ // InGlue operand if it exists.
+ if (SDValue Token = CLI.ConvergenceControlToken) {
+ Ops.push_back(SDValue(DAG.getMachineNode(TargetOpcode::CONVERGENCECTRL_GLUE,
+ DL, MVT::Glue, Token),
+ 0));
+ }
+
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
// If we're doing a tall call, use a TC_RETURN here rather than an
@@ -4858,8 +4868,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
if (Subtarget->hasScalarAddSub64()) {
unsigned Opc = IsAdd ? AMDGPU::S_ADD_U64 : AMDGPU::S_SUB_U64;
BuildMI(*BB, MI, DL, TII->get(Opc), Dest.getReg())
- .addReg(Src0.getReg())
- .addReg(Src1.getReg());
+ .add(Src0)
+ .add(Src1);
} else {
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const TargetRegisterClass *BoolRC = TRI->getBoolRC();
@@ -5224,24 +5234,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
MachineInstrBuilder MIB;
MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg);
- for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
- MachineOperand &MO = MI.getOperand(I);
- if (I != 2) {
- MIB.add(MO);
- continue;
- }
- }
-
- MachineOperand &MO = MI.getOperand(2);
- MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- // The token operand is always a register, whose definition is IMPLICIT_DEF
- // iff there was no token on the call.
- if (MachineInstr *Def = MRI.getVRegDef(MO.getReg())) {
- if (Def->getOpcode() != TargetOpcode::IMPLICIT_DEF) {
- MO.setImplicit();
- MIB.add(MO);
- }
- }
+ for (const MachineOperand &MO : MI.operands())
+ MIB.add(MO);
MIB.cloneMemRefs(MI);
MI.eraseFromParent();
@@ -5409,6 +5403,11 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
return SplitBB;
}
default:
+ if (TII->isImage(MI) || TII->isMUBUF(MI)) {
+ if (!MI.mayStore())
+ AddMemOpInit(MI);
+ return BB;
+ }
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
}
}
@@ -15033,60 +15032,67 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
// result register that will be written in the case of a memory access failure.
// The required code is also added to tie this init code to the result of the
// img instruction.
-void SITargetLowering::AddIMGInit(MachineInstr &MI) const {
+void SITargetLowering::AddMemOpInit(MachineInstr &MI) const {
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
const SIRegisterInfo &TRI = TII->getRegisterInfo();
MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
MachineBasicBlock &MBB = *MI.getParent();
- MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe);
- MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe);
- MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16);
-
- if (!TFE && !LWE) // intersect_ray
- return;
+ int DstIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
+ unsigned InitIdx = 0;
- unsigned TFEVal = TFE ? TFE->getImm() : 0;
- unsigned LWEVal = LWE ? LWE->getImm() : 0;
- unsigned D16Val = D16 ? D16->getImm() : 0;
+ if (TII->isImage(MI)) {
+ MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe);
+ MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe);
+ MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16);
- if (!TFEVal && !LWEVal)
- return;
+ if (!TFE && !LWE) // intersect_ray
+ return;
- // At least one of TFE or LWE are non-zero
- // We have to insert a suitable initialization of the result value and
- // tie this to the dest of the image instruction.
+ unsigned TFEVal = TFE ? TFE->getImm() : 0;
+ unsigned LWEVal = LWE ? LWE->getImm() : 0;
+ unsigned D16Val = D16 ? D16->getImm() : 0;
- const DebugLoc &DL = MI.getDebugLoc();
+ if (!TFEVal && !LWEVal)
+ return;
- int DstIdx =
- AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
+ // At least one of TFE or LWE are non-zero
+ // We have to insert a suitable initialization of the result value and
+ // tie this to the dest of the image instruction.
- // Calculate which dword we have to initialize to 0.
- MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask);
+ // Calculate which dword we have to initialize to 0.
+ MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask);
- // check that dmask operand is found.
- assert(MO_Dmask && "Expected dmask operand in instruction");
+ // check that dmask operand is found.
+ assert(MO_Dmask && "Expected dmask operand in instruction");
- unsigned dmask = MO_Dmask->getImm();
- // Determine the number of active lanes taking into account the
- // Gather4 special case
- unsigned ActiveLanes = TII->isGather4(MI) ? 4 : llvm::popcount(dmask);
+ unsigned dmask = MO_Dmask->getImm();
+ // Determine the number of active lanes taking into account the
+ // Gather4 special case
+ unsigned ActiveLanes = TII->isGather4(MI) ? 4 : llvm::popcount(dmask);
- bool Packed = !Subtarget->hasUnpackedD16VMem();
+ bool Packed = !Subtarget->hasUnpackedD16VMem();
- unsigned InitIdx =
- D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
+ InitIdx = D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
- // Abandon attempt if the dst size isn't large enough
- // - this is in fact an error but this is picked up elsewhere and
- // reported correctly.
- uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
- if (DstSize < InitIdx)
+ // Abandon attempt if the dst size isn't large enough
+ // - this is in fact an error but this is picked up elsewhere and
+ // reported correctly.
+ uint32_t DstSize =
+ TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
+ if (DstSize < InitIdx)
+ return;
+ } else if (TII->isMUBUF(MI) && AMDGPU::getMUBUFTfe(MI.getOpcode())) {
+ InitIdx = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
+ } else {
return;
+ }
+
+ const DebugLoc &DL = MI.getDebugLoc();
// Create a register for the initialization value.
- Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx));
+ Register PrevDst = MRI.cloneVirtualRegister(MI.getOperand(DstIdx).getReg());
unsigned NewDst = 0; // Final initialized value will be in here
// If PRTStrictNull feature is enabled (the default) then initialize
@@ -15184,11 +15190,8 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
return;
}
- if (TII->isImage(MI)) {
- if (!MI.mayStore())
- AddIMGInit(MI);
+ if (TII->isImage(MI))
TII->enforceOperandRCAlignment(MI, AMDGPU::OpName::vaddr);
- }
}
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 89da4428e3ab..9856a2923d38 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -466,7 +466,7 @@ public:
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
- void AddIMGInit(MachineInstr &MI) const;
+ void AddMemOpInit(MachineInstr &MI) const;
void AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 1c942dcefdac..04f3a2f57605 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -618,8 +618,8 @@ def SI_RETURN : SPseudoInstSI <
// This version is only needed so we can fill in the output register
// in the custom inserter.
def SI_CALL_ISEL : SPseudoInstSI <
- (outs), (ins SSrc_b64:$src0, unknown:$callee, unknown:$token),
- [(AMDGPUcall i64:$src0, tglobaladdr:$callee, untyped:$token)]> {
+ (outs), (ins SSrc_b64:$src0, unknown:$callee),
+ [(AMDGPUcall i64:$src0, tglobaladdr:$callee)]> {
let Size = 4;
let isCall = 1;
let SchedRW = [WriteBranch];
@@ -629,8 +629,8 @@ def SI_CALL_ISEL : SPseudoInstSI <
}
def : GCNPat<
- (AMDGPUcall i64:$src0, (i64 0), untyped:$token),
- (SI_CALL_ISEL $src0, (i64 0), untyped:$token)
+ (AMDGPUcall i64:$src0, (i64 0)),
+ (SI_CALL_ISEL $src0, (i64 0))
>;
// Wrapper around s_swappc_b64 with extra $callee parameter to track
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 4ddee2f6d5be..e2e70ba9733b 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -399,19 +399,35 @@ static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
default:
return UNKNOWN;
+ case AMDGPU::BUFFER_LOAD_DWORD_BOTHEN:
+ case AMDGPU::BUFFER_LOAD_DWORD_BOTHEN_exact:
+ case AMDGPU::BUFFER_LOAD_DWORD_IDXEN:
+ case AMDGPU::BUFFER_LOAD_DWORD_IDXEN_exact:
case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
+ case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:
+ case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact:
+ case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_IDXEN:
+ case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact:
case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_OFFEN:
case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_OFFEN_exact:
case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_OFFSET:
case AMDGPU::BUFFER_LOAD_DWORD_VBUFFER_OFFSET_exact:
return BUFFER_LOAD;
+ case AMDGPU::BUFFER_STORE_DWORD_BOTHEN:
+ case AMDGPU::BUFFER_STORE_DWORD_BOTHEN_exact:
+ case AMDGPU::BUFFER_STORE_DWORD_IDXEN:
+ case AMDGPU::BUFFER_STORE_DWORD_IDXEN_exact:
case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
+ case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_BOTHEN:
+ case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact:
+ case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_IDXEN:
+ case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_IDXEN_exact:
case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_OFFEN:
case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact:
case AMDGPU::BUFFER_STORE_DWORD_VBUFFER_OFFSET:
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 2569f40fec0e..12433dc83c48 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -748,35 +748,7 @@ bool SIMachineFunctionInfo::initializeBaseYamlFields(
}
bool SIMachineFunctionInfo::mayUseAGPRs(const Function &F) const {
- for (const BasicBlock &BB : F) {
- for (const Instruction &I : BB) {
- const auto *CB = dyn_cast<CallBase>(&I);
- if (!CB)
- continue;
-
- if (CB->isInlineAsm()) {
- const InlineAsm *IA = dyn_cast<InlineAsm>(CB->getCalledOperand());
- for (const auto &CI : IA->ParseConstraints()) {
- for (StringRef Code : CI.Codes) {
- Code.consume_front("{");
- if (Code.starts_with("a"))
- return true;
- }
- }
- continue;
- }
-
- const Function *Callee =
- dyn_cast<Function>(CB->getCalledOperand()->stripPointerCasts());
- if (!Callee)
- return true;
-
- if (Callee->getIntrinsicID() == Intrinsic::not_intrinsic)
- return true;
- }
- }
-
- return false;
+ return !F.hasFnAttribute("amdgpu-no-agpr");
}
bool SIMachineFunctionInfo::usesAGPRs(const MachineFunction &MF) const {
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 79a7d1cf66c4..245731ad5fc7 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -363,8 +363,8 @@ SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
for (auto &Row : SubRegFromChannelTable)
Row.fill(AMDGPU::NoSubRegister);
for (unsigned Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
- unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
- unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
+ unsigned Width = getSubRegIdxSize(Idx) / 32;
+ unsigned Offset = getSubRegIdxOffset(Idx) / 32;
assert(Width < SubRegFromChannelTableWidthMap.size());
Width = SubRegFromChannelTableWidthMap[Width];
if (Width == 0)
diff --git a/llvm/lib/Target/AMDGPU/SMInstructions.td b/llvm/lib/Target/AMDGPU/SMInstructions.td
index a91fb87998fe..afc9da07bc96 100644
--- a/llvm/lib/Target/AMDGPU/SMInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SMInstructions.td
@@ -67,6 +67,7 @@ class SM_Real <SM_Pseudo ps, string opName = ps.Mnemonic>
let UseNamedOperandTable = ps.UseNamedOperandTable;
let SchedRW = ps.SchedRW;
let SubtargetPredicate = ps.SubtargetPredicate;
+ let OtherPredicates = ps.OtherPredicates;
let AsmMatchConverter = ps.AsmMatchConverter;
let IsAtomicRet = ps.IsAtomicRet;
let IsAtomicNoRet = ps.IsAtomicNoRet;
@@ -1172,11 +1173,9 @@ defm S_LOAD_DWORDX4 : SM_Real_Loads_gfx10<0x002>;
defm S_LOAD_DWORDX8 : SM_Real_Loads_gfx10<0x003>;
defm S_LOAD_DWORDX16 : SM_Real_Loads_gfx10<0x004>;
-let SubtargetPredicate = HasScalarFlatScratchInsts in {
defm S_SCRATCH_LOAD_DWORD : SM_Real_Loads_gfx10<0x005>;
defm S_SCRATCH_LOAD_DWORDX2 : SM_Real_Loads_gfx10<0x006>;
defm S_SCRATCH_LOAD_DWORDX4 : SM_Real_Loads_gfx10<0x007>;
-} // End SubtargetPredicate = HasScalarFlatScratchInsts
defm S_BUFFER_LOAD_DWORD : SM_Real_Loads_gfx10<0x008>;
defm S_BUFFER_LOAD_DWORDX2 : SM_Real_Loads_gfx10<0x009>;
@@ -1184,19 +1183,15 @@ defm S_BUFFER_LOAD_DWORDX4 : SM_Real_Loads_gfx10<0x00a>;
defm S_BUFFER_LOAD_DWORDX8 : SM_Real_Loads_gfx10<0x00b>;
defm S_BUFFER_LOAD_DWORDX16 : SM_Real_Loads_gfx10<0x00c>;
-let SubtargetPredicate = HasScalarStores in {
defm S_STORE_DWORD : SM_Real_Stores_gfx10<0x010>;
defm S_STORE_DWORDX2 : SM_Real_Stores_gfx10<0x011>;
defm S_STORE_DWORDX4 : SM_Real_Stores_gfx10<0x012>;
-let OtherPredicates = [HasScalarFlatScratchInsts] in {
defm S_SCRATCH_STORE_DWORD : SM_Real_Stores_gfx10<0x015>;
defm S_SCRATCH_STORE_DWORDX2 : SM_Real_Stores_gfx10<0x016>;
defm S_SCRATCH_STORE_DWORDX4 : SM_Real_Stores_gfx10<0x017>;
-} // End OtherPredicates = [HasScalarFlatScratchInsts]
defm S_BUFFER_STORE_DWORD : SM_Real_Stores_gfx10<0x018>;
defm S_BUFFER_STORE_DWORDX2 : SM_Real_Stores_gfx10<0x019>;
defm S_BUFFER_STORE_DWORDX4 : SM_Real_Stores_gfx10<0x01a>;
-} // End SubtargetPredicate = HasScalarStores
def S_MEMREALTIME_gfx10 : SMEM_Real_gfx10<0x025, S_MEMREALTIME>;
def S_MEMTIME_gfx10 : SMEM_Real_gfx10<0x024, S_MEMTIME>;
@@ -1204,9 +1199,7 @@ def S_GL1_INV_gfx10 : SMEM_Real_gfx10<0x01f, S_GL1_INV>;
def S_GET_WAVEID_IN_WORKGROUP_gfx10 : SMEM_Real_gfx10<0x02a, S_GET_WAVEID_IN_WORKGROUP>;
def S_DCACHE_INV_gfx10 : SMEM_Real_gfx10<0x020, S_DCACHE_INV>;
-let SubtargetPredicate = HasScalarStores in {
def S_DCACHE_WB_gfx10 : SMEM_Real_gfx10<0x021, S_DCACHE_WB>;
-} // End SubtargetPredicate = HasScalarStores
multiclass SM_Real_Probe_gfx10<bits<8> op> {
defvar ps = NAME;
@@ -1243,8 +1236,6 @@ multiclass SM_Real_Atomics_gfx10<bits<8> op> {
def _SGPR_IMM_RTN_gfx10 : SMEM_Atomic_Real_gfx10 <op, !cast<SM_Atomic_Pseudo>(ps#_SGPR_IMM_RTN)>;
}
-let SubtargetPredicate = HasScalarAtomics in {
-
defm S_BUFFER_ATOMIC_SWAP : SM_Real_Atomics_gfx10 <0x40>;
defm S_BUFFER_ATOMIC_CMPSWAP : SM_Real_Atomics_gfx10 <0x41>;
defm S_BUFFER_ATOMIC_ADD : SM_Real_Atomics_gfx10 <0x42>;
@@ -1311,8 +1302,6 @@ multiclass SM_Real_Discard_gfx10<bits<8> op> {
defm S_DCACHE_DISCARD : SM_Real_Discard_gfx10 <0x28>;
defm S_DCACHE_DISCARD_X2 : SM_Real_Discard_gfx10 <0x29>;
-} // End SubtargetPredicate = HasScalarAtomics
-
def SMInfoTable : GenericTable {
let FilterClass = "SM_Real";
let CppTypeName = "SMInfo";
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 6d53f68ace70..5d44396b07b6 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -318,6 +318,7 @@ struct MUBUFInfo {
bool has_srsrc;
bool has_soffset;
bool IsBufferInv;
+ bool tfe;
};
struct MTBUFInfo {
@@ -466,6 +467,11 @@ bool getMUBUFIsBufferInv(unsigned Opc) {
return Info ? Info->IsBufferInv : false;
}
+bool getMUBUFTfe(unsigned Opc) {
+ const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
+ return Info ? Info->tfe : false;
+}
+
bool getSMEMIsBuffer(unsigned Opc) {
const SMInfo *Info = getSMEMOpcodeHelper(Opc);
return Info ? Info->IsBuffer : false;
@@ -1081,7 +1087,7 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
*EnableWavefrontSize32 :
STI->getFeatureBits().test(FeatureWavefrontSize32);
- if (STI->getFeatureBits().test(FeatureGFX11FullVGPRs))
+ if (STI->getFeatureBits().test(Feature1_5xVGPRs))
return IsWave32 ? 24 : 12;
if (hasGFX10_3Insts(*STI))
@@ -1108,7 +1114,7 @@ unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
if (!isGFX10Plus(*STI))
return 256;
bool IsWave32 = STI->getFeatureBits().test(FeatureWavefrontSize32);
- if (STI->getFeatureBits().test(FeatureGFX11FullVGPRs))
+ if (STI->getFeatureBits().test(Feature1_5xVGPRs))
return IsWave32 ? 1536 : 768;
return IsWave32 ? 1024 : 512;
}
@@ -1215,47 +1221,6 @@ void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
}
}
-amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
- const MCSubtargetInfo *STI) {
- IsaVersion Version = getIsaVersion(STI->getCPU());
-
- amdhsa::kernel_descriptor_t KD;
- memset(&KD, 0, sizeof(KD));
-
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
- amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
- if (Version.Major >= 12) {
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN, 0);
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX12_PLUS_DISABLE_PERF, 0);
- } else {
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP, 1);
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE, 1);
- }
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
- amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
- if (Version.Major >= 10) {
- AMDHSA_BITS_SET(KD.kernel_code_properties,
- amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
- STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE,
- STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
- amdhsa::COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED, 1);
- }
- if (AMDGPU::isGFX90A(*STI)) {
- AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
- amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
- STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
- }
- return KD;
-}
-
bool isGroupSegment(const GlobalValue *GV) {
return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 29ac402d9535..943588fe701c 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -34,10 +34,6 @@ class StringRef;
class Triple;
class raw_ostream;
-namespace amdhsa {
-struct kernel_descriptor_t;
-}
-
namespace AMDGPU {
struct IsaVersion;
@@ -526,6 +522,9 @@ LLVM_READONLY
bool getMUBUFIsBufferInv(unsigned Opc);
LLVM_READONLY
+bool getMUBUFTfe(unsigned Opc);
+
+LLVM_READONLY
bool getSMEMIsBuffer(unsigned Opc);
LLVM_READONLY
@@ -852,9 +851,6 @@ unsigned mapWMMA3AddrTo2AddrOpcode(unsigned Opc);
void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
const MCSubtargetInfo *STI);
-amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
- const MCSubtargetInfo *STI);
-
bool isGroupSegment(const GlobalValue *GV);
bool isGlobalSegment(const GlobalValue *GV);
bool isReadOnlySegment(const GlobalValue *GV);
diff --git a/llvm/lib/Target/AMDGPU/VINTERPInstructions.td b/llvm/lib/Target/AMDGPU/VINTERPInstructions.td
index 77063e2b70f6..1f7bffb26a60 100644
--- a/llvm/lib/Target/AMDGPU/VINTERPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VINTERPInstructions.td
@@ -173,6 +173,12 @@ defm : VInterpF16Pat<int_amdgcn_interp_inreg_p10_f16,
defm : VInterpF16Pat<int_amdgcn_interp_inreg_p2_f16,
V_INTERP_P2_F16_F32_inreg, f16,
[VINTERPModsHi, VINTERPMods, VINTERPMods]>;
+defm : VInterpF16Pat<int_amdgcn_interp_p10_rtz_f16,
+ V_INTERP_P10_RTZ_F16_F32_inreg, f32,
+ [VINTERPModsHi, VINTERPMods, VINTERPModsHi]>;
+defm : VInterpF16Pat<int_amdgcn_interp_p2_rtz_f16,
+ V_INTERP_P2_RTZ_F16_F32_inreg, f16,
+ [VINTERPModsHi, VINTERPMods, VINTERPMods]>;
//===----------------------------------------------------------------------===//
// VINTERP Real Instructions
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 9cfdb15a0f43..028db9d17e30 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -72,15 +72,6 @@
using namespace llvm;
-namespace llvm {
-struct ARMInstrTable {
- MCInstrDesc Insts[4445];
- MCOperandInfo OperandInfo[3026];
- MCPhysReg ImplicitOps[130];
-};
-extern const ARMInstrTable ARMDescs;
-} // end namespace llvm
-
namespace {
class ARMOperand;
@@ -360,11 +351,6 @@ class ARMAsmParser : public MCTargetAsmParser {
ITState.CurPosition = ~0U;
}
- // Return the low-subreg of a given Q register.
- unsigned getDRegFromQReg(unsigned QReg) const {
- return MRI->getSubReg(QReg, ARM::dsub_0);
- }
-
// Get the condition code corresponding to the current IT block slot.
ARMCC::CondCodes currentITCond() {
unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
@@ -586,9 +572,6 @@ class ARMAsmParser : public MCTargetAsmParser {
bool hasV8_1MMainline() const {
return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
}
- bool hasMVE() const {
- return getSTI().hasFeature(ARM::HasMVEIntegerOps);
- }
bool hasMVEFloat() const {
return getSTI().hasFeature(ARM::HasMVEFloatOps);
}
@@ -768,6 +751,19 @@ public:
void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
void onLabelParsed(MCSymbol *Symbol) override;
+
+ const MCInstrDesc &getInstrDesc(unsigned int Opcode) const {
+ return MII.get(Opcode);
+ }
+
+ bool hasMVE() const { return getSTI().hasFeature(ARM::HasMVEIntegerOps); }
+
+ // Return the low-subreg of a given Q register.
+ unsigned getDRegFromQReg(unsigned QReg) const {
+ return MRI->getSubReg(QReg, ARM::dsub_0);
+ }
+
+ const MCRegisterInfo *getMRI() const { return MRI; }
};
/// ARMOperand - Instances of this class represent a parsed ARM machine
@@ -814,6 +810,8 @@ class ARMOperand : public MCParsedAsmOperand {
SMLoc StartLoc, EndLoc, AlignmentLoc;
SmallVector<unsigned, 8> Registers;
+ ARMAsmParser *Parser;
+
struct CCOp {
ARMCC::CondCodes Val;
};
@@ -964,7 +962,7 @@ class ARMOperand : public MCParsedAsmOperand {
};
public:
- ARMOperand(KindTy K) : Kind(K) {}
+ ARMOperand(KindTy K, ARMAsmParser &Parser) : Kind(K), Parser(&Parser) {}
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
@@ -1002,7 +1000,7 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
return Reg.RegNum;
}
@@ -2043,6 +2041,11 @@ public:
bool isProcIFlags() const { return Kind == k_ProcIFlags; }
// NEON operands.
+ bool isAnyVectorList() const {
+ return Kind == k_VectorList || Kind == k_VectorListAllLanes ||
+ Kind == k_VectorListIndexed;
+ }
+
bool isVectorList() const { return Kind == k_VectorList; }
bool isSingleSpacedVectorList() const {
@@ -2054,6 +2057,9 @@ public:
}
bool isVecListOneD() const {
+ // We convert a single D reg to a list containing a D reg
+ if (isDReg() && !Parser->hasMVE())
+ return true;
if (!isSingleSpacedVectorList()) return false;
return VectorList.Count == 1;
}
@@ -2065,6 +2071,10 @@ public:
}
bool isVecListDPair() const {
+ // We convert a single Q reg to a list with the two corresponding D
+ // registers
+ if (isQReg() && !Parser->hasMVE())
+ return true;
if (!isSingleSpacedVectorList()) return false;
return (ARMMCRegisterClasses[ARM::DPairRegClassID]
.contains(VectorList.RegNum));
@@ -2542,8 +2552,7 @@ public:
RegNum = 0;
} else {
unsigned NextOpIndex = Inst.getNumOperands();
- const MCInstrDesc &MCID =
- ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
+ auto &MCID = Parser->getInstrDesc(Inst.getOpcode());
int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
assert(TiedOp >= 0 &&
"Inactive register in vpred_r is not tied to an output!");
@@ -3378,7 +3387,21 @@ public:
void addVecListOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
+
+ if (isAnyVectorList())
+ Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
+ else if (isDReg() && !Parser->hasMVE()) {
+ Inst.addOperand(MCOperand::createReg(Reg.RegNum));
+ } else if (isQReg() && !Parser->hasMVE()) {
+ auto DPair = Parser->getDRegFromQReg(Reg.RegNum);
+ DPair = Parser->getMRI()->getMatchingSuperReg(
+ DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
+ Inst.addOperand(MCOperand::createReg(DPair));
+ } else {
+ LLVM_DEBUG(dbgs() << "TYPE: " << Kind << "\n");
+ llvm_unreachable(
+ "attempted to add a vector list register with wrong type!");
+ }
}
void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
@@ -3607,67 +3630,72 @@ public:
void print(raw_ostream &OS) const override;
- static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
+ static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
Op->ITMask.Mask = Mask;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
- SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_CondCode);
+ static std::unique_ptr<ARMOperand>
+ CreateCondCode(ARMCC::CondCodes CC, SMLoc S, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
Op->CC.Val = CC;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
- SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_VPTPred);
+ static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
Op->VCC.Val = CC;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
+ static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
Op->Cop.Val = CopVal;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
+ static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
Op->Cop.Val = CopVal;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
- SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
+ static std::unique_ptr<ARMOperand>
+ CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
Op->Cop.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_CCOut);
+ static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
Op->Reg.RegNum = RegNum;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_Token);
+ static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
Op->Tok.Data = Str.data();
Op->Tok.Length = Str.size();
Op->StartLoc = S;
@@ -3676,8 +3704,8 @@ public:
}
static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
- SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_Register);
+ SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
Op->Reg.RegNum = RegNum;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -3686,9 +3714,9 @@ public:
static std::unique_ptr<ARMOperand>
CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
- unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
- SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
+ unsigned ShiftReg, unsigned ShiftImm, SMLoc S, SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
Op->RegShiftedReg.ShiftTy = ShTy;
Op->RegShiftedReg.SrcReg = SrcReg;
Op->RegShiftedReg.ShiftReg = ShiftReg;
@@ -3700,8 +3728,9 @@ public:
static std::unique_ptr<ARMOperand>
CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
- unsigned ShiftImm, SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
+ unsigned ShiftImm, SMLoc S, SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
Op->RegShiftedImm.ShiftTy = ShTy;
Op->RegShiftedImm.SrcReg = SrcReg;
Op->RegShiftedImm.ShiftImm = ShiftImm;
@@ -3711,8 +3740,9 @@ public:
}
static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
- SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
+ SMLoc S, SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
Op->ShifterImm.isASR = isASR;
Op->ShifterImm.Imm = Imm;
Op->StartLoc = S;
@@ -3720,9 +3750,9 @@ public:
return Op;
}
- static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
- SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
+ static std::unique_ptr<ARMOperand>
+ CreateRotImm(unsigned Imm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
Op->RotImm.Imm = Imm;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -3730,8 +3760,9 @@ public:
}
static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
- SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
+ SMLoc S, SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
Op->ModImm.Bits = Bits;
Op->ModImm.Rot = Rot;
Op->StartLoc = S;
@@ -3740,17 +3771,20 @@ public:
}
static std::unique_ptr<ARMOperand>
- CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
+ CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static std::unique_ptr<ARMOperand>
- CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
+ static std::unique_ptr<ARMOperand> CreateBitfield(unsigned LSB,
+ unsigned Width, SMLoc S,
+ SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
Op->Bitfield.LSB = LSB;
Op->Bitfield.Width = Width;
Op->StartLoc = S;
@@ -3760,7 +3794,7 @@ public:
static std::unique_ptr<ARMOperand>
CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
- SMLoc StartLoc, SMLoc EndLoc) {
+ SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) {
assert(Regs.size() > 0 && "RegList contains no registers?");
KindTy Kind = k_RegisterList;
@@ -3783,7 +3817,7 @@ public:
assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
- auto Op = std::make_unique<ARMOperand>(Kind);
+ auto Op = std::make_unique<ARMOperand>(Kind, Parser);
for (const auto &P : Regs)
Op->Registers.push_back(P.second);
@@ -3792,11 +3826,10 @@ public:
return Op;
}
- static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
- unsigned Count,
- bool isDoubleSpaced,
- SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_VectorList);
+ static std::unique_ptr<ARMOperand>
+ CreateVectorList(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
+ SMLoc S, SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
@@ -3807,8 +3840,8 @@ public:
static std::unique_ptr<ARMOperand>
CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
- SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
+ SMLoc S, SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.isDoubleSpaced = isDoubleSpaced;
@@ -3819,8 +3852,9 @@ public:
static std::unique_ptr<ARMOperand>
CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
- bool isDoubleSpaced, SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
+ bool isDoubleSpaced, SMLoc S, SMLoc E,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
Op->VectorList.RegNum = RegNum;
Op->VectorList.Count = Count;
Op->VectorList.LaneIndex = Index;
@@ -3830,9 +3864,10 @@ public:
return Op;
}
- static std::unique_ptr<ARMOperand>
- CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
- auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
+ static std::unique_ptr<ARMOperand> CreateVectorIndex(unsigned Idx, SMLoc S,
+ SMLoc E, MCContext &Ctx,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
Op->VectorIndex.Val = Idx;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -3840,8 +3875,8 @@ public:
}
static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
- SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_Immediate);
+ SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
Op->Imm.Val = Val;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -3851,8 +3886,9 @@ public:
static std::unique_ptr<ARMOperand>
CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
- bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
- auto Op = std::make_unique<ARMOperand>(k_Memory);
+ bool isNegative, SMLoc S, SMLoc E, ARMAsmParser &Parser,
+ SMLoc AlignmentLoc = SMLoc()) {
+ auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
Op->Memory.BaseRegNum = BaseRegNum;
Op->Memory.OffsetImm = OffsetImm;
Op->Memory.OffsetRegNum = OffsetRegNum;
@@ -3868,8 +3904,8 @@ public:
static std::unique_ptr<ARMOperand>
CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
- unsigned ShiftImm, SMLoc S, SMLoc E) {
- auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
+ unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
Op->PostIdxReg.RegNum = RegNum;
Op->PostIdxReg.isAdd = isAdd;
Op->PostIdxReg.ShiftTy = ShiftTy;
@@ -3879,9 +3915,9 @@ public:
return Op;
}
- static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
- SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
+ static std::unique_ptr<ARMOperand>
+ CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
Op->MBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
@@ -3889,8 +3925,9 @@ public:
}
static std::unique_ptr<ARMOperand>
- CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
+ CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
Op->ISBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
@@ -3898,33 +3935,36 @@ public:
}
static std::unique_ptr<ARMOperand>
- CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
+ CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
Op->TSBOpt.Val = Opt;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
- SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
+ static std::unique_ptr<ARMOperand>
+ CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S, ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
Op->IFlags.Val = IFlags;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_MSRMask);
+ static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
Op->MMask.Val = MMask;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
- static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
- auto Op = std::make_unique<ARMOperand>(k_BankedReg);
+ static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S,
+ ARMAsmParser &Parser) {
+ auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
Op->BankedReg.Val = Reg;
Op->StartLoc = S;
Op->EndLoc = S;
@@ -4328,12 +4368,11 @@ int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
}
if (ShiftReg && ShiftTy != ARM_AM::rrx)
- Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
- ShiftReg, Imm,
- S, EndLoc));
+ Operands.push_back(ARMOperand::CreateShiftedRegister(
+ ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *this));
else
Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
- S, EndLoc));
+ S, EndLoc, *this));
return 0;
}
@@ -4352,12 +4391,13 @@ bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
if (RegNo == -1)
return true;
- Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
+ Operands.push_back(
+ ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc, *this));
const AsmToken &ExclaimTok = Parser.getTok();
if (ExclaimTok.is(AsmToken::Exclaim)) {
Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
- ExclaimTok.getLoc()));
+ ExclaimTok.getLoc(), *this));
Parser.Lex(); // Eat exclaim token
return false;
}
@@ -4382,9 +4422,8 @@ bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
SMLoc E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat right bracket token.
- Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
- SIdx, E,
- getContext()));
+ Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), SIdx, E,
+ getContext(), *this));
}
return false;
@@ -4451,7 +4490,8 @@ ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
return ParseStatus::NoMatch;
Parser.Lex(); // Eat the token.
- Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
+ Operands.push_back(
+ ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S, *this));
return ParseStatus::Success;
}
@@ -4473,7 +4513,7 @@ ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
return ParseStatus::NoMatch;
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
+ Operands.push_back(ARMOperand::CreateCoprocNum(Num, S, *this));
return ParseStatus::Success;
}
@@ -4492,7 +4532,7 @@ ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
return ParseStatus::NoMatch;
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
+ Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S, *this));
return ParseStatus::Success;
}
@@ -4523,7 +4563,7 @@ ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
SMLoc E = Parser.getTok().getEndLoc();
Parser.Lex(); // Eat the '}'
- Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
+ Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E, *this));
return ParseStatus::Success;
}
@@ -4726,11 +4766,12 @@ bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
Parser.Lex(); // Eat '}' token.
// Push the register list operand.
- Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
+ Operands.push_back(ARMOperand::CreateRegList(Registers, S, E, *this));
// The ARM system instruction variants for LDM/STM have a '^' token here.
if (Parser.getTok().is(AsmToken::Caret)) {
- Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateToken("^", Parser.getTok().getLoc(), *this));
Parser.Lex(); // Eat '^' token.
}
@@ -4803,16 +4844,15 @@ ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
return Res;
switch (LaneKind) {
case NoLanes:
- Operands.push_back(ARMOperand::CreateReg(Reg, S, E));
+ Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
break;
case AllLanes:
- Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
- S, E));
+ Operands.push_back(
+ ARMOperand::CreateVectorListAllLanes(Reg, 1, false, S, E, *this));
break;
case IndexedLane:
- Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
- LaneIndex,
- false, S, E));
+ Operands.push_back(ARMOperand::CreateVectorListIndexed(
+ Reg, 1, LaneIndex, false, S, E, *this));
break;
}
return ParseStatus::Success;
@@ -4824,23 +4864,22 @@ ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
return Res;
switch (LaneKind) {
case NoLanes:
- Operands.push_back(ARMOperand::CreateReg(Reg, S, E));
+ Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
break;
case AllLanes:
Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
&ARMMCRegisterClasses[ARM::DPairRegClassID]);
- Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
- S, E));
+ Operands.push_back(
+ ARMOperand::CreateVectorListAllLanes(Reg, 2, false, S, E, *this));
break;
case IndexedLane:
- Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
- LaneIndex,
- false, S, E));
+ Operands.push_back(ARMOperand::CreateVectorListIndexed(
+ Reg, 2, LaneIndex, false, S, E, *this));
break;
}
return ParseStatus::Success;
}
- Operands.push_back(ARMOperand::CreateReg(Reg, S, E));
+ Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
return ParseStatus::Success;
}
@@ -4994,14 +5033,12 @@ ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
}
auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
ARMOperand::CreateVectorListAllLanes);
- Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
+ Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E, *this));
break;
}
case IndexedLane:
- Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
- LaneIndex,
- (Spacing == 2),
- S, E));
+ Operands.push_back(ARMOperand::CreateVectorListIndexed(
+ FirstReg, Count, LaneIndex, (Spacing == 2), S, E, *this));
break;
}
return ParseStatus::Success;
@@ -5068,7 +5105,8 @@ ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
} else
return ParseStatus::Failure;
- Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
+ Operands.push_back(
+ ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S, *this));
return ParseStatus::Success;
}
@@ -5086,7 +5124,8 @@ ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
+ Operands.push_back(
+ ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S, *this));
return ParseStatus::Success;
}
@@ -5131,7 +5170,7 @@ ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
return ParseStatus::Failure;
Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
- (ARM_ISB::InstSyncBOpt)Opt, S));
+ (ARM_ISB::InstSyncBOpt)Opt, S, *this));
return ParseStatus::Success;
}
@@ -5165,7 +5204,8 @@ ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
}
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
+ Operands.push_back(
+ ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S, *this));
return ParseStatus::Success;
}
@@ -5186,7 +5226,7 @@ ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
}
unsigned SYSmvalue = Val & 0xFF;
Parser.Lex();
- Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
+ Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *this));
return ParseStatus::Success;
}
@@ -5202,7 +5242,7 @@ ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
+ Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *this));
return ParseStatus::Success;
}
@@ -5265,7 +5305,7 @@ ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
FlagsVal |= 16;
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
+ Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S, *this));
return ParseStatus::Success;
}
@@ -5289,7 +5329,7 @@ ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
unsigned Encoding = TheReg->Encoding;
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
+ Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S, *this));
return ParseStatus::Success;
}
@@ -5331,7 +5371,7 @@ ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands,
if (Val < Low || Val > High)
return Error(Loc, "immediate value out of range");
- Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
+ Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc, *this));
return ParseStatus::Success;
}
@@ -5350,9 +5390,8 @@ ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
if (Val == -1)
return Error(S, "'be' or 'le' operand expected");
- Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
- getContext()),
- S, Tok.getEndLoc()));
+ Operands.push_back(ARMOperand::CreateImm(
+ MCConstantExpr::create(Val, getContext()), S, Tok.getEndLoc(), *this));
return ParseStatus::Success;
}
@@ -5407,7 +5446,8 @@ ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
return Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
}
- Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
+ Operands.push_back(
+ ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc, *this));
return ParseStatus::Success;
}
@@ -5448,7 +5488,7 @@ ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
- Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
+ Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc, *this));
return ParseStatus::Success;
}
@@ -5498,9 +5538,8 @@ ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
int Enc = ARM_AM::getSOImmVal(Imm1);
if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
// We have a match!
- Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
- (Enc & 0xF00) >> 7,
- Sx1, Ex1));
+ Operands.push_back(ARMOperand::CreateModImm(
+ (Enc & 0xFF), (Enc & 0xF00) >> 7, Sx1, Ex1, *this));
return ParseStatus::Success;
}
@@ -5511,13 +5550,13 @@ ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
// instruction with a mod_imm operand. The alias is defined such that the
// parser method is shared, that's why we have to do this here.
if (Parser.getTok().is(AsmToken::EndOfStatement)) {
- Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
+ Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *this));
return ParseStatus::Success;
}
} else {
// Operands like #(l1 - l2) can only be evaluated at a later stage (via an
// MCFixup). Fallback to a plain immediate.
- Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
+ Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *this));
return ParseStatus::Success;
}
@@ -5551,7 +5590,7 @@ ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
Imm2 = CE->getValue();
if (!(Imm2 & ~0x1E)) {
// We have a match!
- Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
+ Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2, *this));
return ParseStatus::Success;
}
return Error(Sx2,
@@ -5606,7 +5645,7 @@ ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
if (Width < 1 || Width > 32 - LSB)
return Error(E, "'width' operand must be in the range [1,32-lsb]");
- Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
+ Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc, *this));
return ParseStatus::Success;
}
@@ -5653,8 +5692,8 @@ ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
E = Parser.getTok().getLoc();
}
- Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
- ShiftImm, S, E));
+ Operands.push_back(
+ ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, ShiftImm, S, E, *this));
return ParseStatus::Success;
}
@@ -5695,8 +5734,8 @@ ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
if (isNegative && Val == 0)
Val = std::numeric_limits<int32_t>::min();
- Operands.push_back(
- ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
+ Operands.push_back(ARMOperand::CreateImm(
+ MCConstantExpr::create(Val, getContext()), S, E, *this));
return ParseStatus::Success;
}
@@ -5720,8 +5759,8 @@ ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
return Error(Tok.getLoc(), "register expected");
}
- Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
- 0, S, Tok.getEndLoc()));
+ Operands.push_back(ARMOperand::CreatePostIdxReg(
+ Reg, isAdd, ARM_AM::no_shift, 0, S, Tok.getEndLoc(), *this));
return ParseStatus::Success;
}
@@ -5780,7 +5819,8 @@ void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
if (CondOutI != 0) {
((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, 1);
} else {
- ARMOperand Op = *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc());
+ ARMOperand Op =
+ *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc(), *this);
Op.addCCOutOperands(Inst, 1);
}
// Rn
@@ -5792,8 +5832,8 @@ void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
if (CondI != 0) {
((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
} else {
- ARMOperand Op =
- *ARMOperand::CreateCondCode(llvm::ARMCC::AL, Operands[0]->getEndLoc());
+ ARMOperand Op = *ARMOperand::CreateCondCode(
+ llvm::ARMCC::AL, Operands[0]->getEndLoc(), *this);
Op.addCondCodeOperands(Inst, 2);
}
}
@@ -5849,8 +5889,8 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
if (CondI != 0) {
((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
} else {
- ARMOperand Op =
- *ARMOperand::CreateCondCode(llvm::ARMCC::AL, Operands[0]->getEndLoc());
+ ARMOperand Op = *ARMOperand::CreateCondCode(
+ llvm::ARMCC::AL, Operands[0]->getEndLoc(), *this);
Op.addCondCodeOperands(Inst, 2);
}
}
@@ -5879,7 +5919,7 @@ void ARMAsmParser::cvtMVEVMOVQtoDReg(
.addCondCodeOperands(Inst, 2); // condition code
} else {
ARMOperand Op =
- *ARMOperand::CreateCondCode(ARMCC::AL, Operands[0]->getEndLoc());
+ *ARMOperand::CreateCondCode(ARMCC::AL, Operands[0]->getEndLoc(), *this);
Op.addCondCodeOperands(Inst, 2);
}
}
@@ -5909,14 +5949,14 @@ bool ARMAsmParser::parseMemory(OperandVector &Operands) {
E = Tok.getEndLoc();
Parser.Lex(); // Eat right bracket token.
- Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
- ARM_AM::no_shift, 0, 0, false,
- S, E));
+ Operands.push_back(ARMOperand::CreateMem(
+ BaseRegNum, nullptr, 0, ARM_AM::no_shift, 0, 0, false, S, E, *this));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand. It's rather odd, but syntactically valid.
if (Parser.getTok().is(AsmToken::Exclaim)) {
- Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
Parser.Lex(); // Eat the '!'.
}
@@ -5967,13 +6007,14 @@ bool ARMAsmParser::parseMemory(OperandVector &Operands) {
// Don't worry about range checking the value here. That's handled by
// the is*() predicates.
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
- ARM_AM::no_shift, 0, Align,
- false, S, E, AlignmentLoc));
+ ARM_AM::no_shift, 0, Align, false,
+ S, E, *this, AlignmentLoc));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
if (Parser.getTok().is(AsmToken::Exclaim)) {
- Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
Parser.Lex(); // Eat the '!'.
}
@@ -6009,8 +6050,9 @@ bool ARMAsmParser::parseMemory(OperandVector &Operands) {
AdjustedOffset = CE;
} else
AdjustedOffset = Offset;
- Operands.push_back(ARMOperand::CreateMem(
- BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));
+ Operands.push_back(ARMOperand::CreateMem(BaseRegNum, AdjustedOffset, 0,
+ ARM_AM::no_shift, 0, 0, false, S,
+ E, *this));
// Now we should have the closing ']'
if (Parser.getTok().isNot(AsmToken::RBrac))
@@ -6021,7 +6063,8 @@ bool ARMAsmParser::parseMemory(OperandVector &Operands) {
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
if (Parser.getTok().is(AsmToken::Exclaim)) {
- Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
Parser.Lex(); // Eat the '!'.
}
@@ -6060,12 +6103,13 @@ bool ARMAsmParser::parseMemory(OperandVector &Operands) {
Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
ShiftType, ShiftImm, 0, isNegative,
- S, E));
+ S, E, *this));
// If there's a pre-indexing writeback marker, '!', just add it as a token
// operand.
if (Parser.getTok().is(AsmToken::Exclaim)) {
- Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
Parser.Lex(); // Eat the '!'.
}
@@ -6203,9 +6247,9 @@ ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
// If we had a '-' in front, toggle the sign bit.
IntVal ^= (uint64_t)isNegative << 31;
Parser.Lex(); // Eat the token.
- Operands.push_back(ARMOperand::CreateImm(
- MCConstantExpr::create(IntVal, getContext()),
- S, Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateImm(MCConstantExpr::create(IntVal, getContext()), S,
+ Parser.getTok().getLoc(), *this));
return ParseStatus::Success;
}
// Also handle plain integers. Instructions which allow floating point
@@ -6218,9 +6262,9 @@ ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
float RealVal = ARM_AM::getFPImmFloat(Val);
Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
- Operands.push_back(ARMOperand::CreateImm(
- MCConstantExpr::create(Val, getContext()), S,
- Parser.getTok().getLoc()));
+ Operands.push_back(
+ ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S,
+ Parser.getTok().getLoc(), *this));
return ParseStatus::Success;
}
@@ -6266,7 +6310,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
S = Parser.getTok().getLoc();
Parser.Lex();
- Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
+ Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S, *this));
return false;
}
}
@@ -6286,7 +6330,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
if (getParser().parseExpression(IdVal))
return true;
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
+ Operands.push_back(ARMOperand::CreateImm(IdVal, S, E, *this));
return false;
}
case AsmToken::LBrac:
@@ -6330,14 +6374,14 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
getContext());
}
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
+ Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E, *this));
// There can be a trailing '!' on operands that we want as a separate
// '!' Token operand. Handle that here. For example, the compatibility
// alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
if (Parser.getTok().is(AsmToken::Exclaim)) {
- Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
- Parser.getTok().getLoc()));
+ Operands.push_back(ARMOperand::CreateToken(
+ Parser.getTok().getString(), Parser.getTok().getLoc(), *this));
Parser.Lex(); // Eat exclaim token
}
return false;
@@ -6362,7 +6406,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
getContext());
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
+ Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E, *this));
return false;
}
case AsmToken::Equal: {
@@ -6377,7 +6421,8 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
// execute-only: we assume that assembly programmers know what they are
// doing and allow literal pool creation here
- Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
+ Operands.push_back(
+ ARMOperand::CreateConstantPoolImm(SubExprVal, S, E, *this));
return false;
}
}
@@ -6865,8 +6910,9 @@ static void applyMnemonicAliases(StringRef &Mnemonic,
const FeatureBitset &Features,
unsigned VariantID);
-// The GNU assembler has aliases of ldrd and strd with the second register
-// omitted. We don't have a way to do that in tablegen, so fix it up here.
+// The GNU assembler has aliases of ldrd, strd, ldrexd, strexd, ldaexd, and
+// stlexd with the second register omitted. We don't have a way to do that in
+// tablegen, so fix it up here.
//
// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics refering to
@@ -6876,13 +6922,19 @@ static void applyMnemonicAliases(StringRef &Mnemonic,
void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
OperandVector &Operands,
unsigned MnemonicOpsEndInd) {
- if (Mnemonic != "ldrd" && Mnemonic != "strd")
+ if (Mnemonic != "ldrd" && Mnemonic != "strd" && Mnemonic != "ldrexd" &&
+ Mnemonic != "strexd" && Mnemonic != "ldaexd" && Mnemonic != "stlexd")
return;
- if (Operands.size() < MnemonicOpsEndInd + 2)
+
+ unsigned IdX = Mnemonic == "strexd" || Mnemonic == "stlexd"
+ ? MnemonicOpsEndInd + 1
+ : MnemonicOpsEndInd;
+
+ if (Operands.size() < IdX + 2)
return;
- ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
- ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
+ ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[IdX]);
+ ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[IdX + 1]);
if (!Op2.isReg())
return;
@@ -6906,9 +6958,9 @@ void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
(PairedReg == ARM::SP && !hasV8Ops()))
return;
- Operands.insert(
- Operands.begin() + MnemonicOpsEndInd + 1,
- ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
+ Operands.insert(Operands.begin() + IdX + 1,
+ ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(),
+ Op2.getEndLoc(), *this));
}
// Dual-register instruction have the following syntax:
@@ -6968,7 +7020,7 @@ bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
Operands.erase(Operands.begin() + MnemonicOpsEndInd + 2);
Operands[MnemonicOpsEndInd + 1] =
- ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
+ ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc(), *this);
return false;
}
@@ -7041,7 +7093,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
return Error(NameLoc, "conditional execution not supported in Thumb1");
}
- Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
+ Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc, *this));
// Handle the mask for IT and VPT instructions. In ARMOperand and
// MCOperand, this is stored in a format independent of the
@@ -7073,7 +7125,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
if (Pos == 'e')
Mask |= 8;
}
- Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
+ Operands.push_back(ARMOperand::CreateITMask(Mask, Loc, *this));
}
// FIXME: This is all a pretty gross hack. We should automatically handle
@@ -7113,8 +7165,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// Add the carry setting operand, if necessary.
if (CanAcceptCarrySet && CarrySetting) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
- Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
- Loc));
+ Operands.push_back(
+ ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, Loc, *this));
}
// Add the predication code operand, if necessary.
@@ -7122,7 +7174,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
CarrySetting);
Operands.push_back(ARMOperand::CreateCondCode(
- ARMCC::CondCodes(PredicationCode), Loc));
+ ARMCC::CondCodes(PredicationCode), Loc, *this));
}
// Add the VPT predication code operand, if necessary.
@@ -7134,14 +7186,14 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
CarrySetting);
Operands.push_back(ARMOperand::CreateVPTPred(
- ARMVCC::VPTCodes(VPTPredicationCode), Loc));
+ ARMVCC::VPTCodes(VPTPredicationCode), Loc, *this));
}
// Add the processor imod operand, if necessary.
if (ProcessorIMod) {
Operands.push_back(ARMOperand::CreateImm(
- MCConstantExpr::create(ProcessorIMod, getContext()),
- NameLoc, NameLoc));
+ MCConstantExpr::create(ProcessorIMod, getContext()), NameLoc, NameLoc,
+ *this));
} else if (Mnemonic == "cps" && isMClass()) {
return Error(NameLoc, "instruction 'cps' requires effect for M-class");
}
@@ -7170,7 +7222,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// so discard it to avoid errors that can be caused by the matcher.
if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
- Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
+ Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc, *this));
}
}
@@ -7229,9 +7281,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
Mnemonic.size() - 1 + CarrySetting);
Operands.insert(Operands.begin(),
- ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
- Operands.insert(Operands.begin(),
- ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
+ ARMOperand::CreateVPTPred(ARMVCC::None, PLoc, *this));
+ Operands.insert(Operands.begin(), ARMOperand::CreateToken(
+ StringRef("vmovlt"), MLoc, *this));
} else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
MnemonicOpsEndInd)) {
@@ -7245,9 +7297,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
Mnemonic.size() - 1 + CarrySetting);
Operands.insert(Operands.begin(),
- ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
+ ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc, *this));
Operands.insert(Operands.begin(),
- ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
+ ARMOperand::CreateToken(StringRef("vcvtn"), MLoc, *this));
} else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
MnemonicOpsEndInd)) {
@@ -7257,8 +7309,8 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
removeCondCode(Operands, MnemonicOpsEndInd);
Operands.erase(Operands.begin());
SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
- Operands.insert(Operands.begin(),
- ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
+ Operands.insert(Operands.begin(), ARMOperand::CreateToken(
+ StringRef("vmullt"), MLoc, *this));
} else if (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
!Mnemonic.starts_with("vcvtn") &&
!Mnemonic.starts_with("vcvtp") &&
@@ -7284,7 +7336,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Mnemonic = Mnemonic.substr(0, 4);
Operands.insert(Operands.begin(),
- ARMOperand::CreateToken(Mnemonic, MLoc));
+ ARMOperand::CreateToken(Mnemonic, MLoc, *this));
}
}
SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
@@ -7292,7 +7344,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// Add VPTPred
Operands.insert(Operands.begin() + 1,
ARMOperand::CreateVPTPred(
- ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
+ ARMVCC::VPTCodes(VPTPredicationCode), PLoc, *this));
++MnemonicOpsEndInd;
}
} else if (CanAcceptVPTPredicationCode) {
@@ -7322,7 +7374,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Mnemonic = Name.slice(0, Mnemonic.size() + 1);
Operands.erase(Operands.begin());
Operands.insert(Operands.begin(),
- ARMOperand::CreateToken(Mnemonic, NameLoc));
+ ARMOperand::CreateToken(Mnemonic, NameLoc, *this));
}
}
@@ -7336,6 +7388,9 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isImm())
removeCondCode(Operands, MnemonicOpsEndInd);
+ // GNU Assembler extension (compatibility).
+ fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
+
// Adjust operands of ldrexd/strexd to MCK_GPRPair.
// ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
// a single GPRPair reg operand is used in the .td file to replace the two
@@ -7343,40 +7398,42 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// automatically
// expressed as a GPRPair, so we have to manually merge them.
// FIXME: We would really like to be able to tablegen'erate this.
- if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 &&
+ bool IsLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
+ if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
(Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
Mnemonic == "stlexd")) {
- bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
- unsigned Idx = isLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
+ unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
- // Adjust only if Op1 and Op2 are GPRs.
- if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
- MRC.contains(Op2.getReg())) {
+ // Adjust only if Op1 is a GPR.
+ if (Op1.isReg() && MRC.contains(Op1.getReg())) {
unsigned Reg1 = Op1.getReg();
- unsigned Reg2 = Op2.getReg();
unsigned Rt = MRI->getEncodingValue(Reg1);
+ unsigned Reg2 = Op2.getReg();
unsigned Rt2 = MRI->getEncodingValue(Reg2);
-
- // Rt2 must be Rt + 1 and Rt must be even.
- if (Rt + 1 != Rt2 || (Rt & 1)) {
+ // Rt2 must be Rt + 1.
+ if (Rt + 1 != Rt2)
return Error(Op2.getStartLoc(),
- isLoad ? "destination operands must be sequential"
+ IsLoad ? "destination operands must be sequential"
: "source operands must be sequential");
- }
+
+ // Rt must be even
+ if (Rt & 1)
+ return Error(
+ Op1.getStartLoc(),
+              IsLoad ? "destination operands must start at an even register"
+                     : "source operands must start at an even register");
+
unsigned NewReg = MRI->getMatchingSuperReg(
Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
- Operands[Idx] =
- ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
+ Operands[Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(),
+ Op2.getEndLoc(), *this);
Operands.erase(Operands.begin() + Idx + 1);
}
}
- // GNU Assembler extension (compatibility).
- fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
-
// FIXME: As said above, this is all a pretty gross hack. This instruction
// does not fit with other "subs" and tblgen.
// Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
@@ -7391,7 +7448,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
ARM::LR &&
static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
- Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
+ Operands.front() = ARMOperand::CreateToken(Name, NameLoc, *this);
removeCCOut(Operands, MnemonicOpsEndInd);
}
return false;
@@ -12990,29 +13047,6 @@ unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
return Match_Success;
return Match_rGPR;
- // Note: This mutates the operand which could cause issues for future
- // matches if this one fails later.
- // It would be better to do this in addVecList but as this doesn't have access
- // to MRI this isn't possible.
- // If trying to match a VecListDPair with a Q register, convert Q to list.
- case MCK_VecListDPair:
- if (Op.isQReg() && !hasMVE()) {
- auto DPair = getDRegFromQReg(Op.getReg());
- DPair = MRI->getMatchingSuperReg(
- DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
- Op.setVecListDPair(DPair);
- return Match_Success;
- }
- return Match_InvalidOperand;
- // Note: This mutates the operand (see above).
- // If trying to match a VecListDPair with a D register, convert D singleton
- // list.
- case MCK_VecListOneD:
- if (Op.isDReg() && !hasMVE()) {
- Op.setVecListOneD(Op.getReg());
- return Match_Success;
- }
- return Match_InvalidOperand;
}
return Match_InvalidOperand;
}
@@ -13062,13 +13096,13 @@ bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
}
std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
- return ARMOperand::CreateCondCode(ARMCC::AL, SMLoc());
+ return ARMOperand::CreateCondCode(ARMCC::AL, SMLoc(), *this);
}
std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
- return ARMOperand::CreateCCOut(0, SMLoc());
+ return ARMOperand::CreateCCOut(0, SMLoc(), *this);
}
std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
- return ARMOperand::CreateVPTPred(ARMVCC::None, SMLoc());
+ return ARMOperand::CreateVPTPred(ARMVCC::None, SMLoc(), *this);
}
diff --git a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
index 5c113ccfdc15..e8d2cba7ee55 100644
--- a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
+++ b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
@@ -958,6 +958,7 @@ bool MVETPAndVPTOptimisations::ReplaceConstByVPNOTs(MachineBasicBlock &MBB,
unsigned NotImm = ~Imm & 0xffff;
if (LastVPTReg != 0 && LastVPTReg != VPR && LastVPTImm == Imm) {
+ MRI->clearKillFlags(LastVPTReg);
Instr.getOperand(PIdx + 1).setReg(LastVPTReg);
if (MRI->use_empty(VPR)) {
DeadInstructions.insert(Copy);
diff --git a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index 0f4ece64bff5..047c6731333c 100644
--- a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -612,11 +612,11 @@ bool Thumb1FrameLowering::needPopSpecialFixUp(const MachineFunction &MF) const {
static void findTemporariesForLR(const BitVector &GPRsNoLRSP,
const BitVector &PopFriendly,
- const LivePhysRegs &UsedRegs, unsigned &PopReg,
+ const LiveRegUnits &UsedRegs, unsigned &PopReg,
unsigned &TmpReg, MachineRegisterInfo &MRI) {
PopReg = TmpReg = 0;
for (auto Reg : GPRsNoLRSP.set_bits()) {
- if (UsedRegs.available(MRI, Reg)) {
+ if (UsedRegs.available(Reg)) {
// Remember the first pop-friendly register and exit.
if (PopFriendly.test(Reg)) {
PopReg = Reg;
@@ -684,7 +684,7 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB,
// Look for a temporary register to use.
// First, compute the liveness information.
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
- LivePhysRegs UsedRegs(TRI);
+ LiveRegUnits UsedRegs(TRI);
UsedRegs.addLiveOuts(MBB);
// The semantic of pristines changed recently and now,
// the callee-saved registers that are touched in the function
@@ -710,11 +710,6 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB,
unsigned TemporaryReg = 0;
BitVector PopFriendly =
TRI.getAllocatableSet(MF, TRI.getRegClass(ARM::tGPRRegClassID));
- // R7 may be used as a frame pointer, hence marked as not generally
- // allocatable, however there's no reason to not use it as a temporary for
- // restoring LR.
- if (STI.getFramePointerReg() == ARM::R7)
- PopFriendly.set(ARM::R7);
assert(PopFriendly.any() && "No allocatable pop-friendly register?!");
// Rebuild the GPRs from the high registers because they are removed
diff --git a/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
index db4aa03437c6..383dfcc31117 100644
--- a/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
+++ b/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
@@ -195,7 +195,7 @@ public:
return Tok;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert((Kind == k_Register || Kind == k_Memri) && "Invalid access!");
return RegImm.Reg;
diff --git a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
index 1688355f427c..9672ed009e9b 100644
--- a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
+++ b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
@@ -148,7 +148,7 @@ public:
/// getEndLoc - Gets location of the last token of this operand
SMLoc getEndLoc() const override { return EndLoc; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == Register && "Invalid type access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index b8ca8ec98c06..071fe004806e 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -113,6 +113,10 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, VT, Expand);
setOperationAction(ISD::SRA_PARTS, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::SETCC, VT, Expand);
setOperationAction(ISD::SELECT, VT, Expand);
@@ -125,11 +129,6 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
STI.getHasJmp32() ? Custom : Promote);
}
- setOperationAction(ISD::CTTZ, MVT::i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
-
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
if (!STI.hasMovsx()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
diff --git a/llvm/lib/Target/BPF/BPFPassRegistry.def b/llvm/lib/Target/BPF/BPFPassRegistry.def
new file mode 100644
index 000000000000..73a8ef2f95bf
--- /dev/null
+++ b/llvm/lib/Target/BPF/BPFPassRegistry.def
@@ -0,0 +1,32 @@
+//===- BPFPassRegistry.def - Registry of BPF passes -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the
+// BPF backend.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef FUNCTION_PASS
+#define FUNCTION_PASS(NAME, CREATE_PASS)
+#endif
+FUNCTION_PASS("bpf-aspace-simplify", BPFASpaceCastSimplifyPass())
+FUNCTION_PASS("bpf-ir-peephole", BPFIRPeepholePass())
+#undef FUNCTION_PASS
+
+#ifndef FUNCTION_PASS_WITH_PARAMS
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS)
+#endif
+FUNCTION_PASS_WITH_PARAMS(
+ "bpf-preserve-static-offset", "BPFPreserveStaticOffsetPass",
+ [=](bool AllowPartial) {
+ return BPFPreserveStaticOffsetPass(AllowPartial);
+ },
+ parseBPFPreserveStaticOffsetOptions, "allow-partial")
+#undef FUNCTION_PASS_WITH_PARAMS
diff --git a/llvm/lib/Target/BPF/BPFTargetMachine.cpp b/llvm/lib/Target/BPF/BPFTargetMachine.cpp
index 5f26bec2e390..a7bed69b0f2a 100644
--- a/llvm/lib/Target/BPF/BPFTargetMachine.cpp
+++ b/llvm/lib/Target/BPF/BPFTargetMachine.cpp
@@ -108,25 +108,16 @@ TargetPassConfig *BPFTargetMachine::createPassConfig(PassManagerBase &PM) {
return new BPFPassConfig(*this, PM);
}
+static Expected<bool> parseBPFPreserveStaticOffsetOptions(StringRef Params) {
+ return PassBuilder::parseSinglePassOption(Params, "allow-partial",
+ "BPFPreserveStaticOffsetPass");
+}
+
void BPFTargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
- PB.registerPipelineParsingCallback(
- [](StringRef PassName, FunctionPassManager &FPM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (PassName == "bpf-ir-peephole") {
- FPM.addPass(BPFIRPeepholePass());
- return true;
- }
- if (PassName == "bpf-preserve-static-offset") {
- FPM.addPass(BPFPreserveStaticOffsetPass(false));
- return true;
- }
- if (PassName == "bpf-aspace-simplify") {
- FPM.addPass(BPFASpaceCastSimplifyPass());
- return true;
- }
- return false;
- });
+#define GET_PASS_REGISTRY "BPFPassRegistry.def"
+#include "llvm/Passes/TargetPassRegistry.inc"
+
PB.registerPipelineStartEPCallback(
[=](ModulePassManager &MPM, OptimizationLevel) {
FunctionPassManager FPM;
diff --git a/llvm/lib/Target/BPF/CMakeLists.txt b/llvm/lib/Target/BPF/CMakeLists.txt
index cb21ed03a86c..eade4cacb710 100644
--- a/llvm/lib/Target/BPF/CMakeLists.txt
+++ b/llvm/lib/Target/BPF/CMakeLists.txt
@@ -54,6 +54,7 @@ add_llvm_target(BPFCodeGen
GlobalISel
IPO
MC
+ Passes
Scalar
SelectionDAG
Support
diff --git a/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp b/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp
index 4711e58bbed6..30bd3dcefa60 100644
--- a/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp
+++ b/llvm/lib/Target/CSKY/AsmParser/CSKYAsmParser.cpp
@@ -400,7 +400,7 @@ public:
/// Gets location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == Register && "Invalid type access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 36eb29d53766..cd388ed3e319 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -255,9 +255,14 @@ class DXILOpMapping<int opCode, DXILOpClass opClass,
}
// Concrete definition of DXIL Operation mapping to corresponding LLVM intrinsic
+def Abs : DXILOpMapping<6, unary, int_fabs,
+ "Returns the absolute value of the input.">;
def IsInf : DXILOpMapping<9, isSpecialFloat, int_dx_isinf,
"Determines if the specified value is infinite.",
[llvm_i1_ty, llvm_halforfloat_ty]>;
+def Cos : DXILOpMapping<12, unary, int_cos,
+ "Returns cosine(theta) for theta in radians.",
+ [llvm_halforfloat_ty, LLVMMatchType<0>]>;
def Sin : DXILOpMapping<13, unary, int_sin,
"Returns sine(theta) for theta in radians.",
[llvm_halforfloat_ty, LLVMMatchType<0>]>;
@@ -269,14 +274,33 @@ def Frac : DXILOpMapping<22, unary, int_dx_frac,
"Returns a fraction from 0 to 1 that represents the "
"decimal part of the input.",
[llvm_halforfloat_ty, LLVMMatchType<0>]>;
+def Log2 : DXILOpMapping<23, unary, int_log2,
+ "Returns the base-2 logarithm of the specified value.",
+ [llvm_halforfloat_ty, LLVMMatchType<0>]>;
+def Sqrt : DXILOpMapping<24, unary, int_sqrt,
+                          "Returns the square root of the specified floating-point "
+ "value, per component.",
+ [llvm_halforfloat_ty, LLVMMatchType<0>]>;
def RSqrt : DXILOpMapping<25, unary, int_dx_rsqrt,
"Returns the reciprocal of the square root of the specified value."
"rsqrt(x) = 1 / sqrt(x).",
[llvm_halforfloat_ty, LLVMMatchType<0>]>;
-def Round : DXILOpMapping<26, unary, int_round,
+def Round : DXILOpMapping<26, unary, int_roundeven,
"Returns the input rounded to the nearest integer"
"within a floating-point type.",
[llvm_halforfloat_ty, LLVMMatchType<0>]>;
+def Floor : DXILOpMapping<27, unary, int_floor,
+ "Returns the largest integer that is less than or equal to the input.",
+ [llvm_halforfloat_ty, LLVMMatchType<0>]>;
+def Ceil : DXILOpMapping<28, unary, int_ceil,
+ "Returns the smallest integer that is greater than or equal to the input.",
+ [llvm_halforfloat_ty, LLVMMatchType<0>]>;
+def Trunc : DXILOpMapping<29, unary, int_trunc,
+ "Returns the specified value truncated to the integer component.",
+ [llvm_halforfloat_ty, LLVMMatchType<0>]>;
+def Rbits : DXILOpMapping<30, unary, int_bitreverse,
+ "Returns the specified value with its bits reversed.",
+ [llvm_anyint_ty, LLVMMatchType<0>]>;
def FMax : DXILOpMapping<35, binary, int_maxnum,
"Float maximum. FMax(a,b) = a > b ? a : b">;
def FMin : DXILOpMapping<36, binary, int_minnum,
@@ -295,6 +319,15 @@ def IMad : DXILOpMapping<48, tertiary, int_dx_imad,
"Signed integer arithmetic multiply/add operation. imad(m,a,b) = m * a + b.">;
def UMad : DXILOpMapping<49, tertiary, int_dx_umad,
"Unsigned integer arithmetic multiply/add operation. umad(m,a,b) = m * a + b.">;
+let OpTypes = !listconcat([llvm_halforfloat_ty], !listsplat(llvm_halforfloat_ty, 4)) in
+ def Dot2 : DXILOpMapping<54, dot2, int_dx_dot2,
+ "dot product of two float vectors Dot(a,b) = a[0]*b[0] + ... + a[n]*b[n] where n is between 0 and 1">;
+let OpTypes = !listconcat([llvm_halforfloat_ty], !listsplat(llvm_halforfloat_ty, 6)) in
+ def Dot3 : DXILOpMapping<55, dot3, int_dx_dot3,
+ "dot product of two float vectors Dot(a,b) = a[0]*b[0] + ... + a[n]*b[n] where n is between 0 and 2">;
+let OpTypes = !listconcat([llvm_halforfloat_ty], !listsplat(llvm_halforfloat_ty, 8)) in
+ def Dot4 : DXILOpMapping<56, dot4, int_dx_dot4,
+ "dot product of two float vectors Dot(a,b) = a[0]*b[0] + ... + a[n]*b[n] where n is between 0 and 3">;
def ThreadId : DXILOpMapping<93, threadId, int_dx_thread_id,
"Reads the thread ID">;
def GroupId : DXILOpMapping<94, groupId, int_dx_group_id,
diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
index 0db42bc0a0fb..3e2d10f5ee7a 100644
--- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
+++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
@@ -33,7 +33,11 @@ using namespace llvm;
static bool isIntrinsicExpansion(Function &F) {
switch (F.getIntrinsicID()) {
+ case Intrinsic::abs:
case Intrinsic::exp:
+ case Intrinsic::log:
+ case Intrinsic::log10:
+ case Intrinsic::pow:
case Intrinsic::dx_any:
case Intrinsic::dx_clamp:
case Intrinsic::dx_uclamp:
@@ -46,6 +50,26 @@ static bool isIntrinsicExpansion(Function &F) {
return false;
}
+static bool expandAbs(CallInst *Orig) {
+ Value *X = Orig->getOperand(0);
+ IRBuilder<> Builder(Orig->getParent());
+ Builder.SetInsertPoint(Orig);
+ Type *Ty = X->getType();
+ Type *EltTy = Ty->getScalarType();
+ Constant *Zero = Ty->isVectorTy()
+ ? ConstantVector::getSplat(
+ ElementCount::getFixed(
+ cast<FixedVectorType>(Ty)->getNumElements()),
+ ConstantInt::get(EltTy, 0))
+ : ConstantInt::get(EltTy, 0);
+ auto *V = Builder.CreateSub(Zero, X);
+ auto *MaxCall =
+ Builder.CreateIntrinsic(Ty, Intrinsic::smax, {X, V}, nullptr, "dx.max");
+ Orig->replaceAllUsesWith(MaxCall);
+ Orig->eraseFromParent();
+ return true;
+}
+
static bool expandIntegerDot(CallInst *Orig, Intrinsic::ID DotIntrinsic) {
assert(DotIntrinsic == Intrinsic::dx_sdot ||
DotIntrinsic == Intrinsic::dx_udot);
@@ -87,8 +111,8 @@ static bool expandExpIntrinsic(CallInst *Orig) {
Ty->isVectorTy() ? ConstantVector::getSplat(
ElementCount::getFixed(
cast<FixedVectorType>(Ty)->getNumElements()),
- ConstantFP::get(EltTy, numbers::log2e))
- : ConstantFP::get(EltTy, numbers::log2e);
+ ConstantFP::get(EltTy, numbers::log2ef))
+ : ConstantFP::get(EltTy, numbers::log2ef);
Value *NewX = Builder.CreateFMul(Log2eConst, X);
auto *Exp2Call =
Builder.CreateIntrinsic(Ty, Intrinsic::exp2, {NewX}, nullptr, "dx.exp2");
@@ -148,6 +172,52 @@ static bool expandLerpIntrinsic(CallInst *Orig) {
return true;
}
+static bool expandLogIntrinsic(CallInst *Orig,
+ float LogConstVal = numbers::ln2f) {
+ Value *X = Orig->getOperand(0);
+ IRBuilder<> Builder(Orig->getParent());
+ Builder.SetInsertPoint(Orig);
+ Type *Ty = X->getType();
+ Type *EltTy = Ty->getScalarType();
+ Constant *Ln2Const =
+ Ty->isVectorTy() ? ConstantVector::getSplat(
+ ElementCount::getFixed(
+ cast<FixedVectorType>(Ty)->getNumElements()),
+ ConstantFP::get(EltTy, LogConstVal))
+ : ConstantFP::get(EltTy, LogConstVal);
+ auto *Log2Call =
+ Builder.CreateIntrinsic(Ty, Intrinsic::log2, {X}, nullptr, "elt.log2");
+ Log2Call->setTailCall(Orig->isTailCall());
+ Log2Call->setAttributes(Orig->getAttributes());
+ auto *Result = Builder.CreateFMul(Ln2Const, Log2Call);
+ Orig->replaceAllUsesWith(Result);
+ Orig->eraseFromParent();
+ return true;
+}
+static bool expandLog10Intrinsic(CallInst *Orig) {
+ return expandLogIntrinsic(Orig, numbers::ln2f / numbers::ln10f);
+}
+
+static bool expandPowIntrinsic(CallInst *Orig) {
+
+ Value *X = Orig->getOperand(0);
+ Value *Y = Orig->getOperand(1);
+ Type *Ty = X->getType();
+ IRBuilder<> Builder(Orig->getParent());
+ Builder.SetInsertPoint(Orig);
+
+ auto *Log2Call =
+ Builder.CreateIntrinsic(Ty, Intrinsic::log2, {X}, nullptr, "elt.log2");
+ auto *Mul = Builder.CreateFMul(Log2Call, Y);
+ auto *Exp2Call =
+ Builder.CreateIntrinsic(Ty, Intrinsic::exp2, {Mul}, nullptr, "elt.exp2");
+ Exp2Call->setTailCall(Orig->isTailCall());
+ Exp2Call->setAttributes(Orig->getAttributes());
+ Orig->replaceAllUsesWith(Exp2Call);
+ Orig->eraseFromParent();
+ return true;
+}
+
static bool expandRcpIntrinsic(CallInst *Orig) {
Value *X = Orig->getOperand(0);
IRBuilder<> Builder(Orig->getParent());
@@ -213,8 +283,16 @@ static bool expandClampIntrinsic(CallInst *Orig, Intrinsic::ID ClampIntrinsic) {
static bool expandIntrinsic(Function &F, CallInst *Orig) {
switch (F.getIntrinsicID()) {
+ case Intrinsic::abs:
+ return expandAbs(Orig);
case Intrinsic::exp:
return expandExpIntrinsic(Orig);
+ case Intrinsic::log:
+ return expandLogIntrinsic(Orig);
+ case Intrinsic::log10:
+ return expandLog10Intrinsic(Orig);
+ case Intrinsic::pow:
+ return expandPowIntrinsic(Orig);
case Intrinsic::dx_any:
return expandAnyIntrinsic(Orig);
case Intrinsic::dx_uclamp:
diff --git a/llvm/lib/Target/DirectX/DXILOpBuilder.cpp b/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
index a1eacc2d4800..0b3982ea0f43 100644
--- a/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
@@ -190,13 +190,13 @@ static StructType *getHandleType(LLVMContext &Ctx) {
static Type *getTypeFromParameterKind(ParameterKind Kind, Type *OverloadTy) {
auto &Ctx = OverloadTy->getContext();
switch (Kind) {
- case ParameterKind::VOID:
+ case ParameterKind::Void:
return Type::getVoidTy(Ctx);
- case ParameterKind::HALF:
+ case ParameterKind::Half:
return Type::getHalfTy(Ctx);
- case ParameterKind::FLOAT:
+ case ParameterKind::Float:
return Type::getFloatTy(Ctx);
- case ParameterKind::DOUBLE:
+ case ParameterKind::Double:
return Type::getDoubleTy(Ctx);
case ParameterKind::I1:
return Type::getInt1Ty(Ctx);
@@ -208,11 +208,11 @@ static Type *getTypeFromParameterKind(ParameterKind Kind, Type *OverloadTy) {
return Type::getInt32Ty(Ctx);
case ParameterKind::I64:
return Type::getInt64Ty(Ctx);
- case ParameterKind::OVERLOAD:
+ case ParameterKind::Overload:
return OverloadTy;
- case ParameterKind::RESOURCE_RET:
+ case ParameterKind::ResourceRet:
return getResRetType(OverloadTy, Ctx);
- case ParameterKind::DXIL_HANDLE:
+ case ParameterKind::DXILHandle:
return getHandleType(Ctx);
default:
break;
@@ -254,7 +254,7 @@ namespace dxil {
CallInst *DXILOpBuilder::createDXILOpCall(dxil::OpCode OpCode, Type *ReturnTy,
Type *OverloadTy,
- llvm::iterator_range<Use *> Args) {
+ SmallVector<Value *> Args) {
const OpCodeProperty *Prop = getOpCodeProperty(OpCode);
OverloadKind Kind = getOverloadKind(OverloadTy);
@@ -272,10 +272,8 @@ CallInst *DXILOpBuilder::createDXILOpCall(dxil::OpCode OpCode, Type *ReturnTy,
FunctionType *DXILOpFT = getDXILOpFunctionType(Prop, ReturnTy, OverloadTy);
DXILFn = M.getOrInsertFunction(DXILFnName, DXILOpFT);
}
- SmallVector<Value *> FullArgs;
- FullArgs.emplace_back(B.getInt32((int32_t)OpCode));
- FullArgs.append(Args.begin(), Args.end());
- return B.CreateCall(DXILFn, FullArgs);
+
+ return B.CreateCall(DXILFn, Args);
}
Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT) {
@@ -320,8 +318,8 @@ Type *DXILOpBuilder::getOverloadTy(dxil::OpCode OpCode, FunctionType *FT) {
auto ParamKinds = getOpCodeParameterKind(*Prop);
auto Kind = ParamKinds[Prop->OverloadParamIndex];
// For ResRet and CBufferRet, OverloadTy is in field of StructType.
- if (Kind == ParameterKind::CBUFFER_RET ||
- Kind == ParameterKind::RESOURCE_RET) {
+ if (Kind == ParameterKind::CBufferRet ||
+ Kind == ParameterKind::ResourceRet) {
auto *ST = cast<StructType>(OverloadType);
OverloadType = ST->getElementType(0);
}
diff --git a/llvm/lib/Target/DirectX/DXILOpBuilder.h b/llvm/lib/Target/DirectX/DXILOpBuilder.h
index f3abcc6e02a4..5babeae47017 100644
--- a/llvm/lib/Target/DirectX/DXILOpBuilder.h
+++ b/llvm/lib/Target/DirectX/DXILOpBuilder.h
@@ -13,7 +13,7 @@
#define LLVM_LIB_TARGET_DIRECTX_DXILOPBUILDER_H
#include "DXILConstants.h"
-#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/SmallVector.h"
namespace llvm {
class Module;
@@ -35,8 +35,7 @@ public:
/// \param OverloadTy Overload type of the DXIL Op call constructed
/// \return DXIL Op call constructed
CallInst *createDXILOpCall(dxil::OpCode OpCode, Type *ReturnTy,
- Type *OverloadTy,
- llvm::iterator_range<Use *> Args);
+ Type *OverloadTy, SmallVector<Value *> Args);
Type *getOverloadTy(dxil::OpCode OpCode, FunctionType *FT);
static const char *getOpCodeName(dxil::OpCode DXILOp);
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index 3e334b0ec298..f09e322f88e1 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -30,6 +30,48 @@
using namespace llvm;
using namespace llvm::dxil;
+static bool isVectorArgExpansion(Function &F) {
+ switch (F.getIntrinsicID()) {
+ case Intrinsic::dx_dot2:
+ case Intrinsic::dx_dot3:
+ case Intrinsic::dx_dot4:
+ return true;
+ }
+ return false;
+}
+
+static SmallVector<Value *> populateOperands(Value *Arg, IRBuilder<> &Builder) {
+ SmallVector<Value *, 4> ExtractedElements;
+ auto *VecArg = dyn_cast<FixedVectorType>(Arg->getType());
+ for (unsigned I = 0; I < VecArg->getNumElements(); ++I) {
+ Value *Index = ConstantInt::get(Type::getInt32Ty(Arg->getContext()), I);
+ Value *ExtractedElement = Builder.CreateExtractElement(Arg, Index);
+ ExtractedElements.push_back(ExtractedElement);
+ }
+ return ExtractedElements;
+}
+
+static SmallVector<Value *> argVectorFlatten(CallInst *Orig,
+ IRBuilder<> &Builder) {
+ // Note: arg[NumOperands-1] is a pointer and is not needed by our flattening.
+ unsigned NumOperands = Orig->getNumOperands() - 1;
+ assert(NumOperands > 0);
+ Value *Arg0 = Orig->getOperand(0);
+ [[maybe_unused]] auto *VecArg0 = dyn_cast<FixedVectorType>(Arg0->getType());
+ assert(VecArg0);
+ SmallVector<Value *> NewOperands = populateOperands(Arg0, Builder);
+ for (unsigned I = 1; I < NumOperands; ++I) {
+ Value *Arg = Orig->getOperand(I);
+ [[maybe_unused]] auto *VecArg = dyn_cast<FixedVectorType>(Arg->getType());
+ assert(VecArg);
+ assert(VecArg0->getElementType() == VecArg->getElementType());
+ assert(VecArg0->getNumElements() == VecArg->getNumElements());
+ auto NextOperandList = populateOperands(Arg, Builder);
+ NewOperands.append(NextOperandList.begin(), NextOperandList.end());
+ }
+ return NewOperands;
+}
+
static void lowerIntrinsic(dxil::OpCode DXILOp, Function &F, Module &M) {
IRBuilder<> B(M.getContext());
DXILOpBuilder DXILB(M, B);
@@ -39,9 +81,18 @@ static void lowerIntrinsic(dxil::OpCode DXILOp, Function &F, Module &M) {
if (!CI)
continue;
+ SmallVector<Value *> Args;
+ Value *DXILOpArg = B.getInt32(static_cast<unsigned>(DXILOp));
+ Args.emplace_back(DXILOpArg);
B.SetInsertPoint(CI);
- CallInst *DXILCI = DXILB.createDXILOpCall(DXILOp, F.getReturnType(),
- OverloadTy, CI->args());
+ if (isVectorArgExpansion(F)) {
+ SmallVector<Value *> NewArgs = argVectorFlatten(CI, B);
+ Args.append(NewArgs.begin(), NewArgs.end());
+ } else
+ Args.append(CI->arg_begin(), CI->arg_end());
+
+ CallInst *DXILCI =
+ DXILB.createDXILOpCall(DXILOp, F.getReturnType(), OverloadTy, Args);
CI->replaceAllUsesWith(DXILCI);
CI->eraseFromParent();
diff --git a/llvm/lib/Target/DirectX/DirectXPassRegistry.def b/llvm/lib/Target/DirectX/DirectXPassRegistry.def
new file mode 100644
index 000000000000..1b326d020d51
--- /dev/null
+++ b/llvm/lib/Target/DirectX/DirectXPassRegistry.def
@@ -0,0 +1,29 @@
+//===- DirectXPassRegistry.def - Registry of DirectX passes -----*- C++--*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the
+// DirectX backend.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef MODULE_ANALYSIS
+#define MODULE_ANALYSIS(NAME, CREATE_PASS)
+#endif
+MODULE_ANALYSIS("dx-shader-flags", dxil::ShaderFlagsAnalysis())
+MODULE_ANALYSIS("dxil-resource", DXILResourceAnalysis())
+#undef MODULE_ANALYSIS
+
+#ifndef MODULE_PASS
+#define MODULE_PASS(NAME, CREATE_PASS)
+#endif
+// TODO: rename to print<foo> after NPM switch
+MODULE_PASS("print-dx-shader-flags", dxil::ShaderFlagsAnalysisPrinter(dbgs()))
+MODULE_PASS("print-dxil-resource", DXILResourcePrinterPass(dbgs()))
+#undef MODULE_PASS
diff --git a/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp b/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp
index 03c825b3977d..bebca0675522 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp
+++ b/llvm/lib/Target/DirectX/DirectXTargetMachine.cpp
@@ -104,24 +104,8 @@ DirectXTargetMachine::~DirectXTargetMachine() {}
void DirectXTargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
- PB.registerPipelineParsingCallback(
- [](StringRef PassName, ModulePassManager &PM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (PassName == "print-dxil-resource") {
- PM.addPass(DXILResourcePrinterPass(dbgs()));
- return true;
- }
- if (PassName == "print-dx-shader-flags") {
- PM.addPass(dxil::ShaderFlagsAnalysisPrinter(dbgs()));
- return true;
- }
- return false;
- });
-
- PB.registerAnalysisRegistrationCallback([](ModuleAnalysisManager &MAM) {
- MAM.registerPass([&] { return DXILResourceAnalysis(); });
- MAM.registerPass([&] { return dxil::ShaderFlagsAnalysis(); });
- });
+#define GET_PASS_REGISTRY "DirectXPassRegistry.def"
+#include "llvm/Passes/TargetPassRegistry.inc"
}
bool DirectXTargetMachine::addPassesToEmitFile(
diff --git a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index 864591d4eb95..092cccbcca9c 100644
--- a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -245,7 +245,7 @@ public:
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == Register && "Invalid access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonPassRegistry.def b/llvm/lib/Target/Hexagon/HexagonPassRegistry.def
new file mode 100644
index 000000000000..4f58ae6193e0
--- /dev/null
+++ b/llvm/lib/Target/Hexagon/HexagonPassRegistry.def
@@ -0,0 +1,21 @@
+//===- HexagonPassRegistry.def - Registry of Hexagon passes -----*- C++--*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the
+// Hexagon backend.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef LOOP_PASS
+#define LOOP_PASS(NAME, CREATE_PASS)
+#endif
+LOOP_PASS("hexagon-loop-idiom", HexagonLoopIdiomRecognitionPass())
+LOOP_PASS("hexagon-vlcr", HexagonVectorLoopCarriedReusePass())
+#undef LOOP_PASS
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 3c346c334d6d..e64d7e52a9aa 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -299,6 +299,9 @@ HexagonTargetMachine::getSubtargetImpl(const Function &F) const {
void HexagonTargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
+#define GET_PASS_REGISTRY "HexagonPassRegistry.def"
+#include "llvm/Passes/TargetPassRegistry.inc"
+
PB.registerLateLoopOptimizationsEPCallback(
[=](LoopPassManager &LPM, OptimizationLevel Level) {
LPM.addPass(HexagonLoopIdiomRecognitionPass());
diff --git a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
index ff3649b77e35..6ab1375b974e 100644
--- a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
+++ b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -151,7 +151,7 @@ public:
// getEndLoc - Gets location of the last token of this operand
SMLoc getEndLoc() const override { return EndLoc; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(isReg() && "Invalid type access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
index cf163e4e1200..20284b18428b 100644
--- a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
+++ b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
@@ -467,9 +467,9 @@ public:
/// Gets location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == KindTy::Register && "Invalid type access!");
- return Reg.RegNum.id();
+ return Reg.RegNum;
}
const MCExpr *getImm() const {
diff --git a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
index b2c0fda1ccc2..126176133dc0 100644
--- a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
+++ b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp
@@ -157,7 +157,7 @@ public:
bool isDReg() const;
bool isFPDReg() const;
bool isFPCReg() const;
- unsigned getReg() const override;
+ MCRegister getReg() const override;
void addRegOperands(MCInst &Inst, unsigned N) const;
static std::unique_ptr<M68kOperand> createMemOp(M68kMemOp MemOp, SMLoc Start,
@@ -312,7 +312,7 @@ bool M68kOperand::isReg() const {
return Kind == KindTy::MemOp && MemOp.Op == M68kMemOp::Kind::Reg;
}
-unsigned M68kOperand::getReg() const {
+MCRegister M68kOperand::getReg() const {
assert(isReg());
return MemOp.OuterReg;
}
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index 158393f02a24..786aa7bcb64e 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -939,6 +939,7 @@ SDValue M68kTargetLowering::LowerFormalArguments(
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
assert(VA.getValNo() != LastVal && "Same value in different locations");
+ (void)LastVal;
LastVal = VA.getValNo();
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td
index 84eb8e56da76..3a9e4fe77f1c 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.td
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -200,32 +200,35 @@ class MxRegOp<ValueType vt,
// REGISTER DIRECT. The operand is in the data register specified by
// the effective address register field.
-def MxXRD16 : MxRegOp<i16, XR16, MxSize16, "r">;
-def MxXRD32 : MxRegOp<i32, XR32, MxSize32, "r">;
-
-def MxXRD16_TC : MxRegOp<i16, XR16_TC, MxSize16, "r">;
-def MxXRD32_TC : MxRegOp<i32, XR32_TC, MxSize32, "r">;
+foreach size = [16, 32] in {
+ def MxXRD # size : MxRegOp<!cast<ValueType>("i"#size), !cast<MxRegClass>("XR"#size),
+ !cast<MxSize>("MxSize"#size), "r">;
+ def MxXRD # size # _TC : MxRegOp<!cast<ValueType>("i"#size), !cast<MxRegClass>("XR"#size#"_TC"),
+ !cast<MxSize>("MxSize"#size), "r">;
+} // foreach size
// DATA REGISTER DIRECT. The operand is in the data register specified by
// the effective address register field.
let ParserMatchClass = MxDRegClass in {
- def MxDRD8 : MxRegOp<i8, DR8, MxSize8, "d">;
- def MxDRD16 : MxRegOp<i16, DR16, MxSize16, "d">;
- def MxDRD32 : MxRegOp<i32, DR32, MxSize32, "d">;
-
- def MxDRD16_TC : MxRegOp<i16, DR16_TC, MxSize16, "d">;
- def MxDRD32_TC : MxRegOp<i32, DR32_TC, MxSize32, "d">;
-}
+foreach size = [8, 16, 32] in {
+ def MxDRD # size : MxRegOp<!cast<ValueType>("i"#size), !cast<MxRegClass>("DR"#size),
+ !cast<MxSize>("MxSize"#size), "d">;
+ if !gt(size, 8) then
+ def MxDRD # size # _TC : MxRegOp<!cast<ValueType>("i"#size), !cast<MxRegClass>("DR"#size#"_TC"),
+ !cast<MxSize>("MxSize"#size), "d">;
+} // foreach size
+} // let ParserMatchClass
// ADDRESS REGISTER DIRECT. The operand is in the address register specified by
// the effective address register field.
let ParserMatchClass = MxARegClass in {
- def MxARD16 : MxRegOp<i16, AR16, MxSize16, "a">;
- def MxARD32 : MxRegOp<i32, AR32, MxSize32, "a">;
-
- def MxARD16_TC : MxRegOp<i16, AR16_TC, MxSize16, "a">;
- def MxARD32_TC : MxRegOp<i32, AR32_TC, MxSize32, "a">;
-}
+foreach size = [16, 32] in {
+ def MxARD # size : MxRegOp<!cast<ValueType>("i"#size), !cast<MxRegClass>("AR"#size),
+ !cast<MxSize>("MxSize"#size), "a">;
+ def MxARD # size # _TC : MxRegOp<!cast<ValueType>("i"#size), !cast<MxRegClass>("AR"#size#"_TC"),
+ !cast<MxSize>("MxSize"#size), "a">;
+} // foreach size
+} // let ParserMatchClass
// FLOATING POINT DATA REGISTER.
let ParserMatchClass = MxFPDRegClass in {
@@ -255,13 +258,11 @@ class MxMemOp<dag ops, MxSize size, string letter,
// a data reference with the exception of the jump and jump-to-subroutine
// instructions.
def MxARI : MxOpClass<"ARI">;
-def MxARI8 : MxMemOp<(ops AR32), MxSize8, "j", "printARI8Mem", MxARI>;
-def MxARI16 : MxMemOp<(ops AR32), MxSize16, "j", "printARI16Mem", MxARI>;
-def MxARI32 : MxMemOp<(ops AR32), MxSize32, "j", "printARI32Mem", MxARI>;
-
-def MxARI8_TC : MxMemOp<(ops AR32_TC), MxSize8, "j", "printARI8Mem", MxARI>;
-def MxARI16_TC : MxMemOp<(ops AR32_TC), MxSize16, "j", "printARI16Mem", MxARI>;
-def MxARI32_TC : MxMemOp<(ops AR32_TC), MxSize32, "j", "printARI32Mem", MxARI>;
+foreach size = ["8", "16", "32"] in {
+ defvar ResSize = !cast<MxSize>("MxSize"#size);
+ def MxARI # size : MxMemOp<(ops AR32), ResSize, "j", "printARI"#size#"Mem", MxARI>;
+ def MxARI # size # _TC : MxMemOp<(ops AR32_TC), ResSize, "j", "printARI"#size#"Mem", MxARI>;
+} // foreach size
// ADDRESS REGISTER INDIRECT WITH POSTINCREMENT. The address of the operand is
// in the address register specified by the register field. After the operand
@@ -271,13 +272,11 @@ def MxARI32_TC : MxMemOp<(ops AR32_TC), MxSize32, "j", "printARI32Mem", MxARI
// by two rather than one to keep the stack pointer on a word boundary.
// The reference is classified as a data reference.
def MxARIPI : MxOpClass<"ARIPI">;
-def MxARIPI8 : MxMemOp<(ops AR32), MxSize8, "o", "printARIPI8Mem", MxARIPI>;
-def MxARIPI16 : MxMemOp<(ops AR32), MxSize16, "o", "printARIPI16Mem", MxARIPI>;
-def MxARIPI32 : MxMemOp<(ops AR32), MxSize32, "o", "printARIPI32Mem", MxARIPI>;
-
-def MxARIPI8_TC : MxMemOp<(ops AR32_TC), MxSize8, "o", "printARIPI8Mem", MxARIPI>;
-def MxARIPI16_TC : MxMemOp<(ops AR32_TC), MxSize16, "o", "printARIPI16Mem", MxARIPI>;
-def MxARIPI32_TC : MxMemOp<(ops AR32_TC), MxSize32, "o", "printARIPI32Mem", MxARIPI>;
+foreach size = ["8", "16", "32"] in {
+ defvar ResSize = !cast<MxSize>("MxSize"#size);
+ def MxARIPI # size : MxMemOp<(ops AR32), ResSize, "o", "printARIPI"#size#"Mem", MxARIPI>;
+ def MxARIPI # size # _TC : MxMemOp<(ops AR32_TC), ResSize, "o", "printARIPI"#size#"Mem", MxARIPI>;
+} // foreach size
// ADDRESS REGISTER INDIRECT WITH PREDECREMENT. The address of the operand is in
// the address register specified by the register field. Before the operand
@@ -287,13 +286,11 @@ def MxARIPI32_TC : MxMemOp<(ops AR32_TC), MxSize32, "o", "printARIPI32Mem", MxA
// two rather than one to keep the stack pointer on a word boundary.
// The reference is classified as a data reference.
def MxARIPD : MxOpClass<"ARIPD">;
-def MxARIPD8 : MxMemOp<(ops AR32), MxSize8, "e", "printARIPD8Mem", MxARIPD>;
-def MxARIPD16 : MxMemOp<(ops AR32), MxSize16, "e", "printARIPD16Mem", MxARIPD>;
-def MxARIPD32 : MxMemOp<(ops AR32), MxSize32, "e", "printARIPD32Mem", MxARIPD>;
-
-def MxARIPD8_TC : MxMemOp<(ops AR32_TC), MxSize8, "e", "printARIPD8Mem", MxARIPD>;
-def MxARIPD16_TC : MxMemOp<(ops AR32_TC), MxSize16, "e", "printARIPD16Mem", MxARIPD>;
-def MxARIPD32_TC : MxMemOp<(ops AR32_TC), MxSize32, "e", "printARIPD32Mem", MxARIPD>;
+foreach size = ["8", "16", "32"] in {
+ defvar ResSize = !cast<MxSize>("MxSize"#size);
+ def MxARIPD # size : MxMemOp<(ops AR32), ResSize, "e", "printARIPD"#size#"Mem", MxARIPD>;
+ def MxARIPD # size # _TC : MxMemOp<(ops AR32_TC), ResSize, "e", "printARIPD"#size#"Mem", MxARIPD>;
+} // foreach size
// ADDRESS REGISTER INDIRECT WITH DISPLACEMENT. This addressing mode requires one
// word of extension. The address of the operand is the sum of the address in
@@ -301,13 +298,11 @@ def MxARIPD32_TC : MxMemOp<(ops AR32_TC), MxSize32, "e", "printARIPD32Mem", MxA
// extension word. The reference is classified as a data reference with the
// exception of the jump and jump-to-subroutine instructions.
def MxARID : MxOpClass<"ARID">;
-def MxARID8 : MxMemOp<(ops i16imm:$disp, AR32:$reg), MxSize8, "p", "printARID8Mem", MxARID>;
-def MxARID16 : MxMemOp<(ops i16imm:$disp, AR32:$reg), MxSize16, "p", "printARID16Mem", MxARID>;
-def MxARID32 : MxMemOp<(ops i16imm:$disp, AR32:$reg), MxSize32, "p", "printARID32Mem", MxARID>;
-
-def MxARID8_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize8, "p", "printARID8Mem", MxARID>;
-def MxARID16_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize16, "p", "printARID16Mem", MxARID>;
-def MxARID32_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize32, "p", "printARID32Mem", MxARID>;
+foreach size = ["8", "16", "32"] in {
+ defvar ResSize = !cast<MxSize>("MxSize"#size);
+ def MxARID # size : MxMemOp<(ops i16imm:$disp, AR32:$reg), ResSize, "p", "printARID"#size#"Mem", MxARID>;
+ def MxARID # size # _TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), ResSize, "p", "printARID"#size#"Mem", MxARID>;
+} // foreach size
// ADDRESS REGISTER INDIRECT WITH INDEX. This addressing mode requires one word
// of extension. The address of the operand is the sum of the address in the
@@ -316,19 +311,11 @@ def MxARID32_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize32, "p", "pr
// The reference is classified as a data reference with the exception of the
// jump and jump-to-subroutine instructions
def MxARII : MxOpClass<"ARII">;
-def MxARII8 : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index),
- MxSize8, "f", "printARII8Mem", MxARII>;
-def MxARII16 : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index),
- MxSize16, "f", "printARII16Mem", MxARII>;
-def MxARII32 : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index),
- MxSize32, "f", "printARII32Mem", MxARII>;
-
-def MxARII8_TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32_TC:$index),
- MxSize8, "f", "printARII8Mem", MxARII>;
-def MxARII16_TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32_TC:$index),
- MxSize16, "f", "printARII16Mem", MxARII>;
-def MxARII32_TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32_TC:$index),
- MxSize32, "f", "printARII32Mem", MxARII>;
+foreach size = ["8", "16", "32"] in {
+ defvar ResSize = !cast<MxSize>("MxSize"#size);
+ def MxARII # size : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index), ResSize, "f", "printARII"#size#"Mem", MxARII>;
+ def MxARII # size # _TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32:$index), ResSize, "f", "printARII"#size#"Mem", MxARII>;
+} // foreach size
// ABSOLUTE SHORT ADDRESS. This addressing mode requires one word of extension.
// The address of the operand is the extension word. The 16-bit address is sign
@@ -361,14 +348,14 @@ def MxPCD : MxOpClass<"PCD">;
def MxPCI : MxOpClass<"PCI">;
let OperandType = "OPERAND_PCREL" in {
+foreach size = ["8", "16", "32"] in {
+defvar ResSize = !cast<MxSize>("MxSize"#size);
// PROGRAM COUNTER WITH DISPLACEMENT. This addressing mode requires one word of
// extension. The address of the operand is the sum of the address in the program
// counter and the Sign-extended 16-bit displacement integer in the extension
// word. The value in the program counter is the address of the extension word.
// The reference is classified as a program reference.
-def MxPCD8 : MxMemOp<(ops i16imm), MxSize8, "q", "printPCD8Mem", MxPCD>;
-def MxPCD16 : MxMemOp<(ops i16imm), MxSize16, "q", "printPCD16Mem", MxPCD>;
-def MxPCD32 : MxMemOp<(ops i16imm), MxSize32, "q", "printPCD32Mem", MxPCD>;
+def MxPCD # size : MxMemOp<(ops i16imm), ResSize, "q", "printPCD"#size#"Mem", MxPCD>;
// PROGRAM COUNTER WITH INDEX. This addressing mode requires one word of
// extension. The address is the sum of the address in the program counter, the
@@ -376,9 +363,8 @@ def MxPCD32 : MxMemOp<(ops i16imm), MxSize32, "q", "printPCD32Mem", MxPCD>;
// word, and the contents of the index register. The value in the program
// counter is the address of the extension word. This reference is classified as
// a program reference.
-def MxPCI8 : MxMemOp<(ops i8imm:$disp, XR32:$index), MxSize8, "k", "printPCI8Mem", MxPCI>;
-def MxPCI16 : MxMemOp<(ops i8imm:$disp, XR32:$index), MxSize16, "k", "printPCI16Mem", MxPCI>;
-def MxPCI32 : MxMemOp<(ops i8imm:$disp, XR32:$index), MxSize32, "k", "printPCI32Mem", MxPCI>;
+def MxPCI # size : MxMemOp<(ops i8imm:$disp, XR32:$index), ResSize, "k", "printPCI"#size#"Mem", MxPCI>;
+} // foreach size
} // OPERAND_PCREL
def MxImm : AsmOperandClass {
diff --git a/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp b/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
index 818a468612a5..2bc1a89ef59c 100644
--- a/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
+++ b/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
@@ -183,7 +183,7 @@ public:
return Tok;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == k_Reg && "Invalid access!");
return Reg;
}
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 9d6e8dc573a8..076e0a20cb97 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -1458,7 +1458,7 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
// As a special case until we sort out the definition of div/divu, accept
// $0/$zero here so that MCK_ZERO works correctly.
if (Kind == k_RegisterIndex && RegIdx.Index == 0 &&
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 181b82f14bfe..4d6a00c14a35 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -498,10 +498,9 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
assert(Relocs.size() == Sorted.size() && "Some relocs were not consumed");
- // Overwrite the original vector with the sorted elements. The caller expects
- // them in reverse order.
+ // Overwrite the original vector with the sorted elements.
unsigned CopyTo = 0;
- for (const auto &R : reverse(Sorted))
+ for (const auto &R : Sorted)
Relocs[CopyTo++] = R.R;
}
diff --git a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
index bded59439a73..c30129743a96 100644
--- a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
+++ b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
@@ -500,6 +500,15 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword(
.addReg(Incr, RegState::Kill)
.addImm(ShiftImm);
}
+ } else {
+ // and OldVal, OldVal, Mask
+ // and Incr, Incr, Mask
+ BuildMI(loopMBB, DL, TII->get(Mips::AND), OldVal)
+ .addReg(OldVal)
+ .addReg(Mask);
+ BuildMI(loopMBB, DL, TII->get(Mips::AND), Incr)
+ .addReg(Incr)
+ .addReg(Mask);
}
}
// unsigned: sltu Scratch4, oldVal, Incr
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 3307e840a2af..8468dd6a2211 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -508,16 +508,8 @@ bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
const MipsSubtarget &ST = MI.getMF()->getSubtarget<MipsSubtarget>();
- const MipsInstrInfo &TII = *ST.getInstrInfo();
- const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
- const RegisterBankInfo &RBI = *ST.getRegBankInfo();
switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
- case Intrinsic::trap: {
- MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
- MI.eraseFromParent();
- return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
- }
case Intrinsic::vacopy: {
MachinePointerInfo MPO;
LLT PtrTy = LLT::pointer(0, 32);
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 2783b1e0ec73..9f31b72bbceb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1022,7 +1022,6 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
const DataLayout &DL = getDataLayout();
// GlobalVariables are always constant pointers themselves.
- PointerType *PTy = GVar->getType();
Type *ETy = GVar->getValueType();
if (GVar->hasExternalLinkage()) {
@@ -1030,6 +1029,9 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
O << ".visible ";
else
O << ".extern ";
+ } else if (STI.getPTXVersion() >= 50 && GVar->hasCommonLinkage() &&
+ GVar->getAddressSpace() == ADDRESS_SPACE_GLOBAL) {
+ O << ".common ";
} else if (GVar->hasLinkOnceLinkage() || GVar->hasWeakLinkage() ||
GVar->hasAvailableExternallyLinkage() ||
GVar->hasCommonLinkage()) {
@@ -1141,7 +1143,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
}
O << ".";
- emitPTXAddressSpace(PTy->getAddressSpace(), O);
+ emitPTXAddressSpace(GVar->getAddressSpace(), O);
if (isManaged(*GVar)) {
if (STI.getPTXVersion() < 40 || STI.getSmVersion() < 30) {
@@ -1170,8 +1172,8 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
// Ptx allows variable initilization only for constant and global state
// spaces.
if (GVar->hasInitializer()) {
- if ((PTy->getAddressSpace() == ADDRESS_SPACE_GLOBAL) ||
- (PTy->getAddressSpace() == ADDRESS_SPACE_CONST)) {
+ if ((GVar->getAddressSpace() == ADDRESS_SPACE_GLOBAL) ||
+ (GVar->getAddressSpace() == ADDRESS_SPACE_CONST)) {
const Constant *Initializer = GVar->getInitializer();
// 'undef' is treated as there is no value specified.
if (!Initializer->isNullValue() && !isa<UndefValue>(Initializer)) {
@@ -1186,7 +1188,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
!isa<UndefValue>(GVar->getInitializer())) {
report_fatal_error("initial value of '" + GVar->getName() +
"' is not allowed in addrspace(" +
- Twine(PTy->getAddressSpace()) + ")");
+ Twine(GVar->getAddressSpace()) + ")");
}
}
}
@@ -1205,8 +1207,8 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
ElementSize = DL.getTypeStoreSize(ETy);
// Ptx allows variable initilization only for constant and
// global state spaces.
- if (((PTy->getAddressSpace() == ADDRESS_SPACE_GLOBAL) ||
- (PTy->getAddressSpace() == ADDRESS_SPACE_CONST)) &&
+ if (((GVar->getAddressSpace() == ADDRESS_SPACE_GLOBAL) ||
+ (GVar->getAddressSpace() == ADDRESS_SPACE_CONST)) &&
GVar->hasInitializer()) {
const Constant *Initializer = GVar->getInitializer();
if (!isa<UndefValue>(Initializer) && !Initializer->isNullValue()) {
@@ -1719,7 +1721,7 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
// Emit the Fake Stack Object
const MachineFrameInfo &MFI = MF.getFrameInfo();
- int NumBytes = (int) MFI.getStackSize();
+ int64_t NumBytes = MFI.getStackSize();
if (NumBytes) {
O << "\t.local .align " << MFI.getMaxAlign().value() << " .b8 \t"
<< DEPOTNAME << getFunctionNumber() << "[" << NumBytes << "];\n";
diff --git a/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def b/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def
new file mode 100644
index 000000000000..6ff15ab6f13c
--- /dev/null
+++ b/llvm/lib/Target/NVPTX/NVPTXPassRegistry.def
@@ -0,0 +1,40 @@
+//===- NVPTXPassRegistry.def - Registry of NVPTX passes ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the
+// NVPTX backend.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef MODULE_PASS
+#define MODULE_PASS(NAME, CREATE_PASS)
+#endif
+MODULE_PASS("generic-to-nvvm", GenericToNVVMPass())
+MODULE_PASS("nvptx-lower-ctor-dtor", NVPTXCtorDtorLoweringPass())
+#undef MODULE_PASS
+
+#ifndef FUNCTION_ANALYSIS
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS)
+#endif
+
+#ifndef FUNCTION_ALIAS_ANALYSIS
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ FUNCTION_ANALYSIS(NAME, CREATE_PASS)
+#endif
+FUNCTION_ALIAS_ANALYSIS("nvptx-aa", NVPTXAA())
+#undef FUNCTION_ALIAS_ANALYSIS
+#undef FUNCTION_ANALYSIS
+
+#ifndef FUNCTION_PASS
+#define FUNCTION_PASS(NAME, CREATE_PASS)
+#endif
+FUNCTION_PASS("nvvm-intr-range", NVVMIntrRangePass())
+FUNCTION_PASS("nvvm-reflect", NVVMReflectPass())
+#undef FUNCTION_PASS
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 69d4596f7843..78f48652c992 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -227,45 +227,8 @@ void NVPTXTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
void NVPTXTargetMachine::registerPassBuilderCallbacks(
PassBuilder &PB, bool PopulateClassToPassNames) {
- PB.registerPipelineParsingCallback(
- [](StringRef PassName, FunctionPassManager &PM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (PassName == "nvvm-reflect") {
- PM.addPass(NVVMReflectPass());
- return true;
- }
- if (PassName == "nvvm-intr-range") {
- PM.addPass(NVVMIntrRangePass());
- return true;
- }
- return false;
- });
-
- PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
- FAM.registerPass([&] { return NVPTXAA(); });
- });
-
- PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) {
- if (AAName == "nvptx-aa") {
- AAM.registerFunctionAnalysis<NVPTXAA>();
- return true;
- }
- return false;
- });
-
- PB.registerPipelineParsingCallback(
- [](StringRef PassName, ModulePassManager &PM,
- ArrayRef<PassBuilder::PipelineElement>) {
- if (PassName == "nvptx-lower-ctor-dtor") {
- PM.addPass(NVPTXCtorDtorLoweringPass());
- return true;
- }
- if (PassName == "generic-to-nvvm") {
- PM.addPass(GenericToNVVMPass());
- return true;
- }
- return false;
- });
+#define GET_PASS_REGISTRY "NVPTXPassRegistry.def"
+#include "llvm/Passes/TargetPassRegistry.inc"
PB.registerPipelineStartEPCallback(
[this](ModulePassManager &PM, OptimizationLevel Level) {
diff --git a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index 8108cfa521c8..55978af38000 100644
--- a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -276,9 +276,11 @@ public:
return TLSReg.Sym;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override { llvm_unreachable("Not implemented"); }
+
+ unsigned getRegNum() const {
assert(isRegNumber() && "Invalid access!");
- return (unsigned) Imm.Val;
+ return (unsigned)Imm.Val;
}
unsigned getFpReg() const {
@@ -459,22 +461,22 @@ public:
void addRegGPRCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(RRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(RRegs[getRegNum()]));
}
void addRegGPRCNoR0Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(RRegsNoR0[getReg()]));
+ Inst.addOperand(MCOperand::createReg(RRegsNoR0[getRegNum()]));
}
void addRegG8RCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(XRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(XRegs[getRegNum()]));
}
void addRegG8RCNoX0Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(XRegsNoX0[getReg()]));
+ Inst.addOperand(MCOperand::createReg(XRegsNoX0[getRegNum()]));
}
void addRegG8pRCOperands(MCInst &Inst, unsigned N) const {
@@ -498,12 +500,12 @@ public:
void addRegF4RCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(FRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(FRegs[getRegNum()]));
}
void addRegF8RCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(FRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(FRegs[getRegNum()]));
}
void addRegFpRCOperands(MCInst &Inst, unsigned N) const {
@@ -513,12 +515,12 @@ public:
void addRegVFRCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(VFRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(VFRegs[getRegNum()]));
}
void addRegVRRCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(VRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(VRegs[getRegNum()]));
}
void addRegVSRCOperands(MCInst &Inst, unsigned N) const {
@@ -538,12 +540,12 @@ public:
void addRegSPE4RCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(RRegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(RRegs[getRegNum()]));
}
void addRegSPERCOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::createReg(SPERegs[getReg()]));
+ Inst.addOperand(MCOperand::createReg(SPERegs[getRegNum()]));
}
void addRegACCRCOperands(MCInst &Inst, unsigned N) const {
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 64cae1caa643..16942c6893a1 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -2845,9 +2845,9 @@ void PPCAIXAsmPrinter::emitFunctionDescriptor() {
}
void PPCAIXAsmPrinter::emitFunctionEntryLabel() {
- // It's not necessary to emit the label when we have individual
- // function in its own csect.
- if (!TM.getFunctionSections())
+ // For functions without user defined section, it's not necessary to emit the
+ // label when we have individual function in its own csect.
+ if (!TM.getFunctionSections() || MF->getFunction().hasSection())
PPCAsmPrinter::emitFunctionEntryLabel();
// Emit aliasing label for function entry point label.
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index dfea9e770924..af82b6cdb180 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -7558,6 +7558,16 @@ static void reduceVSXSwap(SDNode *N, SelectionDAG *DAG) {
DAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), N->getOperand(0));
}
+// Check if an SDValue has the 'aix-small-tls' global variable attribute.
+static bool hasAIXSmallTLSAttr(SDValue Val) {
+ if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Val))
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal()))
+ if (GV->hasAttribute("aix-small-tls"))
+ return true;
+
+ return false;
+}
+
// Is an ADDI eligible for folding for non-TOC-based local-exec accesses?
static bool isEligibleToFoldADDIForLocalExecAccesses(SelectionDAG *DAG,
SDValue ADDIToFold) {
@@ -7567,20 +7577,25 @@ static bool isEligibleToFoldADDIForLocalExecAccesses(SelectionDAG *DAG,
(ADDIToFold.getMachineOpcode() != PPC::ADDI8))
return false;
+ // Folding is only allowed for the AIX small-local-exec TLS target attribute
+ // or when the 'aix-small-tls' global variable attribute is present.
+ const PPCSubtarget &Subtarget =
+ DAG->getMachineFunction().getSubtarget<PPCSubtarget>();
+ SDValue TLSVarNode = ADDIToFold.getOperand(1);
+ if (!(Subtarget.hasAIXSmallLocalExecTLS() || hasAIXSmallTLSAttr(TLSVarNode)))
+ return false;
+
// The first operand of the ADDIToFold should be the thread pointer.
// This transformation is only performed if the first operand of the
// addi is the thread pointer.
SDValue TPRegNode = ADDIToFold.getOperand(0);
RegisterSDNode *TPReg = dyn_cast<RegisterSDNode>(TPRegNode.getNode());
- const PPCSubtarget &Subtarget =
- DAG->getMachineFunction().getSubtarget<PPCSubtarget>();
if (!TPReg || (TPReg->getReg() != Subtarget.getThreadPointerRegister()))
return false;
// The second operand of the ADDIToFold should be the global TLS address
// (the local-exec TLS variable). We only perform the folding if the TLS
// variable is the second operand.
- SDValue TLSVarNode = ADDIToFold.getOperand(1);
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(TLSVarNode);
if (!GA)
return false;
@@ -7649,7 +7664,6 @@ static void foldADDIForLocalExecAccesses(SDNode *N, SelectionDAG *DAG) {
void PPCDAGToDAGISel::PeepholePPC64() {
SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
- bool HasAIXSmallLocalExecTLS = Subtarget->hasAIXSmallLocalExecTLS();
while (Position != CurDAG->allnodes_begin()) {
SDNode *N = &*--Position;
@@ -7661,8 +7675,7 @@ void PPCDAGToDAGISel::PeepholePPC64() {
reduceVSXSwap(N, CurDAG);
// This optimization is performed for non-TOC-based local-exec accesses.
- if (HasAIXSmallLocalExecTLS)
- foldADDIForLocalExecAccesses(N, CurDAG);
+ foldADDIForLocalExecAccesses(N, CurDAG);
unsigned FirstOp;
unsigned StorageOpcode = N->getMachineOpcode();
@@ -7821,8 +7834,7 @@ void PPCDAGToDAGISel::PeepholePPC64() {
ImmOpnd.getValueType());
} else if (Offset != 0) {
// This optimization is performed for non-TOC-based local-exec accesses.
- if (HasAIXSmallLocalExecTLS &&
- isEligibleToFoldADDIForLocalExecAccesses(CurDAG, Base)) {
+ if (isEligibleToFoldADDIForLocalExecAccesses(CurDAG, Base)) {
// Add the non-zero offset information into the load or store
// instruction to be used for non-TOC-based local-exec accesses.
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index cce0efad39c7..7436b202fba0 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3367,15 +3367,21 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool Is64Bit = Subtarget.isPPC64();
- bool HasAIXSmallLocalExecTLS = Subtarget.hasAIXSmallLocalExecTLS();
TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
bool IsTLSLocalExecModel = Model == TLSModel::LocalExec;
if (IsTLSLocalExecModel || Model == TLSModel::InitialExec) {
+ bool HasAIXSmallLocalExecTLS = Subtarget.hasAIXSmallLocalExecTLS();
+ bool HasAIXSmallTLSGlobalAttr = false;
SDValue VariableOffsetTGA =
DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TPREL_FLAG);
SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
SDValue TLSReg;
+
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->hasAttribute("aix-small-tls"))
+ HasAIXSmallTLSGlobalAttr = true;
+
if (Is64Bit) {
// For local-exec and initial-exec on AIX (64-bit), the sequence generated
// involves a load of the variable offset (from the TOC), followed by an
@@ -3385,14 +3391,16 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
// add reg2, reg1, r13 // r13 contains the thread pointer
TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
- // With the -maix-small-local-exec-tls option, produce a faster access
- // sequence for local-exec TLS variables where the offset from the TLS
- // base is encoded as an immediate operand.
+ // With the -maix-small-local-exec-tls option, or with the "aix-small-tls"
+ // global variable attribute, produce a faster access sequence for
+ // local-exec TLS variables where the offset from the TLS base is encoded
+ // as an immediate operand.
//
// We only utilize the faster local-exec access sequence when the TLS
// variable has a size within the policy limit. We treat types that are
// not sized or are empty as being over the policy size limit.
- if (HasAIXSmallLocalExecTLS && IsTLSLocalExecModel) {
+ if ((HasAIXSmallLocalExecTLS || HasAIXSmallTLSGlobalAttr) &&
+ IsTLSLocalExecModel) {
Type *GVType = GV->getValueType();
if (GVType->isSized() && !GVType->isEmptyTy() &&
GV->getParent()->getDataLayout().getTypeAllocSize(GVType) <=
@@ -3410,8 +3418,9 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
TLSReg = DAG.getNode(PPCISD::GET_TPOINTER, dl, PtrVT);
// We do not implement the 32-bit version of the faster access sequence
- // for local-exec that is controlled by -maix-small-local-exec-tls.
- if (HasAIXSmallLocalExecTLS)
+ // for local-exec that is controlled by the -maix-small-local-exec-tls
+ // option, or the "aix-small-tls" global variable attribute.
+ if (HasAIXSmallLocalExecTLS || HasAIXSmallTLSGlobalAttr)
report_fatal_error("The small-local-exec TLS access sequence is "
"currently only supported on AIX (64-bit mode).");
}
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 7adf1adcc647..57e1019adb74 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -219,7 +219,7 @@ InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
return TTI::TCC_Free;
break;
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
return TTI::TCC_Free;
break;
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 1779959324da..123b37442329 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -213,7 +213,11 @@ class RISCVAsmParser : public MCTargetAsmParser {
ParseStatus parseReglist(OperandVector &Operands);
ParseStatus parseRegReg(OperandVector &Operands);
ParseStatus parseRetval(OperandVector &Operands);
- ParseStatus parseZcmpSpimm(OperandVector &Operands);
+ ParseStatus parseZcmpStackAdj(OperandVector &Operands,
+ bool ExpectNegative = false);
+ ParseStatus parseZcmpNegStackAdj(OperandVector &Operands) {
+ return parseZcmpStackAdj(Operands, /*ExpectNegative*/ true);
+ }
bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
@@ -977,9 +981,9 @@ public:
return Imm.IsRV64;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == KindTy::Register && "Invalid type access!");
- return Reg.RegNum.id();
+ return Reg.RegNum;
}
StringRef getSysReg() const {
@@ -1062,7 +1066,7 @@ public:
break;
case KindTy::Spimm:
OS << "<Spimm: ";
- RISCVZC::printSpimm(Spimm.Val, OS);
+ OS << Spimm.Val;
OS << '>';
break;
case KindTy::RegReg:
@@ -1608,7 +1612,7 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
ErrorLoc,
"operand must be {ra [, s0[-sN]]} or {x1 [, x8[-x9][, x18[-xN]]]}");
}
- case Match_InvalidSpimm: {
+ case Match_InvalidStackAdj: {
SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
return Error(
ErrorLoc,
@@ -2583,8 +2587,9 @@ ParseStatus RISCVAsmParser::parseReglist(OperandVector &Operands) {
return ParseStatus::Success;
}
-ParseStatus RISCVAsmParser::parseZcmpSpimm(OperandVector &Operands) {
- (void)parseOptionalToken(AsmToken::Minus);
+ParseStatus RISCVAsmParser::parseZcmpStackAdj(OperandVector &Operands,
+ bool ExpectNegative) {
+ bool Negative = parseOptionalToken(AsmToken::Minus);
SMLoc S = getLoc();
int64_t StackAdjustment = getLexer().getTok().getIntVal();
@@ -2592,7 +2597,8 @@ ParseStatus RISCVAsmParser::parseZcmpSpimm(OperandVector &Operands) {
unsigned RlistVal = static_cast<RISCVOperand *>(Operands[1].get())->Rlist.Val;
bool IsEABI = isRVE();
- if (!RISCVZC::getSpimm(RlistVal, Spimm, StackAdjustment, isRV64(), IsEABI))
+ if (Negative != ExpectNegative ||
+ !RISCVZC::getSpimm(RlistVal, Spimm, StackAdjustment, isRV64(), IsEABI))
return ParseStatus::NoMatch;
Operands.push_back(RISCVOperand::createSpimm(Spimm << 4, S));
getLexer().Lex();
@@ -3081,34 +3087,11 @@ void RISCVAsmParser::emitToStreamer(MCStreamer &S, const MCInst &Inst) {
void RISCVAsmParser::emitLoadImm(MCRegister DestReg, int64_t Value,
MCStreamer &Out) {
- RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Value, getSTI());
-
- MCRegister SrcReg = RISCV::X0;
- for (const RISCVMatInt::Inst &Inst : Seq) {
- switch (Inst.getOpndKind()) {
- case RISCVMatInt::Imm:
- emitToStreamer(Out,
- MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addImm(Inst.getImm()));
- break;
- case RISCVMatInt::RegX0:
- emitToStreamer(
- Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addReg(SrcReg).addReg(
- RISCV::X0));
- break;
- case RISCVMatInt::RegReg:
- emitToStreamer(
- Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addReg(SrcReg).addReg(
- SrcReg));
- break;
- case RISCVMatInt::RegImm:
- emitToStreamer(
- Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addReg(SrcReg).addImm(
- Inst.getImm()));
- break;
- }
+ SmallVector<MCInst, 8> Seq;
+ RISCVMatInt::generateMCInstSeq(Value, getSTI(), DestReg, Seq);
- // Only the first instruction has X0 as its source.
- SrcReg = DestReg;
+ for (MCInst &Inst : Seq) {
+ emitToStreamer(Out, Inst);
}
}
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 45e19cdea300..8af4bc658409 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -34,14 +34,15 @@ private:
// Whether this is assigning args for a return.
bool IsRet;
- // true if assignArg has been called for a mask argument, false otherwise.
- bool AssignedFirstMaskArg = false;
+ RVVArgDispatcher &RVVDispatcher;
public:
RISCVOutgoingValueAssigner(
- RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+ RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
+ RVVArgDispatcher &RVVDispatcher)
: CallLowering::OutgoingValueAssigner(nullptr),
- RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+ RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
+ RVVDispatcher(RVVDispatcher) {}
bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
@@ -51,16 +52,9 @@ public:
const DataLayout &DL = MF.getDataLayout();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
- ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
- FirstMaskArgument = ValNo;
- AssignedFirstMaskArg = true;
- }
-
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(), FirstMaskArgument))
+ *Subtarget.getTargetLowering(), RVVDispatcher))
return true;
StackSize = State.getStackSize();
@@ -181,14 +175,15 @@ private:
// Whether this is assigning args from a return.
bool IsRet;
- // true if assignArg has been called for a mask argument, false otherwise.
- bool AssignedFirstMaskArg = false;
+ RVVArgDispatcher &RVVDispatcher;
public:
RISCVIncomingValueAssigner(
- RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+ RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
+ RVVArgDispatcher &RVVDispatcher)
: CallLowering::IncomingValueAssigner(nullptr),
- RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+ RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
+ RVVDispatcher(RVVDispatcher) {}
bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
@@ -201,16 +196,9 @@ public:
if (LocVT.isScalableVector())
MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
- ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
- FirstMaskArgument = ValNo;
- AssignedFirstMaskArg = true;
- }
-
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(), FirstMaskArgument))
+ *Subtarget.getTargetLowering(), RVVDispatcher))
return true;
StackSize = State.getStackSize();
@@ -420,9 +408,11 @@ bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 4> SplitRetInfos;
splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);
+ RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
+ F.getReturnType()};
RISCVOutgoingValueAssigner Assigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/true);
+ /*IsRet=*/true, Dispatcher);
RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
MIRBuilder, CC, F.isVarArg());
@@ -531,6 +521,7 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
CallingConv::ID CC = F.getCallingConv();
SmallVector<ArgInfo, 32> SplitArgInfos;
+ SmallVector<Type *, 4> TypeList;
unsigned Index = 0;
for (auto &Arg : F.args()) {
// Construct the ArgInfo object from destination register and argument type.
@@ -542,12 +533,15 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
// correspondingly and appended to SplitArgInfos.
splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
+ TypeList.push_back(Arg.getType());
+
++Index;
}
+ RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(), TypeList};
RISCVIncomingValueAssigner Assigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/false);
+ /*IsRet=*/false, Dispatcher);
RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
SmallVector<CCValAssign, 16> ArgLocs;
@@ -585,11 +579,13 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 32> SplitArgInfos;
SmallVector<ISD::OutputArg, 8> Outs;
+ SmallVector<Type *, 4> TypeList;
for (auto &AInfo : Info.OrigArgs) {
// Handle any required unmerging of split value types from a given VReg into
// physical registers. ArgInfo objects are constructed correspondingly and
// appended to SplitArgInfos.
splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
+ TypeList.push_back(AInfo.Ty);
}
// TODO: Support tail calls.
@@ -607,9 +603,10 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));
+ RVVArgDispatcher ArgDispatcher{&MF, getTLI<RISCVTargetLowering>(), TypeList};
RISCVOutgoingValueAssigner ArgAssigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/false);
+ /*IsRet=*/false, ArgDispatcher);
RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
MIRBuilder, CC, Info.IsVarArg))
@@ -637,9 +634,11 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 4> SplitRetInfos;
splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);
+ RVVArgDispatcher RetDispatcher{&MF, getTLI<RISCVTargetLowering>(),
+ F.getReturnType()};
RISCVIncomingValueAssigner RetAssigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/true);
+ /*IsRet=*/true, RetDispatcher);
RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
MIRBuilder, CC, Info.IsVarArg))
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 5738f86e7e9f..3103992a86c0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -77,8 +77,6 @@ private:
MachineRegisterInfo &MRI) const;
bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
MachineRegisterInfo &MRI) const;
- bool selectIntrinsicWithSideEffects(MachineInstr &MI, MachineIRBuilder &MIB,
- MachineRegisterInfo &MRI) const;
void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
MachineIRBuilder &MIB) const;
bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
@@ -686,8 +684,6 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return selectSelect(MI, MIB, MRI);
case TargetOpcode::G_FCMP:
return selectFPCompare(MI, MIB, MRI);
- case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
- return selectIntrinsicWithSideEffects(MI, MIB, MRI);
case TargetOpcode::G_FENCE: {
AtomicOrdering FenceOrdering =
static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
@@ -1255,29 +1251,6 @@ bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
return true;
}
-bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
- MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
- assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
- "Unexpected opcode");
- // Find the intrinsic ID.
- unsigned IntrinID = cast<GIntrinsic>(MI).getIntrinsicID();
-
- // Select the instruction.
- switch (IntrinID) {
- default:
- return false;
- case Intrinsic::trap:
- MIB.buildInstr(RISCV::UNIMP, {}, {});
- break;
- case Intrinsic::debugtrap:
- MIB.buildInstr(RISCV::EBREAK, {}, {});
- break;
- }
-
- MI.eraseFromParent();
- return true;
-}
-
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
SyncScope::ID FenceSSID,
MachineIRBuilder &MIB) const {
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 64ae4e94a8c9..22cae389cc33 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -40,6 +40,32 @@ static LegalityPredicate typeIsScalarFPArith(unsigned TypeIdx,
};
}
+static LegalityPredicate
+typeIsLegalIntOrFPVec(unsigned TypeIdx,
+ std::initializer_list<LLT> IntOrFPVecTys,
+ const RISCVSubtarget &ST) {
+ LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
+ return ST.hasVInstructions() &&
+ (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
+ ST.hasVInstructionsI64()) &&
+ (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
+ ST.getELen() == 64);
+ };
+
+ return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
+}
+
+static LegalityPredicate
+typeIsLegalBoolVec(unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
+ const RISCVSubtarget &ST) {
+ LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
+ return ST.hasVInstructions() &&
+ (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
+ ST.getELen() == 64);
+ };
+ return all(typeInSet(TypeIdx, BoolVecTys), P);
+}
+
RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
: STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
const LLT sDoubleXLen = LLT::scalar(2 * XLen);
@@ -50,6 +76,14 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
+ const LLT nxv1s1 = LLT::scalable_vector(1, s1);
+ const LLT nxv2s1 = LLT::scalable_vector(2, s1);
+ const LLT nxv4s1 = LLT::scalable_vector(4, s1);
+ const LLT nxv8s1 = LLT::scalable_vector(8, s1);
+ const LLT nxv16s1 = LLT::scalable_vector(16, s1);
+ const LLT nxv32s1 = LLT::scalable_vector(32, s1);
+ const LLT nxv64s1 = LLT::scalable_vector(64, s1);
+
const LLT nxv1s8 = LLT::scalable_vector(1, s8);
const LLT nxv2s8 = LLT::scalable_vector(2, s8);
const LLT nxv4s8 = LLT::scalable_vector(4, s8);
@@ -78,22 +112,16 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
using namespace TargetOpcode;
- auto AllVecTys = {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
- nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
- nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
- nxv1s64, nxv2s64, nxv4s64, nxv8s64};
+ auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};
+
+ auto IntOrFPVecTys = {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
+ nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
+ nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
+ nxv1s64, nxv2s64, nxv4s64, nxv8s64};
getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
.legalFor({s32, sXLen})
- .legalIf(all(
- typeInSet(0, AllVecTys),
- LegalityPredicate([=, &ST](const LegalityQuery &Query) {
- return ST.hasVInstructions() &&
- (Query.Types[0].getScalarSizeInBits() != 64 ||
- ST.hasVInstructionsI64()) &&
- (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
- ST.getELen() == 64);
- })))
+ .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
.widenScalarToNextPow2(0)
.clampScalar(0, s32, sXLen);
@@ -191,8 +219,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
ConstantActions.customFor({s64});
ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);
+ // TODO: transform illegal vector types into legal vector type
getActionDefinitionsBuilder(G_IMPLICIT_DEF)
.legalFor({s32, sXLen, p0})
+ .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
+ .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
.widenScalarToNextPow2(0)
.clampScalar(0, s32, sXLen);
@@ -202,8 +233,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(1, sXLen, sXLen)
.clampScalar(0, sXLen, sXLen);
- auto &SelectActions = getActionDefinitionsBuilder(G_SELECT).legalFor(
- {{s32, sXLen}, {p0, sXLen}});
+ auto &SelectActions =
+ getActionDefinitionsBuilder(G_SELECT)
+ .legalFor({{s32, sXLen}, {p0, sXLen}})
+ .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalBoolVec(1, BoolVecTys, ST)));
if (XLen == 64 || ST.hasStdExtD())
SelectActions.legalFor({{s64, sXLen}});
SelectActions.widenScalarToNextPow2(0)
@@ -374,6 +408,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, s32, sXLen)
.lowerForCartesianProduct({s32, sXLen, p0}, {p0});
+ getActionDefinitionsBuilder(G_VSCALE)
+ .clampScalar(0, sXLen, sXLen)
+ .customFor({sXLen});
+
getLegacyLegalizerInfo().computeTables();
}
@@ -412,7 +450,7 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
// Store the result in the destination va_list
MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
- MIRBuilder.buildStore(DstLst, Tmp, *StoreMMO);
+ MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);
MI.eraseFromParent();
return true;
@@ -495,6 +533,48 @@ bool RISCVLegalizerInfo::shouldBeInConstantPool(APInt APImm,
return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
}
+bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ const LLT XLenTy(STI.getXLenVT());
+ Register Dst = MI.getOperand(0).getReg();
+
+ // We define our scalable vector types for lmul=1 to use a 64 bit known
+ // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
+ // vscale as VLENB / 8.
+ static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
+ if (STI.getRealMinVLen() < RISCV::RVVBitsPerBlock)
+ // Support for VLEN==32 is incomplete.
+ return false;
+
+ // We assume VLENB is a multiple of 8. We manually choose the best shift
+ // here because SimplifyDemandedBits isn't always able to simplify it.
+ uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
+ if (isPowerOf2_64(Val)) {
+ uint64_t Log2 = Log2_64(Val);
+ if (Log2 < 3) {
+ auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
+ MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
+ } else if (Log2 > 3) {
+ auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
+ MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
+ } else {
+ MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
+ }
+ } else if ((Val % 8) == 0) {
+ // If the multiplier is a multiple of 8, scale it down to avoid needing
+ // to shift the VLENB value.
+ auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
+ MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
+ } else {
+ auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
+ auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
+ MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
+ }
+
+ MI.eraseFromParent();
+ return true;
+}
+
bool RISCVLegalizerInfo::legalizeCustom(
LegalizerHelper &Helper, MachineInstr &MI,
LostDebugLocObserver &LocObserver) const {
@@ -552,6 +632,8 @@ bool RISCVLegalizerInfo::legalizeCustom(
}
case TargetOpcode::G_VASTART:
return legalizeVAStart(MI, MIRBuilder);
+ case TargetOpcode::G_VSCALE:
+ return legalizeVScale(MI, MIRBuilder);
}
llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 323426034827..e2a98c8d2c73 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -42,6 +42,7 @@ private:
GISelChangeObserver &Observer) const;
bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
+ bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 58c971aee2f4..888bcc46ea1e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -13,6 +13,7 @@
#include "RISCVRegisterBankInfo.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
@@ -343,6 +344,8 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
case TargetOpcode::G_IMPLICIT_DEF: {
Register Dst = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ uint64_t DstMinSize = DstTy.getSizeInBits().getKnownMinValue();
auto Mapping = GPRValueMapping;
// FIXME: May need to do a better job determining when to use FPRB.
// For example, the look through COPY case:
@@ -350,7 +353,11 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// %1:_(s32) = COPY %0
// $f10_d = COPY %1(s32)
if (anyUseOnlyUseFP(Dst, MRI, TRI))
- Mapping = getFPValueMapping(MRI.getType(Dst).getSizeInBits());
+ Mapping = getFPValueMapping(DstMinSize);
+
+ if (DstTy.isVector())
+ Mapping = getVRBValueMapping(DstMinSize);
+
return getInstructionMapping(DefaultMappingID, /*Cost=*/1, Mapping,
NumOperands);
}
@@ -401,6 +408,17 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_SELECT: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ if (Ty.isVector()) {
+ auto &Sel = cast<GSelect>(MI);
+ LLT TestTy = MRI.getType(Sel.getCondReg());
+ assert(TestTy.isVector() && "Unexpected condition argument type");
+ OpdsMapping[0] = OpdsMapping[2] = OpdsMapping[3] =
+ getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue());
+ OpdsMapping[1] =
+ getVRBValueMapping(TestTy.getSizeInBits().getKnownMinValue());
+ break;
+ }
+
// Try to minimize the number of copies. If we have more floating point
// constrained values than not, then we'll put everything on FPR. Otherwise,
// everything has to be on GPR.
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
index 61f8e7171037..5d9a58babe60 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
@@ -236,6 +236,4 @@ void RISCVZC::printRlist(unsigned SlistEncode, raw_ostream &OS) {
OS << "}";
}
-void RISCVZC::printSpimm(int64_t Spimm, raw_ostream &OS) { OS << Spimm; }
-
} // namespace llvm
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 6d0381c30d3e..92f405b5f6ac 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -297,7 +297,8 @@ enum OperandType : unsigned {
OPERAND_RVKRNUM_0_7,
OPERAND_RVKRNUM_1_10,
OPERAND_RVKRNUM_2_14,
- OPERAND_LAST_RISCV_IMM = OPERAND_RVKRNUM_2_14,
+ OPERAND_SPIMM,
+ OPERAND_LAST_RISCV_IMM = OPERAND_SPIMM,
// Operand is either a register or uimm5, this is used by V extension pseudo
// instructions to represent a value that be passed as AVL to either vsetvli
// or vsetivli.
@@ -580,15 +581,17 @@ inline static bool getSpimm(unsigned RlistVal, unsigned &SpimmVal,
int64_t StackAdjustment, bool IsRV64, bool IsEABI) {
if (RlistVal == RLISTENCODE::INVALID_RLIST)
return false;
- unsigned stackAdj = getStackAdjBase(RlistVal, IsRV64, IsEABI);
- SpimmVal = (StackAdjustment - stackAdj) / 16;
+ unsigned StackAdjBase = getStackAdjBase(RlistVal, IsRV64, IsEABI);
+ StackAdjustment -= StackAdjBase;
+ if (StackAdjustment % 16 != 0)
+ return false;
+ SpimmVal = StackAdjustment / 16;
if (SpimmVal > 3)
return false;
return true;
}
void printRlist(unsigned SlistEncode, raw_ostream &OS);
-void printSpimm(int64_t Spimm, raw_ostream &OS);
} // namespace RISCVZC
} // namespace llvm
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
index bd899495812f..04e02e9fa0ab 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -292,24 +292,25 @@ void RISCVInstPrinter::printRegReg(const MCInst *MI, unsigned OpNo,
O << ")";
}
-void RISCVInstPrinter::printSpimm(const MCInst *MI, unsigned OpNo,
- const MCSubtargetInfo &STI, raw_ostream &O) {
+void RISCVInstPrinter::printStackAdj(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O,
+ bool Negate) {
int64_t Imm = MI->getOperand(OpNo).getImm();
- unsigned Opcode = MI->getOpcode();
bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
bool IsEABI = STI.hasFeature(RISCV::FeatureRVE);
- int64_t Spimm = 0;
+ int64_t StackAdj = 0;
auto RlistVal = MI->getOperand(0).getImm();
assert(RlistVal != 16 && "Incorrect rlist.");
auto Base = RISCVZC::getStackAdjBase(RlistVal, IsRV64, IsEABI);
- Spimm = Imm + Base;
- assert((Spimm >= Base && Spimm <= Base + 48) && "Incorrect spimm");
- if (Opcode == RISCV::CM_PUSH)
- Spimm = -Spimm;
+ StackAdj = Imm + Base;
+ assert((StackAdj >= Base && StackAdj <= Base + 48) &&
+ "Incorrect stack adjust");
+ if (Negate)
+ StackAdj = -StackAdj;
// RAII guard for ANSI color escape sequences
WithMarkup ScopedMarkup = markup(O, Markup::Immediate);
- RISCVZC::printSpimm(Spimm, O);
+ O << StackAdj;
}
void RISCVInstPrinter::printVMaskReg(const MCInst *MI, unsigned OpNo,
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h
index 4512bd5f4c4b..77cc7a67e889 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h
@@ -52,8 +52,13 @@ public:
const MCSubtargetInfo &STI, raw_ostream &O);
void printRlist(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
- void printSpimm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
- raw_ostream &O);
+ void printStackAdj(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O,
+ bool Negate = false);
+ void printNegStackAdj(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O) {
+ return printStackAdj(MI, OpNo, STI, O, /*Negate*/ true);
+ }
void printRegReg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
// Autogenerated by tblgen.
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
index 254a9a4bc0ef..b8e0f3a867f4 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -207,8 +207,6 @@ void RISCVMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
case VK_RISCV_TLS_GOT_HI:
case VK_RISCV_TLS_GD_HI:
case VK_RISCV_TLSDESC_HI:
- case VK_RISCV_TLSDESC_ADD_LO:
- case VK_RISCV_TLSDESC_LOAD_LO:
break;
}
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 4358a5b878e6..c3bae152993e 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -9,6 +9,7 @@
#include "RISCVMatInt.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
@@ -436,6 +437,43 @@ InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
return Res;
}
+void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI,
+ MCRegister DestReg, SmallVectorImpl<MCInst> &Insts) {
+ RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
+
+ MCRegister SrcReg = RISCV::X0;
+ for (RISCVMatInt::Inst &Inst : Seq) {
+ switch (Inst.getOpndKind()) {
+ case RISCVMatInt::Imm:
+ Insts.push_back(MCInstBuilder(Inst.getOpcode())
+ .addReg(DestReg)
+ .addImm(Inst.getImm()));
+ break;
+ case RISCVMatInt::RegX0:
+ Insts.push_back(MCInstBuilder(Inst.getOpcode())
+ .addReg(DestReg)
+ .addReg(SrcReg)
+ .addReg(RISCV::X0));
+ break;
+ case RISCVMatInt::RegReg:
+ Insts.push_back(MCInstBuilder(Inst.getOpcode())
+ .addReg(DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg));
+ break;
+ case RISCVMatInt::RegImm:
+ Insts.push_back(MCInstBuilder(Inst.getOpcode())
+ .addReg(DestReg)
+ .addReg(SrcReg)
+ .addImm(Inst.getImm()));
+ break;
+ }
+
+ // Only the first instruction has X0 as its source.
+ SrcReg = DestReg;
+ }
+}
+
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
unsigned &ShiftAmt, unsigned &AddOpc) {
int64_t LoVal = SignExtend64<32>(Val);
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
index 780f685463f3..e87e0f325647 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
@@ -10,6 +10,7 @@
#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_MATINT_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCRegister.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <cstdint>
@@ -48,6 +49,10 @@ using InstSeq = SmallVector<Inst, 8>;
// instruction selection.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI);
+// Helper to emit the instruction sequence computed by generateInstSeq as MCInsts.
+void generateMCInstSeq(int64_t Val, const MCSubtargetInfo &STI,
+ MCRegister DestReg, SmallVectorImpl<MCInst> &Insts);
+
// Helper to generate an instruction sequence that can materialize the given
// immediate value into a register using an additional temporary register. This
// handles cases where the constant can be generated by (ADD (SLLI X, C), X) or
diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 22736edc5f07..9fb84efd5b6f 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -24,8 +24,8 @@ include "RISCVSystemOperands.td"
// Registers, calling conventions, instruction descriptions.
//===----------------------------------------------------------------------===//
-include "RISCVSchedule.td"
include "RISCVRegisterInfo.td"
+include "RISCVSchedule.td"
include "RISCVCallingConv.td"
include "RISCVInstrInfo.td"
include "GISel/RISCVRegisterBanks.td"
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index 11b716f20f37..ad06f4774377 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -26,6 +26,19 @@ def CSR_ILP32D_LP64D
: CalleeSavedRegs<(add CSR_ILP32_LP64,
F8_D, F9_D, (sequence "F%u_D", 18, 27))>;
+defvar CSR_V = (add (sequence "V%u", 1, 7), (sequence "V%u", 24, 31),
+ V2M2, V4M2, V6M2, V24M2, V26M2, V28M2, V30M2,
+ V4M4, V24M4, V28M4, V24M8);
+
+def CSR_ILP32_LP64_V
+ : CalleeSavedRegs<(add CSR_ILP32_LP64, CSR_V)>;
+
+def CSR_ILP32F_LP64F_V
+ : CalleeSavedRegs<(add CSR_ILP32F_LP64F, CSR_V)>;
+
+def CSR_ILP32D_LP64D_V
+ : CalleeSavedRegs<(add CSR_ILP32D_LP64D, CSR_V)>;
+
// Needed for implementation of RISCVRegisterInfo::getNoPreservedMask()
def CSR_NoRegs : CalleeSavedRegs<(add)>;
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 0a314fdd41cb..173995f05b51 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -312,10 +312,19 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_gpr_even);
Register Hi =
TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_gpr_odd);
+
+ assert(MBBI->hasOneMemOperand() && "Expected mem operand");
+ MachineMemOperand *OldMMO = MBBI->memoperands().front();
+ MachineFunction *MF = MBB.getParent();
+ MachineMemOperand *MMOLo = MF->getMachineMemOperand(OldMMO, 0, 4);
+ MachineMemOperand *MMOHi = MF->getMachineMemOperand(OldMMO, 4, 4);
+
BuildMI(MBB, MBBI, DL, TII->get(RISCV::SW))
.addReg(Lo, getKillRegState(MBBI->getOperand(0).isKill()))
.addReg(MBBI->getOperand(1).getReg())
- .add(MBBI->getOperand(2));
+ .add(MBBI->getOperand(2))
+ .setMemRefs(MMOLo);
+
if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
// FIXME: Zdinx RV32 can not work on unaligned memory.
assert(!STI->hasFastUnalignedAccess());
@@ -325,13 +334,15 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, DL, TII->get(RISCV::SW))
.addReg(Hi, getKillRegState(MBBI->getOperand(0).isKill()))
.add(MBBI->getOperand(1))
- .add(MBBI->getOperand(2));
+ .add(MBBI->getOperand(2))
+ .setMemRefs(MMOHi);
} else {
assert(isInt<12>(MBBI->getOperand(2).getImm() + 4));
BuildMI(MBB, MBBI, DL, TII->get(RISCV::SW))
.addReg(Hi, getKillRegState(MBBI->getOperand(0).isKill()))
.add(MBBI->getOperand(1))
- .addImm(MBBI->getOperand(2).getImm() + 4);
+ .addImm(MBBI->getOperand(2).getImm() + 4)
+ .setMemRefs(MMOHi);
}
MBBI->eraseFromParent();
return true;
@@ -349,6 +360,12 @@ bool RISCVExpandPseudo::expandRV32ZdinxLoad(MachineBasicBlock &MBB,
Register Hi =
TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_gpr_odd);
+ assert(MBBI->hasOneMemOperand() && "Expected mem operand");
+ MachineMemOperand *OldMMO = MBBI->memoperands().front();
+ MachineFunction *MF = MBB.getParent();
+ MachineMemOperand *MMOLo = MF->getMachineMemOperand(OldMMO, 0, 4);
+ MachineMemOperand *MMOHi = MF->getMachineMemOperand(OldMMO, 4, 4);
+
// If the register of operand 1 is equal to the Lo register, then swap the
// order of loading the Lo and Hi statements.
bool IsOp1EqualToLo = Lo == MBBI->getOperand(1).getReg();
@@ -356,7 +373,8 @@ bool RISCVExpandPseudo::expandRV32ZdinxLoad(MachineBasicBlock &MBB,
if (!IsOp1EqualToLo) {
BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Lo)
.addReg(MBBI->getOperand(1).getReg())
- .add(MBBI->getOperand(2));
+ .add(MBBI->getOperand(2))
+ .setMemRefs(MMOLo);
}
if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
@@ -365,20 +383,23 @@ bool RISCVExpandPseudo::expandRV32ZdinxLoad(MachineBasicBlock &MBB,
MBBI->getOperand(2).setOffset(Offset + 4);
BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Hi)
.addReg(MBBI->getOperand(1).getReg())
- .add(MBBI->getOperand(2));
+ .add(MBBI->getOperand(2))
+ .setMemRefs(MMOHi);
MBBI->getOperand(2).setOffset(Offset);
} else {
assert(isInt<12>(MBBI->getOperand(2).getImm() + 4));
BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Hi)
.addReg(MBBI->getOperand(1).getReg())
- .addImm(MBBI->getOperand(2).getImm() + 4);
+ .addImm(MBBI->getOperand(2).getImm() + 4)
+ .setMemRefs(MMOHi);
}
// Order: Hi, Lo
if (IsOp1EqualToLo) {
BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Lo)
.addReg(MBBI->getOperand(1).getReg())
- .add(MBBI->getOperand(2));
+ .add(MBBI->getOperand(2))
+ .setMemRefs(MMOLo);
}
MBBI->eraseFromParent();
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index f3e641e25018..6ef2289bb4be 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1226,6 +1226,10 @@ def TuneNoSinkSplatOperands
"false", "Disable sink splat operands to enable .vx, .vf,"
".wx, and .wf instructions">;
+def TuneNoStripWSuffix
+ : SubtargetFeature<"no-strip-w-suffix", "EnableStripWSuffix", "false",
+ "Disable strip W suffix">;
+
def TuneConditionalCompressedMoveFusion
: SubtargetFeature<"conditional-cmv-fusion", "HasConditionalCompressedMoveFusion",
"true", "Enable branch+c.mv fusion">;
diff --git a/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp b/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
index fddbaa97d063..2089f5dda6fe 100644
--- a/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFoldMasks.cpp
@@ -47,10 +47,13 @@ public:
StringRef getPassName() const override { return "RISC-V Fold Masks"; }
private:
- bool convertToUnmasked(MachineInstr &MI, MachineInstr *MaskDef) const;
- bool convertVMergeToVMv(MachineInstr &MI, MachineInstr *MaskDef) const;
+ bool convertToUnmasked(MachineInstr &MI) const;
+ bool convertVMergeToVMv(MachineInstr &MI) const;
- bool isAllOnesMask(MachineInstr *MaskDef) const;
+ bool isAllOnesMask(const MachineInstr *MaskDef) const;
+
+ /// Maps uses of V0 to the corresponding def of V0.
+ DenseMap<const MachineInstr *, const MachineInstr *> V0Defs;
};
} // namespace
@@ -59,10 +62,9 @@ char RISCVFoldMasks::ID = 0;
INITIALIZE_PASS(RISCVFoldMasks, DEBUG_TYPE, "RISC-V Fold Masks", false, false)
-bool RISCVFoldMasks::isAllOnesMask(MachineInstr *MaskDef) const {
- if (!MaskDef)
- return false;
- assert(MaskDef->isCopy() && MaskDef->getOperand(0).getReg() == RISCV::V0);
+bool RISCVFoldMasks::isAllOnesMask(const MachineInstr *MaskDef) const {
+ assert(MaskDef && MaskDef->isCopy() &&
+ MaskDef->getOperand(0).getReg() == RISCV::V0);
Register SrcReg = TRI->lookThruCopyLike(MaskDef->getOperand(1).getReg(), MRI);
if (!SrcReg.isVirtual())
return false;
@@ -89,8 +91,7 @@ bool RISCVFoldMasks::isAllOnesMask(MachineInstr *MaskDef) const {
// Transform (VMERGE_VVM_<LMUL> false, false, true, allones, vl, sew) to
// (VMV_V_V_<LMUL> false, true, vl, sew). It may decrease uses of VMSET.
-bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI,
- MachineInstr *V0Def) const {
+bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI) const {
#define CASE_VMERGE_TO_VMV(lmul) \
case RISCV::PseudoVMERGE_VVM_##lmul: \
NewOpc = RISCV::PseudoVMV_V_V_##lmul; \
@@ -116,7 +117,7 @@ bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI,
return false;
assert(MI.getOperand(4).isReg() && MI.getOperand(4).getReg() == RISCV::V0);
- if (!isAllOnesMask(V0Def))
+ if (!isAllOnesMask(V0Defs.lookup(&MI)))
return false;
MI.setDesc(TII->get(NewOpc));
@@ -133,14 +134,13 @@ bool RISCVFoldMasks::convertVMergeToVMv(MachineInstr &MI,
return true;
}
-bool RISCVFoldMasks::convertToUnmasked(MachineInstr &MI,
- MachineInstr *MaskDef) const {
+bool RISCVFoldMasks::convertToUnmasked(MachineInstr &MI) const {
const RISCV::RISCVMaskedPseudoInfo *I =
RISCV::getMaskedPseudoInfo(MI.getOpcode());
if (!I)
return false;
- if (!isAllOnesMask(MaskDef))
+ if (!isAllOnesMask(V0Defs.lookup(&MI)))
return false;
// There are two classes of pseudos in the table - compares and
@@ -198,20 +198,26 @@ bool RISCVFoldMasks::runOnMachineFunction(MachineFunction &MF) {
// $v0:vr = COPY %mask:vr
// %x:vr = Pseudo_MASK %a:vr, %b:br, $v0:vr
//
- // Because $v0 isn't in SSA, keep track of it so we can check the mask operand
- // on each pseudo.
- MachineInstr *CurrentV0Def;
- for (MachineBasicBlock &MBB : MF) {
- CurrentV0Def = nullptr;
- for (MachineInstr &MI : MBB) {
- Changed |= convertToUnmasked(MI, CurrentV0Def);
- Changed |= convertVMergeToVMv(MI, CurrentV0Def);
+ // Because $v0 isn't in SSA, keep track of its definition at each use so we
+ // can check mask operands.
+ for (const MachineBasicBlock &MBB : MF) {
+ const MachineInstr *CurrentV0Def = nullptr;
+ for (const MachineInstr &MI : MBB) {
+ if (MI.readsRegister(RISCV::V0, TRI))
+ V0Defs[&MI] = CurrentV0Def;
if (MI.definesRegister(RISCV::V0, TRI))
CurrentV0Def = &MI;
}
}
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ Changed |= convertToUnmasked(MI);
+ Changed |= convertVMergeToVMv(MI);
+ }
+ }
+
return Changed;
}
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 8bac41372b5a..39075c81b292 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -388,6 +388,21 @@ getUnmanagedCSI(const MachineFunction &MF,
return NonLibcallCSI;
}
+static SmallVector<CalleeSavedInfo, 8>
+getRVVCalleeSavedInfo(const MachineFunction &MF,
+ const std::vector<CalleeSavedInfo> &CSI) {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ SmallVector<CalleeSavedInfo, 8> RVVCSI;
+
+ for (auto &CS : CSI) {
+ int FI = CS.getFrameIdx();
+ if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector)
+ RVVCSI.push_back(CS);
+ }
+
+ return RVVCSI;
+}
+
void RISCVFrameLowering::adjustStackForRVV(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -554,8 +569,10 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
if (RVFI->isPushable(MF) && FirstFrameSetup != MBB.end() &&
FirstFrameSetup->getOpcode() == RISCV::CM_PUSH) {
// Use available stack adjustment in push instruction to allocate additional
- // stack space.
- uint64_t Spimm = std::min(StackSize, (uint64_t)48);
+ // stack space. Align the stack size down to a multiple of 16. This is
+ // needed for RVE.
+ // FIXME: Can we increase the stack size to a multiple of 16 instead?
+ uint64_t Spimm = std::min(alignDown(StackSize, 16), (uint64_t)48);
FirstFrameSetup->getOperand(1).setImm(Spimm);
StackSize -= Spimm;
}
@@ -588,6 +605,10 @@ void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
// directives.
for (const auto &Entry : CSI) {
int FrameIdx = Entry.getFrameIdx();
+ if (FrameIdx >= 0 &&
+ MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+ continue;
+
int64_t Offset = MFI.getObjectOffset(FrameIdx);
Register Reg = Entry.getReg();
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
@@ -724,7 +745,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo());
- // Skip to before the restores of callee-saved registers
+ // Skip to before the restores of scalar callee-saved registers
// FIXME: assumes exactly one instruction is used to restore each
// callee-saved register.
auto LastFrameDestroy = MBBI;
@@ -776,8 +797,10 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
if (RVFI->isPushable(MF) && MBBI != MBB.end() &&
MBBI->getOpcode() == RISCV::CM_POP) {
// Use available stack adjustment in pop instruction to deallocate stack
- // space.
- uint64_t Spimm = std::min(StackSize, (uint64_t)48);
+ // space. Align the stack size down to a multiple of 16. This is needed for
+ // RVE.
+ // FIXME: Can we increase the stack size to a multiple of 16 instead?
+ uint64_t Spimm = std::min(alignDown(StackSize, 16), (uint64_t)48);
MBBI->getOperand(1).setImm(Spimm);
StackSize -= Spimm;
}
@@ -1025,15 +1048,24 @@ RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
MachineFrameInfo &MFI = MF.getFrameInfo();
// Create a buffer of RVV objects to allocate.
SmallVector<int, 8> ObjectsToAllocate;
- for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
- unsigned StackID = MFI.getStackID(I);
- if (StackID != TargetStackID::ScalableVector)
- continue;
- if (MFI.isDeadObjectIndex(I))
- continue;
+ auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
+ for (int I = FIBegin, E = FIEnd; I != E; ++I) {
+ unsigned StackID = MFI.getStackID(I);
+ if (StackID != TargetStackID::ScalableVector)
+ continue;
+ if (MFI.isDeadObjectIndex(I))
+ continue;
- ObjectsToAllocate.push_back(I);
- }
+ ObjectsToAllocate.push_back(I);
+ }
+ };
+  // First push the RVV callee-saved objects, then the other RVV stack objects.
+ std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
+ const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
+ if (!RVVCSI.empty())
+ pushRVVObjects(RVVCSI[0].getFrameIdx(),
+ RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
+ pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());
// The minimum alignment is 16 bytes.
Align RVVStackAlign(16);
@@ -1483,13 +1515,19 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
// Manually spill values not spilled by libcall & Push/Pop.
const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
- for (auto &CS : UnmanagedCSI) {
- // Insert the spill to the stack frame.
- Register Reg = CS.getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg), CS.getFrameIdx(),
- RC, TRI, Register());
- }
+ const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
+
+ auto storeRegToStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
+ for (auto &CS : CSInfo) {
+ // Insert the spill to the stack frame.
+ Register Reg = CS.getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
+ CS.getFrameIdx(), RC, TRI, Register());
+ }
+ };
+ storeRegToStackSlot(UnmanagedCSI);
+ storeRegToStackSlot(RVVCSI);
return true;
}
@@ -1507,19 +1545,26 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
DL = MI->getDebugLoc();
// Manually restore values not restored by libcall & Push/Pop.
- // Keep the same order as in the prologue. There is no need to reverse the
- // order in the epilogue. In addition, the return address will be restored
- // first in the epilogue. It increases the opportunity to avoid the
- // load-to-use data hazard between loading RA and return by RA.
- // loadRegFromStackSlot can insert multiple instructions.
+ // Reverse the restore order in epilog. In addition, the return
+ // address will be restored first in the epilogue. It increases
+ // the opportunity to avoid the load-to-use data hazard between
+ // loading RA and return by RA. loadRegFromStackSlot can insert
+ // multiple instructions.
const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
- for (auto &CS : UnmanagedCSI) {
- Register Reg = CS.getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
- Register());
- assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
- }
+ const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
+
+ auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
+ for (auto &CS : CSInfo) {
+ Register Reg = CS.getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
+ Register());
+ assert(MI != MBB.begin() &&
+ "loadRegFromStackSlot didn't insert any code!");
+ }
+ };
+ loadRegFromStackSlot(RVVCSI);
+ loadRegFromStackSlot(UnmanagedCSI);
RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
if (RVFI->isPushable(*MF)) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3aa28215efc2..f693cbd3bea5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -559,11 +560,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (Subtarget.hasStdExtDOrZdinx()) {
setOperationAction(FPLegalNodeTypes, MVT::f64, Legal);
+ if (!Subtarget.is64Bit())
+ setOperationAction(ISD::BITCAST, MVT::i64, Custom);
+
if (Subtarget.hasStdExtZfa()) {
setOperationAction(FPRndMode, MVT::f64, Legal);
setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
- if (!Subtarget.is64Bit())
- setOperationAction(ISD::BITCAST, MVT::i64, Custom);
} else {
if (Subtarget.is64Bit())
setOperationAction(FPRndMode, MVT::f64, Custom);
@@ -818,6 +820,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
Legal);
+ setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+
// Custom-lower extensions and truncations from/to mask types.
setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
VT, Custom);
@@ -1202,6 +1206,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(
{ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
+ setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+
// vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
@@ -1422,6 +1428,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR,
ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL,
+ ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
ISD::INSERT_VECTOR_ELT, ISD::ABS});
if (Subtarget.hasVendorXTHeadMemPair())
setTargetDAGCombine({ISD::LOAD, ISD::STORE});
@@ -6071,8 +6078,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
return FPConv;
}
- if (VT == MVT::f64 && Op0VT == MVT::i64 && XLenVT == MVT::i32 &&
- Subtarget.hasStdExtZfa()) {
+ if (VT == MVT::f64 && Op0VT == MVT::i64 && XLenVT == MVT::i32) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitScalar(Op0, DL, MVT::i32, MVT::i32);
SDValue RetReg =
@@ -6611,6 +6617,30 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
// better than going through the stack, as the default expansion does.
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
+ MVT ContainerVT = VT;
+ if (VT.isFixedLengthVector())
+ ContainerVT = ::getContainerForFixedLengthVector(DAG, VT, Subtarget);
+
+ // Recursively split concat_vectors with more than 2 operands:
+ //
+ // concat_vector op1, op2, op3, op4
+ // ->
+ // concat_vector (concat_vector op1, op2), (concat_vector op3, op4)
+ //
+ // This reduces the length of the chain of vslideups and allows us to
+ // perform the vslideups at a smaller LMUL, limited to MF2.
+ if (Op.getNumOperands() > 2 &&
+ ContainerVT.bitsGE(getLMUL1VT(ContainerVT))) {
+ MVT HalfVT = VT.getHalfNumVectorElementsVT();
+ assert(isPowerOf2_32(Op.getNumOperands()));
+ size_t HalfNumOps = Op.getNumOperands() / 2;
+ SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, DL, HalfVT,
+ Op->ops().take_front(HalfNumOps));
+ SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, DL, HalfVT,
+ Op->ops().drop_front(HalfNumOps));
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
+ }
+
unsigned NumOpElts =
Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
SDValue Vec = DAG.getUNDEF(VT);
@@ -6761,6 +6791,22 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
if (!Op.getValueType().isVector())
return lowerSADDSAT_SSUBSAT(Op, DAG);
return lowerToScalableOp(Op, DAG);
+ case ISD::ABDS:
+ case ISD::ABDU: {
+ SDLoc dl(Op);
+ EVT VT = Op->getValueType(0);
+ SDValue LHS = DAG.getFreeze(Op->getOperand(0));
+ SDValue RHS = DAG.getFreeze(Op->getOperand(1));
+ bool IsSigned = Op->getOpcode() == ISD::ABDS;
+
+ // abds(lhs, rhs) -> sub(smax(lhs,rhs), smin(lhs,rhs))
+ // abdu(lhs, rhs) -> sub(umax(lhs,rhs), umin(lhs,rhs))
+ unsigned MaxOpc = IsSigned ? ISD::SMAX : ISD::UMAX;
+ unsigned MinOpc = IsSigned ? ISD::SMIN : ISD::UMIN;
+ SDValue Max = DAG.getNode(MaxOpc, dl, VT, LHS, RHS);
+ SDValue Min = DAG.getNode(MinOpc, dl, VT, LHS, RHS);
+ return DAG.getNode(ISD::SUB, dl, VT, Max, Min);
+ }
case ISD::ABS:
case ISD::VP_ABS:
return lowerABS(Op, DAG);
@@ -10340,9 +10386,15 @@ RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
+ MachineMemOperand *MMO = Load->getMemOperand();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MMO = MF.getMachineMemOperand(
+ MMO, MMO->getPointerInfo(),
+ MMO->getMemoryType().isValid()
+ ? LLT::scalable_vector(1, MMO->getMemoryType().getSizeInBits())
+ : MMO->getMemoryType());
SDValue NewLoad =
- DAG.getLoad(ContainerVT, DL, Load->getChain(), Load->getBasePtr(),
- Load->getMemOperand());
+ DAG.getLoad(ContainerVT, DL, Load->getChain(), Load->getBasePtr(), MMO);
SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
}
@@ -10400,9 +10452,17 @@ RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
const auto [MinVLMAX, MaxVLMAX] =
RISCVTargetLowering::computeVLMAXBounds(ContainerVT, Subtarget);
if (MinVLMAX == MaxVLMAX && MinVLMAX == VT.getVectorNumElements() &&
- getLMUL1VT(ContainerVT).bitsLE(ContainerVT))
+ getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
+ MachineMemOperand *MMO = Store->getMemOperand();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MMO = MF.getMachineMemOperand(
+ MMO, MMO->getPointerInfo(),
+ MMO->getMemoryType().isValid()
+ ? LLT::scalable_vector(1, MMO->getMemoryType().getSizeInBits())
+ : MMO->getMemoryType());
return DAG.getStore(Store->getChain(), DL, NewValue, Store->getBasePtr(),
- Store->getMemOperand());
+ MMO);
+ }
SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
Subtarget);
@@ -12157,8 +12217,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue FPConv =
DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
- } else if (VT == MVT::i64 && Op0VT == MVT::f64 && XLenVT == MVT::i32 &&
- Subtarget.hasStdExtZfa()) {
+ } else if (VT == MVT::i64 && Op0VT == MVT::f64 && XLenVT == MVT::i32) {
SDValue NewReg = DAG.getNode(RISCVISD::SplitF64, DL,
DAG.getVTList(MVT::i32, MVT::i32), Op0);
SDValue RetReg = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64,
@@ -12876,6 +12935,58 @@ static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
}
+// add (zext, zext) -> zext (add (zext, zext))
+// sub (zext, zext) -> sext (sub (zext, zext))
+// mul (zext, zext) -> zext (mul (zext, zext))
+// sdiv (zext, zext) -> zext (sdiv (zext, zext))
+// udiv (zext, zext) -> zext (udiv (zext, zext))
+// srem (zext, zext) -> zext (srem (zext, zext))
+// urem (zext, zext) -> zext (urem (zext, zext))
+//
+// where the sum of the extend widths match, and the range of the bin op
+// fits inside the width of the narrower bin op. (For profitability on rvv, we
+// use a power of two for both inner and outer extend.)
+static SDValue combineBinOpOfZExt(SDNode *N, SelectionDAG &DAG) {
+
+ EVT VT = N->getValueType(0);
+ if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return SDValue();
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ if (N0.getOpcode() != ISD::ZERO_EXTEND || N1.getOpcode() != ISD::ZERO_EXTEND)
+ return SDValue();
+ if (!N0.hasOneUse() || !N1.hasOneUse())
+ return SDValue();
+
+ SDValue Src0 = N0.getOperand(0);
+ SDValue Src1 = N1.getOperand(0);
+ EVT SrcVT = Src0.getValueType();
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT) ||
+ SrcVT != Src1.getValueType() || SrcVT.getScalarSizeInBits() < 8 ||
+ SrcVT.getScalarSizeInBits() >= VT.getScalarSizeInBits() / 2)
+ return SDValue();
+
+ LLVMContext &C = *DAG.getContext();
+ EVT ElemVT = VT.getVectorElementType().getHalfSizedIntegerVT(C);
+ EVT NarrowVT = EVT::getVectorVT(C, ElemVT, VT.getVectorElementCount());
+
+ Src0 = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Src0), NarrowVT, Src0);
+ Src1 = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Src1), NarrowVT, Src1);
+
+ // Src0 and Src1 are zero extended, so they're always positive if signed.
+ //
+ // sub can produce a negative from two positive operands, so it needs sign
+ // extended. Other nodes produce a positive from two positive operands, so
+ // zero extend instead.
+ unsigned OuterExtend =
+ N->getOpcode() == ISD::SUB ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+
+ return DAG.getNode(
+ OuterExtend, SDLoc(N), VT,
+ DAG.getNode(N->getOpcode(), SDLoc(N), NarrowVT, Src0, Src1));
+}
+
// Try to turn (add (xor bool, 1) -1) into (neg bool).
static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
@@ -12913,6 +13024,8 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
return V;
if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
return V;
+ if (SDValue V = combineBinOpOfZExt(N, DAG))
+ return V;
// fold (add (select lhs, rhs, cc, 0, y), x) ->
// (select lhs, rhs, cc, x, (add x, y))
@@ -12980,28 +13093,8 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
}
}
- // sub (zext, zext) -> sext (sub (zext, zext))
- // where the sum of the extend widths match, and the inner zexts
- // add at least one bit. (For profitability on rvv, we use a
- // power of two for both inner and outer extend.)
- if (VT.isVector() && Subtarget.getTargetLowering()->isTypeLegal(VT) &&
- N0.getOpcode() == N1.getOpcode() && N0.getOpcode() == ISD::ZERO_EXTEND &&
- N0.hasOneUse() && N1.hasOneUse()) {
- SDValue Src0 = N0.getOperand(0);
- SDValue Src1 = N1.getOperand(0);
- EVT SrcVT = Src0.getValueType();
- if (Subtarget.getTargetLowering()->isTypeLegal(SrcVT) &&
- SrcVT == Src1.getValueType() && SrcVT.getScalarSizeInBits() >= 8 &&
- SrcVT.getScalarSizeInBits() < VT.getScalarSizeInBits() / 2) {
- LLVMContext &C = *DAG.getContext();
- EVT ElemVT = VT.getVectorElementType().getHalfSizedIntegerVT(C);
- EVT NarrowVT = EVT::getVectorVT(C, ElemVT, VT.getVectorElementCount());
- Src0 = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Src0), NarrowVT, Src0);
- Src1 = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Src1), NarrowVT, Src1);
- return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT,
- DAG.getNode(ISD::SUB, SDLoc(N), NarrowVT, Src0, Src1));
- }
- }
+ if (SDValue V = combineBinOpOfZExt(N, DAG))
+ return V;
// fold (sub x, (select lhs, rhs, cc, 0, y)) ->
// (select lhs, rhs, cc, x, (sub x, y))
@@ -13292,6 +13385,9 @@ static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
}
+ if (SDValue V = combineBinOpOfZExt(N, DAG))
+ return V;
+
return SDValue();
}
@@ -13435,7 +13531,7 @@ struct CombineResult;
enum ExtKind : uint8_t { ZExt = 1 << 0, SExt = 1 << 1, FPExt = 1 << 2 };
/// Helper class for folding sign/zero extensions.
/// In particular, this class is used for the following combines:
-/// add | add_vl -> vwadd(u) | vwadd(u)_w
+/// add | add_vl | or disjoint -> vwadd(u) | vwadd(u)_w
/// sub | sub_vl -> vwsub(u) | vwsub(u)_w
/// mul | mul_vl -> vwmul(u) | vwmul_su
/// fadd -> vfwadd | vfwadd_w
@@ -13583,6 +13679,7 @@ struct NodeExtensionHelper {
case RISCVISD::ADD_VL:
case RISCVISD::VWADD_W_VL:
case RISCVISD::VWADDU_W_VL:
+ case ISD::OR:
return RISCVISD::VWADD_VL;
case ISD::SUB:
case RISCVISD::SUB_VL:
@@ -13605,6 +13702,7 @@ struct NodeExtensionHelper {
case RISCVISD::ADD_VL:
case RISCVISD::VWADD_W_VL:
case RISCVISD::VWADDU_W_VL:
+ case ISD::OR:
return RISCVISD::VWADDU_VL;
case ISD::SUB:
case RISCVISD::SUB_VL:
@@ -13650,6 +13748,7 @@ struct NodeExtensionHelper {
switch (Opcode) {
case ISD::ADD:
case RISCVISD::ADD_VL:
+ case ISD::OR:
return SupportsExt == ExtKind::SExt ? RISCVISD::VWADD_W_VL
: RISCVISD::VWADDU_W_VL;
case ISD::SUB:
@@ -13692,12 +13791,8 @@ struct NodeExtensionHelper {
SDValue NarrowElt = OrigOperand.getOperand(0);
MVT NarrowVT = NarrowElt.getSimpleValueType();
-
- unsigned ScalarBits = VT.getScalarSizeInBits();
- unsigned NarrowScalarBits = NarrowVT.getScalarSizeInBits();
-
- // Ensure the extension's semantic is equivalent to rvv vzext or vsext.
- if (ScalarBits != NarrowScalarBits * 2)
+ // i1 types are legal but we can't select V{S,Z}EXT_VLs with them.
+ if (NarrowVT.getVectorElementType() == MVT::i1)
break;
SupportsZExt = Opc == ISD::ZERO_EXTEND;
@@ -13774,6 +13869,10 @@ struct NodeExtensionHelper {
case ISD::MUL: {
return Root->getValueType(0).isScalableVector();
}
+ case ISD::OR: {
+ return Root->getValueType(0).isScalableVector() &&
+ Root->getFlags().hasDisjoint();
+ }
// Vector Widening Integer Add/Sub/Mul Instructions
case RISCVISD::ADD_VL:
case RISCVISD::MUL_VL:
@@ -13854,7 +13953,8 @@ struct NodeExtensionHelper {
switch (Root->getOpcode()) {
case ISD::ADD:
case ISD::SUB:
- case ISD::MUL: {
+ case ISD::MUL:
+ case ISD::OR: {
SDLoc DL(Root);
MVT VT = Root->getSimpleValueType(0);
return getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
@@ -13877,6 +13977,7 @@ struct NodeExtensionHelper {
switch (N->getOpcode()) {
case ISD::ADD:
case ISD::MUL:
+ case ISD::OR:
case RISCVISD::ADD_VL:
case RISCVISD::MUL_VL:
case RISCVISD::VWADD_W_VL:
@@ -13943,6 +14044,7 @@ struct CombineResult {
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
+ case ISD::OR:
Merge = DAG.getUNDEF(Root->getValueType(0));
break;
}
@@ -14093,6 +14195,7 @@ NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {
switch (Root->getOpcode()) {
case ISD::ADD:
case ISD::SUB:
+ case ISD::OR:
case RISCVISD::ADD_VL:
case RISCVISD::SUB_VL:
case RISCVISD::FADD_VL:
@@ -14136,9 +14239,9 @@ NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {
/// Combine a binary operation to its equivalent VW or VW_W form.
/// The supported combines are:
-/// add_vl -> vwadd(u) | vwadd(u)_w
-/// sub_vl -> vwsub(u) | vwsub(u)_w
-/// mul_vl -> vwmul(u) | vwmul_su
+/// add | add_vl | or disjoint -> vwadd(u) | vwadd(u)_w
+/// sub | sub_vl -> vwsub(u) | vwsub(u)_w
+/// mul | mul_vl -> vwmul(u) | vwmul_su
/// fadd_vl -> vfwadd | vfwadd_w
/// fsub_vl -> vfwsub | vfwsub_w
/// fmul_vl -> vfwmul
@@ -15798,14 +15901,24 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
case ISD::AND:
return performANDCombine(N, DCI, Subtarget);
- case ISD::OR:
+ case ISD::OR: {
+ if (SDValue V = combineBinOp_VLToVWBinOp_VL(N, DCI, Subtarget))
+ return V;
return performORCombine(N, DCI, Subtarget);
+ }
case ISD::XOR:
return performXORCombine(N, DAG, Subtarget);
case ISD::MUL:
if (SDValue V = combineBinOp_VLToVWBinOp_VL(N, DCI, Subtarget))
return V;
return performMULCombine(N, DAG);
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::SREM:
+ case ISD::UREM:
+ if (SDValue V = combineBinOpOfZExt(N, DAG))
+ return V;
+ break;
case ISD::FADD:
case ISD::UMAX:
case ISD::UMIN:
@@ -17029,6 +17142,23 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
return 1;
}
+bool RISCVTargetLowering::canCreateUndefOrPoisonForTargetNode(
+ SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
+ bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
+
+ // TODO: Add more target nodes.
+ switch (Op.getOpcode()) {
+ case RISCVISD::SELECT_CC:
+ // Integer select_cc cannot create poison.
+ // TODO: What are the FP poison semantics?
+ // TODO: This instruction blocks poison from the unselected operand, can
+ // we do anything with that?
+ return !Op.getValueType().isInteger();
+ }
+ return TargetLowering::canCreateUndefOrPoisonForTargetNode(
+ Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
+}
+
const Constant *
RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {
assert(Ld && "Unexpected null LoadSDNode");
@@ -17928,33 +18058,12 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
return false;
}
-static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
- std::optional<unsigned> FirstMaskArgument,
- CCState &State, const RISCVTargetLowering &TLI) {
- const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
- if (RC == &RISCV::VRRegClass) {
- // Assign the first mask argument to V0.
- // This is an interim calling convention and it may be changed in the
- // future.
- if (FirstMaskArgument && ValNo == *FirstMaskArgument)
- return State.AllocateReg(RISCV::V0);
- return State.AllocateReg(ArgVRs);
- }
- if (RC == &RISCV::VRM2RegClass)
- return State.AllocateReg(ArgVRM2s);
- if (RC == &RISCV::VRM4RegClass)
- return State.AllocateReg(ArgVRM4s);
- if (RC == &RISCV::VRM8RegClass)
- return State.AllocateReg(ArgVRM8s);
- llvm_unreachable("Unhandled register class for ValueType");
-}
-
// Implements the RISC-V calling convention. Returns true upon failure.
bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument) {
+ RVVArgDispatcher &RVVDispatcher) {
unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
assert(XLen == 32 || XLen == 64);
MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
@@ -18123,7 +18232,7 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
else if (ValVT == MVT::f64 && !UseGPRForF64)
Reg = State.AllocateReg(ArgFPR64s);
else if (ValVT.isVector()) {
- Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
+ Reg = RVVDispatcher.getNextPhysReg();
if (!Reg) {
// For return values, the vector must be passed fully via registers or
// via the stack.
@@ -18209,9 +18318,13 @@ void RISCVTargetLowering::analyzeInputArgs(
unsigned NumArgs = Ins.size();
FunctionType *FType = MF.getFunction().getFunctionType();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions())
- FirstMaskArgument = preAssignMask(Ins);
+ SmallVector<Type *, 4> TypeList;
+ if (IsRet)
+ TypeList.push_back(MF.getFunction().getReturnType());
+ else
+ for (const Argument &Arg : MF.getFunction().args())
+ TypeList.push_back(Arg.getType());
+ RVVArgDispatcher Dispatcher{&MF, this, TypeList};
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Ins[i].VT;
@@ -18226,7 +18339,7 @@ void RISCVTargetLowering::analyzeInputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
- FirstMaskArgument)) {
+ Dispatcher)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
<< ArgVT << '\n');
llvm_unreachable(nullptr);
@@ -18240,9 +18353,13 @@ void RISCVTargetLowering::analyzeOutputArgs(
CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
unsigned NumArgs = Outs.size();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions())
- FirstMaskArgument = preAssignMask(Outs);
+ SmallVector<Type *, 4> TypeList;
+ if (IsRet)
+ TypeList.push_back(MF.getFunction().getReturnType());
+ else if (CLI)
+ for (const TargetLowering::ArgListEntry &Arg : CLI->getArgs())
+ TypeList.push_back(Arg.Ty);
+ RVVArgDispatcher Dispatcher{&MF, this, TypeList};
for (unsigned i = 0; i != NumArgs; i++) {
MVT ArgVT = Outs[i].VT;
@@ -18252,7 +18369,7 @@ void RISCVTargetLowering::analyzeOutputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
- FirstMaskArgument)) {
+ Dispatcher)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
<< ArgVT << "\n");
llvm_unreachable(nullptr);
@@ -18433,7 +18550,7 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
ISD::ArgFlagsTy ArgFlags, CCState &State,
bool IsFixed, bool IsRet, Type *OrigTy,
const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument) {
+ RVVArgDispatcher &RVVDispatcher) {
if (LocVT == MVT::i32 || LocVT == MVT::i64) {
if (unsigned Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -18511,13 +18628,14 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
}
if (LocVT.isVector()) {
- if (unsigned Reg =
- allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
+ MCPhysReg AllocatedVReg = RVVDispatcher.getNextPhysReg();
+ if (AllocatedVReg) {
// Fixed-length vectors are located in the corresponding scalable-vector
// container types.
if (ValVT.isFixedLengthVector())
LocVT = TLI.getContainerForFixedLengthVector(LocVT);
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ State.addLoc(
+ CCValAssign::getReg(ValNo, ValVT, AllocatedVReg, LocVT, LocInfo));
} else {
// Try and pass the address via a "fast" GPR.
if (unsigned GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
@@ -18619,6 +18737,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
case CallingConv::Fast:
case CallingConv::SPIR_KERNEL:
case CallingConv::GRAAL:
+ case CallingConv::RISCV_VectorCall:
break;
case CallingConv::GHC:
if (Subtarget.isRVE())
@@ -19144,17 +19263,15 @@ bool RISCVTargetLowering::CanLowerReturn(
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions())
- FirstMaskArgument = preAssignMask(Outs);
+ RVVArgDispatcher Dispatcher{&MF, this, MF.getFunction().getReturnType()};
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
- *this, FirstMaskArgument))
+ ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
+ nullptr, *this, Dispatcher))
return false;
}
return true;
@@ -20951,6 +21068,121 @@ unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
return Subtarget.getMinimumJumpTableEntries();
}
+void RVVArgDispatcher::constructArgInfos(ArrayRef<Type *> TypeList) {
+ const DataLayout &DL = MF->getDataLayout();
+ const Function &F = MF->getFunction();
+ LLVMContext &Context = F.getContext();
+
+ bool FirstVMaskAssigned = false;
+ for (Type *Ty : TypeList) {
+ StructType *STy = dyn_cast<StructType>(Ty);
+ if (STy && STy->containsHomogeneousScalableVectorTypes()) {
+ Type *ElemTy = STy->getTypeAtIndex(0U);
+ EVT VT = TLI->getValueType(DL, ElemTy);
+ MVT RegisterVT =
+ TLI->getRegisterTypeForCallingConv(Context, F.getCallingConv(), VT);
+
+ RVVArgInfos.push_back({STy->getNumElements(), RegisterVT, false});
+ } else {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(*TLI, DL, Ty, ValueVTs);
+
+ for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
+ ++Value) {
+ EVT VT = ValueVTs[Value];
+ MVT RegisterVT =
+ TLI->getRegisterTypeForCallingConv(Context, F.getCallingConv(), VT);
+ unsigned NumRegs =
+ TLI->getNumRegistersForCallingConv(Context, F.getCallingConv(), VT);
+
+ // Skip non-RVV register type
+ if (!RegisterVT.isVector())
+ continue;
+
+ if (RegisterVT.isFixedLengthVector())
+ RegisterVT = TLI->getContainerForFixedLengthVector(RegisterVT);
+
+ if (!FirstVMaskAssigned &&
+ RegisterVT.getVectorElementType() == MVT::i1) {
+ RVVArgInfos.push_back({1, RegisterVT, true});
+ FirstVMaskAssigned = true;
+ } else {
+ RVVArgInfos.push_back({1, RegisterVT, false});
+ }
+
+ RVVArgInfos.insert(RVVArgInfos.end(), --NumRegs,
+ {1, RegisterVT, false});
+ }
+ }
+ }
+}
+
+void RVVArgDispatcher::allocatePhysReg(unsigned NF, unsigned LMul,
+ unsigned StartReg) {
+ assert((StartReg % LMul) == 0 &&
+ "Start register number should be multiple of lmul");
+ const MCPhysReg *VRArrays;
+ switch (LMul) {
+ default:
+ report_fatal_error("Invalid lmul");
+ case 1:
+ VRArrays = ArgVRs;
+ break;
+ case 2:
+ VRArrays = ArgVRM2s;
+ break;
+ case 4:
+ VRArrays = ArgVRM4s;
+ break;
+ case 8:
+ VRArrays = ArgVRM8s;
+ break;
+ }
+
+ for (unsigned i = 0; i < NF; ++i)
+ if (StartReg)
+ AllocatedPhysRegs.push_back(VRArrays[(StartReg - 8) / LMul + i]);
+ else
+ AllocatedPhysRegs.push_back(MCPhysReg());
+}
+
+/// This function determines whether each RVV argument is passed by register.
+/// If the argument can be assigned to a VR, then give it a specific register.
+/// Otherwise, assign the argument to 0, which is an invalid MCPhysReg.
+void RVVArgDispatcher::compute() {
+ uint32_t AssignedMap = 0;
+ auto allocate = [&](const RVVArgInfo &ArgInfo) {
+ // Allocate first vector mask argument to V0.
+ if (ArgInfo.FirstVMask) {
+ AllocatedPhysRegs.push_back(RISCV::V0);
+ return;
+ }
+
+ unsigned RegsNeeded = divideCeil(
+ ArgInfo.VT.getSizeInBits().getKnownMinValue(), RISCV::RVVBitsPerBlock);
+ unsigned TotalRegsNeeded = ArgInfo.NF * RegsNeeded;
+ for (unsigned StartReg = 0; StartReg + TotalRegsNeeded <= NumArgVRs;
+ StartReg += RegsNeeded) {
+ uint32_t Map = ((1 << TotalRegsNeeded) - 1) << StartReg;
+ if ((AssignedMap & Map) == 0) {
+ allocatePhysReg(ArgInfo.NF, RegsNeeded, StartReg + 8);
+ AssignedMap |= Map;
+ return;
+ }
+ }
+
+ allocatePhysReg(ArgInfo.NF, RegsNeeded, 0);
+ };
+
+ for (unsigned i = 0; i < RVVArgInfos.size(); ++i)
+ allocate(RVVArgInfos[i]);
+}
+
+MCPhysReg RVVArgDispatcher::getNextPhysReg() {
+ assert(CurIdx < AllocatedPhysRegs.size() && "Index out of range");
+ return AllocatedPhysRegs[CurIdx++];
+}
+
namespace llvm::RISCVVIntrinsicsTable {
#define GET_RISCVVIntrinsicsTable_IMPL
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index f90cb4df6047..c28552354bf4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -24,6 +24,7 @@ namespace llvm {
class InstructionCost;
class RISCVSubtarget;
struct RISCVRegisterInfo;
+class RVVArgDispatcher;
namespace RISCVISD {
// clang-format off
@@ -585,6 +586,12 @@ public:
const SelectionDAG &DAG,
unsigned Depth) const override;
+ bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
+ const APInt &DemandedElts,
+ const SelectionDAG &DAG,
+ bool PoisonOnly, bool ConsiderFlags,
+ unsigned Depth) const override;
+
const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
// This method returns the name of a target specific DAG node.
@@ -869,7 +876,7 @@ public:
ISD::ArgFlagsTy ArgFlags, CCState &State,
bool IsFixed, bool IsRet, Type *OrigTy,
const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument);
+ RVVArgDispatcher &RVVDispatcher);
private:
void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
@@ -994,7 +1001,7 @@ private:
/// RISC-V doesn't have flags so it's better to perform the and/or in a GPR.
bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
return false;
- };
+ }
/// For available scheduling models FDIV + two independent FMULs are much
/// faster than two FDIVs.
@@ -1009,19 +1016,68 @@ private:
unsigned getMinimumJumpTableEntries() const override;
};
+/// As per the spec, the rules for passing vector arguments are as follows:
+///
+/// 1. For the first vector mask argument, use v0 to pass it.
+/// 2. For vector data arguments or rest vector mask arguments, starting from
+/// the v8 register, if a vector register group between v8-v23 that has not been
+/// allocated can be found and the first register number is a multiple of LMUL,
+/// then allocate this vector register group to the argument and mark these
+/// registers as allocated. Otherwise, it is passed by reference and is replaced
+/// in the argument list with the address.
+/// 3. For tuple vector data arguments, starting from the v8 register, if
+/// NFIELDS consecutive vector register groups between v8-v23 that have not been
+/// allocated can be found and the first register number is a multiple of LMUL,
+/// then allocate these vector register groups to the argument and mark these
+/// registers as allocated. Otherwise, it is passed by reference and is replaced
+/// in the argument list with the address.
+class RVVArgDispatcher {
+public:
+ static constexpr unsigned NumArgVRs = 16;
+
+ struct RVVArgInfo {
+ unsigned NF;
+ MVT VT;
+ bool FirstVMask = false;
+ };
+
+ RVVArgDispatcher(const MachineFunction *MF, const RISCVTargetLowering *TLI,
+ ArrayRef<Type *> TypeList)
+ : MF(MF), TLI(TLI) {
+ constructArgInfos(TypeList);
+ compute();
+ }
+
+ MCPhysReg getNextPhysReg();
+
+private:
+ SmallVector<RVVArgInfo, 4> RVVArgInfos;
+ SmallVector<MCPhysReg, 4> AllocatedPhysRegs;
+
+ const MachineFunction *MF = nullptr;
+ const RISCVTargetLowering *TLI = nullptr;
+
+ unsigned CurIdx = 0;
+
+ void constructArgInfos(ArrayRef<Type *> TypeList);
+ void compute();
+ void allocatePhysReg(unsigned NF = 1, unsigned LMul = 1,
+ unsigned StartReg = 0);
+};
+
namespace RISCV {
bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument);
+ RVVArgDispatcher &RVVDispatcher);
bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument);
+ RVVArgDispatcher &RVVDispatcher);
bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index f56f49ae2457..52c794446af0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -109,6 +109,8 @@ def Vrgather : RISCVVConstraint<!or(VS2Constraint.Value,
VMConstraint.Value)>;
def Vcompress : RISCVVConstraint<!or(VS2Constraint.Value,
VS1Constraint.Value)>;
+def Sha2Constraint : RISCVVConstraint<!or(VS2Constraint.Value,
+ VS1Constraint.Value)>;
// The following opcode names match those given in Table 19.1 in the
// RISC-V User-level ISA specification ("RISC-V base opcode map").
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index ede8c9809833..54e22d625781 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -24,3 +24,11 @@ def G_FCLASS : RISCVGenericInstruction {
let hasSideEffects = false;
}
def : GINodeEquiv<G_FCLASS, riscv_fclass>;
+
+// Pseudo equivalent to a RISCVISD::READ_VLENB.
+def G_READ_VLENB : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_READ_VLENB, riscv_read_vlenb>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 4ca300f9151e..5582de51b17d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -299,36 +299,36 @@ void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, MCRegister DstReg,
MCRegister SrcReg, bool KillSrc,
- unsigned Opc, unsigned NF) const {
+ RISCVII::VLMUL LMul, unsigned NF) const {
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
- RISCVII::VLMUL LMul;
+ unsigned Opc;
unsigned SubRegIdx;
unsigned VVOpc, VIOpc;
- switch (Opc) {
+ switch (LMul) {
default:
llvm_unreachable("Impossible LMUL for vector register copy.");
- case RISCV::VMV1R_V:
- LMul = RISCVII::LMUL_1;
+ case RISCVII::LMUL_1:
+ Opc = RISCV::VMV1R_V;
SubRegIdx = RISCV::sub_vrm1_0;
VVOpc = RISCV::PseudoVMV_V_V_M1;
VIOpc = RISCV::PseudoVMV_V_I_M1;
break;
- case RISCV::VMV2R_V:
- LMul = RISCVII::LMUL_2;
+ case RISCVII::LMUL_2:
+ Opc = RISCV::VMV2R_V;
SubRegIdx = RISCV::sub_vrm2_0;
VVOpc = RISCV::PseudoVMV_V_V_M2;
VIOpc = RISCV::PseudoVMV_V_I_M2;
break;
- case RISCV::VMV4R_V:
- LMul = RISCVII::LMUL_4;
+ case RISCVII::LMUL_4:
+ Opc = RISCV::VMV4R_V;
SubRegIdx = RISCV::sub_vrm4_0;
VVOpc = RISCV::PseudoVMV_V_V_M4;
VIOpc = RISCV::PseudoVMV_V_I_M4;
break;
- case RISCV::VMV8R_V:
+ case RISCVII::LMUL_8:
assert(NF == 1);
- LMul = RISCVII::LMUL_8;
+ Opc = RISCV::VMV8R_V;
SubRegIdx = RISCV::sub_vrm1_0; // There is no sub_vrm8_0.
VVOpc = RISCV::PseudoVMV_V_V_M8;
VIOpc = RISCV::PseudoVMV_V_I_M8;
@@ -505,87 +505,87 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// VR->VR copies.
if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V);
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1);
return;
}
if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V);
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_2);
return;
}
if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV4R_V);
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_4);
return;
}
if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV8R_V);
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_8);
return;
}
if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/2);
return;
}
if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_2,
/*NF=*/2);
return;
}
if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV4R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_4,
/*NF=*/2);
return;
}
if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/3);
return;
}
if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_2,
/*NF=*/3);
return;
}
if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/4);
return;
}
if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_2,
/*NF=*/4);
return;
}
if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/5);
return;
}
if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/6);
return;
}
if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/7);
return;
}
if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
- copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
+ copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCVII::LMUL_1,
/*NF=*/8);
return;
}
@@ -2050,6 +2050,9 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
case RISCVOp::OPERAND_RVKRNUM_2_14:
Ok = Imm >= 2 && Imm <= 14;
break;
+ case RISCVOp::OPERAND_SPIMM:
+ Ok = (Imm & 0xf) == 0;
+ break;
}
if (!Ok) {
ErrInfo = "Invalid immediate";
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 8a312ee5e779..dd049fca0597 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -69,7 +69,7 @@ public:
void copyPhysRegVector(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
- unsigned Opc, unsigned NF = 1) const;
+ RISCVII::VLMUL LMul, unsigned NF = 1) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg,
bool KillSrc) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 966cdc433d0f..fd8777fdc121 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1259,6 +1259,10 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
}]>;
def : PatGprSimm12<or_is_add, ADDI>;
+def add_like : PatFrags<(ops node:$lhs, node:$rhs),
+ [(or_is_add node:$lhs, node:$rhs),
+ (add node:$lhs, node:$rhs)]>;
+
// negate of low bit can be done via two (compressible) shifts. The negate
// is never compressible since rs1 and rd can't be the same register.
def : Pat<(XLenVT (sub 0, (and_oneuse GPR:$rs, 1))),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index d7807c120378..e68fb42ece9f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -157,10 +157,8 @@ class SchedBinaryMC<string write, string read0, string read1,
// For instructions with three operands.
class SchedTernary<string write, string read0, string read1, string read2,
- string mx, int sew = 0, bit forceMasked = 0,
- bit forceMergeOpRead = 0>
- : SchedNary<write, [read0, read1, read2], mx, sew, forceMasked,
- forceMergeOpRead>;
+ string mx, int sew = 0, bit forceMasked = 0>
+ : SchedNary<write, [read0, read1, read2], mx, sew, forceMasked>;
class SchedTernaryMC<string write, string read0, string read1, string read2,
int sew = 0, bit forceMasked = 1>:
SchedNary<write, [read0, read1, read2], "WorstCase", sew, forceMasked>;
@@ -533,14 +531,6 @@ class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
: RVInstV<funct6, vs1, opv, (outs VR:$vd),
(ins VR:$vs2, VMaskOp:$vm),
opcodestr, "$vd, $vs2$vm">;
-
-// op vd, vs2 (use vs1 as instruction encoding)
-class VALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
- : RVInstV<funct6, vs1, opv, (outs VR:$vd),
- (ins VR:$vs2), opcodestr,
- "$vd, $vs2"> {
- let vm = 1;
-}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
//===----------------------------------------------------------------------===//
@@ -577,7 +567,7 @@ multiclass VALU_IV_X<string opcodestr, bits<6> funct6> {
}
multiclass VALU_IV_I<string opcodestr, bits<6> funct6> {
- def I : VALUVI<funct6, opcodestr # ".vi", simm5>,
+ def I : VALUVI<funct6, opcodestr # ".vi">,
SchedUnaryMC<"WriteVIALUI", "ReadVIALUV">;
}
@@ -663,7 +653,7 @@ multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6>
: VALUNoVm_IV_V_X<opcodestr, funct6> {
- def I : VALUVINoVm<funct6, opcodestr # ".vi", simm5>,
+ def I : VALUVINoVm<funct6, opcodestr # ".vi">,
SchedUnaryMC<"WriteVICALUI", "ReadVICALUV", forceMasked=0>;
}
@@ -902,7 +892,7 @@ multiclass VCMP_IV_X<string opcodestr, bits<6> funct6> {
}
multiclass VCMP_IV_I<string opcodestr, bits<6> funct6> {
- def I : VALUVI<funct6, opcodestr # ".vi", simm5>,
+ def I : VALUVI<funct6, opcodestr # ".vi">,
SchedUnaryMC<"WriteVICmpI", "ReadVICmpV">;
}
@@ -949,7 +939,7 @@ multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6> {
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6>
: VSALU_IV_V_X<opcodestr, funct6> {
- def I : VALUVI<funct6, opcodestr # ".vi", simm5>,
+ def I : VALUVI<funct6, opcodestr # ".vi">,
SchedUnaryMC<"WriteVSALUI", "ReadVSALUV">;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 8be4c7741ca1..8cdaa7f2e5ea 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2133,19 +2133,6 @@ multiclass VPseudoBinary<VReg RetClass,
}
}
-multiclass VPseudoBinaryNoMask<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class,
- LMULInfo MInfo,
- string Constraint = "",
- int sew = 0> {
- let VLMul = MInfo.value, SEW=sew in {
- defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
- def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
- Constraint>;
- }
-}
-
multiclass VPseudoBinaryRoundingMode<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
@@ -2212,7 +2199,8 @@ multiclass VPseudoTiedBinary<VReg RetClass,
def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
Constraint, TargetConstraintType>;
def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
- Constraint, TargetConstraintType>;
+ Constraint, TargetConstraintType>,
+ RISCVMaskedPseudo<MaskIdx=2>;
}
}
@@ -2225,7 +2213,8 @@ multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
def "_" # MInfo.MX # "_TIED":
VPseudoTiedBinaryNoMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
def "_" # MInfo.MX # "_MASK_TIED" :
- VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>;
+ VPseudoTiedBinaryMaskRoundingMode<RetClass, Op2Class, Constraint, TargetConstraintType>,
+ RISCVMaskedPseudo<MaskIdx=2>;
}
}
@@ -3372,22 +3361,6 @@ multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
}
}
-multiclass VPseudoVMAC_VV_VF_AAXA<string Constraint = ""> {
- foreach m = MxListF in {
- defm "" : VPseudoTernaryV_VV_AAXA<m, Constraint>,
- SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
- "ReadVFMulAddV", m.MX>;
- }
-
- foreach f = FPList in {
- foreach m = f.MxList in {
- defm "" : VPseudoTernaryV_VF_AAXA<m, f, Constraint>,
- SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
- "ReadVFMulAddV", m.MX>;
- }
- }
-}
-
multiclass VPseudoVMAC_VV_VF_AAXA_RM<string Constraint = ""> {
foreach m = MxListF in {
defm "" : VPseudoTernaryV_VV_AAXA_RM<m, Constraint>,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index a882b208a768..434b071e628a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -255,7 +255,7 @@ class binop_with_non_imm12<SDPatternOperator binop>
}];
}
def add_non_imm12 : binop_with_non_imm12<add>;
-def or_is_add_non_imm12 : binop_with_non_imm12<or_is_add>;
+def add_like_non_imm12 : binop_with_non_imm12<add_like>;
def Shifted32OnesMask : IntImmLeaf<XLenVT, [{
if (!Imm.isShiftedMask())
@@ -676,38 +676,38 @@ let Predicates = [HasStdExtZba] in {
foreach i = {1,2,3} in {
defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
- def : Pat<(XLenVT (add_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
+ def : Pat<(XLenVT (add_like_non_imm12 (shl GPR:$rs1, (XLenVT i)), GPR:$rs2)),
(shxadd GPR:$rs1, GPR:$rs2)>;
defvar pat = !cast<ComplexPattern>("sh"#i#"add_op");
// More complex cases use a ComplexPattern.
- def : Pat<(XLenVT (add_non_imm12 pat:$rs1, GPR:$rs2)),
+ def : Pat<(XLenVT (add_like_non_imm12 pat:$rs1, GPR:$rs2)),
(shxadd pat:$rs1, GPR:$rs2)>;
}
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
(SH1ADD (XLenVT (SH1ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
(SH1ADD (XLenVT (SH2ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
(SH1ADD (XLenVT (SH3ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
(SH2ADD (XLenVT (SH1ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
(SH2ADD (XLenVT (SH2ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
(SH2ADD (XLenVT (SH3ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
(SH3ADD (XLenVT (SH1ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
(SH3ADD (XLenVT (SH2ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
(SH3ADD (XLenVT (SH3ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
+def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
(SH2ADD (XLenVT (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i))),
GPR:$r)>;
-def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
+def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
(SH3ADD (XLenVT (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i))),
GPR:$r)>;
@@ -756,46 +756,43 @@ def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
(SLLI_UW (XLenVT (SRLI GPR:$rs1, Shifted32OnesMask:$mask)),
Shifted32OnesMask:$mask)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
(ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;
-def : Pat<(i64 (or_is_add_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
- (ADD_UW GPR:$rs1, GPR:$rs2)>;
-
foreach i = {1,2,3} in {
defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
- def : Pat<(i64 (add_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
+ def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
(shxadd_uw GPR:$rs1, GPR:$rs2)>;
}
-def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
(SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
(SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
(SH3ADD_UW GPR:$rs1, GPR:$rs2)>;
// More complex cases use a ComplexPattern.
foreach i = {1,2,3} in {
defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
- def : Pat<(i64 (add_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
+ def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
(!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
}
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
(SH1ADD (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
(SH2ADD (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
(SH3ADD (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
(SH1ADD_UW (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
(SH2ADD_UW (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
(SH3ADD_UW (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C3LeftShiftUW:$i)),
@@ -876,16 +873,13 @@ let Predicates = [HasStdExtZba, IsRV64] in {
def : Pat<(shl (i64 (zext i32:$rs1)), uimm5:$shamt),
(SLLI_UW GPR:$rs1, uimm5:$shamt)>;
-def : Pat<(i64 (add_non_imm12 (zext GPR:$rs1), GPR:$rs2)),
+def : Pat<(i64 (add_like_non_imm12 (zext GPR:$rs1), GPR:$rs2)),
(ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(zext GPR:$src), (ADD_UW GPR:$src, (XLenVT X0))>;
-def : Pat<(i64 (or_is_add_non_imm12 (zext GPR:$rs1), GPR:$rs2)),
- (ADD_UW GPR:$rs1, GPR:$rs2)>;
-
foreach i = {1,2,3} in {
defvar shxadd = !cast<Instruction>("SH"#i#"ADD");
- def : Pat<(i32 (add_non_imm12 (shl GPR:$rs1, (i64 i)), GPR:$rs2)),
+ def : Pat<(i32 (add_like_non_imm12 (shl GPR:$rs1, (i64 i)), GPR:$rs2)),
(shxadd GPR:$rs1, GPR:$rs2)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td
index 2c8451c5c4ce..2a4448d7881f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td
@@ -41,10 +41,20 @@ def RlistAsmOperand : AsmOperandClass {
let DiagnosticType = "InvalidRlist";
}
-def SpimmAsmOperand : AsmOperandClass {
- let Name = "Spimm";
- let ParserMethod = "parseZcmpSpimm";
- let DiagnosticType = "InvalidSpimm";
+def StackAdjAsmOperand : AsmOperandClass {
+ let Name = "StackAdj";
+ let ParserMethod = "parseZcmpStackAdj";
+ let DiagnosticType = "InvalidStackAdj";
+ let PredicateMethod = "isSpimm";
+ let RenderMethod = "addSpimmOperands";
+}
+
+def NegStackAdjAsmOperand : AsmOperandClass {
+ let Name = "NegStackAdj";
+ let ParserMethod = "parseZcmpNegStackAdj";
+ let DiagnosticType = "InvalidStackAdj";
+ let PredicateMethod = "isSpimm";
+ let RenderMethod = "addSpimmOperands";
}
def rlist : Operand<OtherVT> {
@@ -59,12 +69,26 @@ def rlist : Operand<OtherVT> {
// 0~3 Reserved for EABI
return isUInt<4>(Imm) && Imm >= 4;
}];
- }
+}
+
+def stackadj : RISCVOp<OtherVT> {
+ let ParserMatchClass = StackAdjAsmOperand;
+ let PrintMethod = "printStackAdj";
+ let DecoderMethod = "decodeZcmpSpimm";
+ let OperandType = "OPERAND_SPIMM";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<2, 4>(Imm);
+ }];
+}
-def spimm : Operand<OtherVT> {
- let ParserMatchClass = SpimmAsmOperand;
- let PrintMethod = "printSpimm";
+def negstackadj : RISCVOp<OtherVT> {
+ let ParserMatchClass = NegStackAdjAsmOperand;
+ let PrintMethod = "printNegStackAdj";
let DecoderMethod = "decodeZcmpSpimm";
+ let OperandType = "OPERAND_SPIMM";
let MCOperandPredicate = [{
int64_t Imm;
if (!MCOp.evaluateAsConstantImm(Imm))
@@ -124,14 +148,15 @@ class RVZcArith_r<bits<5> funct5, string OpcodeStr> :
let Constraints = "$rd = $rd_wb";
}
-class RVInstZcCPPP<bits<5> funct5, string opcodestr>
- : RVInst16<(outs), (ins rlist:$rlist, spimm:$spimm),
- opcodestr, "$rlist, $spimm", [], InstFormatOther> {
+class RVInstZcCPPP<bits<5> funct5, string opcodestr,
+ DAGOperand immtype = stackadj>
+ : RVInst16<(outs), (ins rlist:$rlist, immtype:$stackadj),
+ opcodestr, "$rlist, $stackadj", [], InstFormatOther> {
bits<4> rlist;
- bits<16> spimm;
+ bits<16> stackadj;
let Inst{1-0} = 0b10;
- let Inst{3-2} = spimm{5-4};
+ let Inst{3-2} = stackadj{5-4};
let Inst{7-4} = rlist;
let Inst{12-8} = funct5;
let Inst{15-13} = 0b101;
@@ -195,7 +220,7 @@ def CM_MVSA01 : RVInst16CA<0b101011, 0b01, 0b10, (outs SR07:$rs1, SR07:$rs2),
let DecoderNamespace = "RVZcmp", Predicates = [HasStdExtZcmp] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 1, Uses = [X2], Defs = [X2] in
-def CM_PUSH : RVInstZcCPPP<0b11000, "cm.push">,
+def CM_PUSH : RVInstZcCPPP<0b11000, "cm.push", negstackadj>,
Sched<[WriteIALU, ReadIALU, ReadStoreData, ReadStoreData,
ReadStoreData, ReadStoreData, ReadStoreData, ReadStoreData,
ReadStoreData, ReadStoreData, ReadStoreData, ReadStoreData,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index b4bd074b7101..e66b061c760a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -67,31 +67,51 @@ class PALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
let Inst{6-0} = OPC_OP_P.Value;
}
-// op vd, vs2, imm, vm
-class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
+// op vd, vs2, vs1
+class PALUVVNoVmTernary<bits<6> funct6, RISCVVFormat opv, string opcodestr>
+ : RVInstVV<funct6, opv, (outs VR:$vd_wb),
+ (ins VR:$vd, VR:$vs2, VR:$vs1),
+ opcodestr, "$vd, $vs2, $vs1"> {
+ let Constraints = "$vd = $vd_wb";
+ let vm = 1;
+ let Inst{6-0} = OPC_OP_P.Value;
+}
+
+// op vd, vs2, imm
+class PALUVINoVm<bits<6> funct6, string opcodestr, Operand optype>
: VALUVINoVm<funct6, opcodestr, optype> {
let Inst{6-0} = OPC_OP_P.Value;
let Inst{14-12} = OPMVV.Value;
}
-// op vd, vs2 (use vs1 as instruction encoding)
-class PALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
- : VALUVs2NoVm<funct6, vs1, opv, opcodestr> {
+// op vd, vs2, imm where vd is also a source regardless of tail policy
+class PALUVINoVmBinary<bits<6> funct6, string opcodestr, Operand optype>
+ : RVInstIVI<funct6, (outs VR:$vd_wb),
+ (ins VR:$vd, VR:$vs2, optype:$imm),
+ opcodestr, "$vd, $vs2, $imm"> {
+ let Constraints = "$vd = $vd_wb";
+ let vm = 1;
let Inst{6-0} = OPC_OP_P.Value;
+ let Inst{14-12} = OPMVV.Value;
}
-multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
- RISCVVFormat opv, string opcodestr> {
- def NAME # _VV : PALUVs2NoVm<funct6_vv, vs1, opv, opcodestr # ".vv">;
- def NAME # _VS : PALUVs2NoVm<funct6_vs, vs1, opv, opcodestr # ".vs">;
+// op vd, vs2 (use vs1 as instruction encoding) where vd is also a source
+// regardless of tail policy
+class PALUVs2NoVmBinary<bits<6> funct6, bits<5> vs1, RISCVVFormat opv,
+ string opcodestr>
+ : RVInstV<funct6, vs1, opv, (outs VR:$vd_wb), (ins VR:$vd, VR:$vs2),
+ opcodestr, "$vd, $vs2"> {
+ let Constraints = "$vd = $vd_wb";
+ let vm = 1;
+ let Inst{6-0} = OPC_OP_P.Value;
}
-// vaeskf1.vi and vaeskf2.vi uses different opcode and format, we need
-// to customize one for them.
-class VAESKF_MV_I<bits<6> funct6, string opcodestr, Operand optype>
- : VALUVINoVm<funct6, opcodestr, optype> {
- let Inst{6-0} = OPC_OP_P.Value;
- let Inst{14-12} = OPMVV.Value;
+multiclass VAES_MV_V_S<bits<6> funct6_vv, bits<6> funct6_vs, bits<5> vs1,
+ RISCVVFormat opv, string opcodestr> {
+ let RVVConstraint = NoConstraint in
+ def NAME # _VV : PALUVs2NoVmBinary<funct6_vv, vs1, opv, opcodestr # ".vv">;
+ let RVVConstraint = VS2Constraint in
+ def NAME # _VS : PALUVs2NoVmBinary<funct6_vs, vs1, opv, opcodestr # ".vs">;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
@@ -122,33 +142,35 @@ let Predicates = [HasStdExtZvkb] in {
} // Predicates = [HasStdExtZvkb]
let Predicates = [HasStdExtZvkg], RVVConstraint = NoConstraint in {
- def VGHSH_VV : PALUVVNoVm<0b101100, OPMVV, "vghsh.vv">;
- def VGMUL_VV : PALUVs2NoVm<0b101000, 0b10001, OPMVV, "vgmul.vv">;
+ def VGHSH_VV : PALUVVNoVmTernary<0b101100, OPMVV, "vghsh.vv">;
+ def VGMUL_VV : PALUVs2NoVmBinary<0b101000, 0b10001, OPMVV, "vgmul.vv">;
} // Predicates = [HasStdExtZvkg]
-let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = NoConstraint in {
- def VSHA2CH_VV : PALUVVNoVm<0b101110, OPMVV, "vsha2ch.vv">;
- def VSHA2CL_VV : PALUVVNoVm<0b101111, OPMVV, "vsha2cl.vv">;
- def VSHA2MS_VV : PALUVVNoVm<0b101101, OPMVV, "vsha2ms.vv">;
+let Predicates = [HasStdExtZvknhaOrZvknhb], RVVConstraint = Sha2Constraint in {
+ def VSHA2CH_VV : PALUVVNoVmTernary<0b101110, OPMVV, "vsha2ch.vv">;
+ def VSHA2CL_VV : PALUVVNoVmTernary<0b101111, OPMVV, "vsha2cl.vv">;
+ def VSHA2MS_VV : PALUVVNoVmTernary<0b101101, OPMVV, "vsha2ms.vv">;
} // Predicates = [HasStdExtZvknhaOrZvknhb]
-let Predicates = [HasStdExtZvkned], RVVConstraint = NoConstraint in {
+let Predicates = [HasStdExtZvkned]in {
defm VAESDF : VAES_MV_V_S<0b101000, 0b101001, 0b00001, OPMVV, "vaesdf">;
defm VAESDM : VAES_MV_V_S<0b101000, 0b101001, 0b00000, OPMVV, "vaesdm">;
defm VAESEF : VAES_MV_V_S<0b101000, 0b101001, 0b00011, OPMVV, "vaesef">;
defm VAESEM : VAES_MV_V_S<0b101000, 0b101001, 0b00010, OPMVV, "vaesem">;
- def VAESKF1_VI : VAESKF_MV_I<0b100010, "vaeskf1.vi", uimm5>;
- def VAESKF2_VI : VAESKF_MV_I<0b101010, "vaeskf2.vi", uimm5>;
- def VAESZ_VS : PALUVs2NoVm<0b101001, 0b00111, OPMVV, "vaesz.vs">;
+ def VAESKF1_VI : PALUVINoVm<0b100010, "vaeskf1.vi", uimm5>;
+ def VAESKF2_VI : PALUVINoVmBinary<0b101010, "vaeskf2.vi", uimm5>;
+ let RVVConstraint = VS2Constraint in
+ def VAESZ_VS : PALUVs2NoVmBinary<0b101001, 0b00111, OPMVV, "vaesz.vs">;
} // Predicates = [HasStdExtZvkned]
-let Predicates = [HasStdExtZvksed], RVVConstraint = NoConstraint in {
+let Predicates = [HasStdExtZvksed] in {
+ let RVVConstraint = NoConstraint in
def VSM4K_VI : PALUVINoVm<0b100001, "vsm4k.vi", uimm5>;
defm VSM4R : VAES_MV_V_S<0b101000, 0b101001, 0b10000, OPMVV, "vsm4r">;
} // Predicates = [HasStdExtZvksed]
-let Predicates = [HasStdExtZvksh], RVVConstraint = NoConstraint in {
- def VSM3C_VI : PALUVINoVm<0b101011, "vsm3c.vi", uimm5>;
+let Predicates = [HasStdExtZvksh], RVVConstraint = VS2Constraint in {
+ def VSM3C_VI : PALUVINoVmBinary<0b101011, "vsm3c.vi", uimm5>;
def VSM3ME_VV : PALUVVNoVm<0b100000, OPMVV, "vsm3me.vv">;
} // Predicates = [HasStdExtZvksh]
@@ -180,131 +202,135 @@ class ZvkMxSet<string vd_lmul> {
!eq(vd_lmul, "MF8") : [V_MF8]);
}
-class VPseudoUnaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
- Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
+class VPseudoBinaryNoMask_Zvk<DAGOperand RetClass, VReg OpClass> :
+ Pseudo<(outs RetClass:$rd_wb),
+ (ins RetClass:$rd, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = "$rd_wb = $rd";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoBinaryNoMask_Zvk<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class,
- string Constraint> :
- Pseudo<(outs RetClass:$rd),
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
+class VPseudoTernaryNoMask_Zvk<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class> :
+ Pseudo<(outs RetClass:$rd_wb),
+ (ins RetClass:$rd, Op1Class:$rs2, Op2Class:$rs1,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = "$rd_wb = $rd";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-multiclass VPseudoBinaryNoMask_Zvk<VReg RetClass,
+multiclass VPseudoBinaryNoMaskTU_Zvk<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ LMULInfo MInfo,
+ string Constraint = "",
+ int sew = 0> {
+ let VLMul = MInfo.value, SEW=sew in {
+ defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
+ def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
+ Constraint>;
+ }
+}
+
+multiclass VPseudoTernaryNoMask_Zvk<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
- LMULInfo MInfo,
- string Constraint = ""> {
+ LMULInfo MInfo> {
let VLMul = MInfo.value in
- def "_" # MInfo.MX : VPseudoBinaryNoMask_Zvk<RetClass, Op1Class, Op2Class,
- Constraint>;
+ def "_" # MInfo.MX : VPseudoTernaryNoMask_Zvk<RetClass, Op1Class, Op2Class>;
}
-multiclass VPseudoUnaryV_V_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
+multiclass VPseudoBinaryV_V_NoMask_Zvk<LMULInfo m> {
let VLMul = m.value in {
- def "_VV_" # m.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, m.vrclass, Constraint>;
+ def "_VV_" # m.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass>;
}
}
-multiclass VPseudoUnaryV_S_NoMask_Zvk<LMULInfo m, string Constraint = ""> {
+multiclass VPseudoBinaryV_S_NoMask_Zvk<LMULInfo m> {
let VLMul = m.value in
foreach vs2_lmul = ZvkMxSet<m.MX>.vs2_lmuls in
- def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoUnaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass, Constraint>;
+ def "_VS_" # m.MX # "_" # vs2_lmul.MX : VPseudoBinaryNoMask_Zvk<m.vrclass, vs2_lmul.vrclass>;
}
-multiclass VPseudoVALU_V_NoMask_Zvk<string Constraint = ""> {
+multiclass VPseudoVALU_V_NoMask_Zvk {
foreach m = MxListVF4 in {
defvar mx = m.MX;
defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
- defm "" : VPseudoUnaryV_V_NoMask_Zvk<m, Constraint>,
+ defm "" : VPseudoBinaryV_V_NoMask_Zvk<m>,
Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
}
}
-multiclass VPseudoVALU_S_NoMask_Zvk<string Constraint = ""> {
+multiclass VPseudoVALU_S_NoMask_Zvk {
foreach m = MxListVF4 in {
defvar mx = m.MX;
defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
- defm "" : VPseudoUnaryV_S_NoMask_Zvk<m, Constraint>,
+ defm "" : VPseudoBinaryV_S_NoMask_Zvk<m>,
Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
}
}
-multiclass VPseudoVALU_V_S_NoMask_Zvk<string Constraint = ""> {
- defm "" : VPseudoVALU_V_NoMask_Zvk<Constraint>;
- defm "" : VPseudoVALU_S_NoMask_Zvk<Constraint>;
-}
+multiclass VPseudoVALU_V_S_NoMask_Zvk
+ : VPseudoVALU_V_NoMask_Zvk, VPseudoVALU_S_NoMask_Zvk;
-multiclass VPseudoVALU_VV_NoMask_Zvk<string Constraint = ""> {
+multiclass VPseudoVALU_VV_NoMask_Zvk {
foreach m = MxListVF4 in {
defvar mx = m.MX;
defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
- defm _VV : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m,
- Constraint>,
+ defm _VV : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
}
}
-multiclass VPseudoVALU_VI_NoMask_Zvk<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVALU_VI_NoMask_Zvk {
foreach m = MxListVF4 in {
defvar mx = m.MX;
defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
- defm _VI : VPseudoBinaryNoMask_Zvk<m.vrclass, m.vrclass, ImmType, m,
- Constraint>,
+ defm _VI : VPseudoTernaryNoMask_Zvk<m.vrclass, m.vrclass, uimm5, m>,
Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
}
}
-multiclass VPseudoVALU_VI_NoMaskTU_Zvk<Operand ImmType = uimm5, string Constraint = ""> {
+multiclass VPseudoVALU_VI_NoMaskTU_Zvk {
foreach m = MxListVF4 in {
defvar mx = m.MX;
defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
- defm _VI : VPseudoBinaryNoMask<m.vrclass, m.vrclass, ImmType, m,
- Constraint>,
+ defm _VI : VPseudoBinaryNoMaskTU_Zvk<m.vrclass, m.vrclass, uimm5, m>,
Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
}
}
-multiclass VPseudoVALU_VV_NoMaskTU_Zvk<string Constraint = ""> {
+multiclass VPseudoVALU_VV_NoMaskTU_Zvk {
foreach m = MxListVF4 in {
defvar mx = m.MX;
defvar WriteVIALUV_MX = !cast<SchedWrite>("WriteVIALUV_" # mx);
defvar ReadVIALUV_MX = !cast<SchedRead>("ReadVIALUV_" # mx);
- defm _VV : VPseudoBinaryNoMask<m.vrclass, m.vrclass, m.vrclass, m,
- Constraint>,
+ defm _VV : VPseudoBinaryNoMaskTU_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>;
}
}
@@ -376,7 +402,7 @@ let Predicates = [HasStdExtZvkned] in {
defm PseudoVAESEF : VPseudoVALU_V_S_NoMask_Zvk;
defm PseudoVAESEM : VPseudoVALU_V_S_NoMask_Zvk;
defm PseudoVAESKF1 : VPseudoVALU_VI_NoMaskTU_Zvk;
- defm PseudoVAESKF2 : VPseudoVALU_VI_NoMask_Zvk<uimm5>;
+ defm PseudoVAESKF2 : VPseudoVALU_VI_NoMask_Zvk;
defm PseudoVAESZ : VPseudoVALU_S_NoMask_Zvk;
} // Predicates = [HasStdExtZvkned]
@@ -392,7 +418,7 @@ let Predicates = [HasStdExtZvksed] in {
} // Predicates = [HasStdExtZvksed]
let Predicates = [HasStdExtZvksh] in {
- defm PseudoVSM3C : VPseudoVALU_VI_NoMask_Zvk<uimm5>;
+ defm PseudoVSM3C : VPseudoVALU_VI_NoMask_Zvk;
defm PseudoVSM3ME : VPseudoVALU_VV_NoMaskTU_Zvk;
} // Predicates = [HasStdExtZvksh]
@@ -668,44 +694,44 @@ foreach vtiToWti = AllWidenableIntVectors in {
//===----------------------------------------------------------------------===//
class VPatUnaryNoMask_Zvk<string intrinsic_name,
- string inst,
- string kind,
- ValueType result_type,
- ValueType op2_type,
- int sew,
- LMULInfo vlmul,
- VReg result_reg_class,
- VReg op2_reg_class> :
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op2_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$rd),
(op2_type op2_reg_class:$rs2),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$rd),
(op2_type op2_reg_class:$rs2),
GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatUnaryNoMask_VS_Zvk<string intrinsic_name,
- string inst,
- string kind,
- ValueType result_type,
- ValueType op2_type,
- int sew,
- LMULInfo vlmul,
- LMULInfo vs2_lmul,
- VReg result_reg_class,
- VReg op2_reg_class> :
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op2_type,
+ int sew,
+ LMULInfo vlmul,
+ LMULInfo vs2_lmul,
+ VReg result_reg_class,
+ VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$rd),
(op2_type op2_reg_class:$rs2),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_"#vs2_lmul.MX)
- (result_type result_reg_class:$merge),
+ (result_type result_reg_class:$rd),
(op2_type op2_reg_class:$rs2),
GPR:$vl, sew, (XLenVT timm:$policy))>;
multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
- list<VTypeInfo> vtilist> {
+ list<VTypeInfo> vtilist> {
foreach vti = vtilist in
def : VPatUnaryNoMask_Zvk<intrinsic # "_vv", instruction, "VV",
vti.Vector, vti.Vector, vti.Log2SEW,
@@ -713,7 +739,7 @@ multiclass VPatUnaryV_V_NoMask_Zvk<string intrinsic, string instruction,
}
multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
- list<VTypeInfo> vtilist> {
+ list<VTypeInfo> vtilist> {
foreach vti = vtilist in
foreach vti_vs2 = ZvkI32IntegerVectors<vti.LMul.MX>.vs2_types in
def : VPatUnaryNoMask_VS_Zvk<intrinsic # "_vs", instruction, "VS",
@@ -722,13 +748,13 @@ multiclass VPatUnaryV_S_NoMaskVectorCrypto<string intrinsic, string instruction,
}
multiclass VPatUnaryV_V_S_NoMask_Zvk<string intrinsic, string instruction,
- list<VTypeInfo> vtilist> {
+ list<VTypeInfo> vtilist> {
defm : VPatUnaryV_V_NoMask_Zvk<intrinsic, instruction, vtilist>;
defm : VPatUnaryV_S_NoMaskVectorCrypto<intrinsic, instruction, vtilist>;
}
multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
- list<VTypeInfo> vtilist> {
+ list<VTypeInfo> vtilist> {
foreach vti = vtilist in
def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VV",
vti.Vector, vti.Vector, vti.Vector,
@@ -737,7 +763,8 @@ multiclass VPatBinaryV_VV_NoMask<string intrinsic, string instruction,
}
multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
- list<VTypeInfo> vtilist, Operand imm_type = tuimm5> {
+ list<VTypeInfo> vtilist,
+ Operand imm_type = tuimm5> {
foreach vti = vtilist in
def : VPatTernaryNoMaskWithPolicy<intrinsic, instruction, "VI",
vti.Vector, vti.Vector, XLenVT,
@@ -746,7 +773,8 @@ multiclass VPatBinaryV_VI_NoMask<string intrinsic, string instruction,
}
multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
- list<VTypeInfo> vtilist, Operand imm_type = tuimm5> {
+ list<VTypeInfo> vtilist,
+ Operand imm_type = tuimm5> {
foreach vti = vtilist in
def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VI_" # vti.LMul.MX,
vti.Vector, vti.Vector, XLenVT, vti.Log2SEW,
@@ -754,7 +782,7 @@ multiclass VPatBinaryV_VI_NoMaskTU<string intrinsic, string instruction,
}
multiclass VPatBinaryV_VV_NoMaskTU<string intrinsic, string instruction,
- list<VTypeInfo> vtilist> {
+ list<VTypeInfo> vtilist> {
foreach vti = vtilist in
def : VPatBinaryNoMaskTU<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Vector, vti.Vector, vti.Vector, vti.Log2SEW,
@@ -812,13 +840,14 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
}
multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
- string instruction2, list<VTypeInfo> vtilist>
+ string instruction2, list<VTypeInfo> vtilist>
: VPatBinaryV_VV<intrinsic, instruction, vtilist>,
VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;
multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
- list<VTypeInfo> vtilist, Operand ImmType = uimm6>
+ list<VTypeInfo> vtilist,
+ Operand ImmType = uimm6>
: VPatBinaryV_VV<intrinsic, instruction, vtilist>,
VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index af864ba0fbc4..3f423450618d 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -99,6 +99,13 @@ static unsigned log2LdstWidth(unsigned Opcode) {
switch (Opcode) {
default:
llvm_unreachable("Unexpected opcode");
+ case RISCV::LBU:
+ case RISCV::SB:
+ return 0;
+ case RISCV::LH:
+ case RISCV::LHU:
+ case RISCV::SH:
+ return 1;
case RISCV::LW:
case RISCV::SW:
case RISCV::FLW:
@@ -112,17 +119,47 @@ static unsigned log2LdstWidth(unsigned Opcode) {
}
}
+// Return bit field size of immediate operand of Opcode.
+static unsigned offsetMask(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ case RISCV::LBU:
+ case RISCV::SB:
+ return maskTrailingOnes<unsigned>(2U);
+ case RISCV::LH:
+ case RISCV::LHU:
+ case RISCV::SH:
+ return maskTrailingOnes<unsigned>(1U);
+ case RISCV::LW:
+ case RISCV::SW:
+ case RISCV::FLW:
+ case RISCV::FSW:
+ case RISCV::LD:
+ case RISCV::SD:
+ case RISCV::FLD:
+ case RISCV::FSD:
+ return maskTrailingOnes<unsigned>(5U);
+ }
+}
+
// Return a mask for the offset bits of a non-stack-pointer based compressed
// load/store.
static uint8_t compressedLDSTOffsetMask(unsigned Opcode) {
- return 0x1f << log2LdstWidth(Opcode);
+ return offsetMask(Opcode) << log2LdstWidth(Opcode);
}
// Return true if Offset fits within a compressed stack-pointer based
// load/store.
static bool compressibleSPOffset(int64_t Offset, unsigned Opcode) {
- return log2LdstWidth(Opcode) == 2 ? isShiftedUInt<6, 2>(Offset)
- : isShiftedUInt<6, 3>(Offset);
+ // Compressed sp-based loads and stores only work for 32/64 bits.
+ switch (log2LdstWidth(Opcode)) {
+ case 2:
+ return isShiftedUInt<6, 2>(Offset);
+ case 3:
+ return isShiftedUInt<6, 3>(Offset);
+ }
+ return false;
}
// Given an offset for a load/store, return the adjustment required to the base
@@ -147,6 +184,10 @@ static bool isCompressibleLoad(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
+ case RISCV::LBU:
+ case RISCV::LH:
+ case RISCV::LHU:
+ return STI.hasStdExtZcb();
case RISCV::LW:
case RISCV::LD:
return STI.hasStdExtCOrZca();
@@ -164,6 +205,9 @@ static bool isCompressibleStore(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
+ case RISCV::SB:
+ case RISCV::SH:
+ return STI.hasStdExtZcb();
case RISCV::SW:
case RISCV::SD:
return STI.hasStdExtCOrZca();
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index dcf70e8cad64..39d420c2fbf0 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -672,7 +672,7 @@ bool RISCVOptWInstrs::stripWSuffixes(MachineFunction &MF,
const RISCVInstrInfo &TII,
const RISCVSubtarget &ST,
MachineRegisterInfo &MRI) {
- if (DisableStripWSuffix)
+ if (DisableStripWSuffix || !ST.enableStripWSuffix())
return false;
bool MadeChange = false;
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 10bf1e88d741..11c3f2d57eb0 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -71,6 +71,9 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
: CSR_Interrupt_SaveList;
}
+ bool HasVectorCSR =
+ MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall;
+
switch (Subtarget.getTargetABI()) {
default:
llvm_unreachable("Unrecognized ABI");
@@ -79,12 +82,18 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_ILP32E_LP64E_SaveList;
case RISCVABI::ABI_ILP32:
case RISCVABI::ABI_LP64:
+ if (HasVectorCSR)
+ return CSR_ILP32_LP64_V_SaveList;
return CSR_ILP32_LP64_SaveList;
case RISCVABI::ABI_ILP32F:
case RISCVABI::ABI_LP64F:
+ if (HasVectorCSR)
+ return CSR_ILP32F_LP64F_V_SaveList;
return CSR_ILP32F_LP64F_SaveList;
case RISCVABI::ABI_ILP32D:
case RISCVABI::ABI_LP64D:
+ if (HasVectorCSR)
+ return CSR_ILP32D_LP64D_V_SaveList;
return CSR_ILP32D_LP64D_SaveList;
}
}
@@ -446,6 +455,13 @@ bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
(Lo12 & 0b11111) != 0) {
// Prefetch instructions require the offset to be 32 byte aligned.
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+ } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
+ Opc == RISCV::PseudoRV32ZdinxSD) &&
+ Lo12 >= 2044) {
+ // This instruction will be split into 2 instructions. The second
+ // instruction will add 4 to the immediate. If that would overflow 12
+ // bits, we can't fold the offset.
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
} else {
// We can encode an add with 12 bit signed immediate in the immediate
// operand of our user instruction. As a result, the remaining
@@ -658,12 +674,18 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
return CSR_ILP32E_LP64E_RegMask;
case RISCVABI::ABI_ILP32:
case RISCVABI::ABI_LP64:
+ if (CC == CallingConv::RISCV_VectorCall)
+ return CSR_ILP32_LP64_V_RegMask;
return CSR_ILP32_LP64_RegMask;
case RISCVABI::ABI_ILP32F:
case RISCVABI::ABI_LP64F:
+ if (CC == CallingConv::RISCV_VectorCall)
+ return CSR_ILP32F_LP64F_V_RegMask;
return CSR_ILP32F_LP64F_RegMask;
case RISCVABI::ABI_ILP32D:
case RISCVABI::ABI_LP64D:
+ if (CC == CallingConv::RISCV_VectorCall)
+ return CSR_ILP32D_LP64D_V_RegMask;
return CSR_ILP32D_LP64D_RegMask;
}
}
@@ -741,8 +763,11 @@ bool RISCVRegisterInfo::getRegAllocationHints(
bool NeedGPRC) -> void {
Register Reg = MO.getReg();
Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
- if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
- assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
+ // TODO: Support GPRPair subregisters? Need to be careful with even/odd
+ // registers. If the virtual register is an odd register of a pair and the
+ // physical register is even (or vice versa), we should not add the hint.
+ if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
+ !MO.getSubReg() && !VRRegMO.getSubReg()) {
if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
TwoAddrHints.insert(PhysReg);
}
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 225b57554c1d..90c4a7193ee3 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -64,9 +64,14 @@ def sub_vrm1_6 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_0>;
def sub_vrm1_7 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_1>;
// GPR sizes change with HwMode.
-// FIXME: Support HwMode in SubRegIndex?
-def sub_gpr_even : SubRegIndex<-1>;
-def sub_gpr_odd : SubRegIndex<-1, -1>;
+def sub_gpr_even : SubRegIndex<32> {
+ let SubRegRanges = SubRegRangeByHwMode<[RV32, RV64],
+ [SubRegRange<32>, SubRegRange<64>]>;
+}
+def sub_gpr_odd : SubRegIndex<32, 32> {
+ let SubRegRanges = SubRegRangeByHwMode<[RV32, RV64],
+ [SubRegRange<32, 32>, SubRegRange<64, 64>]>;
+}
} // Namespace = "RISCV"
// Integer registers
@@ -573,7 +578,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
}
let RegInfos = RegInfoByHwMode<[RV32, RV64],
- [RegInfo<64, 64, 64>, RegInfo<128, 128, 128>]>,
+ [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>,
DecoderMethod = "DecodeGPRPairRegisterClass" in
def GPRPair : RegisterClass<"RISCV", [XLenPairFVT], 64, (add
X10_X11, X12_X13, X14_X15, X16_X17,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 8f46fdc2f7ca..38cdf3c47c64 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -897,74 +897,85 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I) {
- if (isa<VectorType>(Dst) && isa<VectorType>(Src)) {
- // FIXME: Need to compute legalizing cost for illegal types.
- if (!isTypeLegal(Src) || !isTypeLegal(Dst))
- return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+ bool IsVectorType = isa<VectorType>(Dst) && isa<VectorType>(Src);
+ if (!IsVectorType)
+ return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
- // Skip if element size of Dst or Src is bigger than ELEN.
- if (Src->getScalarSizeInBits() > ST->getELen() ||
- Dst->getScalarSizeInBits() > ST->getELen())
- return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+ bool IsTypeLegal = isTypeLegal(Src) && isTypeLegal(Dst) &&
+ (Src->getScalarSizeInBits() <= ST->getELen()) &&
+ (Dst->getScalarSizeInBits() <= ST->getELen());
- int ISD = TLI->InstructionOpcodeToISD(Opcode);
- assert(ISD && "Invalid opcode");
-
- // FIXME: Need to consider vsetvli and lmul.
- int PowDiff = (int)Log2_32(Dst->getScalarSizeInBits()) -
- (int)Log2_32(Src->getScalarSizeInBits());
- switch (ISD) {
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- if (Src->getScalarSizeInBits() == 1) {
- // We do not use vsext/vzext to extend from mask vector.
- // Instead we use the following instructions to extend from mask vector:
- // vmv.v.i v8, 0
- // vmerge.vim v8, v8, -1, v0
- return 2;
- }
- return 1;
- case ISD::TRUNCATE:
- if (Dst->getScalarSizeInBits() == 1) {
- // We do not use several vncvt to truncate to mask vector. So we could
- // not use PowDiff to calculate it.
- // Instead we use the following instructions to truncate to mask vector:
- // vand.vi v8, v8, 1
- // vmsne.vi v0, v8, 0
- return 2;
- }
- [[fallthrough]];
- case ISD::FP_EXTEND:
- case ISD::FP_ROUND:
- // Counts of narrow/widen instructions.
- return std::abs(PowDiff);
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT:
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP:
- if (Src->getScalarSizeInBits() == 1 || Dst->getScalarSizeInBits() == 1) {
- // The cost of convert from or to mask vector is different from other
- // cases. We could not use PowDiff to calculate it.
- // For mask vector to fp, we should use the following instructions:
- // vmv.v.i v8, 0
- // vmerge.vim v8, v8, -1, v0
- // vfcvt.f.x.v v8, v8
-
- // And for fp vector to mask, we use:
- // vfncvt.rtz.x.f.w v9, v8
- // vand.vi v8, v9, 1
- // vmsne.vi v0, v8, 0
- return 3;
- }
- if (std::abs(PowDiff) <= 1)
- return 1;
- // Backend could lower (v[sz]ext i8 to double) to vfcvt(v[sz]ext.f8 i8),
- // so it only need two conversion.
- if (Src->isIntOrIntVectorTy())
- return 2;
- // Counts of narrow/widen instructions.
- return std::abs(PowDiff);
+ // FIXME: Need to compute legalizing cost for illegal types.
+ if (!IsTypeLegal)
+ return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+
+ std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);
+
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ int PowDiff = (int)Log2_32(Dst->getScalarSizeInBits()) -
+ (int)Log2_32(Src->getScalarSizeInBits());
+ switch (ISD) {
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND: {
+ const unsigned SrcEltSize = Src->getScalarSizeInBits();
+ if (SrcEltSize == 1) {
+ // We do not use vsext/vzext to extend from mask vector.
+ // Instead we use the following instructions to extend from mask vector:
+ // vmv.v.i v8, 0
+ // vmerge.vim v8, v8, -1, v0
+ return getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM},
+ DstLT.second, CostKind);
+ }
+ if ((PowDiff < 1) || (PowDiff > 3))
+ return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+ unsigned SExtOp[] = {RISCV::VSEXT_VF2, RISCV::VSEXT_VF4, RISCV::VSEXT_VF8};
+ unsigned ZExtOp[] = {RISCV::VZEXT_VF2, RISCV::VZEXT_VF4, RISCV::VZEXT_VF8};
+ unsigned Op =
+ (ISD == ISD::SIGN_EXTEND) ? SExtOp[PowDiff - 1] : ZExtOp[PowDiff - 1];
+ return getRISCVInstructionCost(Op, DstLT.second, CostKind);
+ }
+ case ISD::TRUNCATE:
+ if (Dst->getScalarSizeInBits() == 1) {
+ // We do not use several vncvt to truncate to mask vector. So we could
+ // not use PowDiff to calculate it.
+ // Instead we use the following instructions to truncate to mask vector:
+ // vand.vi v8, v8, 1
+ // vmsne.vi v0, v8, 0
+ return 2;
+ }
+ [[fallthrough]];
+ case ISD::FP_EXTEND:
+ case ISD::FP_ROUND:
+ // Counts of narrow/widen instructions.
+ return std::abs(PowDiff);
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ if (Src->getScalarSizeInBits() == 1 || Dst->getScalarSizeInBits() == 1) {
+ // The cost of convert from or to mask vector is different from other
+ // cases. We could not use PowDiff to calculate it.
+ // For mask vector to fp, we should use the following instructions:
+ // vmv.v.i v8, 0
+ // vmerge.vim v8, v8, -1, v0
+ // vfcvt.f.x.v v8, v8
+
+ // And for fp vector to mask, we use:
+ // vfncvt.rtz.x.f.w v9, v8
+ // vand.vi v8, v9, 1
+ // vmsne.vi v0, v8, 0
+ return 3;
}
+ if (std::abs(PowDiff) <= 1)
+ return 1;
+ // Backend could lower (v[sz]ext i8 to double) to vfcvt(v[sz]ext.f8 i8),
+ // so it only need two conversion.
+ if (Src->isIntOrIntVectorTy())
+ return 2;
+ // Counts of narrow/widen instructions.
+ return std::abs(PowDiff);
}
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
}
@@ -1001,6 +1012,51 @@ RISCVTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
return getArithmeticReductionCost(Instruction::And, Ty, FMF, CostKind);
}
+ if (IID == Intrinsic::maximum || IID == Intrinsic::minimum) {
+ SmallVector<unsigned, 3> Opcodes;
+ InstructionCost ExtraCost = 0;
+ switch (IID) {
+ case Intrinsic::maximum:
+ if (FMF.noNaNs()) {
+ Opcodes = {RISCV::VFREDMAX_VS, RISCV::VFMV_F_S};
+ } else {
+ Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMAX_VS,
+ RISCV::VFMV_F_S};
+ // Cost of Canonical Nan + branch
+ // lui a0, 523264
+ // fmv.w.x fa0, a0
+ Type *DstTy = Ty->getScalarType();
+ const unsigned EltTyBits = DstTy->getScalarSizeInBits();
+ Type *SrcTy = IntegerType::getIntNTy(DstTy->getContext(), EltTyBits);
+ ExtraCost = 1 +
+ getCastInstrCost(Instruction::UIToFP, DstTy, SrcTy,
+ TTI::CastContextHint::None, CostKind) +
+ getCFInstrCost(Instruction::Br, CostKind);
+ }
+ break;
+
+ case Intrinsic::minimum:
+ if (FMF.noNaNs()) {
+ Opcodes = {RISCV::VFREDMIN_VS, RISCV::VFMV_F_S};
+ } else {
+ Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMIN_VS,
+ RISCV::VFMV_F_S};
+ // Cost of Canonical Nan + branch
+ // lui a0, 523264
+ // fmv.w.x fa0, a0
+ Type *DstTy = Ty->getScalarType();
+ const unsigned EltTyBits = DL.getTypeSizeInBits(DstTy);
+ Type *SrcTy = IntegerType::getIntNTy(DstTy->getContext(), EltTyBits);
+ ExtraCost = 1 +
+ getCastInstrCost(Instruction::UIToFP, DstTy, SrcTy,
+ TTI::CastContextHint::None, CostKind) +
+ getCFInstrCost(Instruction::Br, CostKind);
+ }
+ break;
+ }
+ return ExtraCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);
+ }
+
// IR Reduction is composed by two vmv and one rvv reduction instruction.
InstructionCost BaseCost = 2;
@@ -1630,3 +1686,17 @@ bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) {
return false;
return true;
}
+
+bool RISCVTTIImpl::areInlineCompatible(const Function *Caller,
+ const Function *Callee) const {
+ const TargetMachine &TM = getTLI()->getTargetMachine();
+
+ const FeatureBitset &CallerBits =
+ TM.getSubtargetImpl(*Caller)->getFeatureBits();
+ const FeatureBitset &CalleeBits =
+ TM.getSubtargetImpl(*Callee)->getFeatureBits();
+
+ // Inline a callee if its target-features are a subset of the callers
+ // target-features.
+ return (CallerBits & CalleeBits) == CalleeBits;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 8daf6845dc8b..ac32aea4ce2b 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -60,6 +60,9 @@ public:
: BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
+ bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const;
+
/// Return the cost of materializing an immediate for a value operand of
/// a store instruction.
InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt
index afc26dda4c68..7001ac382f41 100644
--- a/llvm/lib/Target/SPIRV/CMakeLists.txt
+++ b/llvm/lib/Target/SPIRV/CMakeLists.txt
@@ -17,6 +17,7 @@ add_llvm_target(SPIRVCodeGen
SPIRVAsmPrinter.cpp
SPIRVBuiltins.cpp
SPIRVCallLowering.cpp
+ SPIRVCommandLine.cpp
SPIRVDuplicatesTracker.cpp
SPIRVEmitIntrinsics.cpp
SPIRVGlobalRegistry.cpp
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 0478fc33cedc..950f9df28dd3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -370,12 +370,10 @@ static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
/// Helper function for building a load instruction for loading a builtin global
/// variable of \p BuiltinValue value.
-static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder,
- SPIRVType *VariableType,
- SPIRVGlobalRegistry *GR,
- SPIRV::BuiltIn::BuiltIn BuiltinValue,
- LLT LLType,
- Register Reg = Register(0)) {
+static Register buildBuiltinVariableLoad(
+ MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
+ SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
+ Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
Register NewRegister =
MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
MIRBuilder.getMRI()->setType(NewRegister,
@@ -387,8 +385,9 @@ static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder,
// Set up the global OpVariable with the necessary builtin decorations.
Register Variable = GR->buildGlobalVariable(
NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
- SPIRV::StorageClass::Input, nullptr, true, true,
- SPIRV::LinkageType::Import, MIRBuilder, false);
+ SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
+ /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
+ false);
// Load the value from the global variable.
Register LoadedRegister =
@@ -1341,6 +1340,22 @@ static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
return true;
}
+static bool generateWaveInst(const SPIRV::IncomingCall *Call,
+ MachineIRBuilder &MIRBuilder,
+ SPIRVGlobalRegistry *GR) {
+ const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
+ SPIRV::BuiltIn::BuiltIn Value =
+ SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
+
+ // For now, we only support a single Wave intrinsic with a single return type.
+ assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
+ LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
+
+ return buildBuiltinVariableLoad(
+ MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
+ /* isConst= */ false, /* hasLinkageTy= */ false);
+}
+
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
MachineIRBuilder &MIRBuilder,
SPIRVGlobalRegistry *GR) {
@@ -2229,6 +2244,8 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
return generateBarrierInst(Call.get(), MIRBuilder, GR);
case SPIRV::Dot:
return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
+ case SPIRV::Wave:
+ return generateWaveInst(Call.get(), MIRBuilder, GR);
case SPIRV::GetQuery:
return generateGetQueryInst(Call.get(), MIRBuilder, GR);
case SPIRV::ImageSizeQuery:
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index ee4f13d89c3c..660000fb548d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -41,6 +41,7 @@ def Variable : BuiltinGroup;
def Atomic : BuiltinGroup;
def Barrier : BuiltinGroup;
def Dot : BuiltinGroup;
+def Wave : BuiltinGroup;
def GetQuery : BuiltinGroup;
def ImageSizeQuery : BuiltinGroup;
def ImageMiscQuery : BuiltinGroup;
@@ -1143,6 +1144,7 @@ defm : DemangledGetBuiltin<"get_global_size", OpenCL_std, GetQuery, GlobalSize>;
defm : DemangledGetBuiltin<"get_group_id", OpenCL_std, GetQuery, WorkgroupId>;
defm : DemangledGetBuiltin<"get_enqueued_local_size", OpenCL_std, GetQuery, EnqueuedWorkgroupSize>;
defm : DemangledGetBuiltin<"get_num_groups", OpenCL_std, GetQuery, NumWorkgroups>;
+defm : DemangledGetBuiltin<"__hlsl_wave_get_lane_index", GLSL_std_450, Wave, SubgroupLocalInvocationId>;
//===----------------------------------------------------------------------===//
// Class defining an image query builtin record used for lowering the OpenCL
diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
index 6f23f055b8c2..ad4e72a3128b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
@@ -201,21 +201,30 @@ static SPIRVType *getArgSPIRVType(const Function &F, unsigned ArgIdx,
if (!isPointerTy(OriginalArgType))
return GR->getOrCreateSPIRVType(OriginalArgType, MIRBuilder, ArgAccessQual);
- // In case OriginalArgType is of pointer type, there are three possibilities:
+ Argument *Arg = F.getArg(ArgIdx);
+ Type *ArgType = Arg->getType();
+ if (isTypedPointerTy(ArgType)) {
+ SPIRVType *ElementType = GR->getOrCreateSPIRVType(
+ cast<TypedPointerType>(ArgType)->getElementType(), MIRBuilder);
+ return GR->getOrCreateSPIRVPointerType(
+ ElementType, MIRBuilder,
+ addressSpaceToStorageClass(getPointerAddressSpace(ArgType), ST));
+ }
+
+ // In case OriginalArgType is of untyped pointer type, there are three
+ // possibilities:
// 1) This is a pointer of an LLVM IR element type, passed byval/byref.
// 2) This is an OpenCL/SPIR-V builtin type if there is spv_assign_type
- // intrinsic assigning a TargetExtType.
+ // intrinsic assigning a TargetExtType.
// 3) This is a pointer, try to retrieve pointer element type from a
// spv_assign_ptr_type intrinsic or otherwise use default pointer element
// type.
- Argument *Arg = F.getArg(ArgIdx);
- if (HasPointeeTypeAttr(Arg)) {
- Type *ByValRefType = Arg->hasByValAttr() ? Arg->getParamByValType()
- : Arg->getParamByRefType();
- SPIRVType *ElementType = GR->getOrCreateSPIRVType(ByValRefType, MIRBuilder);
+ if (hasPointeeTypeAttr(Arg)) {
+ SPIRVType *ElementType =
+ GR->getOrCreateSPIRVType(getPointeeTypeByAttr(Arg), MIRBuilder);
return GR->getOrCreateSPIRVPointerType(
ElementType, MIRBuilder,
- addressSpaceToStorageClass(getPointerAddressSpace(Arg->getType()), ST));
+ addressSpaceToStorageClass(getPointerAddressSpace(ArgType), ST));
}
for (auto User : Arg->users()) {
@@ -493,9 +502,15 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
Register ResVReg =
Info.OrigRet.Regs.empty() ? Register(0) : Info.OrigRet.Regs[0];
const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
- // TODO: check that it's OCL builtin, then apply OpenCL_std.
- if (!DemangledName.empty() && CF && CF->isDeclaration() &&
- ST->canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
+
+ bool isFunctionDecl = CF && CF->isDeclaration();
+ bool canUseOpenCL = ST->canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std);
+ bool canUseGLSL = ST->canUseExtInstSet(SPIRV::InstructionSet::GLSL_std_450);
+ assert(canUseGLSL != canUseOpenCL &&
+ "Scenario where both sets are enabled is not supported.");
+
+ if (isFunctionDecl && !DemangledName.empty() &&
+ (canUseGLSL || canUseOpenCL)) {
SmallVector<Register, 8> ArgVRegs;
for (auto Arg : Info.OrigArgs) {
assert(Arg.Regs.size() == 1 && "Call arg has multiple VRegs");
@@ -504,12 +519,15 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
if (!GR->getSPIRVTypeForVReg(Arg.Regs[0]))
GR->assignSPIRVTypeToVReg(SPIRVTy, Arg.Regs[0], MF);
}
- if (auto Res = SPIRV::lowerBuiltin(
- DemangledName, SPIRV::InstructionSet::OpenCL_std, MIRBuilder,
- ResVReg, OrigRetTy, ArgVRegs, GR))
+ auto instructionSet = canUseOpenCL ? SPIRV::InstructionSet::OpenCL_std
+ : SPIRV::InstructionSet::GLSL_std_450;
+ if (auto Res =
+ SPIRV::lowerBuiltin(DemangledName, instructionSet, MIRBuilder,
+ ResVReg, OrigRetTy, ArgVRegs, GR))
return *Res;
}
- if (CF && CF->isDeclaration() && !GR->find(CF, &MF).isValid()) {
+
+ if (isFunctionDecl && !GR->find(CF, &MF).isValid()) {
// Emit the type info and forward function declaration to the first MBB
// to ensure VReg definition dependencies are valid across all MBBs.
MachineIRBuilder FirstBlockBuilder;
diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
new file mode 100644
index 000000000000..691e6ee0e582
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp
@@ -0,0 +1,101 @@
+//===--- SPIRVCommandLine.cpp ---- Command Line Options ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions of classes and functions needed for
+// processing, parsing, and using CLI options for the SPIR-V backend.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPIRVCommandLine.h"
+#include "llvm/ADT/StringRef.h"
+#include <algorithm>
+#include <map>
+
+#define DEBUG_TYPE "spirv-commandline"
+
+using namespace llvm;
+
+static const std::map<std::string, SPIRV::Extension::Extension>
+ SPIRVExtensionMap = {
+ {"SPV_EXT_shader_atomic_float_add",
+ SPIRV::Extension::Extension::SPV_EXT_shader_atomic_float_add},
+ {"SPV_EXT_shader_atomic_float16_add",
+ SPIRV::Extension::Extension::SPV_EXT_shader_atomic_float16_add},
+ {"SPV_EXT_shader_atomic_float_min_max",
+ SPIRV::Extension::Extension::SPV_EXT_shader_atomic_float_min_max},
+ {"SPV_INTEL_arbitrary_precision_integers",
+ SPIRV::Extension::Extension::SPV_INTEL_arbitrary_precision_integers},
+ {"SPV_INTEL_optnone", SPIRV::Extension::Extension::SPV_INTEL_optnone},
+ {"SPV_INTEL_usm_storage_classes",
+ SPIRV::Extension::Extension::SPV_INTEL_usm_storage_classes},
+ {"SPV_INTEL_subgroups",
+ SPIRV::Extension::Extension::SPV_INTEL_subgroups},
+ {"SPV_KHR_uniform_group_instructions",
+ SPIRV::Extension::Extension::SPV_KHR_uniform_group_instructions},
+ {"SPV_KHR_no_integer_wrap_decoration",
+ SPIRV::Extension::Extension::SPV_KHR_no_integer_wrap_decoration},
+ {"SPV_KHR_float_controls",
+ SPIRV::Extension::Extension::SPV_KHR_float_controls},
+ {"SPV_KHR_expect_assume",
+ SPIRV::Extension::Extension::SPV_KHR_expect_assume},
+ {"SPV_KHR_bit_instructions",
+ SPIRV::Extension::Extension::SPV_KHR_bit_instructions},
+ {"SPV_KHR_linkonce_odr",
+ SPIRV::Extension::Extension::SPV_KHR_linkonce_odr},
+ {"SPV_INTEL_bfloat16_conversion",
+ SPIRV::Extension::Extension::SPV_INTEL_bfloat16_conversion},
+ {"SPV_KHR_subgroup_rotate",
+ SPIRV::Extension::Extension::SPV_KHR_subgroup_rotate},
+ {"SPV_INTEL_variable_length_array",
+ SPIRV::Extension::Extension::SPV_INTEL_variable_length_array},
+ {"SPV_INTEL_function_pointers",
+ SPIRV::Extension::Extension::SPV_INTEL_function_pointers},
+};
+
+bool SPIRVExtensionsParser::parse(cl::Option &O, llvm::StringRef ArgName,
+ llvm::StringRef ArgValue,
+ std::set<SPIRV::Extension::Extension> &Vals) {
+ llvm::SmallVector<llvm::StringRef, 10> Tokens;
+ ArgValue.split(Tokens, ",", -1, false);
+ std::sort(Tokens.begin(), Tokens.end());
+
+ std::set<SPIRV::Extension::Extension> EnabledExtensions;
+
+ for (const auto &Token : Tokens) {
+ if (Token == "all") {
+ for (const auto &[ExtensionName, ExtensionEnum] : SPIRVExtensionMap)
+ EnabledExtensions.insert(ExtensionEnum);
+
+ continue;
+ }
+
+ if (Token.empty() || (!Token.starts_with("+") && !Token.starts_with("-")))
+ return O.error("Invalid extension list format: " + Token.str());
+
+ llvm::StringRef ExtensionName = Token.substr(1);
+ auto NameValuePair = SPIRVExtensionMap.find(ExtensionName.str());
+
+ if (NameValuePair == SPIRVExtensionMap.end())
+ return O.error("Unknown SPIR-V extension: " + Token.str());
+
+ if (Token.starts_with("+")) {
+ EnabledExtensions.insert(NameValuePair->second);
+ } else if (EnabledExtensions.count(NameValuePair->second)) {
+ if (std::find(Tokens.begin(), Tokens.end(), "+" + ExtensionName.str()) !=
+ Tokens.end())
+ return O.error(
+ "Extension cannot be allowed and disallowed at the same time: " +
+ ExtensionName.str());
+
+ EnabledExtensions.erase(NameValuePair->second);
+ }
+ }
+
+ Vals = std::move(EnabledExtensions);
+ return false;
+}
diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.h b/llvm/lib/Target/SPIRV/SPIRVCommandLine.h
new file mode 100644
index 000000000000..741d829b2ab8
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.h
@@ -0,0 +1,38 @@
+//===--- SPIRVCommandLine.h ---- Command Line Options -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains classes and functions needed for processing, parsing, and
+// using CLI options for the SPIR-V backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPIRV_COMMANDLINE_H
+#define LLVM_LIB_TARGET_SPIRV_COMMANDLINE_H
+
+#include "MCTargetDesc/SPIRVBaseInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include <set>
+
+namespace llvm {
+
+/// Command line parser for toggling SPIR-V extensions.
+struct SPIRVExtensionsParser
+ : public cl::parser<std::set<SPIRV::Extension::Extension>> {
+public:
+ SPIRVExtensionsParser(cl::Option &O)
+ : cl::parser<std::set<SPIRV::Extension::Extension>>(O) {}
+
+ /// Parses SPIR-V extension name from CLI arguments.
+ ///
+ /// \return Returns true on error.
+ bool parse(cl::Option &O, StringRef ArgName, StringRef ArgValue,
+ std::set<SPIRV::Extension::Extension> &Vals);
+};
+
+} // namespace llvm
+#endif // LLVM_LIB_TARGET_SPIRV_COMMANDLINE_H
diff --git a/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp b/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp
index d82fb2df4539..7c32bb1968ef 100644
--- a/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.cpp
@@ -39,6 +39,7 @@ void SPIRVGeneralDuplicatesTracker::buildDepsGraph(
prebuildReg2Entry(GT, Reg2Entry);
prebuildReg2Entry(FT, Reg2Entry);
prebuildReg2Entry(AT, Reg2Entry);
+ prebuildReg2Entry(MT, Reg2Entry);
prebuildReg2Entry(ST, Reg2Entry);
for (auto &Op2E : Reg2Entry) {
diff --git a/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.h b/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.h
index 96cc621791e9..2ec3fb35ca04 100644
--- a/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.h
+++ b/llvm/lib/Target/SPIRV/SPIRVDuplicatesTracker.h
@@ -262,6 +262,7 @@ class SPIRVGeneralDuplicatesTracker {
SPIRVDuplicatesTracker<GlobalVariable> GT;
SPIRVDuplicatesTracker<Function> FT;
SPIRVDuplicatesTracker<Argument> AT;
+ SPIRVDuplicatesTracker<MachineInstr> MT;
SPIRVDuplicatesTracker<SPIRV::SpecialTypeDescriptor> ST;
// NOTE: using MOs instead of regs to get rid of MF dependency to be able
@@ -306,6 +307,10 @@ public:
AT.add(Arg, MF, R);
}
+ void add(const MachineInstr *MI, const MachineFunction *MF, Register R) {
+ MT.add(MI, MF, R);
+ }
+
void add(const SPIRV::SpecialTypeDescriptor &TD, const MachineFunction *MF,
Register R) {
ST.add(TD, MF, R);
@@ -337,6 +342,10 @@ public:
return AT.find(const_cast<Argument *>(Arg), MF);
}
+ Register find(const MachineInstr *MI, const MachineFunction *MF) {
+ return MT.find(const_cast<MachineInstr *>(MI), MF);
+ }
+
Register find(const SPIRV::SpecialTypeDescriptor &TD,
const MachineFunction *MF) {
return ST.find(TD, MF);
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 458af9229ed7..7c5a38fa48d0 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -14,6 +14,7 @@
#include "SPIRV.h"
#include "SPIRVBuiltins.h"
#include "SPIRVMetadata.h"
+#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IRBuilder.h"
@@ -53,14 +54,22 @@ class SPIRVEmitIntrinsics
: public FunctionPass,
public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
SPIRVTargetMachine *TM = nullptr;
+ SPIRVGlobalRegistry *GR = nullptr;
Function *F = nullptr;
bool TrackConstants = true;
DenseMap<Instruction *, Constant *> AggrConsts;
+ DenseMap<Instruction *, Type *> AggrConstTypes;
DenseSet<Instruction *> AggrStores;
- // deduce values type
- DenseMap<Value *, Type *> DeducedElTys;
+ // deduce element type of untyped pointers
Type *deduceElementType(Value *I);
+ Type *deduceElementTypeHelper(Value *I);
+ Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited);
+
+ // deduce nested types of composites
+ Type *deduceNestedTypeHelper(User *U);
+ Type *deduceNestedTypeHelper(User *U, Type *Ty,
+ std::unordered_set<Value *> &Visited);
void preprocessCompositeConstants(IRBuilder<> &B);
void preprocessUndefs(IRBuilder<> &B);
@@ -92,6 +101,9 @@ class SPIRVEmitIntrinsics
void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
void processParamTypes(Function *F, IRBuilder<> &B);
+ Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
+ Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
+ std::unordered_set<Function *> &FVisited);
public:
static char ID;
@@ -166,13 +178,20 @@ static inline void reportFatalOnTokenType(const Instruction *I) {
// Deduce and return a successfully deduced Type of the Instruction,
// or nullptr otherwise.
-static Type *deduceElementTypeHelper(Value *I,
- std::unordered_set<Value *> &Visited,
- DenseMap<Value *, Type *> &DeducedElTys) {
+Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I) {
+ std::unordered_set<Value *> Visited;
+ return deduceElementTypeHelper(I, Visited);
+}
+
+Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
+ Value *I, std::unordered_set<Value *> &Visited) {
+ // allow to pass nullptr as an argument
+ if (!I)
+ return nullptr;
+
// maybe already known
- auto It = DeducedElTys.find(I);
- if (It != DeducedElTys.end())
- return It->second;
+ if (Type *KnownTy = GR->findDeducedElementType(I))
+ return KnownTy;
// maybe a cycle
if (Visited.find(I) != Visited.end())
@@ -182,26 +201,105 @@ static Type *deduceElementTypeHelper(Value *I,
// fallback value in case when we fail to deduce a type
Type *Ty = nullptr;
// look for known basic patterns of type inference
- if (auto *Ref = dyn_cast<AllocaInst>(I))
+ if (auto *Ref = dyn_cast<AllocaInst>(I)) {
Ty = Ref->getAllocatedType();
- else if (auto *Ref = dyn_cast<GetElementPtrInst>(I))
+ } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
Ty = Ref->getResultElementType();
- else if (auto *Ref = dyn_cast<GlobalValue>(I))
+ } else if (auto *Ref = dyn_cast<GlobalValue>(I)) {
Ty = Ref->getValueType();
- else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I))
- Ty = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
- DeducedElTys);
+ if (Value *Op = Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr) {
+ if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
+ if (Type *NestedTy = deduceElementTypeHelper(Op, Visited))
+ Ty = TypedPointerType::get(NestedTy, PtrTy->getAddressSpace());
+ } else {
+ Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), Ty, Visited);
+ }
+ }
+ } else if (auto *Ref = dyn_cast<AddrSpaceCastInst>(I)) {
+ Ty = deduceElementTypeHelper(Ref->getPointerOperand(), Visited);
+ } else if (auto *Ref = dyn_cast<BitCastInst>(I)) {
+ if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
+ isPointerTy(Src) && isPointerTy(Dest))
+ Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited);
+ }
// remember the found relationship
- if (Ty)
- DeducedElTys[I] = Ty;
+ if (Ty) {
+ // record the deduced element type in the global registry
+ GR->addDeducedElementType(I, Ty);
+ }
return Ty;
}
-Type *SPIRVEmitIntrinsics::deduceElementType(Value *I) {
+// Re-create a type of the value if it has untyped pointer fields, also nested.
+// Return the original value type if no corrections of untyped pointer
+// information are found or needed.
+Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U) {
std::unordered_set<Value *> Visited;
- if (Type *Ty = deduceElementTypeHelper(I, Visited, DeducedElTys))
+ return deduceNestedTypeHelper(U, U->getType(), Visited);
+}
+
+Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
+ User *U, Type *OrigTy, std::unordered_set<Value *> &Visited) {
+ if (!U)
+ return OrigTy;
+
+ // maybe already known
+ if (Type *KnownTy = GR->findDeducedCompositeType(U))
+ return KnownTy;
+
+ // maybe a cycle
+ if (Visited.find(U) != Visited.end())
+ return OrigTy;
+ Visited.insert(U);
+
+ if (dyn_cast<StructType>(OrigTy)) {
+ SmallVector<Type *> Tys;
+ bool Change = false;
+ for (unsigned i = 0; i < U->getNumOperands(); ++i) {
+ Value *Op = U->getOperand(i);
+ Type *OpTy = Op->getType();
+ Type *Ty = OpTy;
+ if (Op) {
+ if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
+ if (Type *NestedTy = deduceElementTypeHelper(Op, Visited))
+ Ty = TypedPointerType::get(NestedTy, PtrTy->getAddressSpace());
+ } else {
+ Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited);
+ }
+ }
+ Tys.push_back(Ty);
+ Change |= Ty != OpTy;
+ }
+ if (Change) {
+ Type *NewTy = StructType::create(Tys);
+ GR->addDeducedCompositeType(U, NewTy);
+ return NewTy;
+ }
+ } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
+ if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
+ Type *OpTy = ArrTy->getElementType();
+ Type *Ty = OpTy;
+ if (auto *PtrTy = dyn_cast<PointerType>(OpTy)) {
+ if (Type *NestedTy = deduceElementTypeHelper(Op, Visited))
+ Ty = TypedPointerType::get(NestedTy, PtrTy->getAddressSpace());
+ } else {
+ Ty = deduceNestedTypeHelper(dyn_cast<User>(Op), OpTy, Visited);
+ }
+ if (Ty != OpTy) {
+ Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
+ GR->addDeducedCompositeType(U, NewTy);
+ return NewTy;
+ }
+ }
+ }
+
+ return OrigTy;
+}
+
+Type *SPIRVEmitIntrinsics::deduceElementType(Value *I) {
+ if (Type *Ty = deduceElementTypeHelper(I))
return Ty;
return IntegerType::getInt8Ty(I->getContext());
}
@@ -245,6 +343,7 @@ void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
Worklist.push(IntrUndef);
I->replaceUsesOfWith(Op, IntrUndef);
AggrConsts[IntrUndef] = AggrUndef;
+ AggrConstTypes[IntrUndef] = AggrUndef->getType();
}
}
}
@@ -270,6 +369,7 @@ void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
I->replaceUsesOfWith(Op, CCI);
KeepInst = true;
SEI.AggrConsts[CCI] = AggrC;
+ SEI.AggrConstTypes[CCI] = SEI.deduceNestedTypeHelper(AggrC);
};
if (auto *AggrC = dyn_cast<ConstantAggregate>(Op)) {
@@ -384,8 +484,7 @@ void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
Pointer = BC->getOperand(0);
// Do not emit spv_ptrcast if Pointer's element type is ExpectedElementType
- std::unordered_set<Value *> Visited;
- Type *PointerElemTy = deduceElementTypeHelper(Pointer, Visited, DeducedElTys);
+ Type *PointerElemTy = deduceElementTypeHelper(Pointer);
if (PointerElemTy == ExpectedElementType)
return;
@@ -444,8 +543,8 @@ void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
CallInst *CI = buildIntrWithMD(
Intrinsic::spv_assign_ptr_type, {Pointer->getType()},
ExpectedElementTypeConst, Pointer, {B.getInt32(AddressSpace)}, B);
- DeducedElTys[CI] = ExpectedElementType;
- DeducedElTys[Pointer] = ExpectedElementType;
+ GR->addDeducedElementType(CI, ExpectedElementType);
+ GR->addDeducedElementType(Pointer, ExpectedElementType);
return;
}
@@ -486,25 +585,29 @@ void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
Function *CalledF = CI->getCalledFunction();
SmallVector<Type *, 4> CalledArgTys;
bool HaveTypes = false;
- for (auto &CalledArg : CalledF->args()) {
- if (!isPointerTy(CalledArg.getType())) {
+ for (unsigned OpIdx = 0; OpIdx < CalledF->arg_size(); ++OpIdx) {
+ Argument *CalledArg = CalledF->getArg(OpIdx);
+ Type *ArgType = CalledArg->getType();
+ if (!isPointerTy(ArgType)) {
CalledArgTys.push_back(nullptr);
- continue;
- }
- auto It = DeducedElTys.find(&CalledArg);
- Type *ParamTy = It != DeducedElTys.end() ? It->second : nullptr;
- if (!ParamTy) {
- for (User *U : CalledArg.users()) {
- if (Instruction *Inst = dyn_cast<Instruction>(U)) {
- std::unordered_set<Value *> Visited;
- ParamTy = deduceElementTypeHelper(Inst, Visited, DeducedElTys);
- if (ParamTy)
- break;
+ } else if (isTypedPointerTy(ArgType)) {
+ CalledArgTys.push_back(cast<TypedPointerType>(ArgType)->getElementType());
+ HaveTypes = true;
+ } else {
+ Type *ElemTy = GR->findDeducedElementType(CalledArg);
+ if (!ElemTy && hasPointeeTypeAttr(CalledArg))
+ ElemTy = getPointeeTypeByAttr(CalledArg);
+ if (!ElemTy) {
+ for (User *U : CalledArg->users()) {
+ if (Instruction *Inst = dyn_cast<Instruction>(U)) {
+ if ((ElemTy = deduceElementTypeHelper(Inst)) != nullptr)
+ break;
+ }
}
}
+ HaveTypes |= ElemTy != nullptr;
+ CalledArgTys.push_back(ElemTy);
}
- HaveTypes |= ParamTy != nullptr;
- CalledArgTys.push_back(ParamTy);
}
std::string DemangledName =
@@ -694,6 +797,10 @@ void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
if (GV.getName() == "llvm.global.annotations")
return;
if (GV.hasInitializer() && !isa<UndefValue>(GV.getInitializer())) {
+ // Deduce element type and store results in Global Registry.
+ // Result is ignored, because TypedPointerType is not supported
+ // by llvm IR general logic.
+ deduceElementTypeHelper(&GV);
Constant *Init = GV.getInitializer();
Type *Ty = isAggrToReplace(Init) ? B.getInt32Ty() : Init->getType();
Constant *Const = isAggrToReplace(Init) ? B.getInt32(1) : Init;
@@ -720,7 +827,7 @@ void SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
unsigned AddressSpace = getPointerAddressSpace(I->getType());
CallInst *CI = buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {I->getType()},
EltTyConst, I, {B.getInt32(AddressSpace)}, B);
- DeducedElTys[CI] = ElemTy;
+ GR->addDeducedElementType(CI, ElemTy);
}
void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
@@ -733,9 +840,10 @@ void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
if (auto *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
II->getIntrinsicID() == Intrinsic::spv_undef) {
- auto t = AggrConsts.find(II);
- assert(t != AggrConsts.end());
- TypeToAssign = t->second->getType();
+ auto It = AggrConstTypes.find(II);
+ if (It == AggrConstTypes.end())
+ report_fatal_error("Unknown composite intrinsic type");
+ TypeToAssign = It->second;
}
}
Constant *Const = UndefValue::get(TypeToAssign);
@@ -795,70 +903,99 @@ void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
}
}
-void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
- DenseMap<unsigned, Argument *> Args;
- unsigned i = 0;
- for (Argument &Arg : F->args()) {
- if (isUntypedPointerTy(Arg.getType()) &&
- DeducedElTys.find(&Arg) == DeducedElTys.end() &&
- !HasPointeeTypeAttr(&Arg))
- Args[i] = &Arg;
- i++;
- }
- if (Args.size() == 0)
- return;
+Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
+ unsigned OpIdx) {
+ std::unordered_set<Function *> FVisited;
+ return deduceFunParamElementType(F, OpIdx, FVisited);
+}
+
+Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
+ Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
+ // maybe a cycle
+ if (FVisited.find(F) != FVisited.end())
+ return nullptr;
+ FVisited.insert(F);
- // Args contains opaque pointers without element type definition
- B.SetInsertPointPastAllocas(F);
std::unordered_set<Value *> Visited;
+ SmallVector<std::pair<Function *, unsigned>> Lookup;
+ // search in function's call sites
for (User *U : F->users()) {
CallInst *CI = dyn_cast<CallInst>(U);
- if (!CI)
+ if (!CI || OpIdx >= CI->arg_size())
continue;
- for (unsigned OpIdx = 0; OpIdx < CI->arg_size() && Args.size() > 0;
- OpIdx++) {
- auto It = Args.find(OpIdx);
- Argument *Arg = It == Args.end() ? nullptr : It->second;
- if (!Arg)
- continue;
- Value *OpArg = CI->getArgOperand(OpIdx);
- if (!isPointerTy(OpArg->getType()))
+ Value *OpArg = CI->getArgOperand(OpIdx);
+ if (!isPointerTy(OpArg->getType()))
+ continue;
+ // maybe we already know operand's element type
+ if (Type *KnownTy = GR->findDeducedElementType(OpArg))
+ return KnownTy;
+ // search in actual parameter's users
+ for (User *OpU : OpArg->users()) {
+ Instruction *Inst = dyn_cast<Instruction>(OpU);
+ if (!Inst || Inst == CI)
continue;
- // maybe we already know the operand's element type
- auto DeducedIt = DeducedElTys.find(OpArg);
- Type *ElemTy =
- DeducedIt == DeducedElTys.end() ? nullptr : DeducedIt->second;
- if (!ElemTy) {
- for (User *OpU : OpArg->users()) {
- if (Instruction *Inst = dyn_cast<Instruction>(OpU)) {
- Visited.clear();
- ElemTy = deduceElementTypeHelper(Inst, Visited, DeducedElTys);
- if (ElemTy)
- break;
- }
- }
+ Visited.clear();
+ if (Type *Ty = deduceElementTypeHelper(Inst, Visited))
+ return Ty;
+ }
+ // check if it's a formal parameter of the outer function
+ if (!CI->getParent() || !CI->getParent()->getParent())
+ continue;
+ Function *OuterF = CI->getParent()->getParent();
+ if (FVisited.find(OuterF) != FVisited.end())
+ continue;
+ for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
+ if (OuterF->getArg(i) == OpArg) {
+ Lookup.push_back(std::make_pair(OuterF, i));
+ break;
}
- if (ElemTy) {
- unsigned AddressSpace = getPointerAddressSpace(Arg->getType());
+ }
+ }
+
+ // search in function parameters
+ for (auto &Pair : Lookup) {
+ if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
+ return Ty;
+ }
+
+ return nullptr;
+}
+
+void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
+ B.SetInsertPointPastAllocas(F);
+ for (unsigned OpIdx = 0; OpIdx < F->arg_size(); ++OpIdx) {
+ Argument *Arg = F->getArg(OpIdx);
+ if (!isUntypedPointerTy(Arg->getType()))
+ continue;
+
+ Type *ElemTy = GR->findDeducedElementType(Arg);
+ if (!ElemTy) {
+ if (hasPointeeTypeAttr(Arg) &&
+ (ElemTy = getPointeeTypeByAttr(Arg)) != nullptr) {
+ GR->addDeducedElementType(Arg, ElemTy);
+ } else if ((ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
CallInst *AssignPtrTyCI = buildIntrWithMD(
Intrinsic::spv_assign_ptr_type, {Arg->getType()},
- Constant::getNullValue(ElemTy), Arg, {B.getInt32(AddressSpace)}, B);
- DeducedElTys[AssignPtrTyCI] = ElemTy;
- DeducedElTys[Arg] = ElemTy;
- Args.erase(It);
+ Constant::getNullValue(ElemTy), Arg,
+ {B.getInt32(getPointerAddressSpace(Arg->getType()))}, B);
+ GR->addDeducedElementType(AssignPtrTyCI, ElemTy);
+ GR->addDeducedElementType(Arg, ElemTy);
}
}
- if (Args.size() == 0)
- break;
}
}
bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
if (Func.isDeclaration())
return false;
+
+ const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
+ GR = ST.getSPIRVGlobalRegistry();
+
F = &Func;
IRBuilder<> B(Func.getContext());
AggrConsts.clear();
+ AggrConstTypes.clear();
AggrStores.clear();
// StoreInst's operand type can be changed during the next transformations,
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 42f8397a3023..9592f3e81b40 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -123,6 +123,7 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeVector(uint32_t NumElems,
SPIRVType *ElemType,
MachineIRBuilder &MIRBuilder) {
auto EleOpc = ElemType->getOpcode();
+ (void)EleOpc;
assert((EleOpc == SPIRV::OpTypeInt || EleOpc == SPIRV::OpTypeFloat ||
EleOpc == SPIRV::OpTypeBool) &&
"Invalid vector element type");
@@ -479,6 +480,7 @@ Register SPIRVGlobalRegistry::buildGlobalVariable(
GVar = M->getGlobalVariable(Name);
if (GVar == nullptr) {
const Type *Ty = getTypeForSPIRVType(BaseType); // TODO: check type.
+ // Module takes ownership of the global var.
GVar = new GlobalVariable(*M, const_cast<Type *>(Ty), false,
GlobalValue::ExternalLinkage, nullptr,
Twine(Name));
@@ -721,11 +723,14 @@ SPIRVType *SPIRVGlobalRegistry::createSPIRVType(
AddrSpace = PType->getAddressSpace();
else
report_fatal_error("Unable to convert LLVM type to SPIRVType", true);
- SPIRVType *SpvElementType;
- // At the moment, all opaque pointers correspond to i8 element type.
- // TODO: change the implementation once opaque pointers are supported
- // in the SPIR-V specification.
- SpvElementType = getOrCreateSPIRVIntegerType(8, MIRBuilder);
+
+ SPIRVType *SpvElementType = nullptr;
+ if (auto PType = dyn_cast<TypedPointerType>(Ty))
+ SpvElementType = getOrCreateSPIRVType(PType->getElementType(), MIRBuilder,
+ AccQual, EmitIR);
+ else
+ SpvElementType = getOrCreateSPIRVIntegerType(8, MIRBuilder);
+
// Get access to information about available extensions
const SPIRVSubtarget *ST =
static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
index da480b22a525..e0099e529447 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
@@ -41,9 +41,13 @@ class SPIRVGlobalRegistry {
// map a Function to its definition (as a machine instruction operand)
DenseMap<const Function *, const MachineOperand *> FunctionToInstr;
+ DenseMap<const MachineInstr *, const Function *> FunctionToInstrRev;
// map function pointer (as a machine instruction operand) to the used
// Function
DenseMap<const MachineOperand *, const Function *> InstrToFunction;
+ // Maps Functions to their calls (in the form of the machine instruction,
+ // OpFunctionCall) that happened before the definition is available
+ DenseMap<const Function *, SmallVector<MachineInstr *>> ForwardCalls;
// Look for an equivalent of the newType in the map. Return the equivalent
// if it's found, otherwise insert newType to the map and return the type.
@@ -59,6 +63,13 @@ class SPIRVGlobalRegistry {
// Holds the maximum ID we have in the module.
unsigned Bound;
+ // Maps values associated with untyped pointers into deduced element types of
+ // untyped pointers.
+ DenseMap<Value *, Type *> DeducedElTys;
+ // Maps composite values to deduced types where untyped pointers are replaced
+ // with typed ones
+ DenseMap<Value *, Type *> DeducedNestedTys;
+
// Add a new OpTypeXXX instruction without checking for duplicates.
SPIRVType *createSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder,
SPIRV::AccessQualifier::AccessQualifier AQ =
@@ -94,6 +105,14 @@ public:
DT.add(Arg, MF, R);
}
+ void add(const MachineInstr *MI, MachineFunction *MF, Register R) {
+ DT.add(MI, MF, R);
+ }
+
+ Register find(const MachineInstr *MI, MachineFunction *MF) {
+ return DT.find(MI, MF);
+ }
+
Register find(const Constant *C, MachineFunction *MF) {
return DT.find(C, MF);
}
@@ -114,6 +133,37 @@ public:
void setBound(unsigned V) { Bound = V; }
unsigned getBound() { return Bound; }
+ // Deduced element types of untyped pointers and composites:
+ // - Add a record to the map of deduced element types.
+ void addDeducedElementType(Value *Val, Type *Ty) { DeducedElTys[Val] = Ty; }
+ // - Find a record in the map of deduced element types.
+ Type *findDeducedElementType(const Value *Val) {
+ auto It = DeducedElTys.find(Val);
+ return It == DeducedElTys.end() ? nullptr : It->second;
+ }
+ // - Add a record to the map of deduced composite types.
+ void addDeducedCompositeType(Value *Val, Type *Ty) {
+ DeducedNestedTys[Val] = Ty;
+ }
+ // - Find a record in the map of deduced composite types.
+ Type *findDeducedCompositeType(const Value *Val) {
+ auto It = DeducedNestedTys.find(Val);
+ return It == DeducedNestedTys.end() ? nullptr : It->second;
+ }
+ // - Find a type of the given Global value
+ Type *getDeducedGlobalValueType(const GlobalValue *Global) {
+ // we may know element type if it was deduced earlier
+ Type *ElementTy = findDeducedElementType(Global);
+ if (!ElementTy) {
+ // or we may know element type if it's associated with a composite
+ // value
+ if (Value *GlobalElem =
+ Global->getNumOperands() > 0 ? Global->getOperand(0) : nullptr)
+ ElementTy = findDeducedCompositeType(GlobalElem);
+ }
+ return ElementTy ? ElementTy : Global->getValueType();
+ }
+
// Map a machine operand that represents a use of a function via function
// pointer to a machine operand that represents the function definition.
// Return either the register or invalid value, because we have no context for
@@ -125,18 +175,56 @@ public:
auto ResReg = FunctionToInstr.find(ResF->second);
return ResReg == FunctionToInstr.end() ? nullptr : ResReg->second;
}
+
+ // Map a Function to a machine instruction that represents the function
+ // definition.
+ const MachineInstr *getFunctionDefinition(const Function *F) {
+ if (!F)
+ return nullptr;
+ auto MOIt = FunctionToInstr.find(F);
+ return MOIt == FunctionToInstr.end() ? nullptr : MOIt->second->getParent();
+ }
+
+ // Map a machine instruction that represents the function definition back
+ // to the Function it defines.
+ const Function *getFunctionByDefinition(const MachineInstr *MI) {
+ if (!MI)
+ return nullptr;
+ auto FIt = FunctionToInstrRev.find(MI);
+ return FIt == FunctionToInstrRev.end() ? nullptr : FIt->second;
+ }
+
// map function pointer (as a machine instruction operand) to the used
// Function
void recordFunctionPointer(const MachineOperand *MO, const Function *F) {
InstrToFunction[MO] = F;
}
+
// map a Function to its definition (as a machine instruction)
void recordFunctionDefinition(const Function *F, const MachineOperand *MO) {
FunctionToInstr[F] = MO;
+ FunctionToInstrRev[MO->getParent()] = F;
}
+
// Return true if any OpConstantFunctionPointerINTEL were generated
bool hasConstFunPtr() { return !InstrToFunction.empty(); }
+ // Add a record about forward function call.
+ void addForwardCall(const Function *F, MachineInstr *MI) {
+ auto It = ForwardCalls.find(F);
+ if (It == ForwardCalls.end())
+ ForwardCalls[F] = {MI};
+ else
+ It->second.push_back(MI);
+ }
+
+ // Map a Function to the vector of machine instructions that represents
+ // forward function calls or to nullptr if not found.
+ SmallVector<MachineInstr *> *getForwardCalls(const Function *F) {
+ auto It = ForwardCalls.find(F);
+ return It == ForwardCalls.end() ? nullptr : &It->second;
+ }
+
// Get or create a SPIR-V type corresponding the given LLVM IR type,
// and map it to the given VReg by creating an ASSIGN_TYPE instruction.
SPIRVType *assignTypeToVReg(const Type *Type, Register VReg,
diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
index 55b4c47c197d..4f5c1dc4f90b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.cpp
@@ -86,8 +86,8 @@ bool SPIRVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
// when there is a type mismatch between results and operand types.
static void validatePtrTypes(const SPIRVSubtarget &STI,
MachineRegisterInfo *MRI, SPIRVGlobalRegistry &GR,
- MachineInstr &I, SPIRVType *ResType,
- unsigned OpIdx) {
+ MachineInstr &I, unsigned OpIdx,
+ SPIRVType *ResType, const Type *ResTy = nullptr) {
Register OpReg = I.getOperand(OpIdx).getReg();
SPIRVType *TypeInst = MRI->getVRegDef(OpReg);
SPIRVType *OpType = GR.getSPIRVTypeForVReg(
@@ -97,7 +97,13 @@ static void validatePtrTypes(const SPIRVSubtarget &STI,
if (!ResType || !OpType || OpType->getOpcode() != SPIRV::OpTypePointer)
return;
SPIRVType *ElemType = GR.getSPIRVTypeForVReg(OpType->getOperand(2).getReg());
- if (!ElemType || ElemType == ResType)
+ if (!ElemType)
+ return;
+ bool IsSameMF =
+ ElemType->getParent()->getParent() == ResType->getParent()->getParent();
+ bool IsEqualTypes = IsSameMF ? ElemType == ResType
+ : GR.getTypeForSPIRVType(ElemType) == ResTy;
+ if (IsEqualTypes)
return;
// There is a type mismatch between results and operand types
// and we insert a bitcast before the instruction to keep SPIR-V code valid
@@ -105,7 +111,11 @@ static void validatePtrTypes(const SPIRVSubtarget &STI,
static_cast<SPIRV::StorageClass::StorageClass>(
OpType->getOperand(1).getImm());
MachineIRBuilder MIB(I);
- SPIRVType *NewPtrType = GR.getOrCreateSPIRVPointerType(ResType, MIB, SC);
+ SPIRVType *NewBaseType =
+ IsSameMF ? ResType
+ : GR.getOrCreateSPIRVType(
+ ResTy, MIB, SPIRV::AccessQualifier::ReadWrite, false);
+ SPIRVType *NewPtrType = GR.getOrCreateSPIRVPointerType(NewBaseType, MIB, SC);
if (!GR.isBitcastCompatible(NewPtrType, OpType))
report_fatal_error(
"insert validation bitcast: incompatible result and operand types");
@@ -123,6 +133,74 @@ static void validatePtrTypes(const SPIRVSubtarget &STI,
I.getOperand(OpIdx).setReg(NewReg);
}
+// Insert a bitcast before the function call instruction to keep SPIR-V code
+// valid when there is a type mismatch between actual and expected types of an
+// argument:
+// %formal = OpFunctionParameter %formal_type
+// ...
+// %res = OpFunctionCall %ty %fun %actual ...
+// implies that %actual is of %formal_type; in the case of opaque pointers,
+// we may need to insert a bitcast to ensure this.
+void validateFunCallMachineDef(const SPIRVSubtarget &STI,
+ MachineRegisterInfo *DefMRI,
+ MachineRegisterInfo *CallMRI,
+ SPIRVGlobalRegistry &GR, MachineInstr &FunCall,
+ MachineInstr *FunDef) {
+ if (FunDef->getOpcode() != SPIRV::OpFunction)
+ return;
+ unsigned OpIdx = 3;
+ for (FunDef = FunDef->getNextNode();
+ FunDef && FunDef->getOpcode() == SPIRV::OpFunctionParameter &&
+ OpIdx < FunCall.getNumOperands();
+ FunDef = FunDef->getNextNode(), OpIdx++) {
+ SPIRVType *DefPtrType = DefMRI->getVRegDef(FunDef->getOperand(1).getReg());
+ SPIRVType *DefElemType =
+ DefPtrType && DefPtrType->getOpcode() == SPIRV::OpTypePointer
+ ? GR.getSPIRVTypeForVReg(DefPtrType->getOperand(2).getReg())
+ : nullptr;
+ if (DefElemType) {
+ const Type *DefElemTy = GR.getTypeForSPIRVType(DefElemType);
+ // Switch GR context to the call site instead of the (default) definition
+ // side
+ GR.setCurrentFunc(*FunCall.getParent()->getParent());
+ validatePtrTypes(STI, CallMRI, GR, FunCall, OpIdx, DefElemType,
+ DefElemTy);
+ GR.setCurrentFunc(*FunDef->getParent()->getParent());
+ }
+ }
+}
+
+// Ensure there is no mismatch between actual and expected arg types: calls
+// with a processed definition. Return Function pointer if it's a forward
+// call (ahead of definition), and nullptr otherwise.
+const Function *validateFunCall(const SPIRVSubtarget &STI,
+ MachineRegisterInfo *MRI,
+ SPIRVGlobalRegistry &GR,
+ MachineInstr &FunCall) {
+ const GlobalValue *GV = FunCall.getOperand(2).getGlobal();
+ const Function *F = dyn_cast<Function>(GV);
+ MachineInstr *FunDef =
+ const_cast<MachineInstr *>(GR.getFunctionDefinition(F));
+ if (!FunDef)
+ return F;
+ validateFunCallMachineDef(STI, MRI, MRI, GR, FunCall, FunDef);
+ return nullptr;
+}
+
+// Ensure there is no mismatch between actual and expected arg types: calls
+// ahead of a processed definition.
+void validateForwardCalls(const SPIRVSubtarget &STI,
+ MachineRegisterInfo *DefMRI, SPIRVGlobalRegistry &GR,
+ MachineInstr &FunDef) {
+ const Function *F = GR.getFunctionByDefinition(&FunDef);
+ if (SmallVector<MachineInstr *> *FwdCalls = GR.getForwardCalls(F))
+ for (MachineInstr *FunCall : *FwdCalls) {
+ MachineRegisterInfo *CallMRI =
+ &FunCall->getParent()->getParent()->getRegInfo();
+ validateFunCallMachineDef(STI, DefMRI, CallMRI, GR, *FunCall, &FunDef);
+ }
+}
+
// TODO: the logic of inserting additional bitcast's is to be moved
// to pre-IRTranslation passes eventually
void SPIRVTargetLowering::finalizeLowering(MachineFunction &MF) const {
@@ -137,14 +215,28 @@ void SPIRVTargetLowering::finalizeLowering(MachineFunction &MF) const {
switch (MI.getOpcode()) {
case SPIRV::OpLoad:
// OpLoad <ResType>, ptr %Op implies that %Op is a pointer to <ResType>
- validatePtrTypes(STI, MRI, GR, MI,
- GR.getSPIRVTypeForVReg(MI.getOperand(0).getReg()), 2);
+ validatePtrTypes(STI, MRI, GR, MI, 2,
+ GR.getSPIRVTypeForVReg(MI.getOperand(0).getReg()));
break;
case SPIRV::OpStore:
// OpStore ptr %Op, <Obj> implies that %Op points to the <Obj>'s type
- validatePtrTypes(STI, MRI, GR, MI,
- GR.getSPIRVTypeForVReg(MI.getOperand(1).getReg()), 0);
+ validatePtrTypes(STI, MRI, GR, MI, 0,
+ GR.getSPIRVTypeForVReg(MI.getOperand(1).getReg()));
break;
+
+ case SPIRV::OpFunctionCall:
+ // ensure there is no mismatch between actual and expected arg types:
+ // calls with a processed definition
+ if (MI.getNumOperands() > 3)
+ if (const Function *F = validateFunCall(STI, MRI, GR, MI))
+ GR.addForwardCall(F, &MI);
+ break;
+ case SPIRV::OpFunction:
+ // ensure there is no mismatch between actual and expected arg types:
+ // calls ahead of a processed definition
+ validateForwardCalls(STI, MRI, GR, MI);
+ break;
+
// ensure that LLVM IR bitwise instructions result in logical SPIR-V
// instructions when applied to bool type
case SPIRV::OpBitwiseOrS:
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 5bb8f6084f96..f4525e713c98 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -231,6 +231,9 @@ private:
Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
MachineInstr &I) const;
+
+ bool wrapIntoSpecConstantOp(MachineInstr &I,
+ SmallVector<Register> &CompositeArgs) const;
};
} // end anonymous namespace
@@ -499,6 +502,7 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
Register GV = I.getOperand(1).getReg();
MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
+ (void)II;
assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
(*II).getOpcode() == TargetOpcode::COPY ||
(*II).getOpcode() == SPIRV::OpVariable) &&
@@ -771,10 +775,13 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
// TODO: check if we have such GV, add init, use buildGlobalVariable.
- Type *LLVMArrTy = ArrayType::get(
- IntegerType::get(GR.CurMF->getFunction().getContext(), 8), Num);
- GlobalVariable *GV =
- new GlobalVariable(LLVMArrTy, true, GlobalValue::InternalLinkage);
+ Function &CurFunction = GR.CurMF->getFunction();
+ Type *LLVMArrTy =
+ ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
+ // Module takes ownership of the global var.
+ GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
+ true, GlobalValue::InternalLinkage,
+ Constant::getNullValue(LLVMArrTy));
Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
GR.add(GV, GR.CurMF, VarReg);
@@ -1245,6 +1252,24 @@ static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
return N;
}
+// Return true if the type represents a constant register
+static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef) {
+ if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
+ OpDef->getOperand(1).isReg()) {
+ if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
+ OpDef = RefDef;
+ }
+ return OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
+ OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
+}
+
+// Return true if the virtual register represents a constant
+static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
+ if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
+ return isConstReg(MRI, OpDef);
+ return false;
+}
+
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
@@ -1262,16 +1287,7 @@ bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
// check if we may construct a constant vector
Register OpReg = I.getOperand(OpIdx).getReg();
- bool IsConst = false;
- if (SPIRVType *OpDef = MRI->getVRegDef(OpReg)) {
- if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
- OpDef->getOperand(1).isReg()) {
- if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
- OpDef = RefDef;
- }
- IsConst = OpDef->getOpcode() == TargetOpcode::G_CONSTANT ||
- OpDef->getOpcode() == TargetOpcode::G_FCONSTANT;
- }
+ bool IsConst = isConstReg(MRI, OpReg);
if (!IsConst && N < 2)
report_fatal_error(
@@ -1624,6 +1640,48 @@ bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
return Res.constrainAllUses(TII, TRI, RBI);
}
+// Maybe wrap a value into OpSpecConstantOp
+bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
+ MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
+ bool Result = true;
+ unsigned Lim = I.getNumExplicitOperands();
+ for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
+ Register OpReg = I.getOperand(i).getReg();
+ SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
+ SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
+ if (!OpDefine || !OpType || isConstReg(MRI, OpDefine) ||
+ OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST) {
+ // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
+ // by selectAddrSpaceCast()
+ CompositeArgs.push_back(OpReg);
+ continue;
+ }
+ MachineFunction *MF = I.getMF();
+ Register WrapReg = GR.find(OpDefine, MF);
+ if (WrapReg.isValid()) {
+ CompositeArgs.push_back(WrapReg);
+ continue;
+ }
+ // Create a new register for the wrapper
+ WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
+ GR.add(OpDefine, MF, WrapReg);
+ CompositeArgs.push_back(WrapReg);
+ // Decorate the wrapper register and generate a new instruction
+ MRI->setType(WrapReg, LLT::pointer(0, 32));
+ GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
+ MachineBasicBlock &BB = *I.getParent();
+ Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
+ .addDef(WrapReg)
+ .addUse(GR.getSPIRVTypeID(OpType))
+ .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
+ .addUse(OpReg)
+ .constrainAllUses(TII, TRI, RBI);
+ if (!Result)
+ break;
+ }
+ return Result;
+}
+
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
@@ -1662,17 +1720,21 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
case Intrinsic::spv_const_composite: {
// If no values are attached, the composite is null constant.
bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
- unsigned Opcode =
- IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
+ // Select a proper instruction.
+ unsigned Opcode = SPIRV::OpConstantNull;
+ SmallVector<Register> CompositeArgs;
+ if (!IsNull) {
+ Opcode = SPIRV::OpConstantComposite;
+ if (!wrapIntoSpecConstantOp(I, CompositeArgs))
+ return false;
+ }
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType));
// skip type MD node we already used when generated assign.type for this
if (!IsNull) {
- for (unsigned i = I.getNumExplicitDefs() + 1;
- i < I.getNumExplicitOperands(); ++i) {
- MIB.addUse(I.getOperand(i).getReg());
- }
+ for (Register OpReg : CompositeArgs)
+ MIB.addUse(OpReg);
}
return MIB.constrainAllUses(TII, TRI, RBI);
}
@@ -1835,7 +1897,7 @@ bool SPIRVInstructionSelector::selectGlobalValue(
// FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
MachineIRBuilder MIRBuilder(I);
const GlobalValue *GV = I.getOperand(1).getGlobal();
- Type *GVType = GV->getValueType();
+ Type *GVType = GR.getDeducedGlobalValueType(GV);
SPIRVType *PointerBaseType;
if (GVType->isArrayTy()) {
SPIRVType *ArrayElementType =
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 00d0cbd76373..40c3e5f9c6bd 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -658,7 +658,7 @@ void RequirementHandler::initAvailableCapabilitiesForVulkan(
// Provided by all supported Vulkan versions.
addAvailableCaps({Capability::Int16, Capability::Int64, Capability::Float16,
- Capability::Float64});
+ Capability::Float64, Capability::GroupNonUniform});
}
} // namespace SPIRV
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index d547f91ba4a5..b133f0ae85de 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -186,7 +186,10 @@ static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
}
case TargetOpcode::G_GLOBAL_VALUE: {
MIB.setInsertPt(*MI->getParent(), MI);
- Type *Ty = MI->getOperand(1).getGlobal()->getType();
+ const GlobalValue *Global = MI->getOperand(1).getGlobal();
+ Type *ElementTy = GR->getDeducedGlobalValueType(Global);
+ auto *Ty = TypedPointerType::get(ElementTy,
+ Global->getType()->getAddressSpace());
SpirvTy = GR->getOrCreateSPIRVType(Ty, MIB);
break;
}
@@ -543,6 +546,7 @@ static void processSwitches(MachineFunction &MF, SPIRVGlobalRegistry *GR,
Register Dst = ICMP->getOperand(0).getReg();
MachineOperand &PredOp = ICMP->getOperand(1);
const auto CC = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
+ (void)CC;
assert((CC == CmpInst::ICMP_EQ || CC == CmpInst::ICMP_ULE) &&
MRI.hasOneUse(Dst) && MRI.hasOneDef(CompareReg));
uint64_t Value = getIConstVal(ICMP->getOperand(3).getReg(), &MRI);
diff --git a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
index 38caa7c8ea0a..f3864b56e1e9 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
@@ -12,6 +12,7 @@
#include "SPIRVSubtarget.h"
#include "SPIRV.h"
+#include "SPIRVCommandLine.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVLegalizerInfo.h"
#include "SPIRVRegisterBankInfo.h"
@@ -27,80 +28,15 @@ using namespace llvm;
#define GET_SUBTARGETINFO_CTOR
#include "SPIRVGenSubtargetInfo.inc"
-cl::list<SPIRV::Extension::Extension> Extensions(
- "spirv-extensions", cl::desc("SPIR-V extensions"), cl::ZeroOrMore,
- cl::Hidden,
- cl::values(
- clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float_add,
- "SPV_EXT_shader_atomic_float_add",
- "Adds atomic add instruction on floating-point numbers."),
- clEnumValN(
- SPIRV::Extension::SPV_EXT_shader_atomic_float16_add,
- "SPV_EXT_shader_atomic_float16_add",
- "Extends the SPV_EXT_shader_atomic_float_add extension to support "
- "atomically adding to 16-bit floating-point numbers in memory."),
- clEnumValN(
- SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max,
- "SPV_EXT_shader_atomic_float_min_max",
- "Adds atomic min and max instruction on floating-point numbers."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_arbitrary_precision_integers,
- "SPV_INTEL_arbitrary_precision_integers",
- "Allows generating arbitrary width integer types."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_optnone, "SPV_INTEL_optnone",
- "Adds OptNoneINTEL value for Function Control mask that "
- "indicates a request to not optimize the function."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_usm_storage_classes,
- "SPV_INTEL_usm_storage_classes",
- "Introduces two new storage classes that are sub classes of "
- "the CrossWorkgroup storage class "
- "that provides additional information that can enable "
- "optimization."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_subgroups, "SPV_INTEL_subgroups",
- "Allows work items in a subgroup to share data without the "
- "use of local memory and work group barriers, and to "
- "utilize specialized hardware to load and store blocks of "
- "data from images or buffers."),
- clEnumValN(SPIRV::Extension::SPV_KHR_uniform_group_instructions,
- "SPV_KHR_uniform_group_instructions",
- "Allows support for additional group operations within "
- "uniform control flow."),
- clEnumValN(SPIRV::Extension::SPV_KHR_no_integer_wrap_decoration,
- "SPV_KHR_no_integer_wrap_decoration",
- "Adds decorations to indicate that a given instruction does "
- "not cause integer wrapping."),
- clEnumValN(
- SPIRV::Extension::SPV_KHR_float_controls, "SPV_KHR_float_controls",
- "Provides new execution modes to control floating-point "
- "computations by overriding an implementation’s default behavior "
- "for rounding modes, denormals, signed zero, and infinities."),
- clEnumValN(SPIRV::Extension::SPV_KHR_expect_assume,
- "SPV_KHR_expect_assume",
- "Provides additional information to a compiler, similar to "
- "the llvm.assume and llvm.expect intrinsics."),
- clEnumValN(SPIRV::Extension::SPV_KHR_bit_instructions,
- "SPV_KHR_bit_instructions",
- "This enables bit instructions to be used by SPIR-V modules "
- "without requiring the Shader capability."),
- clEnumValN(
- SPIRV::Extension::SPV_KHR_linkonce_odr, "SPV_KHR_linkonce_odr",
- "Allows to use the LinkOnceODR linkage type that is to let "
- "a function or global variable to be merged with other functions "
- "or global variables of the same name when linkage occurs."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_bfloat16_conversion,
- "SPV_INTEL_bfloat16_conversion",
- "Adds instructions to convert between single-precision "
- "32-bit floating-point values and 16-bit bfloat16 values."),
- clEnumValN(SPIRV::Extension::SPV_KHR_subgroup_rotate,
- "SPV_KHR_subgroup_rotate",
- "Adds a new instruction that enables rotating values across "
- "invocations within a subgroup."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_variable_length_array,
- "SPV_INTEL_variable_length_array",
- "Allows to allocate local arrays whose number of elements "
- "is unknown at compile time."),
- clEnumValN(SPIRV::Extension::SPV_INTEL_function_pointers,
- "SPV_INTEL_function_pointers",
- "Allows translation of function pointers.")));
+static cl::opt<bool>
+ SPVTranslatorCompat("translator-compatibility-mode",
+ cl::desc("SPIR-V Translator compatibility mode"),
+ cl::Optional, cl::init(false));
+
+static cl::opt<std::set<SPIRV::Extension::Extension>, false,
+ SPIRVExtensionsParser>
+ Extensions("spirv-ext",
+ cl::desc("Specify list of enabled SPIR-V extensions"));
// Compare version numbers, but allow 0 to mean unspecified.
static bool isAtLeastVer(uint32_t Target, uint32_t VerToCompareTo) {
@@ -157,8 +93,9 @@ bool SPIRVSubtarget::isAtLeastOpenCLVer(uint32_t VerToCompareTo) const {
}
// If the SPIR-V version is >= 1.4 we can call OpPtrEqual and OpPtrNotEqual.
+// In SPIR-V Translator compatibility mode this feature is not available.
bool SPIRVSubtarget::canDirectlyComparePointers() const {
- return isAtLeastVer(SPIRVVersion, 14);
+ return !SPVTranslatorCompat && isAtLeastVer(SPIRVVersion, 14);
}
void SPIRVSubtarget::initAvailableExtensions() {
@@ -166,8 +103,7 @@ void SPIRVSubtarget::initAvailableExtensions() {
if (!isOpenCLEnv())
return;
- for (auto Extension : Extensions)
- AvailableExtensions.insert(Extension);
+ AvailableExtensions.insert(Extensions.begin(), Extensions.end());
}
// TODO: use command line args for this rather than just defaults.
diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
index 8dbbd9049844..ff102e318469 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
+++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
@@ -1611,3 +1611,4 @@ multiclass OpcodeOperand<bits<32> value> {
// TODO: implement other mnemonics.
defm InBoundsPtrAccessChain : OpcodeOperand<70>;
defm PtrCastToGeneric : OpcodeOperand<121>;
+defm Bitcast : OpcodeOperand<124>;
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index fc7502479fdc..c87c1293c622 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -306,10 +306,12 @@ static bool isNonMangledOCLBuiltin(StringRef Name) {
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
+ bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
bool IsMangled = Name.starts_with("_Z");
- if (!IsNonMangledOCL && !IsNonMangledSPIRV && !IsMangled)
- return std::string();
+ // Otherwise use simple demangling to return the function name.
+ if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
+ return Name.str();
// Try to use the itanium demangler.
if (char *DemangledName = itaniumDemangle(Name.data())) {
@@ -317,9 +319,6 @@ std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
free(DemangledName);
return Result;
}
- // Otherwise use simple demangling to return the function name.
- if (IsNonMangledOCL || IsNonMangledSPIRV)
- return Name.str();
// Autocheck C++, maybe need to do explicit check of the source language.
// OpenCL C++ built-ins are declared in cl namespace.
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index eb87349f0941..c2c3475e1a93 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -127,8 +127,26 @@ inline unsigned getPointerAddressSpace(const Type *T) {
}
// Return true if the Argument is decorated with a pointee type
-inline bool HasPointeeTypeAttr(Argument *Arg) {
- return Arg->hasByValAttr() || Arg->hasByRefAttr();
+inline bool hasPointeeTypeAttr(Argument *Arg) {
+ return Arg->hasByValAttr() || Arg->hasByRefAttr() || Arg->hasStructRetAttr();
+}
+
+// Return the pointee type of the argument or nullptr otherwise
+inline Type *getPointeeTypeByAttr(Argument *Arg) {
+ if (Arg->hasByValAttr())
+ return Arg->getParamByValType();
+ if (Arg->hasStructRetAttr())
+ return Arg->getParamStructRetType();
+ if (Arg->hasByRefAttr())
+ return Arg->getParamByRefType();
+ return nullptr;
+}
+
+inline Type *reconstructFunctionType(Function *F) {
+ SmallVector<Type *> ArgTys;
+ for (unsigned i = 0; i < F->arg_size(); ++i)
+ ArgTys.push_back(F->getArg(i)->getType());
+ return FunctionType::get(F->getReturnType(), ArgTys, F->isVarArg());
}
} // namespace llvm
diff --git a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index be4ec1e9dce2..67e2b9d7c997 100644
--- a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -307,7 +307,7 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert((Kind == k_Register) && "Invalid access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index a58e8e0dfedf..f2c04215d12d 100644
--- a/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -227,7 +227,7 @@ public:
bool isReg(RegisterKind RegKind) const {
return Kind == KindReg && Reg.Kind == RegKind;
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == KindReg && "Not a register");
return Reg.Num;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index 80c994a32ea9..4897b37d8eb1 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -66,22 +66,6 @@ SystemZFrameLowering::create(const SystemZSubtarget &STI) {
return std::make_unique<SystemZELFFrameLowering>();
}
-MachineBasicBlock::iterator SystemZFrameLowering::eliminateCallFramePseudoInstr(
- MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const {
- switch (MI->getOpcode()) {
- case SystemZ::ADJCALLSTACKDOWN:
- case SystemZ::ADJCALLSTACKUP:
- assert(hasReservedCallFrame(MF) &&
- "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
- return MBB.erase(MI);
- break;
-
- default:
- llvm_unreachable("Unexpected call frame instruction");
- }
-}
-
namespace {
struct SZFrameSortingObj {
bool IsValid = false; // True if we care about this Object.
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
index 95f30e3c0d99..03ce8882c4de 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -41,9 +41,6 @@ public:
}
bool hasReservedCallFrame(const MachineFunction &MF) const override;
- MachineBasicBlock::iterator
- eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const override;
};
class SystemZELFFrameLowering : public SystemZFrameLowering {
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index bc60d14ee700..efffd669d268 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -293,6 +293,15 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
+ // Mark sign/zero extending atomic loads as legal, which will make
+ // DAGCombiner fold extensions into atomic loads if possible.
+ setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64,
+ {MVT::i8, MVT::i16, MVT::i32}, Legal);
+ setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i32,
+ {MVT::i8, MVT::i16}, Legal);
+ setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i16,
+ MVT::i8, Legal);
+
// We can use the CC result of compare-and-swap to implement
// the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
@@ -372,6 +381,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
// Handle prefetches with PFD or PFDRL.
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
+ // Handle readcyclecounter with STCKF.
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
+
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
// Assume by default that all vector operations need to be expanded.
for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
@@ -6077,6 +6089,27 @@ SDValue SystemZTargetLowering::lowerIS_FPCLASS(SDValue Op,
return getCCResult(DAG, Intr);
}
+SDValue SystemZTargetLowering::lowerREADCYCLECOUNTER(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Chain = Op.getOperand(0);
+
+ // STCKF only supports a memory operand, so we have to use a temporary.
+ SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
+ int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
+ MachinePointerInfo MPI =
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
+
+ // Use STCFK to store the TOD clock into the temporary.
+ SDValue StoreOps[] = {Chain, StackPtr};
+ Chain = DAG.getMemIntrinsicNode(
+ SystemZISD::STCKF, DL, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
+ MPI, MaybeAlign(), MachineMemOperand::MOStore);
+
+ // And read it back from there.
+ return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);
+}
+
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
@@ -6199,6 +6232,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
return lowerIS_FPCLASS(Op, DAG);
case ISD::GET_ROUNDING:
return lowerGET_ROUNDING(Op, DAG);
+ case ISD::READCYCLECOUNTER:
+ return lowerREADCYCLECOUNTER(Op, DAG);
default:
llvm_unreachable("Unexpected node to lower");
}
@@ -6425,6 +6460,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(STRV);
OPCODE(VLER);
OPCODE(VSTER);
+ OPCODE(STCKF);
OPCODE(PREFETCH);
OPCODE(ADA_ENTRY);
}
@@ -6587,27 +6623,6 @@ SDValue SystemZTargetLowering::combineTruncateExtract(
return SDValue();
}
-// Replace ALoad with a new ATOMIC_LOAD with a result that is extended to VT
-// per ETy.
-static SDValue extendAtomicLoad(AtomicSDNode *ALoad, EVT VT, SelectionDAG &DAG,
- ISD::LoadExtType ETy) {
- if (VT.getSizeInBits() > 64)
- return SDValue();
- EVT OrigVT = ALoad->getValueType(0);
- assert(OrigVT.getSizeInBits() < VT.getSizeInBits() && "VT should be wider.");
- EVT MemoryVT = ALoad->getMemoryVT();
- auto *NewALoad = dyn_cast<AtomicSDNode>(DAG.getAtomic(
- ISD::ATOMIC_LOAD, SDLoc(ALoad), MemoryVT, VT, ALoad->getChain(),
- ALoad->getBasePtr(), ALoad->getMemOperand()));
- NewALoad->setExtensionType(ETy);
- DAG.ReplaceAllUsesOfValueWith(
- SDValue(ALoad, 0),
- DAG.getNode(ISD::TRUNCATE, SDLoc(ALoad), OrigVT, SDValue(NewALoad, 0)));
- // Update the chain uses.
- DAG.ReplaceAllUsesOfValueWith(SDValue(ALoad, 1), SDValue(NewALoad, 1));
- return SDValue(NewALoad, 0);
-}
-
SDValue SystemZTargetLowering::combineZERO_EXTEND(
SDNode *N, DAGCombinerInfo &DCI) const {
// Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
@@ -6654,12 +6669,6 @@ SDValue SystemZTargetLowering::combineZERO_EXTEND(
}
}
- // Fold into ATOMIC_LOAD unless it is already sign extending.
- if (auto *ALoad = dyn_cast<AtomicSDNode>(N0))
- if (ALoad->getOpcode() == ISD::ATOMIC_LOAD &&
- ALoad->getExtensionType() != ISD::SEXTLOAD)
- return extendAtomicLoad(ALoad, VT, DAG, ISD::ZEXTLOAD);
-
return SDValue();
}
@@ -6712,12 +6721,6 @@ SDValue SystemZTargetLowering::combineSIGN_EXTEND(
}
}
- // Fold into ATOMIC_LOAD unless it is already zero extending.
- if (auto *ALoad = dyn_cast<AtomicSDNode>(N0))
- if (ALoad->getOpcode() == ISD::ATOMIC_LOAD &&
- ALoad->getExtensionType() != ISD::ZEXTLOAD)
- return extendAtomicLoad(ALoad, VT, DAG, ISD::SEXTLOAD);
-
return SDValue();
}
@@ -6985,6 +6988,17 @@ SDValue SystemZTargetLowering::combineSTORE(
}
}
+ // Combine STORE (READCYCLECOUNTER) into STCKF.
+ if (!SN->isTruncatingStore() &&
+ Op1.getOpcode() == ISD::READCYCLECOUNTER &&
+ Op1.hasOneUse() &&
+ N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) {
+ SDValue Ops[] = { Op1.getOperand(0), N->getOperand(2) };
+ return DAG.getMemIntrinsicNode(SystemZISD::STCKF, SDLoc(N),
+ DAG.getVTList(MVT::Other),
+ Ops, MemVT, SN->getMemOperand());
+ }
+
// Transform a store of an i128 moved from GPRs into two separate stores.
if (MemVT == MVT::i128 && SN->isSimple() && ISD::isNormalStore(SN)) {
SDValue LoPart, HiPart;
@@ -8159,6 +8173,27 @@ static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
}
+MachineBasicBlock *
+SystemZTargetLowering::emitAdjCallStack(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ MachineFunction &MF = *BB->getParent();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
+ assert(TFL->hasReservedCallFrame(MF) &&
+ "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
+ (void)TFL;
+ // Get the MaxCallFrameSize value and erase MI since it serves no further
+ // purpose as the call frame is statically reserved in the prolog. Set
+ // AdjustsStack as MI is *not* mapped as a frame instruction.
+ uint32_t NumBytes = MI.getOperand(0).getImm();
+ if (NumBytes > MFI.getMaxCallFrameSize())
+ MFI.setMaxCallFrameSize(NumBytes);
+ MFI.setAdjustsStack(true);
+
+ MI.eraseFromParent();
+ return BB;
+}
+
// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
@@ -9362,6 +9397,10 @@ getBackchainAddress(SDValue SP, SelectionDAG &DAG) const {
MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *MBB) const {
switch (MI.getOpcode()) {
+ case SystemZ::ADJCALLSTACKDOWN:
+ case SystemZ::ADJCALLSTACKUP:
+ return emitAdjCallStack(MI, MBB);
+
case SystemZ::Select32:
case SystemZ::Select64:
case SystemZ::Select128:
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 406a13b9281c..7140287a886c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -378,6 +378,9 @@ enum NodeType : unsigned {
// Element swapping load/store. Same operands as regular load/store.
VLER, VSTER,
+ // Use STORE CLOCK FAST to store current TOD clock value.
+ STCKF,
+
// Prefetch from the second operand using the 4-bit control code in
// the first operand. The code is 1 for a load prefetch and 2 for
// a store prefetch.
@@ -717,6 +720,7 @@ private:
SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
SDValue lowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
bool canTreatAsByteVector(EVT VT) const;
SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
@@ -756,6 +760,8 @@ private:
MachineBasicBlock *Target) const;
// Implement EmitInstrWithCustomInserter for individual operation types.
+ MachineBasicBlock *emitAdjCallStack(MachineInstr &MI,
+ MachineBasicBlock *BB) const;
MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
unsigned StoreOpcode, unsigned STOCOpcode,
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 2a6dce863c28..950548abcfa9 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -59,7 +59,7 @@ static uint64_t allOnes(unsigned int Count) {
void SystemZInstrInfo::anchor() {}
SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
- : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
+ : SystemZGenInstrInfo(-1, -1),
RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
STI(sti) {}
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 96ea65b6c3d8..7f3a143aad97 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -13,9 +13,9 @@ def IsTargetELF : Predicate<"Subtarget->isTargetELF()">;
// Stack allocation
//===----------------------------------------------------------------------===//
-// The callseq_start node requires the hasSideEffects flag, even though these
-// instructions are noops on SystemZ.
-let hasNoSchedulingInfo = 1, hasSideEffects = 1 in {
+// These pseudos carry values needed to compute the MaxcallFrameSize of the
+// function. The callseq_start node requires the hasSideEffects flag.
+let usesCustomInserter = 1, hasNoSchedulingInfo = 1, hasSideEffects = 1 in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
[(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrSystem.td b/llvm/lib/Target/SystemZ/SystemZInstrSystem.td
index e26417ddb72d..497844bec85d 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrSystem.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrSystem.td
@@ -339,7 +339,7 @@ let hasSideEffects = 1 in
// Store clock (fast / extended).
let hasSideEffects = 1, Defs = [CC] in {
def STCK : StoreInherentS<"stck", 0xB205, null_frag, 8>;
- def STCKF : StoreInherentS<"stckf", 0xB27C, null_frag, 8>;
+ def STCKF : StoreInherentS<"stckf", 0xB27C, z_stckf, 8>;
def STCKE : StoreInherentS<"stcke", 0xB278, null_frag, 16>;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZOperators.td b/llvm/lib/Target/SystemZ/SystemZOperators.td
index 6c4e33a6aa7f..1611436b01b7 100644
--- a/llvm/lib/Target/SystemZ/SystemZOperators.td
+++ b/llvm/lib/Target/SystemZ/SystemZOperators.td
@@ -132,6 +132,8 @@ def SDT_ZIPM : SDTypeProfile<1, 1,
def SDT_ZPrefetch : SDTypeProfile<0, 2,
[SDTCisVT<0, i32>,
SDTCisPtrTy<1>]>;
+def SDT_ZStoreInherent : SDTypeProfile<0, 1,
+ [SDTCisPtrTy<0>]>;
def SDT_ZTBegin : SDTypeProfile<1, 2,
[SDTCisVT<0, i32>,
SDTCisPtrTy<1>,
@@ -307,6 +309,8 @@ def z_loadeswap : SDNode<"SystemZISD::VLER", SDTLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def z_storeeswap : SDNode<"SystemZISD::VSTER", SDTStore,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def z_stckf : SDNode<"SystemZISD::STCKF", SDT_ZStoreInherent,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def z_tdc : SDNode<"SystemZISD::TDC", SDT_ZTest>;
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index e4adb7be5649..5bdbaf47064d 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -265,7 +265,7 @@ SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
return TTI::TCC_Free;
break;
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
return TTI::TCC_Free;
break;
diff --git a/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp b/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
index f9e30a3a9378..691fe8fe3aa4 100644
--- a/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
+++ b/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
@@ -344,7 +344,7 @@ public:
return StringRef(Tok.Data, Tok.Length);
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert((Kind == k_Register) && "Invalid access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
index 3cc4d50271eb..020c0d6229d2 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -100,7 +100,7 @@ struct WebAssemblyOperand : public MCParsedAsmOperand {
bool isReg() const override { return false; }
bool isBrList() const { return Kind == BrList; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
llvm_unreachable("Assembly inspects a register operand");
return 0;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index 0427fe473f8e..027ee1086bf4 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -129,27 +129,23 @@
///
/// If there are calls to setjmp()
///
-/// 2) In the function entry that calls setjmp, initialize setjmpTable and
-/// sejmpTableSize as follows:
-/// setjmpTableSize = 4;
-/// setjmpTable = (int *) malloc(40);
-/// setjmpTable[0] = 0;
-/// setjmpTable and setjmpTableSize are used to call saveSetjmp() function in
-/// Emscripten compiler-rt.
+/// 2) In the function entry that calls setjmp, initialize
+/// functionInvocationId as follows:
+///
+/// functionInvocationId = alloca(4)
+///
+/// Note: the alloca size is not important as this pointer is
+/// merely used for pointer comparisions.
///
/// 3) Lower
/// setjmp(env)
/// into
-/// setjmpTable = saveSetjmp(env, label, setjmpTable, setjmpTableSize);
-/// setjmpTableSize = getTempRet0();
-/// For each dynamic setjmp call, setjmpTable stores its ID (a number which
-/// is incrementally assigned from 0) and its label (a unique number that
-/// represents each callsite of setjmp). When we need more entries in
-/// setjmpTable, it is reallocated in saveSetjmp() in Emscripten's
-/// compiler-rt and it will return the new table address, and assign the new
-/// table size in setTempRet0(). saveSetjmp also stores the setjmp's ID into
-/// the buffer 'env'. A BB with setjmp is split into two after setjmp call in
-/// order to make the post-setjmp BB the possible destination of longjmp BB.
+/// __wasm_setjmp(env, label, functionInvocationId)
+///
+/// __wasm_setjmp records the necessary info (the label and
+/// functionInvocationId) to the "env".
+/// A BB with setjmp is split into two after setjmp call in order to
+/// make the post-setjmp BB the possible destination of longjmp BB.
///
/// 4) Lower every call that might longjmp into
/// __THREW__ = 0;
@@ -158,8 +154,7 @@
/// __THREW__ = 0;
/// %__threwValue.val = __threwValue;
/// if (%__THREW__.val != 0 & %__threwValue.val != 0) {
-/// %label = testSetjmp(mem[%__THREW__.val], setjmpTable,
-/// setjmpTableSize);
+/// %label = __wasm_setjmp_test(%__THREW__.val, functionInvocationId);
/// if (%label == 0)
/// emscripten_longjmp(%__THREW__.val, %__threwValue.val);
/// setTempRet0(%__threwValue.val);
@@ -173,16 +168,16 @@
/// ...
/// default: goto splitted next BB
/// }
-/// testSetjmp examines setjmpTable to see if there is a matching setjmp
-/// call. After calling an invoke wrapper, if a longjmp occurred, __THREW__
-/// will be the address of matching jmp_buf buffer and __threwValue be the
-/// second argument to longjmp. mem[%__THREW__.val] is a setjmp ID that is
-/// stored in saveSetjmp. testSetjmp returns a setjmp label, a unique ID to
-/// each setjmp callsite. Label 0 means this longjmp buffer does not
-/// correspond to one of the setjmp callsites in this function, so in this
-/// case we just chain the longjmp to the caller. Label -1 means no longjmp
-/// occurred. Otherwise we jump to the right post-setjmp BB based on the
-/// label.
+///
+/// __wasm_setjmp_test examines the jmp buf to see if it was for a matching
+/// setjmp call. After calling an invoke wrapper, if a longjmp occurred,
+/// __THREW__ will be the address of matching jmp_buf buffer and
+/// __threwValue be the second argument to longjmp.
+/// __wasm_setjmp_test returns a setjmp label, a unique ID to each setjmp
+/// callsite. Label 0 means this longjmp buffer does not correspond to one
+/// of the setjmp callsites in this function, so in this case we just chain
+/// the longjmp to the caller. Label -1 means no longjmp occurred.
+/// Otherwise we jump to the right post-setjmp BB based on the label.
///
/// * Wasm setjmp / longjmp handling
/// This mode still uses some Emscripten library functions but not JavaScript's
@@ -199,45 +194,44 @@
/// If there are calls to setjmp()
///
/// 2) and 3): The same as 2) and 3) in Emscripten SjLj.
-/// (setjmpTable/setjmpTableSize initialization + setjmp callsite
-/// transformation)
+/// (functionInvocationId initialization + setjmp callsite transformation)
///
/// 4) Create a catchpad with a wasm.catch() intrinsic, which returns the value
-/// thrown by __wasm_longjmp function. In Emscripten library, we have this
-/// struct:
+/// thrown by __wasm_longjmp function. In the runtime library, we have an
+/// equivalent of the following struct:
///
/// struct __WasmLongjmpArgs {
/// void *env;
/// int val;
/// };
-/// struct __WasmLongjmpArgs __wasm_longjmp_args;
///
-/// The thrown value here is a pointer to __wasm_longjmp_args struct object. We
-/// use this struct to transfer two values by throwing a single value. Wasm
-/// throw and catch instructions are capable of throwing and catching multiple
-/// values, but it also requires multivalue support that is currently not very
-/// reliable.
+/// The thrown value here is a pointer to the struct. We use this struct to
+/// transfer two values by throwing a single value. Wasm throw and catch
+/// instructions are capable of throwing and catching multiple values, but
+/// it also requires multivalue support that is currently not very reliable.
/// TODO Switch to throwing and catching two values without using the struct
///
/// All longjmpable function calls will be converted to an invoke that will
/// unwind to this catchpad in case a longjmp occurs. Within the catchpad, we
-/// test the thrown values using testSetjmp function as we do for Emscripten
-/// SjLj. The main difference is, in Emscripten SjLj, we need to transform every
-/// longjmpable callsite into a sequence of code including testSetjmp() call; in
-/// Wasm SjLj we do the testing in only one place, in this catchpad.
+/// test the thrown values using __wasm_setjmp_test function as we do for
+/// Emscripten SjLj. The main difference is, in Emscripten SjLj, we need to
+/// transform every longjmpable callsite into a sequence of code including
+/// __wasm_setjmp_test() call; in Wasm SjLj we do the testing in only one
+/// place, in this catchpad.
///
-/// After testing calling testSetjmp(), if the longjmp does not correspond to
-/// one of the setjmps within the current function, it rethrows the longjmp
-/// by calling __wasm_longjmp(). If it corresponds to one of setjmps in the
-/// function, we jump to the beginning of the function, which contains a switch
-/// to each post-setjmp BB. Again, in Emscripten SjLj, this switch is added for
-/// every longjmpable callsite; in Wasm SjLj we do this only once at the top of
-/// the function. (after setjmpTable/setjmpTableSize initialization)
+/// After testing calling __wasm_setjmp_test(), if the longjmp does not
+/// correspond to one of the setjmps within the current function, it rethrows
+/// the longjmp by calling __wasm_longjmp(). If it corresponds to one of
+/// setjmps in the function, we jump to the beginning of the function, which
+/// contains a switch to each post-setjmp BB. Again, in Emscripten SjLj, this
+/// switch is added for every longjmpable callsite; in Wasm SjLj we do this
+/// only once at the top of the function. (after functionInvocationId
+/// initialization)
///
/// The below is the pseudocode for what we have described
///
/// entry:
-/// Initialize setjmpTable and setjmpTableSize
+/// Initialize functionInvocationId
///
/// setjmp.dispatch:
/// switch %label {
@@ -260,7 +254,7 @@
/// %longjmp.args = wasm.catch() ;; struct __WasmLongjmpArgs
/// %env = load 'env' field from __WasmLongjmpArgs
/// %val = load 'val' field from __WasmLongjmpArgs
-/// %label = testSetjmp(mem[%env], setjmpTable, setjmpTableSize);
+/// %label = __wasm_setjmp_test(%env, functionInvocationId);
/// if (%label == 0)
/// __wasm_longjmp(%env, %val)
/// catchret to %setjmp.dispatch
@@ -309,8 +303,8 @@ class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass {
Function *ResumeF = nullptr; // __resumeException() (Emscripten)
Function *EHTypeIDF = nullptr; // llvm.eh.typeid.for() (intrinsic)
Function *EmLongjmpF = nullptr; // emscripten_longjmp() (Emscripten)
- Function *SaveSetjmpF = nullptr; // saveSetjmp() (Emscripten)
- Function *TestSetjmpF = nullptr; // testSetjmp() (Emscripten)
+ Function *WasmSetjmpF = nullptr; // __wasm_setjmp() (Emscripten)
+ Function *WasmSetjmpTestF = nullptr; // __wasm_setjmp_test() (Emscripten)
Function *WasmLongjmpF = nullptr; // __wasm_longjmp() (Emscripten)
Function *CatchF = nullptr; // wasm.catch() (intrinsic)
@@ -335,18 +329,17 @@ class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass {
bool runEHOnFunction(Function &F);
bool runSjLjOnFunction(Function &F);
void handleLongjmpableCallsForEmscriptenSjLj(
- Function &F, InstVector &SetjmpTableInsts,
- InstVector &SetjmpTableSizeInsts,
+ Function &F, Instruction *FunctionInvocationId,
SmallVectorImpl<PHINode *> &SetjmpRetPHIs);
void
- handleLongjmpableCallsForWasmSjLj(Function &F, InstVector &SetjmpTableInsts,
- InstVector &SetjmpTableSizeInsts,
+ handleLongjmpableCallsForWasmSjLj(Function &F,
+ Instruction *FunctionInvocationId,
SmallVectorImpl<PHINode *> &SetjmpRetPHIs);
Function *getFindMatchingCatch(Module &M, unsigned NumClauses);
Value *wrapInvoke(CallBase *CI);
void wrapTestSetjmp(BasicBlock *BB, DebugLoc DL, Value *Threw,
- Value *SetjmpTable, Value *SetjmpTableSize, Value *&Label,
+ Value *FunctionInvocationId, Value *&Label,
Value *&LongjmpResult, BasicBlock *&CallEmLongjmpBB,
PHINode *&CallEmLongjmpBBThrewPHI,
PHINode *&CallEmLongjmpBBThrewValuePHI,
@@ -618,7 +611,7 @@ static bool canLongjmp(const Value *Callee) {
// There are functions in Emscripten's JS glue code or compiler-rt
if (CalleeName == "__resumeException" || CalleeName == "llvm_eh_typeid_for" ||
- CalleeName == "saveSetjmp" || CalleeName == "testSetjmp" ||
+ CalleeName == "__wasm_setjmp" || CalleeName == "__wasm_setjmp_test" ||
CalleeName == "getTempRet0" || CalleeName == "setTempRet0")
return false;
@@ -687,11 +680,12 @@ static bool isEmAsmCall(const Value *Callee) {
CalleeName == "emscripten_asm_const_async_on_main_thread";
}
-// Generate testSetjmp function call seqence with preamble and postamble.
-// The code this generates is equivalent to the following JavaScript code:
+// Generate __wasm_setjmp_test function call sequence with preamble and
+// postamble. The code this generates is equivalent to the following
+// JavaScript code:
// %__threwValue.val = __threwValue;
// if (%__THREW__.val != 0 & %__threwValue.val != 0) {
-// %label = testSetjmp(mem[%__THREW__.val], setjmpTable, setjmpTableSize);
+// %label = __wasm_setjmp_test(%__THREW__.val, functionInvocationId);
// if (%label == 0)
// emscripten_longjmp(%__THREW__.val, %__threwValue.val);
// setTempRet0(%__threwValue.val);
@@ -703,10 +697,10 @@ static bool isEmAsmCall(const Value *Callee) {
// As output parameters. returns %label, %longjmp_result, and the BB the last
// instruction (%longjmp_result = ...) is in.
void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp(
- BasicBlock *BB, DebugLoc DL, Value *Threw, Value *SetjmpTable,
- Value *SetjmpTableSize, Value *&Label, Value *&LongjmpResult,
- BasicBlock *&CallEmLongjmpBB, PHINode *&CallEmLongjmpBBThrewPHI,
- PHINode *&CallEmLongjmpBBThrewValuePHI, BasicBlock *&EndBB) {
+ BasicBlock *BB, DebugLoc DL, Value *Threw, Value *FunctionInvocationId,
+ Value *&Label, Value *&LongjmpResult, BasicBlock *&CallEmLongjmpBB,
+ PHINode *&CallEmLongjmpBBThrewPHI, PHINode *&CallEmLongjmpBBThrewValuePHI,
+ BasicBlock *&EndBB) {
Function *F = BB->getParent();
Module *M = F->getParent();
LLVMContext &C = M->getContext();
@@ -743,16 +737,14 @@ void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp(
CallEmLongjmpBBThrewValuePHI->addIncoming(ThrewValue, ThenBB1);
}
- // %label = testSetjmp(mem[%__THREW__.val], setjmpTable, setjmpTableSize);
+ // %label = __wasm_setjmp_test(%__THREW__.val, functionInvocationId);
// if (%label == 0)
IRB.SetInsertPoint(ThenBB1);
BasicBlock *EndBB2 = BasicBlock::Create(C, "if.end2", F);
Value *ThrewPtr =
IRB.CreateIntToPtr(Threw, getAddrPtrType(M), Threw->getName() + ".p");
- Value *LoadedThrew = IRB.CreateLoad(getAddrIntType(M), ThrewPtr,
- ThrewPtr->getName() + ".loaded");
- Value *ThenLabel = IRB.CreateCall(
- TestSetjmpF, {LoadedThrew, SetjmpTable, SetjmpTableSize}, "label");
+ Value *ThenLabel = IRB.CreateCall(WasmSetjmpTestF,
+ {ThrewPtr, FunctionInvocationId}, "label");
Value *Cmp2 = IRB.CreateICmpEQ(ThenLabel, IRB.getInt32(0));
IRB.CreateCondBr(Cmp2, CallEmLongjmpBB, EndBB2);
@@ -1007,17 +999,17 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
Type *Int8PtrTy = IRB.getPtrTy();
Type *Int32PtrTy = IRB.getPtrTy();
Type *Int32Ty = IRB.getInt32Ty();
- // Register saveSetjmp function
+
+ // Register __wasm_setjmp function
FunctionType *SetjmpFTy = SetjmpF->getFunctionType();
FunctionType *FTy = FunctionType::get(
- Int32PtrTy,
- {SetjmpFTy->getParamType(0), Int32Ty, Int32PtrTy, Int32Ty}, false);
- SaveSetjmpF = getEmscriptenFunction(FTy, "saveSetjmp", &M);
+ IRB.getVoidTy(), {SetjmpFTy->getParamType(0), Int32Ty, Int32PtrTy},
+ false);
+ WasmSetjmpF = getEmscriptenFunction(FTy, "__wasm_setjmp", &M);
- // Register testSetjmp function
- FTy = FunctionType::get(Int32Ty,
- {getAddrIntType(&M), Int32PtrTy, Int32Ty}, false);
- TestSetjmpF = getEmscriptenFunction(FTy, "testSetjmp", &M);
+ // Register __wasm_setjmp_test function
+ FTy = FunctionType::get(Int32Ty, {Int32PtrTy, Int32PtrTy}, false);
+ WasmSetjmpTestF = getEmscriptenFunction(FTy, "__wasm_setjmp_test", &M);
// wasm.catch() will be lowered down to wasm 'catch' instruction in
// instruction selection.
@@ -1063,7 +1055,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
if (V && V->use_empty())
V->eraseFromParent();
for (auto *V : {GetTempRet0F, SetTempRet0F, ResumeF, EHTypeIDF, EmLongjmpF,
- SaveSetjmpF, TestSetjmpF, WasmLongjmpF, CatchF})
+ WasmSetjmpF, WasmSetjmpTestF, WasmLongjmpF, CatchF})
if (V && V->use_empty())
V->eraseFromParent();
@@ -1268,42 +1260,20 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
LLVMContext &C = F.getContext();
IRBuilder<> IRB(C);
SmallVector<Instruction *, 64> ToErase;
- // Vector of %setjmpTable values
- SmallVector<Instruction *, 4> SetjmpTableInsts;
- // Vector of %setjmpTableSize values
- SmallVector<Instruction *, 4> SetjmpTableSizeInsts;
// Setjmp preparation
- // This instruction effectively means %setjmpTableSize = 4.
- // We create this as an instruction intentionally, and we don't want to fold
- // this instruction to a constant 4, because this value will be used in
- // SSAUpdater.AddAvailableValue(...) later.
BasicBlock *Entry = &F.getEntryBlock();
DebugLoc FirstDL = getOrCreateDebugLoc(&*Entry->begin(), F.getSubprogram());
SplitBlock(Entry, &*Entry->getFirstInsertionPt());
- BinaryOperator *SetjmpTableSize = BinaryOperator::Create(
- Instruction::Add, IRB.getInt32(4), IRB.getInt32(0), "setjmpTableSize",
- Entry->getTerminator()->getIterator());
- SetjmpTableSize->setDebugLoc(FirstDL);
- // setjmpTable = (int *) malloc(40);
- Type *IntPtrTy = getAddrIntType(&M);
- Constant *size = ConstantInt::get(IntPtrTy, 40);
- IRB.SetInsertPoint(SetjmpTableSize);
- auto *SetjmpTable = IRB.CreateMalloc(IntPtrTy, IRB.getInt32Ty(), size,
- nullptr, nullptr, "setjmpTable");
- SetjmpTable->setDebugLoc(FirstDL);
- // CallInst::CreateMalloc may return a bitcast instruction if the result types
- // mismatch. We need to set the debug loc for the original call too.
- auto *MallocCall = SetjmpTable->stripPointerCasts();
- if (auto *MallocCallI = dyn_cast<Instruction>(MallocCall)) {
- MallocCallI->setDebugLoc(FirstDL);
- }
- // setjmpTable[0] = 0;
- IRB.CreateStore(IRB.getInt32(0), SetjmpTable);
- SetjmpTableInsts.push_back(SetjmpTable);
- SetjmpTableSizeInsts.push_back(SetjmpTableSize);
+ IRB.SetInsertPoint(Entry->getTerminator()->getIterator());
+ // This alloca'ed pointer is used by the runtime to identify function
+ // invocations. It's just for pointer comparisons. It will never be
+ // dereferenced.
+ Instruction *FunctionInvocationId =
+ IRB.CreateAlloca(IRB.getInt32Ty(), nullptr, "functionInvocationId");
+ FunctionInvocationId->setDebugLoc(FirstDL);
// Setjmp transformation
SmallVector<PHINode *, 4> SetjmpRetPHIs;
@@ -1350,92 +1320,22 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// 0, because index 0 means the longjmp is not ours to handle.
IRB.SetInsertPoint(CI);
Value *Args[] = {CI->getArgOperand(0), IRB.getInt32(SetjmpRetPHIs.size()),
- SetjmpTable, SetjmpTableSize};
- Instruction *NewSetjmpTable =
- IRB.CreateCall(SaveSetjmpF, Args, "setjmpTable");
- Instruction *NewSetjmpTableSize =
- IRB.CreateCall(GetTempRet0F, std::nullopt, "setjmpTableSize");
- SetjmpTableInsts.push_back(NewSetjmpTable);
- SetjmpTableSizeInsts.push_back(NewSetjmpTableSize);
+ FunctionInvocationId};
+ IRB.CreateCall(WasmSetjmpF, Args);
ToErase.push_back(CI);
}
// Handle longjmpable calls.
if (EnableEmSjLj)
- handleLongjmpableCallsForEmscriptenSjLj(
- F, SetjmpTableInsts, SetjmpTableSizeInsts, SetjmpRetPHIs);
+ handleLongjmpableCallsForEmscriptenSjLj(F, FunctionInvocationId,
+ SetjmpRetPHIs);
else // EnableWasmSjLj
- handleLongjmpableCallsForWasmSjLj(F, SetjmpTableInsts, SetjmpTableSizeInsts,
- SetjmpRetPHIs);
+ handleLongjmpableCallsForWasmSjLj(F, FunctionInvocationId, SetjmpRetPHIs);
// Erase everything we no longer need in this function
for (Instruction *I : ToErase)
I->eraseFromParent();
- // Free setjmpTable buffer before each return instruction + function-exiting
- // call
- SmallVector<Instruction *, 16> ExitingInsts;
- for (BasicBlock &BB : F) {
- Instruction *TI = BB.getTerminator();
- if (isa<ReturnInst>(TI))
- ExitingInsts.push_back(TI);
- // Any 'call' instruction with 'noreturn' attribute exits the function at
- // this point. If this throws but unwinds to another EH pad within this
- // function instead of exiting, this would have been an 'invoke', which
- // happens if we use Wasm EH or Wasm SjLJ.
- for (auto &I : BB) {
- if (auto *CI = dyn_cast<CallInst>(&I)) {
- bool IsNoReturn = CI->hasFnAttr(Attribute::NoReturn);
- if (Function *CalleeF = CI->getCalledFunction())
- IsNoReturn |= CalleeF->hasFnAttribute(Attribute::NoReturn);
- if (IsNoReturn)
- ExitingInsts.push_back(&I);
- }
- }
- }
- for (auto *I : ExitingInsts) {
- DebugLoc DL = getOrCreateDebugLoc(I, F.getSubprogram());
- // If this existing instruction is a call within a catchpad, we should add
- // it as "funclet" to the operand bundle of 'free' call
- SmallVector<OperandBundleDef, 1> Bundles;
- if (auto *CB = dyn_cast<CallBase>(I))
- if (auto Bundle = CB->getOperandBundle(LLVMContext::OB_funclet))
- Bundles.push_back(OperandBundleDef(*Bundle));
- IRB.SetInsertPoint(I);
- auto *Free = IRB.CreateFree(SetjmpTable, Bundles);
- Free->setDebugLoc(DL);
- }
-
- // Every call to saveSetjmp can change setjmpTable and setjmpTableSize
- // (when buffer reallocation occurs)
- // entry:
- // setjmpTableSize = 4;
- // setjmpTable = (int *) malloc(40);
- // setjmpTable[0] = 0;
- // ...
- // somebb:
- // setjmpTable = saveSetjmp(env, label, setjmpTable, setjmpTableSize);
- // setjmpTableSize = getTempRet0();
- // So we need to make sure the SSA for these variables is valid so that every
- // saveSetjmp and testSetjmp calls have the correct arguments.
- SSAUpdater SetjmpTableSSA;
- SSAUpdater SetjmpTableSizeSSA;
- SetjmpTableSSA.Initialize(PointerType::get(C, 0), "setjmpTable");
- SetjmpTableSizeSSA.Initialize(Type::getInt32Ty(C), "setjmpTableSize");
- for (Instruction *I : SetjmpTableInsts)
- SetjmpTableSSA.AddAvailableValue(I->getParent(), I);
- for (Instruction *I : SetjmpTableSizeInsts)
- SetjmpTableSizeSSA.AddAvailableValue(I->getParent(), I);
-
- for (auto &U : make_early_inc_range(SetjmpTable->uses()))
- if (auto *I = dyn_cast<Instruction>(U.getUser()))
- if (I->getParent() != Entry)
- SetjmpTableSSA.RewriteUse(U);
- for (auto &U : make_early_inc_range(SetjmpTableSize->uses()))
- if (auto *I = dyn_cast<Instruction>(U.getUser()))
- if (I->getParent() != Entry)
- SetjmpTableSizeSSA.RewriteUse(U);
-
// Finally, our modifications to the cfg can break dominance of SSA variables.
// For example, in this code,
// if (x()) { .. setjmp() .. }
@@ -1454,21 +1354,13 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// setjmp. Refer to 4) of "Emscripten setjmp/longjmp handling" section in the
// comments at top of the file for details.
void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForEmscriptenSjLj(
- Function &F, InstVector &SetjmpTableInsts, InstVector &SetjmpTableSizeInsts,
+ Function &F, Instruction *FunctionInvocationId,
SmallVectorImpl<PHINode *> &SetjmpRetPHIs) {
Module &M = *F.getParent();
LLVMContext &C = F.getContext();
IRBuilder<> IRB(C);
SmallVector<Instruction *, 64> ToErase;
- // We need to pass setjmpTable and setjmpTableSize to testSetjmp function.
- // These values are defined in the beginning of the function and also in each
- // setjmp callsite, but we don't know which values we should use at this
- // point. So here we arbitraily use the ones defined in the beginning of the
- // function, and SSAUpdater will later update them to the correct values.
- Instruction *SetjmpTable = *SetjmpTableInsts.begin();
- Instruction *SetjmpTableSize = *SetjmpTableSizeInsts.begin();
-
// call.em.longjmp BB that will be shared within the function.
BasicBlock *CallEmLongjmpBB = nullptr;
// PHI node for the loaded value of __THREW__ global variable in
@@ -1601,7 +1493,7 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForEmscriptenSjLj(
IRB.SetInsertPoint(NormalBB);
IRB.CreateBr(Tail);
- BB = NormalBB; // New insertion point to insert testSetjmp()
+ BB = NormalBB; // New insertion point to insert __wasm_setjmp_test()
}
}
@@ -1610,16 +1502,15 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForEmscriptenSjLj(
// right setjmp-tail if so
ToErase.push_back(BB->getTerminator());
- // Generate a function call to testSetjmp function and preamble/postamble
- // code to figure out (1) whether longjmp occurred (2) if longjmp
- // occurred, which setjmp it corresponds to
+ // Generate a function call to __wasm_setjmp_test function and
+ // preamble/postamble code to figure out (1) whether longjmp
+ // occurred (2) if longjmp occurred, which setjmp it corresponds to
Value *Label = nullptr;
Value *LongjmpResult = nullptr;
BasicBlock *EndBB = nullptr;
- wrapTestSetjmp(BB, CI->getDebugLoc(), Threw, SetjmpTable, SetjmpTableSize,
- Label, LongjmpResult, CallEmLongjmpBB,
- CallEmLongjmpBBThrewPHI, CallEmLongjmpBBThrewValuePHI,
- EndBB);
+ wrapTestSetjmp(BB, CI->getDebugLoc(), Threw, FunctionInvocationId, Label,
+ LongjmpResult, CallEmLongjmpBB, CallEmLongjmpBBThrewPHI,
+ CallEmLongjmpBBThrewValuePHI, EndBB);
assert(Label && LongjmpResult && EndBB);
// Create switch instruction
@@ -1658,7 +1549,7 @@ static BasicBlock *getCleanupRetUnwindDest(const CleanupPadInst *CPI) {
// BBs. Refer to 4) of "Wasm setjmp/longjmp handling" section in the comments at
// top of the file for details.
void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
- Function &F, InstVector &SetjmpTableInsts, InstVector &SetjmpTableSizeInsts,
+ Function &F, Instruction *FunctionInvocationId,
SmallVectorImpl<PHINode *> &SetjmpRetPHIs) {
Module &M = *F.getParent();
LLVMContext &C = F.getContext();
@@ -1682,18 +1573,13 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
DebugLoc FirstDL = getOrCreateDebugLoc(&*Entry->begin(), F.getSubprogram());
IRB.SetCurrentDebugLocation(FirstDL);
- // Arbitrarily use the ones defined in the beginning of the function.
- // SSAUpdater will later update them to the correct values.
- Instruction *SetjmpTable = *SetjmpTableInsts.begin();
- Instruction *SetjmpTableSize = *SetjmpTableSizeInsts.begin();
-
// Add setjmp.dispatch BB right after the entry block. Because we have
- // initialized setjmpTable/setjmpTableSize in the entry block and split the
+ // initialized functionInvocationId in the entry block and split the
// rest into another BB, here 'OrigEntry' is the function's original entry
// block before the transformation.
//
// entry:
- // setjmpTable / setjmpTableSize initialization
+ // functionInvocationId initialization
// setjmp.dispatch:
// switch will be inserted here later
// entry.split: (OrigEntry)
@@ -1731,17 +1617,15 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
// int val = __wasm_longjmp_args.val;
Instruction *Val = IRB.CreateLoad(IRB.getInt32Ty(), ValField, "val");
- // %label = testSetjmp(mem[%env], setjmpTable, setjmpTableSize);
+  // %label = __wasm_setjmp_test(%env, functionInvocationId);
// if (%label == 0)
// __wasm_longjmp(%env, %val)
// catchret to %setjmp.dispatch
BasicBlock *ThenBB = BasicBlock::Create(C, "if.then", &F);
BasicBlock *EndBB = BasicBlock::Create(C, "if.end", &F);
Value *EnvP = IRB.CreateBitCast(Env, getAddrPtrType(&M), "env.p");
- Value *SetjmpID = IRB.CreateLoad(getAddrIntType(&M), EnvP, "setjmp.id");
- Value *Label =
- IRB.CreateCall(TestSetjmpF, {SetjmpID, SetjmpTable, SetjmpTableSize},
- OperandBundleDef("funclet", CatchPad), "label");
+ Value *Label = IRB.CreateCall(WasmSetjmpTestF, {EnvP, FunctionInvocationId},
+ OperandBundleDef("funclet", CatchPad), "label");
Value *Cmp = IRB.CreateICmpEQ(Label, IRB.getInt32(0));
IRB.CreateCondBr(Cmp, ThenBB, EndBB);
diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 641158cb351f..78669784dd03 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -167,7 +167,7 @@ struct X86Operand final : public MCParsedAsmOperand {
Tok.Length = Value.size();
}
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == Register && "Invalid access!");
return Reg.RegNo;
}
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index 8e0f61a85566..9be3812300af 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -119,8 +119,6 @@ private:
MachineFunction &MF) const;
bool selectSelect(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
- bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
// emit insert subreg instruction and insert it before MachineInstr &I
bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
@@ -434,8 +432,6 @@ bool X86InstructionSelector::select(MachineInstr &I) {
return selectMulDivRem(I, MRI, MF);
case TargetOpcode::G_SELECT:
return selectSelect(I, MRI, MF);
- case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
- return selectIntrinsicWSideEffects(I, MRI, MF);
}
return false;
@@ -1834,21 +1830,6 @@ bool X86InstructionSelector::selectSelect(MachineInstr &I,
return true;
}
-bool X86InstructionSelector::selectIntrinsicWSideEffects(
- MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {
-
- assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
- "unexpected instruction");
-
- if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
- return false;
-
- BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));
-
- I.eraseFromParent();
- return true;
-}
-
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
X86Subtarget &Subtarget,
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index 06389842ebb1..07041cc5b049 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -259,8 +259,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
getActionDefinitionsBuilder(G_ICMP)
.legalForCartesianProduct({s8}, Is64Bit ? IntTypes64 : IntTypes32)
.clampScalar(0, s8, s8)
- .clampScalar(1, s8, sMaxScalar)
- .scalarSameSizeAs(2, 1);
+ .clampScalar(1, s8, sMaxScalar);
// bswap
getActionDefinitionsBuilder(G_BSWAP)
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 53c0486c8697..6289b3a1df1f 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -650,6 +650,16 @@ void X86DomainReassignment::initConverters() {
createReplacer(X86::AND16rr, X86::KANDWrr);
createReplacer(X86::XOR16rr, X86::KXORWrr);
+ bool HasNDD = STI->hasNDD();
+ if (HasNDD) {
+ createReplacer(X86::SHR16ri_ND, X86::KSHIFTRWri);
+ createReplacer(X86::SHL16ri_ND, X86::KSHIFTLWri);
+ createReplacer(X86::NOT16r_ND, X86::KNOTWrr);
+ createReplacer(X86::OR16rr_ND, X86::KORWrr);
+ createReplacer(X86::AND16rr_ND, X86::KANDWrr);
+ createReplacer(X86::XOR16rr_ND, X86::KXORWrr);
+ }
+
if (STI->hasBWI()) {
createReplacer(X86::MOV32rm, GET_EGPR_IF_ENABLED(X86::KMOVDkm));
createReplacer(X86::MOV64rm, GET_EGPR_IF_ENABLED(X86::KMOVQkm));
@@ -684,6 +694,23 @@ void X86DomainReassignment::initConverters() {
createReplacer(X86::XOR32rr, X86::KXORDrr);
createReplacer(X86::XOR64rr, X86::KXORQrr);
+ if (HasNDD) {
+ createReplacer(X86::SHR32ri_ND, X86::KSHIFTRDri);
+ createReplacer(X86::SHL32ri_ND, X86::KSHIFTLDri);
+ createReplacer(X86::ADD32rr_ND, X86::KADDDrr);
+ createReplacer(X86::NOT32r_ND, X86::KNOTDrr);
+ createReplacer(X86::OR32rr_ND, X86::KORDrr);
+ createReplacer(X86::AND32rr_ND, X86::KANDDrr);
+ createReplacer(X86::XOR32rr_ND, X86::KXORDrr);
+ createReplacer(X86::SHR64ri_ND, X86::KSHIFTRQri);
+ createReplacer(X86::SHL64ri_ND, X86::KSHIFTLQri);
+ createReplacer(X86::ADD64rr_ND, X86::KADDQrr);
+ createReplacer(X86::NOT64r_ND, X86::KNOTQrr);
+ createReplacer(X86::OR64rr_ND, X86::KORQrr);
+ createReplacer(X86::AND64rr_ND, X86::KANDQrr);
+ createReplacer(X86::XOR64rr_ND, X86::KXORQrr);
+ }
+
// TODO: KTEST is not a replacement for TEST due to flag differences. Need
// to prove only Z flag is used.
// createReplacer(X86::TEST32rr, X86::KTESTDrr);
@@ -713,6 +740,17 @@ void X86DomainReassignment::initConverters() {
// createReplacer(X86::TEST16rr, X86::KTESTWrr);
createReplacer(X86::XOR8rr, X86::KXORBrr);
+
+ if (HasNDD) {
+ createReplacer(X86::ADD8rr_ND, X86::KADDBrr);
+ createReplacer(X86::ADD16rr_ND, X86::KADDWrr);
+ createReplacer(X86::AND8rr_ND, X86::KANDBrr);
+ createReplacer(X86::NOT8r_ND, X86::KNOTBrr);
+ createReplacer(X86::OR8rr_ND, X86::KORBrr);
+ createReplacer(X86::SHR8ri_ND, X86::KSHIFTRBri);
+ createReplacer(X86::SHL8ri_ND, X86::KSHIFTLBri);
+ createReplacer(X86::XOR8rr_ND, X86::KXORBrr);
+ }
}
#undef GET_EGPR_IF_ENABLED
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 35f756ea5e1d..312e4487a8f1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2631,6 +2631,11 @@ bool X86::mayFoldIntoZeroExtend(SDValue Op) {
return false;
}
+static bool isLogicOp(unsigned Opcode) {
+ // TODO: Add support for X86ISD::FAND/FOR/FXOR/FANDN with test coverage.
+ return ISD::isBitwiseLogicOp(Opcode) || X86ISD::ANDNP == Opcode;
+}
+
static bool isTargetShuffle(unsigned Opcode) {
switch(Opcode) {
default: return false;
@@ -21512,7 +21517,9 @@ SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
}
if (VT.getScalarType() == MVT::bf16) {
- if (SVT.getScalarType() == MVT::f32 && isTypeLegal(VT))
+ if (SVT.getScalarType() == MVT::f32 &&
+ ((Subtarget.hasBF16() && Subtarget.hasVLX()) ||
+ Subtarget.hasAVXNECONVERT()))
return Op;
return SDValue();
}
@@ -21619,7 +21626,8 @@ SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
SDLoc DL(Op);
MVT SVT = Op.getOperand(0).getSimpleValueType();
- if (SVT == MVT::f32 && (Subtarget.hasBF16() || Subtarget.hasAVXNECONVERT())) {
+ if (SVT == MVT::f32 && ((Subtarget.hasBF16() && Subtarget.hasVLX()) ||
+ Subtarget.hasAVXNECONVERT())) {
SDValue Res;
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, Op.getOperand(0));
Res = DAG.getNode(X86ISD::CVTNEPS2BF16, DL, MVT::v8bf16, Res);
@@ -39972,8 +39980,7 @@ static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG,
auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
// Ensure we only shuffle whole vector src elements, unless its a logical
// binops where we can more aggressively move shuffles from dst to src.
- return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
- BinOp == X86ISD::ANDNP ||
+ return isLogicOp(BinOp) ||
(Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
};
@@ -43992,6 +43999,50 @@ static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
Extract->getOperand(1));
}
+// If this extract is from a loaded vector value and will be used as an
+// integer, that requires a potentially expensive XMM -> GPR transfer.
+// Additionally, if we can convert to a scalar integer load, that will likely
+// be folded into a subsequent integer op.
+// Note: SrcVec might not have a VecVT type, but it must be the same size.
+// Note: Unlike the related fold for this in DAGCombiner, this is not limited
+// to a single-use of the loaded vector. For the reasons above, we
+// expect this to be profitable even if it creates an extra load.
+static SDValue
+combineExtractFromVectorLoad(SDNode *N, EVT VecVT, SDValue SrcVec, uint64_t Idx,
+ const SDLoc &dl, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ "Only EXTRACT_VECTOR_ELT supported so far");
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT VT = N->getValueType(0);
+
+ bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
+ return Use->getOpcode() == ISD::STORE ||
+ Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
+ Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
+ });
+
+ auto *LoadVec = dyn_cast<LoadSDNode>(SrcVec);
+ if (LoadVec && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
+ VecVT.getVectorElementType() == VT &&
+ VecVT.getSizeInBits() == SrcVec.getValueSizeInBits() &&
+ DCI.isAfterLegalizeDAG() && !LikelyUsedAsVector && LoadVec->isSimple()) {
+ SDValue NewPtr = TLI.getVectorElementPointer(
+ DAG, LoadVec->getBasePtr(), VecVT, DAG.getVectorIdxConstant(Idx, dl));
+ unsigned PtrOff = VT.getSizeInBits() * Idx / 8;
+ MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
+ Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
+ SDValue Load =
+ DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
+ LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
+ DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
+ return Load;
+ }
+
+ return SDValue();
+}
+
// Attempt to peek through a target shuffle and extract the scalar from the
// source.
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
@@ -44188,6 +44239,11 @@ static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
return DAG.getZExtOrTrunc(V, dl, VT);
+ if (N->getOpcode() == ISD::EXTRACT_VECTOR_ELT && ExtractVT == SrcVT)
+ if (SDValue V = combineExtractFromVectorLoad(
+ N, SrcVT, peekThroughBitcasts(SrcOp), ExtractIdx, dl, DAG, DCI))
+ return V;
+
return SDValue();
}
@@ -44597,6 +44653,12 @@ static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
return V;
+ if (CIdx)
+ if (SDValue V = combineExtractFromVectorLoad(
+ N, InputVector.getValueType(), InputVector, CIdx->getZExtValue(),
+ dl, DAG, DCI))
+ return V;
+
// Attempt to extract a i1 element by using MOVMSK to extract the signbits
// and then testing the relevant element.
//
@@ -44642,34 +44704,6 @@ static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
}
}
- // If this extract is from a loaded vector value and will be used as an
- // integer, that requires a potentially expensive XMM -> GPR transfer.
- // Additionally, if we can convert to a scalar integer load, that will likely
- // be folded into a subsequent integer op.
- // Note: Unlike the related fold for this in DAGCombiner, this is not limited
- // to a single-use of the loaded vector. For the reasons above, we
- // expect this to be profitable even if it creates an extra load.
- bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
- return Use->getOpcode() == ISD::STORE ||
- Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
- Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
- });
- auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
- if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
- SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
- !LikelyUsedAsVector && LoadVec->isSimple()) {
- SDValue NewPtr =
- TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
- unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
- MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
- Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
- SDValue Load =
- DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
- LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
- DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
- return Load;
- }
-
return SDValue();
}
@@ -47403,10 +47437,13 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::VSRAV, DL, N->getVTList(), N0, ShrAmtVal);
}
- // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
- // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
- // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
- // depending on sign of (SarConst - [56,48,32,24,16])
+ // fold (SRA (SHL X, ShlConst), SraConst)
+ // into (SHL (sext_in_reg X), ShlConst - SraConst)
+ // or (sext_in_reg X)
+ // or (SRA (sext_in_reg X), SraConst - ShlConst)
+ // depending on relation between SraConst and ShlConst.
+ // We only do this if (Size - ShlConst) is equal to 8, 16 or 32. That allows
+ // us to do the sext_in_reg from corresponding bit.
// sexts in X86 are MOVs. The MOVs have the same code size
// as above SHIFTs (only SHIFT on 1 has lower code size).
@@ -47422,29 +47459,29 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
APInt ShlConst = N01->getAsAPIntVal();
- APInt SarConst = N1->getAsAPIntVal();
+ APInt SraConst = N1->getAsAPIntVal();
EVT CVT = N1.getValueType();
- if (SarConst.isNegative())
+ if (CVT != N01.getValueType())
+ return SDValue();
+ if (SraConst.isNegative())
return SDValue();
for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
unsigned ShiftSize = SVT.getSizeInBits();
- // skipping types without corresponding sext/zext and
- // ShlConst that is not one of [56,48,32,24,16]
+ // Only deal with (Size - ShlConst) being equal to 8, 16 or 32.
if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
continue;
SDLoc DL(N);
SDValue NN =
DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
- SarConst = SarConst - (Size - ShiftSize);
- if (SarConst == 0)
+ if (SraConst.eq(ShlConst))
return NN;
- if (SarConst.isNegative())
+ if (SraConst.ult(ShlConst))
return DAG.getNode(ISD::SHL, DL, VT, NN,
- DAG.getConstant(-SarConst, DL, CVT));
+ DAG.getConstant(ShlConst - SraConst, DL, CVT));
return DAG.getNode(ISD::SRA, DL, VT, NN,
- DAG.getConstant(SarConst, DL, CVT));
+ DAG.getConstant(SraConst - ShlConst, DL, CVT));
}
return SDValue();
}
@@ -48267,7 +48304,7 @@ static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
// We do not split for SSE at all, but we need to split vectors for AVX1 and
// AVX2.
- if (!Subtarget.useAVX512Regs() && VT.is512BitVector() &&
+ if (!Subtarget.useAVX512Regs() && VT.is512BitVector() &&
TLI.isTypeLegal(VT.getHalfNumVectorElementsVT(*DAG.getContext()))) {
SDValue LoX, HiX;
std::tie(LoX, HiX) = splitVector(X, DAG, DL);
@@ -55526,7 +55563,19 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
}
break;
// Due to VADD, VSUB, VMUL can executed on more ports than VINSERT and
- // their latency are short, so here we don't replace them.
+ // their latency are short, so here we don't replace them unless we won't
+ // introduce extra VINSERT.
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ if (!IsSplat && (IsConcatFree(VT, Ops, 0) || IsConcatFree(VT, Ops, 1)) &&
+ (VT.is256BitVector() ||
+ (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
+ return DAG.getNode(Op0.getOpcode(), DL, VT,
+ ConcatSubOperand(VT, Ops, 0),
+ ConcatSubOperand(VT, Ops, 1));
+ }
+ break;
case ISD::FDIV:
if (!IsSplat && (VT.is256BitVector() ||
(VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index fef0a5a90cd6..c45ec8981ab1 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -334,6 +334,44 @@ let Predicates = [In64BitMode] in {
def IMUL32rmi_EVEX : IMulOpMI_RF<Xi32, WriteIMul32Imm>, PL;
def IMUL64rmi32_EVEX : IMulOpMI_RF<Xi64, WriteIMul64Imm>, PL;
}
+
+// IMULZU instructions
+class IMulZUOpRI8_R<X86TypeInfo t, X86FoldableSchedWrite sched>
+ : BinOpRI8<0x6B, "imulzu", binop_ndd_args, t, MRMSrcReg,
+ (outs t.RegClass:$dst)> {
+ let SchedRW = [sched];
+}
+class IMulZUOpRI_R<X86TypeInfo t, X86FoldableSchedWrite sched>
+ : BinOpRI<0x69, "imulzu", binop_ndd_args, t, MRMSrcReg,
+ (outs t.RegClass:$dst), []> {
+ let SchedRW = [sched];
+}
+class IMulZUOpMI8_R<X86TypeInfo t, X86FoldableSchedWrite sched>
+ : BinOpMI8<"imulzu", binop_ndd_args, t, MRMSrcMem, (outs t.RegClass:$dst)> {
+ let Opcode = 0x6B;
+ let SchedRW = [sched.Folded];
+}
+class IMulZUOpMI_R<X86TypeInfo t, X86FoldableSchedWrite sched>
+ : BinOpMI<0x69, "imulzu", binop_ndd_args, t, MRMSrcMem,
+ (outs t.RegClass:$dst), []> {
+ let SchedRW = [sched.Folded];
+}
+
+let Defs = [EFLAGS], Predicates = [HasEGPR, In64BitMode] in {
+ def IMULZU16rri8 : IMulZUOpRI8_R<Xi16, WriteIMul16Imm>, ZU, PD;
+ def IMULZU16rmi8 : IMulZUOpMI8_R<Xi16, WriteIMul16Imm>, ZU, PD;
+ def IMULZU16rri : IMulZUOpRI_R<Xi16, WriteIMul16Imm>, ZU, PD;
+ def IMULZU16rmi : IMulZUOpMI_R<Xi16, WriteIMul16Imm>, ZU, PD;
+ def IMULZU32rri8 : IMulZUOpRI8_R<Xi32, WriteIMul32Imm>, ZU;
+ def IMULZU32rmi8 : IMulZUOpMI8_R<Xi32, WriteIMul32Imm>, ZU;
+ def IMULZU32rri : IMulZUOpRI_R<Xi32, WriteIMul32Imm>, ZU;
+ def IMULZU32rmi : IMulZUOpMI_R<Xi32, WriteIMul32Imm>, ZU;
+ def IMULZU64rri8 : IMulZUOpRI8_R<Xi64, WriteIMul64Imm>, ZU;
+ def IMULZU64rmi8 : IMulZUOpMI8_R<Xi64, WriteIMul64Imm>, ZU;
+ def IMULZU64rri32 : IMulZUOpRI_R<Xi64, WriteIMul64Imm>, ZU;
+ def IMULZU64rmi32 : IMulZUOpMI_R<Xi64, WriteIMul64Imm>, ZU;
+}
+
//===----------------------------------------------------------------------===//
// INC and DEC Instructions
//
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index eb42a4b2119d..f24334312c11 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -5595,9 +5595,13 @@ static unsigned convertALUrr2ALUri(unsigned Opc) {
case X86::FROM: \
return X86::TO;
FROM_TO(TEST64rr, TEST64ri32)
+ FROM_TO(CTEST64rr, CTEST64ri32)
FROM_TO(CMP64rr, CMP64ri32)
+ FROM_TO(CCMP64rr, CCMP64ri32)
FROM_TO(TEST32rr, TEST32ri)
+ FROM_TO(CTEST32rr, CTEST32ri)
FROM_TO(CMP32rr, CMP32ri)
+ FROM_TO(CCMP32rr, CCMP32ri)
#undef FROM_TO
}
}
@@ -5697,7 +5701,8 @@ bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
UseMI.findRegisterUseOperandIdx(Reg) != 2)
return false;
// For CMP instructions the immediate can only be at index 1.
- if ((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) &&
+ if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
+ (NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
UseMI.findRegisterUseOperandIdx(Reg) != 1)
return false;
@@ -5742,7 +5747,7 @@ bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
unsigned ImmOpNum = 2;
if (!UseMI.getOperand(0).isDef()) {
- Op1 = 0; // TEST, CMP
+ Op1 = 0; // TEST, CMP, CTEST, CCMP
ImmOpNum = 1;
}
if (Opc == TargetOpcode::COPY)
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index 04d9d104ebc4..8387b76a40cd 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -119,6 +119,8 @@ class NDD<bit ndd> {
class NF: T_MAP4, EVEX, EVEX_NF;
// PL - Helper for promoted legacy instructions
class PL: T_MAP4, EVEX, ExplicitEVEXPrefix;
+// ZU - Helper for Zero Upper instructions
+class ZU: T_MAP4, EVEX, EVEX_B;
//===----------------------------------------------------------------------===//
// X86 Type infomation definitions
diff --git a/llvm/lib/Target/X86/X86SchedBroadwell.td b/llvm/lib/Target/X86/X86SchedBroadwell.td
index 61a8832000e2..0027de851df7 100644
--- a/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/llvm/lib/Target/X86/X86SchedBroadwell.td
@@ -471,9 +471,11 @@ defm : X86WriteResPairUnsupported<WritePSADBWZ>;
defm : BWWriteResPair<WritePHMINPOS, [BWPort0], 5>; // Vector PHMINPOS.
// Vector integer shifts.
-defm : BWWriteResPair<WriteVecShift, [BWPort0], 1, [1], 1, 5>;
-defm : BWWriteResPair<WriteVecShiftX, [BWPort0,BWPort5], 2, [1,1], 2, 5>;
+defm : X86WriteRes<WriteVecShift, [BWPort0], 1, [1], 1>;
+defm : X86WriteRes<WriteVecShiftX, [BWPort0,BWPort5], 2, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftY, [BWPort0,BWPort5], 4, [1,1], 2>;
+defm : X86WriteRes<WriteVecShiftLd, [BWPort0,BWPort23], 6, [1,1], 2>;
+defm : X86WriteRes<WriteVecShiftXLd, [BWPort0,BWPort23], 7, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftYLd, [BWPort0,BWPort23], 7, [1,1], 2>;
defm : X86WriteResPairUnsupported<WriteVecShiftZ>;
diff --git a/llvm/lib/Target/X86/X86SchedHaswell.td b/llvm/lib/Target/X86/X86SchedHaswell.td
index 8795ca95c559..a11b470b1f51 100644
--- a/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -469,10 +469,12 @@ defm : HWWriteResPair<WritePSADBWZ, [HWPort0], 5, [1], 1, 7>; // Unsupported = 1
defm : HWWriteResPair<WritePHMINPOS, [HWPort0], 5, [1], 1, 6>;
// Vector integer shifts.
-defm : HWWriteResPair<WriteVecShift, [HWPort0], 1, [1], 1, 5>;
-defm : HWWriteResPair<WriteVecShiftX, [HWPort0,HWPort5], 2, [1,1], 2, 6>;
+defm : X86WriteRes<WriteVecShift, [HWPort0], 1, [1], 1>;
+defm : X86WriteRes<WriteVecShiftX, [HWPort0,HWPort5], 2, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftY, [HWPort0,HWPort5], 4, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftZ, [HWPort0,HWPort5], 4, [1,1], 2>; // Unsupported = 1
+defm : X86WriteRes<WriteVecShiftLd, [HWPort0,HWPort23], 6, [1,1], 2>;
+defm : X86WriteRes<WriteVecShiftXLd, [HWPort0,HWPort23], 8, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftYLd, [HWPort0,HWPort23], 8, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftZLd, [HWPort0,HWPort23], 8, [1,1], 2>; // Unsupported = 1
diff --git a/llvm/lib/Target/X86/X86SchedIceLake.td b/llvm/lib/Target/X86/X86SchedIceLake.td
index 3981279abc36..2d296771b1c0 100644
--- a/llvm/lib/Target/X86/X86SchedIceLake.td
+++ b/llvm/lib/Target/X86/X86SchedIceLake.td
@@ -413,9 +413,9 @@ defm : ICXWriteResPair<WritePHMINPOS, [ICXPort0], 4, [1], 1, 6>; // Vector PHMIN
// Vector integer shifts.
defm : ICXWriteResPair<WriteVecShift, [ICXPort0], 1, [1], 1, 5>;
-defm : X86WriteRes<WriteVecShiftX, [ICXPort5,ICXPort01], 2, [1,1], 2>;
-defm : X86WriteRes<WriteVecShiftY, [ICXPort5,ICXPort01], 4, [1,1], 2>;
-defm : X86WriteRes<WriteVecShiftZ, [ICXPort5,ICXPort0], 4, [1,1], 2>;
+defm : X86WriteRes<WriteVecShiftX, [ICXPort01,ICXPort15], 2, [1,1], 2>;
+defm : X86WriteRes<WriteVecShiftY, [ICXPort01,ICXPort5], 4, [1,1], 2>;
+defm : X86WriteRes<WriteVecShiftZ, [ICXPort0,ICXPort5], 4, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftXLd, [ICXPort01,ICXPort23], 7, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftYLd, [ICXPort01,ICXPort23], 8, [1,1], 2>;
defm : X86WriteRes<WriteVecShiftZLd, [ICXPort0,ICXPort23], 8, [1,1], 2>;
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index d336ab9d309c..2ec29463d2fa 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1480,6 +1480,14 @@ InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
Kind = improveShuffleKindFromMask(Kind, Mask, BaseTp, Index, SubTp);
+ // Recognize a basic concat_vector shuffle.
+ if (Kind == TTI::SK_PermuteTwoSrc &&
+ Mask.size() == (2 * BaseTp->getElementCount().getKnownMinValue()) &&
+ ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))
+ return getShuffleCost(TTI::SK_InsertSubvector,
+ VectorType::getDoubleElementsVectorType(BaseTp), Mask,
+ CostKind, Mask.size() / 2, BaseTp);
+
// Treat Transpose as 2-op shuffles - there's no difference in lowering.
if (Kind == TTI::SK_Transpose)
Kind = TTI::SK_PermuteTwoSrc;
@@ -5651,7 +5659,7 @@ InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
return TTI::TCC_Free;
break;
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
if ((Idx < 4) || (Imm.getBitWidth() <= 64 && Imm.isSignedIntN(64)))
return TTI::TCC_Free;
break;
diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp
index 3f808298527f..1fa00af2111e 100644
--- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp
+++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp
@@ -244,7 +244,7 @@ public:
/// getEndLoc - Gets location of the last token of this operand
SMLoc getEndLoc() const override { return EndLoc; }
- unsigned getReg() const override {
+ MCRegister getReg() const override {
assert(Kind == Register && "Invalid type access!");
return Reg.RegNum;
}
diff --git a/llvm/lib/TargetParser/AArch64TargetParser.cpp b/llvm/lib/TargetParser/AArch64TargetParser.cpp
index e36832f563ee..71099462d5ec 100644
--- a/llvm/lib/TargetParser/AArch64TargetParser.cpp
+++ b/llvm/lib/TargetParser/AArch64TargetParser.cpp
@@ -186,11 +186,6 @@ void AArch64::ExtensionSet::enable(ArchExtKind E) {
// Special cases for dependencies which vary depending on the base
// architecture version.
if (BaseArch) {
- // +sve implies +f32mm if the base architecture is v8.6A+ or v9.1A+
- // It isn't the case in general that sve implies both f64mm and f32mm
- if (E == AEK_SVE && BaseArch->is_superset(ARMV8_6A))
- enable(AEK_F32MM);
-
// +fp16 implies +fp16fml for v8.4A+, but not v9.0-A+
if (E == AEK_FP16 && BaseArch->is_superset(ARMV8_4A) &&
!BaseArch->is_superset(ARMV9A))
diff --git a/llvm/lib/TextAPI/BinaryReader/CMakeLists.txt b/llvm/lib/TextAPI/BinaryReader/CMakeLists.txt
index cbdf7b2c9696..c4535310d91c 100644
--- a/llvm/lib/TextAPI/BinaryReader/CMakeLists.txt
+++ b/llvm/lib/TextAPI/BinaryReader/CMakeLists.txt
@@ -2,6 +2,7 @@ add_llvm_component_library(LLVMTextAPIBinaryReader
DylibReader.cpp
LINK_COMPONENTS
+ DebugInfoDWARF
Support
Object
TextAPI
diff --git a/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp b/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp
index 0694d8f28df6..f92a2d19a63f 100644
--- a/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp
+++ b/llvm/lib/TextAPI/BinaryReader/DylibReader.cpp
@@ -12,7 +12,8 @@
#include "llvm/TextAPI/DylibReader.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringMap.h"
+#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/MachOUniversal.h"
#include "llvm/Support/Endian.h"
@@ -293,8 +294,11 @@ static Error readSymbols(MachOObjectFile *Obj, RecordsSlice &Slice,
RecordLinkage Linkage = RecordLinkage::Unknown;
SymbolFlags RecordFlags = SymbolFlags::None;
- if (Opt.Undefineds && (Flags & SymbolRef::SF_Undefined)) {
- Linkage = RecordLinkage::Undefined;
+ if (Flags & SymbolRef::SF_Undefined) {
+ if (Opt.Undefineds)
+ Linkage = RecordLinkage::Undefined;
+ else
+ continue;
if (Flags & SymbolRef::SF_Weak)
RecordFlags |= SymbolFlags::WeakReferenced;
} else if (Flags & SymbolRef::SF_Exported) {
@@ -429,3 +433,111 @@ DylibReader::get(MemoryBufferRef Buffer) {
return convertToInterfaceFile(*SlicesOrErr);
}
+
+static void DWARFErrorHandler(Error Err) { /**/ }
+
+static SymbolToSourceLocMap
+accumulateLocs(MachOObjectFile &Obj,
+ const std::unique_ptr<DWARFContext> &DiCtx) {
+ SymbolToSourceLocMap LocMap;
+ for (const auto &Symbol : Obj.symbols()) {
+ Expected<uint32_t> FlagsOrErr = Symbol.getFlags();
+ if (!FlagsOrErr) {
+ consumeError(FlagsOrErr.takeError());
+ continue;
+ }
+
+ if (!(*FlagsOrErr & SymbolRef::SF_Exported))
+ continue;
+
+ Expected<uint64_t> AddressOrErr = Symbol.getAddress();
+ if (!AddressOrErr) {
+ consumeError(AddressOrErr.takeError());
+ continue;
+ }
+ const uint64_t Address = *AddressOrErr;
+
+ auto TypeOrErr = Symbol.getType();
+ if (!TypeOrErr) {
+ consumeError(TypeOrErr.takeError());
+ continue;
+ }
+ const bool IsCode = (*TypeOrErr & SymbolRef::ST_Function);
+
+ auto *DWARFCU = IsCode ? DiCtx->getCompileUnitForCodeAddress(Address)
+ : DiCtx->getCompileUnitForDataAddress(Address);
+ if (!DWARFCU)
+ continue;
+
+ const DWARFDie &DIE = IsCode ? DWARFCU->getSubroutineForAddress(Address)
+ : DWARFCU->getVariableForAddress(Address);
+ const std::string File = DIE.getDeclFile(
+ llvm::DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath);
+ const uint64_t Line = DIE.getDeclLine();
+
+ auto NameOrErr = Symbol.getName();
+ if (!NameOrErr) {
+ consumeError(NameOrErr.takeError());
+ continue;
+ }
+ auto Name = *NameOrErr;
+ auto Sym = parseSymbol(Name);
+
+ if (!File.empty() && Line != 0)
+ LocMap.insert({Sym.Name.str(), RecordLoc(File, Line)});
+ }
+
+ return LocMap;
+}
+
+SymbolToSourceLocMap
+DylibReader::accumulateSourceLocFromDSYM(const StringRef DSYM,
+ const Target &T) {
+ // Find sidecar file.
+ auto DSYMsOrErr = MachOObjectFile::findDsymObjectMembers(DSYM);
+ if (!DSYMsOrErr) {
+ consumeError(DSYMsOrErr.takeError());
+ return SymbolToSourceLocMap();
+ }
+ if (DSYMsOrErr->empty())
+ return SymbolToSourceLocMap();
+
+ const StringRef Path = DSYMsOrErr->front();
+ auto BufOrErr = MemoryBuffer::getFile(Path);
+ if (auto Err = BufOrErr.getError())
+ return SymbolToSourceLocMap();
+
+ auto BinOrErr = createBinary(*BufOrErr.get());
+ if (!BinOrErr) {
+ consumeError(BinOrErr.takeError());
+ return SymbolToSourceLocMap();
+ }
+ // Handle single arch.
+ if (auto *Single = dyn_cast<MachOObjectFile>(BinOrErr->get())) {
+ auto DiCtx = DWARFContext::create(
+ *Single, DWARFContext::ProcessDebugRelocations::Process, nullptr, "",
+ DWARFErrorHandler, DWARFErrorHandler);
+
+ return accumulateLocs(*Single, DiCtx);
+ }
+ // Handle universal companion file.
+ if (auto *Fat = dyn_cast<MachOUniversalBinary>(BinOrErr->get())) {
+ auto ObjForArch = Fat->getObjectForArch(getArchitectureName(T.Arch));
+ if (!ObjForArch) {
+ consumeError(ObjForArch.takeError());
+ return SymbolToSourceLocMap();
+ }
+ auto MachOOrErr = ObjForArch->getAsObjectFile();
+ if (!MachOOrErr) {
+ consumeError(MachOOrErr.takeError());
+ return SymbolToSourceLocMap();
+ }
+ auto &Obj = **MachOOrErr;
+ auto DiCtx = DWARFContext::create(
+ Obj, DWARFContext::ProcessDebugRelocations::Process, nullptr, "",
+ DWARFErrorHandler, DWARFErrorHandler);
+
+ return accumulateLocs(Obj, DiCtx);
+ }
+ return SymbolToSourceLocMap();
+}
diff --git a/llvm/lib/TextAPI/Utils.cpp b/llvm/lib/TextAPI/Utils.cpp
index b5c3999a86ea..c54164504495 100644
--- a/llvm/lib/TextAPI/Utils.cpp
+++ b/llvm/lib/TextAPI/Utils.cpp
@@ -152,3 +152,49 @@ bool llvm::MachO::isPrivateLibrary(StringRef Path, bool IsSymLink) {
}
return false;
}
+
+static StringLiteral RegexMetachars = "()^$|+.[]\\{}";
+
+llvm::Expected<Regex> llvm::MachO::createRegexFromGlob(StringRef Glob) {
+ SmallString<128> RegexString("^");
+ unsigned NumWildcards = 0;
+ for (unsigned i = 0; i < Glob.size(); ++i) {
+ char C = Glob[i];
+ switch (C) {
+ case '?':
+ RegexString += '.';
+ break;
+ case '*': {
+ const char *PrevChar = i > 0 ? Glob.data() + i - 1 : nullptr;
+ NumWildcards = 1;
+ ++i;
+ while (i < Glob.size() && Glob[i] == '*') {
+ ++NumWildcards;
+ ++i;
+ }
+ const char *NextChar = i < Glob.size() ? Glob.data() + i : nullptr;
+
+ if ((NumWildcards > 1) && (PrevChar == nullptr || *PrevChar == '/') &&
+ (NextChar == nullptr || *NextChar == '/')) {
+ RegexString += "(([^/]*(/|$))*)";
+ } else
+ RegexString += "([^/]*)";
+ break;
+ }
+ default:
+ if (RegexMetachars.find(C) != StringRef::npos)
+ RegexString.push_back('\\');
+ RegexString.push_back(C);
+ }
+ }
+ RegexString.push_back('$');
+ if (NumWildcards == 0)
+ return make_error<StringError>("not a glob", inconvertibleErrorCode());
+
+ llvm::Regex Rule = Regex(RegexString);
+ std::string Error;
+ if (!Rule.isValid(Error))
+ return make_error<StringError>(Error, inconvertibleErrorCode());
+
+ return std::move(Rule);
+}
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index f98833bd1198..ff680e998e71 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -5190,6 +5190,12 @@ static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
} else if (auto *LI = dyn_cast<LoadInst>(I)) {
if (LI->getPointerOperand() == UseV)
MA = LI->getAlign();
+ } else if (auto *AI = dyn_cast<AtomicRMWInst>(I)) {
+ if (AI->getPointerOperand() == UseV)
+ MA = AI->getAlign();
+ } else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
+ if (AI->getPointerOperand() == UseV)
+ MA = AI->getAlign();
}
if (!MA || *MA <= QueryingAA.getKnownAlign())
diff --git a/llvm/lib/Transforms/IPO/CMakeLists.txt b/llvm/lib/Transforms/IPO/CMakeLists.txt
index 034f1587ae8d..5fbdbc3a014f 100644
--- a/llvm/lib/Transforms/IPO/CMakeLists.txt
+++ b/llvm/lib/Transforms/IPO/CMakeLists.txt
@@ -35,6 +35,7 @@ add_llvm_component_library(LLVMipo
PartialInlining.cpp
SampleContextTracker.cpp
SampleProfile.cpp
+ SampleProfileMatcher.cpp
SampleProfileProbe.cpp
SCCP.cpp
StripDeadPrototypes.cpp
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index ba5e3b637db7..4e4a49997766 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -310,8 +310,12 @@ public:
// True if this node was effectively removed from the graph, in which case
// its context id set, caller edges, and callee edges should all be empty.
bool isRemoved() const {
- assert(ContextIds.empty() ==
- (CalleeEdges.empty() && CallerEdges.empty()));
+ // Note that we can have non-empty context ids with empty caller and
+ // callee edges if the graph ends up with a single node.
+ if (ContextIds.empty())
+ assert(CalleeEdges.empty() && CallerEdges.empty() &&
+ "Context ids empty but at least one of callee and caller edges "
+ "were not!");
return ContextIds.empty();
}
@@ -353,9 +357,12 @@ public:
}
};
- /// Helper to remove callee edges that have allocation type None (due to not
+ /// Helpers to remove callee edges that have allocation type None (due to not
/// carrying any context ids) after transformations.
void removeNoneTypeCalleeEdges(ContextNode *Node);
+ void
+ recursivelyRemoveNoneTypeCalleeEdges(ContextNode *Node,
+ DenseSet<const ContextNode *> &Visited);
protected:
/// Get a list of nodes corresponding to the stack ids in the given callsite
@@ -2432,10 +2439,42 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
}
template <typename DerivedCCG, typename FuncTy, typename CallTy>
+void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
+ recursivelyRemoveNoneTypeCalleeEdges(
+ ContextNode *Node, DenseSet<const ContextNode *> &Visited) {
+ auto Inserted = Visited.insert(Node);
+ if (!Inserted.second)
+ return;
+
+ removeNoneTypeCalleeEdges(Node);
+
+ for (auto *Clone : Node->Clones)
+ recursivelyRemoveNoneTypeCalleeEdges(Clone, Visited);
+
+ // The recursive call may remove some of this Node's caller edges.
+ // Iterate over a copy and skip any that were removed.
+ auto CallerEdges = Node->CallerEdges;
+ for (auto &Edge : CallerEdges) {
+ // Skip any that have been removed by an earlier recursive call.
+ if (Edge->Callee == nullptr && Edge->Caller == nullptr) {
+ assert(!std::count(Node->CallerEdges.begin(), Node->CallerEdges.end(),
+ Edge));
+ continue;
+ }
+ recursivelyRemoveNoneTypeCalleeEdges(Edge->Caller, Visited);
+ }
+}
+
+template <typename DerivedCCG, typename FuncTy, typename CallTy>
void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones() {
DenseSet<const ContextNode *> Visited;
for (auto &Entry : AllocationCallToContextNodeMap)
identifyClones(Entry.second, Visited);
+ Visited.clear();
+ for (auto &Entry : AllocationCallToContextNodeMap)
+ recursivelyRemoveNoneTypeCalleeEdges(Entry.second, Visited);
+ if (VerifyCCG)
+ check();
}
// helper function to check an AllocType is cold or notcold or both.
@@ -2450,7 +2489,7 @@ template <typename DerivedCCG, typename FuncTy, typename CallTy>
void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
ContextNode *Node, DenseSet<const ContextNode *> &Visited) {
if (VerifyNodes)
- checkNode<DerivedCCG, FuncTy, CallTy>(Node);
+ checkNode<DerivedCCG, FuncTy, CallTy>(Node, /*CheckEdges=*/false);
assert(!Node->CloneOf);
// If Node as a null call, then either it wasn't found in the module (regular
@@ -2511,8 +2550,16 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
std::stable_sort(Node->CallerEdges.begin(), Node->CallerEdges.end(),
[&](const std::shared_ptr<ContextEdge> &A,
const std::shared_ptr<ContextEdge> &B) {
- assert(checkColdOrNotCold(A->AllocTypes) &&
- checkColdOrNotCold(B->AllocTypes));
+ // Nodes with non-empty context ids should be sorted before
+ // those with empty context ids.
+ if (A->ContextIds.empty())
+ // Either B ContextIds are non-empty (in which case we
+ // should return false because B < A), or B ContextIds
+ // are empty, in which case they are equal, and we should
+ // maintain the original relative ordering.
+ return false;
+ if (B->ContextIds.empty())
+ return true;
if (A->AllocTypes == B->AllocTypes)
// Use the first context id for each edge as a
@@ -2591,39 +2638,16 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
Node->AllocTypes != (uint8_t)AllocationType::None);
// Sanity check that no alloc types on clone or its edges are None.
assert(Clone->AllocTypes != (uint8_t)AllocationType::None);
- assert(llvm::none_of(
- Clone->CallerEdges, [&](const std::shared_ptr<ContextEdge> &E) {
- return E->AllocTypes == (uint8_t)AllocationType::None;
- }));
}
- // Cloning may have resulted in some cloned callee edges with type None,
- // because they aren't carrying any contexts. Remove those edges.
- for (auto *Clone : Node->Clones) {
- removeNoneTypeCalleeEdges(Clone);
- if (VerifyNodes)
- checkNode<DerivedCCG, FuncTy, CallTy>(Clone);
- }
// We should still have some context ids on the original Node.
assert(!Node->ContextIds.empty());
- // Remove any callee edges that ended up with alloc type None after creating
- // clones and updating callee edges.
- removeNoneTypeCalleeEdges(Node);
-
// Sanity check that no alloc types on node or edges are None.
assert(Node->AllocTypes != (uint8_t)AllocationType::None);
- assert(llvm::none_of(Node->CalleeEdges,
- [&](const std::shared_ptr<ContextEdge> &E) {
- return E->AllocTypes == (uint8_t)AllocationType::None;
- }));
- assert(llvm::none_of(Node->CallerEdges,
- [&](const std::shared_ptr<ContextEdge> &E) {
- return E->AllocTypes == (uint8_t)AllocationType::None;
- }));
if (VerifyNodes)
- checkNode<DerivedCCG, FuncTy, CallTy>(Node);
+ checkNode<DerivedCCG, FuncTy, CallTy>(Node, /*CheckEdges=*/false);
}
void ModuleCallsiteContextGraph::updateAllocationCall(
diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp
index 9a8040bc4b06..b5f45a252c7b 100644
--- a/llvm/lib/Transforms/IPO/SampleProfile.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp
@@ -71,6 +71,7 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/ProfiledCallGraph.h"
#include "llvm/Transforms/IPO/SampleContextTracker.h"
+#include "llvm/Transforms/IPO/SampleProfileMatcher.h"
#include "llvm/Transforms/IPO/SampleProfileProbe.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
@@ -129,16 +130,16 @@ static cl::opt<std::string> SampleProfileRemappingFile(
"sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);
-static cl::opt<bool> SalvageStaleProfile(
+cl::opt<bool> SalvageStaleProfile(
"salvage-stale-profile", cl::Hidden, cl::init(false),
cl::desc("Salvage stale profile by fuzzy matching and use the remapped "
"location for sample profile query."));
-static cl::opt<bool> ReportProfileStaleness(
+cl::opt<bool> ReportProfileStaleness(
"report-profile-staleness", cl::Hidden, cl::init(false),
cl::desc("Compute and report stale profile statistical metrics."));
-static cl::opt<bool> PersistProfileStaleness(
+cl::opt<bool> PersistProfileStaleness(
"persist-profile-staleness", cl::Hidden, cl::init(false),
cl::desc("Compute stale profile statistical metrics and write it into the "
"native object file(.llvm_stats section)."));
@@ -234,6 +235,21 @@ static cl::opt<unsigned> ProfileICPRelativeHotnessSkip(
cl::desc(
"Skip relative hotness check for ICP up to given number of targets."));
+static cl::opt<unsigned> HotFuncCutoffForStalenessError(
+ "hot-func-cutoff-for-staleness-error", cl::Hidden, cl::init(800000),
+ cl::desc("A function is considered hot for staleness error check if its "
+ "total sample count is above the specified percentile"));
+
+static cl::opt<unsigned> MinfuncsForStalenessError(
+ "min-functions-for-staleness-error", cl::Hidden, cl::init(50),
+ cl::desc("Skip the check if the number of hot functions is smaller than "
+ "the specified number."));
+
+static cl::opt<unsigned> PrecentMismatchForStalenessError(
+ "precent-mismatch-for-staleness-error", cl::Hidden, cl::init(80),
+ cl::desc("Reject the profile if the mismatch percent is higher than the "
+ "given number."));
+
static cl::opt<bool> CallsitePrioritizedInline(
"sample-profile-prioritized-inline", cl::Hidden,
@@ -433,136 +449,6 @@ using CandidateQueue =
PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
CandidateComparer>;
-// Sample profile matching - fuzzy match.
-class SampleProfileMatcher {
- Module &M;
- SampleProfileReader &Reader;
- const PseudoProbeManager *ProbeManager;
- SampleProfileMap FlattenedProfiles;
- // For each function, the matcher generates a map, of which each entry is a
- // mapping from the source location of current build to the source location in
- // the profile.
- StringMap<LocToLocMap> FuncMappings;
-
- // Match state for an anchor/callsite.
- enum class MatchState {
- Unknown = 0,
- // Initial match between input profile and current IR.
- InitialMatch = 1,
- // Initial mismatch between input profile and current IR.
- InitialMismatch = 2,
- // InitialMatch stays matched after fuzzy profile matching.
- UnchangedMatch = 3,
- // InitialMismatch stays mismatched after fuzzy profile matching.
- UnchangedMismatch = 4,
- // InitialMismatch is recovered after fuzzy profile matching.
- RecoveredMismatch = 5,
- // InitialMatch is removed and becomes mismatched after fuzzy profile
- // matching.
- RemovedMatch = 6,
- };
-
- // For each function, store every callsite and its matching state into this
- // map, of which each entry is a pair of callsite location and MatchState.
- // This is used for profile staleness computation and report.
- StringMap<std::unordered_map<LineLocation, MatchState, LineLocationHash>>
- FuncCallsiteMatchStates;
-
- // Profile mismatch statstics:
- uint64_t TotalProfiledFunc = 0;
- // Num of checksum-mismatched function.
- uint64_t NumStaleProfileFunc = 0;
- uint64_t TotalProfiledCallsites = 0;
- uint64_t NumMismatchedCallsites = 0;
- uint64_t NumRecoveredCallsites = 0;
- // Total samples for all profiled functions.
- uint64_t TotalFunctionSamples = 0;
- // Total samples for all checksum-mismatched functions.
- uint64_t MismatchedFunctionSamples = 0;
- uint64_t MismatchedCallsiteSamples = 0;
- uint64_t RecoveredCallsiteSamples = 0;
-
- // A dummy name for unknown indirect callee, used to differentiate from a
- // non-call instruction that also has an empty callee name.
- static constexpr const char *UnknownIndirectCallee =
- "unknown.indirect.callee";
-
-public:
- SampleProfileMatcher(Module &M, SampleProfileReader &Reader,
- const PseudoProbeManager *ProbeManager)
- : M(M), Reader(Reader), ProbeManager(ProbeManager){};
- void runOnModule();
- void clearMatchingData() {
- // Do not clear FuncMappings, it stores IRLoc to ProfLoc remappings which
- // will be used for sample loader.
- FuncCallsiteMatchStates.clear();
- }
-
-private:
- FunctionSamples *getFlattenedSamplesFor(const Function &F) {
- StringRef CanonFName = FunctionSamples::getCanonicalFnName(F);
- auto It = FlattenedProfiles.find(FunctionId(CanonFName));
- if (It != FlattenedProfiles.end())
- return &It->second;
- return nullptr;
- }
- void runOnFunction(const Function &F);
- void findIRAnchors(const Function &F,
- std::map<LineLocation, StringRef> &IRAnchors);
- void findProfileAnchors(
- const FunctionSamples &FS,
- std::map<LineLocation, std::unordered_set<FunctionId>> &ProfileAnchors);
- // Record the callsite match states for profile staleness report, the result
- // is saved in FuncCallsiteMatchStates.
- void recordCallsiteMatchStates(
- const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
- const std::map<LineLocation, std::unordered_set<FunctionId>>
- &ProfileAnchors,
- const LocToLocMap *IRToProfileLocationMap);
-
- bool isMismatchState(const enum MatchState &State) {
- return State == MatchState::InitialMismatch ||
- State == MatchState::UnchangedMismatch ||
- State == MatchState::RemovedMatch;
- };
-
- bool isInitialState(const enum MatchState &State) {
- return State == MatchState::InitialMatch ||
- State == MatchState::InitialMismatch;
- };
-
- bool isFinalState(const enum MatchState &State) {
- return State == MatchState::UnchangedMatch ||
- State == MatchState::UnchangedMismatch ||
- State == MatchState::RecoveredMismatch ||
- State == MatchState::RemovedMatch;
- };
-
- // Count the samples of checksum mismatched function for the top-level
- // function and all inlinees.
- void countMismatchedFuncSamples(const FunctionSamples &FS, bool IsTopLevel);
- // Count the number of mismatched or recovered callsites.
- void countMismatchCallsites(const FunctionSamples &FS);
- // Count the samples of mismatched or recovered callsites for top-level
- // function and all inlinees.
- void countMismatchedCallsiteSamples(const FunctionSamples &FS);
- void computeAndReportProfileStaleness();
-
- LocToLocMap &getIRToProfileLocationMap(const Function &F) {
- auto Ret = FuncMappings.try_emplace(
- FunctionSamples::getCanonicalFnName(F.getName()), LocToLocMap());
- return Ret.first->second;
- }
- void distributeIRToProfileLocationMap();
- void distributeIRToProfileLocationMap(FunctionSamples &FS);
- void runStaleProfileMatching(
- const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
- const std::map<LineLocation, std::unordered_set<FunctionId>>
- &ProfileAnchors,
- LocToLocMap &IRToProfileLocationMap);
- void reportOrPersistProfileStats();
-};
-
/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
@@ -630,6 +516,8 @@ protected:
std::vector<Function *> buildFunctionOrder(Module &M, LazyCallGraph &CG);
std::unique_ptr<ProfiledCallGraph> buildProfiledCallGraph(Module &M);
void generateMDProfMetadata(Function &F);
+ bool rejectHighStalenessProfile(Module &M, ProfileSummaryInfo *PSI,
+ const SampleProfileMap &Profiles);
/// Map from function name to Function *. Used to find the function from
/// the function name. If the function name contains suffix, additional
@@ -747,10 +635,6 @@ void SampleProfileLoaderBaseImpl<Function>::computeDominanceAndLoopInfo(
}
} // namespace llvm
-static bool skipProfileForFunction(const Function &F) {
- return F.isDeclaration() || !F.hasFnAttribute("use-sample-profile");
-}
-
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
if (FunctionSamples::ProfileIsProbeBased)
return getProbeWeight(Inst);
@@ -1894,15 +1778,22 @@ bool SampleProfileLoader::emitAnnotations(Function &F) {
bool Changed = false;
if (FunctionSamples::ProfileIsProbeBased) {
- if (!ProbeManager->profileIsValid(F, *Samples)) {
+ LLVM_DEBUG({
+ if (!ProbeManager->getDesc(F))
+ dbgs() << "Probe descriptor missing for Function " << F.getName()
+ << "\n";
+ });
+
+ if (ProbeManager->profileIsValid(F, *Samples)) {
+ ++NumMatchedProfile;
+ } else {
+ ++NumMismatchedProfile;
LLVM_DEBUG(
dbgs() << "Profile is invalid due to CFG mismatch for Function "
<< F.getName() << "\n");
- ++NumMismatchedProfile;
if (!SalvageStaleProfile)
return false;
}
- ++NumMatchedProfile;
} else {
if (getFunctionLoc(F) == 0)
return false;
@@ -2168,7 +2059,7 @@ bool SampleProfileLoader::doInitialization(Module &M,
// Load pseudo probe descriptors for probe-based function samples.
if (Reader->profileIsProbeBased()) {
- ProbeManager = std::make_unique<PseudoProbeManager>(M);
+ ProbeManager = std::make_unique<PseudoProbeManager>(M, LTOPhase);
if (!ProbeManager->moduleIsProbed(M)) {
const char *Msg =
"Pseudo-probe-based profile requires SampleProfileProbePass";
@@ -2180,531 +2071,60 @@ bool SampleProfileLoader::doInitialization(Module &M,
if (ReportProfileStaleness || PersistProfileStaleness ||
SalvageStaleProfile) {
- MatchingManager =
- std::make_unique<SampleProfileMatcher>(M, *Reader, ProbeManager.get());
+ MatchingManager = std::make_unique<SampleProfileMatcher>(
+ M, *Reader, ProbeManager.get(), LTOPhase);
}
return true;
}
-void SampleProfileMatcher::findIRAnchors(
- const Function &F, std::map<LineLocation, StringRef> &IRAnchors) {
- // For inlined code, recover the original callsite and callee by finding the
- // top-level inline frame. e.g. For frame stack "main:1 @ foo:2 @ bar:3", the
- // top-level frame is "main:1", the callsite is "1" and the callee is "foo".
- auto FindTopLevelInlinedCallsite = [](const DILocation *DIL) {
- assert((DIL && DIL->getInlinedAt()) && "No inlined callsite");
- const DILocation *PrevDIL = nullptr;
- do {
- PrevDIL = DIL;
- DIL = DIL->getInlinedAt();
- } while (DIL->getInlinedAt());
-
- LineLocation Callsite = FunctionSamples::getCallSiteIdentifier(DIL);
- StringRef CalleeName = PrevDIL->getSubprogramLinkageName();
- return std::make_pair(Callsite, CalleeName);
- };
-
- auto GetCanonicalCalleeName = [](const CallBase *CB) {
- StringRef CalleeName = UnknownIndirectCallee;
- if (Function *Callee = CB->getCalledFunction())
- CalleeName = FunctionSamples::getCanonicalFnName(Callee->getName());
- return CalleeName;
- };
-
- // Extract profile matching anchors in the IR.
- for (auto &BB : F) {
- for (auto &I : BB) {
- DILocation *DIL = I.getDebugLoc();
- if (!DIL)
- continue;
-
- if (FunctionSamples::ProfileIsProbeBased) {
- if (auto Probe = extractProbe(I)) {
- // Flatten inlined IR for the matching.
- if (DIL->getInlinedAt()) {
- IRAnchors.emplace(FindTopLevelInlinedCallsite(DIL));
- } else {
- // Use empty StringRef for basic block probe.
- StringRef CalleeName;
- if (const auto *CB = dyn_cast<CallBase>(&I)) {
- // Skip the probe inst whose callee name is "llvm.pseudoprobe".
- if (!isa<IntrinsicInst>(&I))
- CalleeName = GetCanonicalCalleeName(CB);
- }
- IRAnchors.emplace(LineLocation(Probe->Id, 0), CalleeName);
- }
- }
- } else {
- // TODO: For line-number based profile(AutoFDO), currently only support
- // find callsite anchors. In future, we need to parse all the non-call
- // instructions to extract the line locations for profile matching.
- if (!isa<CallBase>(&I) || isa<IntrinsicInst>(&I))
- continue;
-
- if (DIL->getInlinedAt()) {
- IRAnchors.emplace(FindTopLevelInlinedCallsite(DIL));
- } else {
- LineLocation Callsite = FunctionSamples::getCallSiteIdentifier(DIL);
- StringRef CalleeName = GetCanonicalCalleeName(dyn_cast<CallBase>(&I));
- IRAnchors.emplace(Callsite, CalleeName);
- }
- }
- }
- }
-}
-
-void SampleProfileMatcher::findProfileAnchors(
- const FunctionSamples &FS,
- std::map<LineLocation, std::unordered_set<FunctionId>> &ProfileAnchors) {
- auto isInvalidLineOffset = [](uint32_t LineOffset) {
- return LineOffset & 0x8000;
- };
-
- for (const auto &I : FS.getBodySamples()) {
- const LineLocation &Loc = I.first;
- if (isInvalidLineOffset(Loc.LineOffset))
+// Note that this is a module-level check. Even if one module is errored out,
+// the entire build will be errored out. However, the user could make big
+// changes to functions in single module but those changes might not be
+// performance significant to the whole binary. Therefore, to avoid those false
+// positives, we select a reasonable big set of hot functions that are supposed
+// to be globally performance significant, only compute and check the mismatch
+// within those functions. The function selection is based on two criteria:
+// 1) The function is hot enough, which is tuned by a hotness-based
+// flag(HotFuncCutoffForStalenessError). 2) The num of function is large enough
+// which is tuned by the MinfuncsForStalenessError flag.
+bool SampleProfileLoader::rejectHighStalenessProfile(
+ Module &M, ProfileSummaryInfo *PSI, const SampleProfileMap &Profiles) {
+ assert(FunctionSamples::ProfileIsProbeBased &&
+ "Only support for probe-based profile");
+ uint64_t TotalHotFunc = 0;
+ uint64_t NumMismatchedFunc = 0;
+ for (const auto &I : Profiles) {
+ const auto &FS = I.second;
+ const auto *FuncDesc = ProbeManager->getDesc(FS.getGUID());
+ if (!FuncDesc)
continue;
- for (const auto &I : I.second.getCallTargets()) {
- auto Ret = ProfileAnchors.try_emplace(Loc,
- std::unordered_set<FunctionId>());
- Ret.first->second.insert(I.first);
- }
- }
- for (const auto &I : FS.getCallsiteSamples()) {
- const LineLocation &Loc = I.first;
- if (isInvalidLineOffset(Loc.LineOffset))
+ // Use a hotness-based threshold to control the function selection.
+ if (!PSI->isHotCountNthPercentile(HotFuncCutoffForStalenessError,
+ FS.getTotalSamples()))
continue;
- const auto &CalleeMap = I.second;
- for (const auto &I : CalleeMap) {
- auto Ret = ProfileAnchors.try_emplace(Loc,
- std::unordered_set<FunctionId>());
- Ret.first->second.insert(I.first);
- }
- }
-}
-// Call target name anchor based profile fuzzy matching.
-// Input:
-// For IR locations, the anchor is the callee name of direct callsite; For
-// profile locations, it's the call target name for BodySamples or inlinee's
-// profile name for CallsiteSamples.
-// Matching heuristic:
-// First match all the anchors in lexical order, then split the non-anchor
-// locations between the two anchors evenly, first half are matched based on the
-// start anchor, second half are matched based on the end anchor.
-// For example, given:
-// IR locations: [1, 2(foo), 3, 5, 6(bar), 7]
-// Profile locations: [1, 2, 3(foo), 4, 7, 8(bar), 9]
-// The matching gives:
-// [1, 2(foo), 3, 5, 6(bar), 7]
-// | | | | | |
-// [1, 2, 3(foo), 4, 7, 8(bar), 9]
-// The output mapping: [2->3, 3->4, 5->7, 6->8, 7->9].
-void SampleProfileMatcher::runStaleProfileMatching(
- const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
- const std::map<LineLocation, std::unordered_set<FunctionId>>
- &ProfileAnchors,
- LocToLocMap &IRToProfileLocationMap) {
- LLVM_DEBUG(dbgs() << "Run stale profile matching for " << F.getName()
- << "\n");
- assert(IRToProfileLocationMap.empty() &&
- "Run stale profile matching only once per function");
-
- std::unordered_map<FunctionId, std::set<LineLocation>>
- CalleeToCallsitesMap;
- for (const auto &I : ProfileAnchors) {
- const auto &Loc = I.first;
- const auto &Callees = I.second;
- // Filter out possible indirect calls, use direct callee name as anchor.
- if (Callees.size() == 1) {
- FunctionId CalleeName = *Callees.begin();
- const auto &Candidates = CalleeToCallsitesMap.try_emplace(
- CalleeName, std::set<LineLocation>());
- Candidates.first->second.insert(Loc);
- }
- }
-
- auto InsertMatching = [&](const LineLocation &From, const LineLocation &To) {
- // Skip the unchanged location mapping to save memory.
- if (From != To)
- IRToProfileLocationMap.insert({From, To});
- };
-
- // Use function's beginning location as the initial anchor.
- int32_t LocationDelta = 0;
- SmallVector<LineLocation> LastMatchedNonAnchors;
-
- for (const auto &IR : IRAnchors) {
- const auto &Loc = IR.first;
- auto CalleeName = IR.second;
- bool IsMatchedAnchor = false;
- // Match the anchor location in lexical order.
- if (!CalleeName.empty()) {
- auto CandidateAnchors = CalleeToCallsitesMap.find(
- getRepInFormat(CalleeName));
- if (CandidateAnchors != CalleeToCallsitesMap.end() &&
- !CandidateAnchors->second.empty()) {
- auto CI = CandidateAnchors->second.begin();
- const auto Candidate = *CI;
- CandidateAnchors->second.erase(CI);
- InsertMatching(Loc, Candidate);
- LLVM_DEBUG(dbgs() << "Callsite with callee:" << CalleeName
- << " is matched from " << Loc << " to " << Candidate
- << "\n");
- LocationDelta = Candidate.LineOffset - Loc.LineOffset;
-
- // Match backwards for non-anchor locations.
- // The locations in LastMatchedNonAnchors have been matched forwards
- // based on the previous anchor, spilt it evenly and overwrite the
- // second half based on the current anchor.
- for (size_t I = (LastMatchedNonAnchors.size() + 1) / 2;
- I < LastMatchedNonAnchors.size(); I++) {
- const auto &L = LastMatchedNonAnchors[I];
- uint32_t CandidateLineOffset = L.LineOffset + LocationDelta;
- LineLocation Candidate(CandidateLineOffset, L.Discriminator);
- InsertMatching(L, Candidate);
- LLVM_DEBUG(dbgs() << "Location is rematched backwards from " << L
- << " to " << Candidate << "\n");
- }
-
- IsMatchedAnchor = true;
- LastMatchedNonAnchors.clear();
- }
- }
-
- // Match forwards for non-anchor locations.
- if (!IsMatchedAnchor) {
- uint32_t CandidateLineOffset = Loc.LineOffset + LocationDelta;
- LineLocation Candidate(CandidateLineOffset, Loc.Discriminator);
- InsertMatching(Loc, Candidate);
- LLVM_DEBUG(dbgs() << "Location is matched from " << Loc << " to "
- << Candidate << "\n");
- LastMatchedNonAnchors.emplace_back(Loc);
- }
+ TotalHotFunc++;
+ if (ProbeManager->profileIsHashMismatched(*FuncDesc, FS))
+ NumMismatchedFunc++;
}
-}
-
-void SampleProfileMatcher::runOnFunction(const Function &F) {
- // We need to use flattened function samples for matching.
- // Unlike IR, which includes all callsites from the source code, the callsites
- // in profile only show up when they are hit by samples, i,e. the profile
- // callsites in one context may differ from those in another context. To get
- // the maximum number of callsites, we merge the function profiles from all
- // contexts, aka, the flattened profile to find profile anchors.
- const auto *FSFlattened = getFlattenedSamplesFor(F);
- if (!FSFlattened)
- return;
-
- // Anchors for IR. It's a map from IR location to callee name, callee name is
- // empty for non-call instruction and use a dummy name(UnknownIndirectCallee)
- // for unknown indrect callee name.
- std::map<LineLocation, StringRef> IRAnchors;
- findIRAnchors(F, IRAnchors);
- // Anchors for profile. It's a map from callsite location to a set of callee
- // name.
- std::map<LineLocation, std::unordered_set<FunctionId>> ProfileAnchors;
- findProfileAnchors(*FSFlattened, ProfileAnchors);
-
- // Compute the callsite match states for profile staleness report.
- if (ReportProfileStaleness || PersistProfileStaleness)
- recordCallsiteMatchStates(F, IRAnchors, ProfileAnchors, nullptr);
-
- // Run profile matching for checksum mismatched profile, currently only
- // support for pseudo-probe.
- if (SalvageStaleProfile && FunctionSamples::ProfileIsProbeBased &&
- !ProbeManager->profileIsValid(F, *FSFlattened)) {
- // The matching result will be saved to IRToProfileLocationMap, create a new
- // map for each function.
- auto &IRToProfileLocationMap = getIRToProfileLocationMap(F);
- runStaleProfileMatching(F, IRAnchors, ProfileAnchors,
- IRToProfileLocationMap);
- // Find and update callsite match states after matching.
- if (ReportProfileStaleness || PersistProfileStaleness)
- recordCallsiteMatchStates(F, IRAnchors, ProfileAnchors,
- &IRToProfileLocationMap);
- }
-}
-
-void SampleProfileMatcher::recordCallsiteMatchStates(
- const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
- const std::map<LineLocation, std::unordered_set<FunctionId>>
- &ProfileAnchors,
- const LocToLocMap *IRToProfileLocationMap) {
- bool IsPostMatch = IRToProfileLocationMap != nullptr;
- auto &CallsiteMatchStates =
- FuncCallsiteMatchStates[FunctionSamples::getCanonicalFnName(F.getName())];
-
- auto MapIRLocToProfileLoc = [&](const LineLocation &IRLoc) {
- // IRToProfileLocationMap is null in pre-match phrase.
- if (!IRToProfileLocationMap)
- return IRLoc;
- const auto &ProfileLoc = IRToProfileLocationMap->find(IRLoc);
- if (ProfileLoc != IRToProfileLocationMap->end())
- return ProfileLoc->second;
- else
- return IRLoc;
- };
-
- for (const auto &I : IRAnchors) {
- // After fuzzy profile matching, use the matching result to remap the
- // current IR callsite.
- const auto &ProfileLoc = MapIRLocToProfileLoc(I.first);
- const auto &IRCalleeName = I.second;
- const auto &It = ProfileAnchors.find(ProfileLoc);
- if (It == ProfileAnchors.end())
- continue;
- const auto &Callees = It->second;
-
- bool IsCallsiteMatched = false;
- // Since indirect call does not have CalleeName, check conservatively if
- // callsite in the profile is a callsite location. This is to reduce num of
- // false positive since otherwise all the indirect call samples will be
- // reported as mismatching.
- if (IRCalleeName == SampleProfileMatcher::UnknownIndirectCallee)
- IsCallsiteMatched = true;
- else if (Callees.size() == 1 && Callees.count(getRepInFormat(IRCalleeName)))
- IsCallsiteMatched = true;
-
- if (IsCallsiteMatched) {
- auto It = CallsiteMatchStates.find(ProfileLoc);
- if (It == CallsiteMatchStates.end())
- CallsiteMatchStates.emplace(ProfileLoc, MatchState::InitialMatch);
- else if (IsPostMatch) {
- if (It->second == MatchState::InitialMatch)
- It->second = MatchState::UnchangedMatch;
- else if (It->second == MatchState::InitialMismatch)
- It->second = MatchState::RecoveredMismatch;
- }
- }
- }
-
- // Check if there are any callsites in the profile that does not match to any
- // IR callsites.
- for (const auto &I : ProfileAnchors) {
- const auto &Loc = I.first;
- [[maybe_unused]] const auto &Callees = I.second;
- assert(!Callees.empty() && "Callees should not be empty");
- auto It = CallsiteMatchStates.find(Loc);
- if (It == CallsiteMatchStates.end())
- CallsiteMatchStates.emplace(Loc, MatchState::InitialMismatch);
- else if (IsPostMatch) {
- // Update the state if it's not matched(UnchangedMatch or
- // RecoveredMismatch).
- if (It->second == MatchState::InitialMismatch)
- It->second = MatchState::UnchangedMismatch;
- else if (It->second == MatchState::InitialMatch)
- It->second = MatchState::RemovedMatch;
- }
- }
-}
-
-void SampleProfileMatcher::countMismatchedFuncSamples(const FunctionSamples &FS,
- bool IsTopLevel) {
- const auto *FuncDesc = ProbeManager->getDesc(FS.getGUID());
- // Skip the function that is external or renamed.
- if (!FuncDesc)
- return;
-
- if (ProbeManager->profileIsHashMismatched(*FuncDesc, FS)) {
- if (IsTopLevel)
- NumStaleProfileFunc++;
- // Given currently all probe ids are after block probe ids, once the
- // checksum is mismatched, it's likely all the callites are mismatched and
- // dropped. We conservatively count all the samples as mismatched and stop
- // counting the inlinees' profiles.
- MismatchedFunctionSamples += FS.getTotalSamples();
- return;
- }
-
- // Even the current-level function checksum is matched, it's possible that the
- // nested inlinees' checksums are mismatched that affect the inlinee's sample
- // loading, we need to go deeper to check the inlinees' function samples.
- // Similarly, count all the samples as mismatched if the inlinee's checksum is
- // mismatched using this recursive function.
- for (const auto &I : FS.getCallsiteSamples())
- for (const auto &CS : I.second)
- countMismatchedFuncSamples(CS.second, false);
-}
-
-void SampleProfileMatcher::countMismatchedCallsiteSamples(
- const FunctionSamples &FS) {
- auto It = FuncCallsiteMatchStates.find(FS.getFuncName());
- // Skip it if no mismatched callsite or this is an external function.
- if (It == FuncCallsiteMatchStates.end() || It->second.empty())
- return;
- const auto &CallsiteMatchStates = It->second;
-
- auto findMatchState = [&](const LineLocation &Loc) {
- auto It = CallsiteMatchStates.find(Loc);
- if (It == CallsiteMatchStates.end())
- return MatchState::Unknown;
- return It->second;
- };
-
- auto AttributeMismatchedSamples = [&](const enum MatchState &State,
- uint64_t Samples) {
- if (isMismatchState(State))
- MismatchedCallsiteSamples += Samples;
- else if (State == MatchState::RecoveredMismatch)
- RecoveredCallsiteSamples += Samples;
- };
-
- // The non-inlined callsites are saved in the body samples of function
- // profile, go through it to count the non-inlined callsite samples.
- for (const auto &I : FS.getBodySamples())
- AttributeMismatchedSamples(findMatchState(I.first), I.second.getSamples());
-
- // Count the inlined callsite samples.
- for (const auto &I : FS.getCallsiteSamples()) {
- auto State = findMatchState(I.first);
- uint64_t CallsiteSamples = 0;
- for (const auto &CS : I.second)
- CallsiteSamples += CS.second.getTotalSamples();
- AttributeMismatchedSamples(State, CallsiteSamples);
-
- if (isMismatchState(State))
- continue;
-
- // When the current level of inlined call site matches the profiled call
- // site, we need to go deeper along the inline tree to count mismatches from
- // lower level inlinees.
- for (const auto &CS : I.second)
- countMismatchedCallsiteSamples(CS.second);
- }
-}
-
-void SampleProfileMatcher::countMismatchCallsites(const FunctionSamples &FS) {
- auto It = FuncCallsiteMatchStates.find(FS.getFuncName());
- // Skip it if no mismatched callsite or this is an external function.
- if (It == FuncCallsiteMatchStates.end() || It->second.empty())
- return;
- const auto &MatchStates = It->second;
- [[maybe_unused]] bool OnInitialState =
- isInitialState(MatchStates.begin()->second);
- for (const auto &I : MatchStates) {
- TotalProfiledCallsites++;
- assert(
- (OnInitialState ? isInitialState(I.second) : isFinalState(I.second)) &&
- "Profile matching state is inconsistent");
-
- if (isMismatchState(I.second))
- NumMismatchedCallsites++;
- else if (I.second == MatchState::RecoveredMismatch)
- NumRecoveredCallsites++;
- }
-}
-
-void SampleProfileMatcher::computeAndReportProfileStaleness() {
- if (!ReportProfileStaleness && !PersistProfileStaleness)
- return;
-
- // Count profile mismatches for profile staleness report.
- for (const auto &F : M) {
- if (skipProfileForFunction(F))
- continue;
- // As the stats will be merged by linker, skip reporting the metrics for
- // imported functions to avoid repeated counting.
- if (GlobalValue::isAvailableExternallyLinkage(F.getLinkage()))
- continue;
- const auto *FS = Reader.getSamplesFor(F);
- if (!FS)
- continue;
- TotalProfiledFunc++;
- TotalFunctionSamples += FS->getTotalSamples();
-
- // Checksum mismatch is only used in pseudo-probe mode.
- if (FunctionSamples::ProfileIsProbeBased)
- countMismatchedFuncSamples(*FS, true);
-
- // Count mismatches and samples for calliste.
- countMismatchCallsites(*FS);
- countMismatchedCallsiteSamples(*FS);
- }
-
- if (ReportProfileStaleness) {
- if (FunctionSamples::ProfileIsProbeBased) {
- errs() << "(" << NumStaleProfileFunc << "/" << TotalProfiledFunc << ")"
- << " of functions' profile are invalid and "
- << " (" << MismatchedFunctionSamples << "/" << TotalFunctionSamples
- << ") of samples are discarded due to function hash mismatch.\n";
- }
- errs() << "(" << (NumMismatchedCallsites + NumRecoveredCallsites) << "/"
- << TotalProfiledCallsites << ")"
- << " of callsites' profile are invalid and "
- << "(" << (MismatchedCallsiteSamples + RecoveredCallsiteSamples)
- << "/" << TotalFunctionSamples << ")"
- << " of samples are discarded due to callsite location mismatch.\n";
- errs() << "(" << NumRecoveredCallsites << "/"
- << (NumRecoveredCallsites + NumMismatchedCallsites) << ")"
- << " of callsites and "
- << "(" << RecoveredCallsiteSamples << "/"
- << (RecoveredCallsiteSamples + MismatchedCallsiteSamples) << ")"
- << " of samples are recovered by stale profile matching.\n";
- }
-
- if (PersistProfileStaleness) {
- LLVMContext &Ctx = M.getContext();
- MDBuilder MDB(Ctx);
-
- SmallVector<std::pair<StringRef, uint64_t>> ProfStatsVec;
- if (FunctionSamples::ProfileIsProbeBased) {
- ProfStatsVec.emplace_back("NumStaleProfileFunc", NumStaleProfileFunc);
- ProfStatsVec.emplace_back("TotalProfiledFunc", TotalProfiledFunc);
- ProfStatsVec.emplace_back("MismatchedFunctionSamples",
- MismatchedFunctionSamples);
- ProfStatsVec.emplace_back("TotalFunctionSamples", TotalFunctionSamples);
- }
-
- ProfStatsVec.emplace_back("NumMismatchedCallsites", NumMismatchedCallsites);
- ProfStatsVec.emplace_back("NumRecoveredCallsites", NumRecoveredCallsites);
- ProfStatsVec.emplace_back("TotalProfiledCallsites", TotalProfiledCallsites);
- ProfStatsVec.emplace_back("MismatchedCallsiteSamples",
- MismatchedCallsiteSamples);
- ProfStatsVec.emplace_back("RecoveredCallsiteSamples",
- RecoveredCallsiteSamples);
-
- auto *MD = MDB.createLLVMStats(ProfStatsVec);
- auto *NMD = M.getOrInsertNamedMetadata("llvm.stats");
- NMD->addOperand(MD);
- }
-}
-
-void SampleProfileMatcher::runOnModule() {
- ProfileConverter::flattenProfile(Reader.getProfiles(), FlattenedProfiles,
- FunctionSamples::ProfileIsCS);
- for (auto &F : M) {
- if (skipProfileForFunction(F))
- continue;
- runOnFunction(F);
- }
- if (SalvageStaleProfile)
- distributeIRToProfileLocationMap();
-
- computeAndReportProfileStaleness();
-}
-
-void SampleProfileMatcher::distributeIRToProfileLocationMap(
- FunctionSamples &FS) {
- const auto ProfileMappings = FuncMappings.find(FS.getFuncName());
- if (ProfileMappings != FuncMappings.end()) {
- FS.setIRToProfileLocationMap(&(ProfileMappings->second));
- }
-
- for (auto &Inlinees : FS.getCallsiteSamples()) {
- for (auto FS : Inlinees.second) {
- distributeIRToProfileLocationMap(FS.second);
- }
- }
-}
+ // Make sure that the num of selected function is not too small to distinguish
+ // from the user's benign changes.
+ if (TotalHotFunc < MinfuncsForStalenessError)
+ return false;
-// Use a central place to distribute the matching results. Outlined and inlined
-// profile with the function name will be set to the same pointer.
-void SampleProfileMatcher::distributeIRToProfileLocationMap() {
- for (auto &I : Reader.getProfiles()) {
- distributeIRToProfileLocationMap(I.second);
+ // Finally check the mismatch percentage against the threshold.
+ if (NumMismatchedFunc * 100 >=
+ TotalHotFunc * PrecentMismatchForStalenessError) {
+ auto &Ctx = M.getContext();
+ const char *Msg =
+ "The input profile significantly mismatches current source code. "
+ "Please recollect profile to avoid performance regression.";
+ Ctx.diagnose(DiagnosticInfoSampleProfile(M.getModuleIdentifier(), Msg));
+ return true;
}
+ return false;
}
bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
@@ -2718,6 +2138,11 @@ bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
ProfileSummary::PSK_Sample);
PSI->refresh();
}
+
+ if (FunctionSamples::ProfileIsProbeBased &&
+ rejectHighStalenessProfile(M, PSI, Reader->getProfiles()))
+ return false;
+
// Compute the total number of samples collected in this profile.
for (const auto &I : Reader->getProfiles())
TotalCollectedSamples += I.second.getTotalSamples();
diff --git a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp
new file mode 100644
index 000000000000..bb46539989ab
--- /dev/null
+++ b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp
@@ -0,0 +1,552 @@
+//===- SampleProfileMatcher.cpp - Sampling-based Stale Profile Matcher ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SampleProfileMatcher used for stale
+// profile matching.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/IPO/SampleProfileMatcher.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/MDBuilder.h"
+
+using namespace llvm;
+using namespace sampleprof;
+
+#define DEBUG_TYPE "sample-profile-matcher"
+
+extern cl::opt<bool> SalvageStaleProfile;
+extern cl::opt<bool> PersistProfileStaleness;
+extern cl::opt<bool> ReportProfileStaleness;
+
+void SampleProfileMatcher::findIRAnchors(
+ const Function &F, std::map<LineLocation, StringRef> &IRAnchors) {
+ // For inlined code, recover the original callsite and callee by finding the
+ // top-level inline frame. e.g. For frame stack "main:1 @ foo:2 @ bar:3", the
+ // top-level frame is "main:1", the callsite is "1" and the callee is "foo".
+ auto FindTopLevelInlinedCallsite = [](const DILocation *DIL) {
+ assert((DIL && DIL->getInlinedAt()) && "No inlined callsite");
+ const DILocation *PrevDIL = nullptr;
+ do {
+ PrevDIL = DIL;
+ DIL = DIL->getInlinedAt();
+ } while (DIL->getInlinedAt());
+
+ LineLocation Callsite = FunctionSamples::getCallSiteIdentifier(DIL);
+ StringRef CalleeName = PrevDIL->getSubprogramLinkageName();
+ return std::make_pair(Callsite, CalleeName);
+ };
+
+ auto GetCanonicalCalleeName = [](const CallBase *CB) {
+ StringRef CalleeName = UnknownIndirectCallee;
+ if (Function *Callee = CB->getCalledFunction())
+ CalleeName = FunctionSamples::getCanonicalFnName(Callee->getName());
+ return CalleeName;
+ };
+
+ // Extract profile matching anchors in the IR.
+ for (auto &BB : F) {
+ for (auto &I : BB) {
+ DILocation *DIL = I.getDebugLoc();
+ if (!DIL)
+ continue;
+
+ if (FunctionSamples::ProfileIsProbeBased) {
+ if (auto Probe = extractProbe(I)) {
+ // Flatten inlined IR for the matching.
+ if (DIL->getInlinedAt()) {
+ IRAnchors.emplace(FindTopLevelInlinedCallsite(DIL));
+ } else {
+ // Use empty StringRef for basic block probe.
+ StringRef CalleeName;
+ if (const auto *CB = dyn_cast<CallBase>(&I)) {
+ // Skip the probe inst whose callee name is "llvm.pseudoprobe".
+ if (!isa<IntrinsicInst>(&I))
+ CalleeName = GetCanonicalCalleeName(CB);
+ }
+ IRAnchors.emplace(LineLocation(Probe->Id, 0), CalleeName);
+ }
+ }
+ } else {
+ // TODO: For line-number based profile(AutoFDO), currently only support
+ // find callsite anchors. In future, we need to parse all the non-call
+ // instructions to extract the line locations for profile matching.
+ if (!isa<CallBase>(&I) || isa<IntrinsicInst>(&I))
+ continue;
+
+ if (DIL->getInlinedAt()) {
+ IRAnchors.emplace(FindTopLevelInlinedCallsite(DIL));
+ } else {
+ LineLocation Callsite = FunctionSamples::getCallSiteIdentifier(DIL);
+ StringRef CalleeName = GetCanonicalCalleeName(dyn_cast<CallBase>(&I));
+ IRAnchors.emplace(Callsite, CalleeName);
+ }
+ }
+ }
+ }
+}
+
+void SampleProfileMatcher::findProfileAnchors(
+ const FunctionSamples &FS,
+ std::map<LineLocation, std::unordered_set<FunctionId>> &ProfileAnchors) {
+ auto isInvalidLineOffset = [](uint32_t LineOffset) {
+ return LineOffset & 0x8000;
+ };
+
+ for (const auto &I : FS.getBodySamples()) {
+ const LineLocation &Loc = I.first;
+ if (isInvalidLineOffset(Loc.LineOffset))
+ continue;
+ for (const auto &I : I.second.getCallTargets()) {
+ auto Ret =
+ ProfileAnchors.try_emplace(Loc, std::unordered_set<FunctionId>());
+ Ret.first->second.insert(I.first);
+ }
+ }
+
+ for (const auto &I : FS.getCallsiteSamples()) {
+ const LineLocation &Loc = I.first;
+ if (isInvalidLineOffset(Loc.LineOffset))
+ continue;
+ const auto &CalleeMap = I.second;
+ for (const auto &I : CalleeMap) {
+ auto Ret =
+ ProfileAnchors.try_emplace(Loc, std::unordered_set<FunctionId>());
+ Ret.first->second.insert(I.first);
+ }
+ }
+}
+
+// Call target name anchor based profile fuzzy matching.
+// Input:
+// For IR locations, the anchor is the callee name of direct callsite; For
+// profile locations, it's the call target name for BodySamples or inlinee's
+// profile name for CallsiteSamples.
+// Matching heuristic:
+// First match all the anchors in lexical order, then split the non-anchor
+// locations between the two anchors evenly, first half are matched based on the
+// start anchor, second half are matched based on the end anchor.
+// For example, given:
+// IR locations: [1, 2(foo), 3, 5, 6(bar), 7]
+// Profile locations: [1, 2, 3(foo), 4, 7, 8(bar), 9]
+// The matching gives:
+// [1, 2(foo), 3, 5, 6(bar), 7]
+// | | | | | |
+// [1, 2, 3(foo), 4, 7, 8(bar), 9]
+// The output mapping: [2->3, 3->4, 5->7, 6->8, 7->9].
+void SampleProfileMatcher::runStaleProfileMatching(
+ const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
+ const std::map<LineLocation, std::unordered_set<FunctionId>>
+ &ProfileAnchors,
+ LocToLocMap &IRToProfileLocationMap) {
+ LLVM_DEBUG(dbgs() << "Run stale profile matching for " << F.getName()
+ << "\n");
+ assert(IRToProfileLocationMap.empty() &&
+ "Run stale profile matching only once per function");
+
+ std::unordered_map<FunctionId, std::set<LineLocation>> CalleeToCallsitesMap;
+ for (const auto &I : ProfileAnchors) {
+ const auto &Loc = I.first;
+ const auto &Callees = I.second;
+ // Filter out possible indirect calls, use direct callee name as anchor.
+ if (Callees.size() == 1) {
+ FunctionId CalleeName = *Callees.begin();
+ const auto &Candidates = CalleeToCallsitesMap.try_emplace(
+ CalleeName, std::set<LineLocation>());
+ Candidates.first->second.insert(Loc);
+ }
+ }
+
+ auto InsertMatching = [&](const LineLocation &From, const LineLocation &To) {
+ // Skip the unchanged location mapping to save memory.
+ if (From != To)
+ IRToProfileLocationMap.insert({From, To});
+ };
+
+ // Use function's beginning location as the initial anchor.
+ int32_t LocationDelta = 0;
+ SmallVector<LineLocation> LastMatchedNonAnchors;
+
+ for (const auto &IR : IRAnchors) {
+ const auto &Loc = IR.first;
+ auto CalleeName = IR.second;
+ bool IsMatchedAnchor = false;
+ // Match the anchor location in lexical order.
+ if (!CalleeName.empty()) {
+ auto CandidateAnchors =
+ CalleeToCallsitesMap.find(getRepInFormat(CalleeName));
+ if (CandidateAnchors != CalleeToCallsitesMap.end() &&
+ !CandidateAnchors->second.empty()) {
+ auto CI = CandidateAnchors->second.begin();
+ const auto Candidate = *CI;
+ CandidateAnchors->second.erase(CI);
+ InsertMatching(Loc, Candidate);
+ LLVM_DEBUG(dbgs() << "Callsite with callee:" << CalleeName
+ << " is matched from " << Loc << " to " << Candidate
+ << "\n");
+ LocationDelta = Candidate.LineOffset - Loc.LineOffset;
+
+ // Match backwards for non-anchor locations.
+ // The locations in LastMatchedNonAnchors have been matched forwards
+ // based on the previous anchor, split it evenly and overwrite the
+ // second half based on the current anchor.
+ for (size_t I = (LastMatchedNonAnchors.size() + 1) / 2;
+ I < LastMatchedNonAnchors.size(); I++) {
+ const auto &L = LastMatchedNonAnchors[I];
+ uint32_t CandidateLineOffset = L.LineOffset + LocationDelta;
+ LineLocation Candidate(CandidateLineOffset, L.Discriminator);
+ InsertMatching(L, Candidate);
+ LLVM_DEBUG(dbgs() << "Location is rematched backwards from " << L
+ << " to " << Candidate << "\n");
+ }
+
+ IsMatchedAnchor = true;
+ LastMatchedNonAnchors.clear();
+ }
+ }
+
+ // Match forwards for non-anchor locations.
+ if (!IsMatchedAnchor) {
+ uint32_t CandidateLineOffset = Loc.LineOffset + LocationDelta;
+ LineLocation Candidate(CandidateLineOffset, Loc.Discriminator);
+ InsertMatching(Loc, Candidate);
+ LLVM_DEBUG(dbgs() << "Location is matched from " << Loc << " to "
+ << Candidate << "\n");
+ LastMatchedNonAnchors.emplace_back(Loc);
+ }
+ }
+}
+
+void SampleProfileMatcher::runOnFunction(Function &F) {
+ // We need to use flattened function samples for matching.
+ // Unlike IR, which includes all callsites from the source code, the callsites
+ // in profile only show up when they are hit by samples, i.e. the profile
+ // callsites in one context may differ from those in another context. To get
+ // the maximum number of callsites, we merge the function profiles from all
+ // contexts, aka, the flattened profile to find profile anchors.
+ const auto *FSFlattened = getFlattenedSamplesFor(F);
+ if (!FSFlattened)
+ return;
+
+ // Anchors for IR. It's a map from IR location to callee name, callee name is
+ // empty for non-call instruction and use a dummy name(UnknownIndirectCallee)
+ // for unknown indirect callee name.
+ std::map<LineLocation, StringRef> IRAnchors;
+ findIRAnchors(F, IRAnchors);
+ // Anchors for profile. It's a map from callsite location to a set of callee
+ // name.
+ std::map<LineLocation, std::unordered_set<FunctionId>> ProfileAnchors;
+ findProfileAnchors(*FSFlattened, ProfileAnchors);
+
+ // Compute the callsite match states for profile staleness report.
+ if (ReportProfileStaleness || PersistProfileStaleness)
+ recordCallsiteMatchStates(F, IRAnchors, ProfileAnchors, nullptr);
+
+ // Run profile matching for checksum mismatched profile, currently only
+ // support for pseudo-probe.
+ if (SalvageStaleProfile && FunctionSamples::ProfileIsProbeBased &&
+ !ProbeManager->profileIsValid(F, *FSFlattened)) {
+ // For imported functions, the checksum metadata(pseudo_probe_desc) are
+ // dropped, so we leverage function attribute(profile-checksum-mismatch) to
+ // transfer the info: add the attribute during pre-link phase and check it
+ // during post-link phase(see "profileIsValid").
+ if (FunctionSamples::ProfileIsProbeBased &&
+ LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink)
+ F.addFnAttr("profile-checksum-mismatch");
+
+ // The matching result will be saved to IRToProfileLocationMap, create a
+ // new map for each function.
+ auto &IRToProfileLocationMap = getIRToProfileLocationMap(F);
+ runStaleProfileMatching(F, IRAnchors, ProfileAnchors,
+ IRToProfileLocationMap);
+ // Find and update callsite match states after matching.
+ if (ReportProfileStaleness || PersistProfileStaleness)
+ recordCallsiteMatchStates(F, IRAnchors, ProfileAnchors,
+ &IRToProfileLocationMap);
+ }
+}
+
+void SampleProfileMatcher::recordCallsiteMatchStates(
+ const Function &F, const std::map<LineLocation, StringRef> &IRAnchors,
+ const std::map<LineLocation, std::unordered_set<FunctionId>>
+ &ProfileAnchors,
+ const LocToLocMap *IRToProfileLocationMap) {
+ bool IsPostMatch = IRToProfileLocationMap != nullptr;
+ auto &CallsiteMatchStates =
+ FuncCallsiteMatchStates[FunctionSamples::getCanonicalFnName(F.getName())];
+
+ auto MapIRLocToProfileLoc = [&](const LineLocation &IRLoc) {
+ // IRToProfileLocationMap is null in the pre-match phase.
+ if (!IRToProfileLocationMap)
+ return IRLoc;
+ const auto &ProfileLoc = IRToProfileLocationMap->find(IRLoc);
+ if (ProfileLoc != IRToProfileLocationMap->end())
+ return ProfileLoc->second;
+ else
+ return IRLoc;
+ };
+
+ for (const auto &I : IRAnchors) {
+ // After fuzzy profile matching, use the matching result to remap the
+ // current IR callsite.
+ const auto &ProfileLoc = MapIRLocToProfileLoc(I.first);
+ const auto &IRCalleeName = I.second;
+ const auto &It = ProfileAnchors.find(ProfileLoc);
+ if (It == ProfileAnchors.end())
+ continue;
+ const auto &Callees = It->second;
+
+ bool IsCallsiteMatched = false;
+ // Since indirect call does not have CalleeName, check conservatively if
+ // callsite in the profile is a callsite location. This is to reduce the
+ // number of false positives since otherwise all the indirect call samples
+ // reported as mismatching.
+ if (IRCalleeName == SampleProfileMatcher::UnknownIndirectCallee)
+ IsCallsiteMatched = true;
+ else if (Callees.size() == 1 && Callees.count(getRepInFormat(IRCalleeName)))
+ IsCallsiteMatched = true;
+
+ if (IsCallsiteMatched) {
+ auto It = CallsiteMatchStates.find(ProfileLoc);
+ if (It == CallsiteMatchStates.end())
+ CallsiteMatchStates.emplace(ProfileLoc, MatchState::InitialMatch);
+ else if (IsPostMatch) {
+ if (It->second == MatchState::InitialMatch)
+ It->second = MatchState::UnchangedMatch;
+ else if (It->second == MatchState::InitialMismatch)
+ It->second = MatchState::RecoveredMismatch;
+ }
+ }
+ }
+
+ // Check if there are any callsites in the profile that do not match to any
+ // IR callsites.
+ for (const auto &I : ProfileAnchors) {
+ const auto &Loc = I.first;
+ [[maybe_unused]] const auto &Callees = I.second;
+ assert(!Callees.empty() && "Callees should not be empty");
+ auto It = CallsiteMatchStates.find(Loc);
+ if (It == CallsiteMatchStates.end())
+ CallsiteMatchStates.emplace(Loc, MatchState::InitialMismatch);
+ else if (IsPostMatch) {
+ // Update the state if it's not matched(UnchangedMatch or
+ // RecoveredMismatch).
+ if (It->second == MatchState::InitialMismatch)
+ It->second = MatchState::UnchangedMismatch;
+ else if (It->second == MatchState::InitialMatch)
+ It->second = MatchState::RemovedMatch;
+ }
+ }
+}
+
+void SampleProfileMatcher::countMismatchedFuncSamples(const FunctionSamples &FS,
+ bool IsTopLevel) {
+ const auto *FuncDesc = ProbeManager->getDesc(FS.getGUID());
+ // Skip the function that is external or renamed.
+ if (!FuncDesc)
+ return;
+
+ if (ProbeManager->profileIsHashMismatched(*FuncDesc, FS)) {
+ if (IsTopLevel)
+ NumStaleProfileFunc++;
+ // Given currently all probe ids are after block probe ids, once the
+ // checksum is mismatched, it's likely all the callsites are mismatched and
+ // dropped. We conservatively count all the samples as mismatched and stop
+ // counting the inlinees' profiles.
+ MismatchedFunctionSamples += FS.getTotalSamples();
+ return;
+ }
+
+ // Even the current-level function checksum is matched, it's possible that the
+ // nested inlinees' checksums are mismatched that affect the inlinee's sample
+ // loading, we need to go deeper to check the inlinees' function samples.
+ // Similarly, count all the samples as mismatched if the inlinee's checksum is
+ // mismatched using this recursive function.
+ for (const auto &I : FS.getCallsiteSamples())
+ for (const auto &CS : I.second)
+ countMismatchedFuncSamples(CS.second, false);
+}
+
+void SampleProfileMatcher::countMismatchedCallsiteSamples(
+ const FunctionSamples &FS) {
+ auto It = FuncCallsiteMatchStates.find(FS.getFuncName());
+ // Skip it if no mismatched callsite or this is an external function.
+ if (It == FuncCallsiteMatchStates.end() || It->second.empty())
+ return;
+ const auto &CallsiteMatchStates = It->second;
+
+ auto findMatchState = [&](const LineLocation &Loc) {
+ auto It = CallsiteMatchStates.find(Loc);
+ if (It == CallsiteMatchStates.end())
+ return MatchState::Unknown;
+ return It->second;
+ };
+
+ auto AttributeMismatchedSamples = [&](const enum MatchState &State,
+ uint64_t Samples) {
+ if (isMismatchState(State))
+ MismatchedCallsiteSamples += Samples;
+ else if (State == MatchState::RecoveredMismatch)
+ RecoveredCallsiteSamples += Samples;
+ };
+
+ // The non-inlined callsites are saved in the body samples of function
+ // profile, go through it to count the non-inlined callsite samples.
+ for (const auto &I : FS.getBodySamples())
+ AttributeMismatchedSamples(findMatchState(I.first), I.second.getSamples());
+
+ // Count the inlined callsite samples.
+ for (const auto &I : FS.getCallsiteSamples()) {
+ auto State = findMatchState(I.first);
+ uint64_t CallsiteSamples = 0;
+ for (const auto &CS : I.second)
+ CallsiteSamples += CS.second.getTotalSamples();
+ AttributeMismatchedSamples(State, CallsiteSamples);
+
+ if (isMismatchState(State))
+ continue;
+
+ // When the current level of inlined call site matches the profiled call
+ // site, we need to go deeper along the inline tree to count mismatches from
+ // lower level inlinees.
+ for (const auto &CS : I.second)
+ countMismatchedCallsiteSamples(CS.second);
+ }
+}
+
+void SampleProfileMatcher::countMismatchCallsites(const FunctionSamples &FS) {
+ auto It = FuncCallsiteMatchStates.find(FS.getFuncName());
+ // Skip it if no mismatched callsite or this is an external function.
+ if (It == FuncCallsiteMatchStates.end() || It->second.empty())
+ return;
+ const auto &MatchStates = It->second;
+ [[maybe_unused]] bool OnInitialState =
+ isInitialState(MatchStates.begin()->second);
+ for (const auto &I : MatchStates) {
+ TotalProfiledCallsites++;
+ assert(
+ (OnInitialState ? isInitialState(I.second) : isFinalState(I.second)) &&
+ "Profile matching state is inconsistent");
+
+ if (isMismatchState(I.second))
+ NumMismatchedCallsites++;
+ else if (I.second == MatchState::RecoveredMismatch)
+ NumRecoveredCallsites++;
+ }
+}
+
+void SampleProfileMatcher::computeAndReportProfileStaleness() {
+ if (!ReportProfileStaleness && !PersistProfileStaleness)
+ return;
+
+ // Count profile mismatches for profile staleness report.
+ for (const auto &F : M) {
+ if (skipProfileForFunction(F))
+ continue;
+ // As the stats will be merged by linker, skip reporting the metrics for
+ // imported functions to avoid repeated counting.
+ if (GlobalValue::isAvailableExternallyLinkage(F.getLinkage()))
+ continue;
+ const auto *FS = Reader.getSamplesFor(F);
+ if (!FS)
+ continue;
+ TotalProfiledFunc++;
+ TotalFunctionSamples += FS->getTotalSamples();
+
+ // Checksum mismatch is only used in pseudo-probe mode.
+ if (FunctionSamples::ProfileIsProbeBased)
+ countMismatchedFuncSamples(*FS, true);
+
+ // Count mismatches and samples for callsites.
+ countMismatchCallsites(*FS);
+ countMismatchedCallsiteSamples(*FS);
+ }
+
+ if (ReportProfileStaleness) {
+ if (FunctionSamples::ProfileIsProbeBased) {
+ errs() << "(" << NumStaleProfileFunc << "/" << TotalProfiledFunc
+ << ") of functions' profile are invalid and ("
+ << MismatchedFunctionSamples << "/" << TotalFunctionSamples
+ << ") of samples are discarded due to function hash mismatch.\n";
+ }
+ errs() << "(" << (NumMismatchedCallsites + NumRecoveredCallsites) << "/"
+ << TotalProfiledCallsites
+ << ") of callsites' profile are invalid and ("
+ << (MismatchedCallsiteSamples + RecoveredCallsiteSamples) << "/"
+ << TotalFunctionSamples
+ << ") of samples are discarded due to callsite location mismatch.\n";
+ errs() << "(" << NumRecoveredCallsites << "/"
+ << (NumRecoveredCallsites + NumMismatchedCallsites)
+ << ") of callsites and (" << RecoveredCallsiteSamples << "/"
+ << (RecoveredCallsiteSamples + MismatchedCallsiteSamples)
+ << ") of samples are recovered by stale profile matching.\n";
+ }
+
+ if (PersistProfileStaleness) {
+ LLVMContext &Ctx = M.getContext();
+ MDBuilder MDB(Ctx);
+
+ SmallVector<std::pair<StringRef, uint64_t>> ProfStatsVec;
+ if (FunctionSamples::ProfileIsProbeBased) {
+ ProfStatsVec.emplace_back("NumStaleProfileFunc", NumStaleProfileFunc);
+ ProfStatsVec.emplace_back("TotalProfiledFunc", TotalProfiledFunc);
+ ProfStatsVec.emplace_back("MismatchedFunctionSamples",
+ MismatchedFunctionSamples);
+ ProfStatsVec.emplace_back("TotalFunctionSamples", TotalFunctionSamples);
+ }
+
+ ProfStatsVec.emplace_back("NumMismatchedCallsites", NumMismatchedCallsites);
+ ProfStatsVec.emplace_back("NumRecoveredCallsites", NumRecoveredCallsites);
+ ProfStatsVec.emplace_back("TotalProfiledCallsites", TotalProfiledCallsites);
+ ProfStatsVec.emplace_back("MismatchedCallsiteSamples",
+ MismatchedCallsiteSamples);
+ ProfStatsVec.emplace_back("RecoveredCallsiteSamples",
+ RecoveredCallsiteSamples);
+
+ auto *MD = MDB.createLLVMStats(ProfStatsVec);
+ auto *NMD = M.getOrInsertNamedMetadata("llvm.stats");
+ NMD->addOperand(MD);
+ }
+}
+
+void SampleProfileMatcher::runOnModule() {
+ ProfileConverter::flattenProfile(Reader.getProfiles(), FlattenedProfiles,
+ FunctionSamples::ProfileIsCS);
+ for (auto &F : M) {
+ if (skipProfileForFunction(F))
+ continue;
+ runOnFunction(F);
+ }
+ if (SalvageStaleProfile)
+ distributeIRToProfileLocationMap();
+
+ computeAndReportProfileStaleness();
+}
+
+void SampleProfileMatcher::distributeIRToProfileLocationMap(
+ FunctionSamples &FS) {
+ const auto ProfileMappings = FuncMappings.find(FS.getFuncName());
+ if (ProfileMappings != FuncMappings.end()) {
+ FS.setIRToProfileLocationMap(&(ProfileMappings->second));
+ }
+
+ for (auto &Callees :
+ const_cast<CallsiteSampleMap &>(FS.getCallsiteSamples())) {
+ for (auto &FS : Callees.second) {
+ distributeIRToProfileLocationMap(FS.second);
+ }
+ }
+}
+
+// Use a central place to distribute the matching results. Outlined and inlined
+// profile with the function name will be set to the same pointer.
+void SampleProfileMatcher::distributeIRToProfileLocationMap() {
+ for (auto &I : Reader.getProfiles()) {
+ distributeIRToProfileLocationMap(I.second);
+ }
+}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index aaf7184a5562..fa1e2280991f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -819,7 +819,7 @@ static Instruction *foldNoWrapAdd(BinaryOperator &Add,
Value *X;
const APInt *C1, *C2;
if (match(Op1, m_APInt(C1)) &&
- match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
+ match(Op0, m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_APInt(C2))))) &&
C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
Constant *NewC =
ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
@@ -829,14 +829,16 @@ static Instruction *foldNoWrapAdd(BinaryOperator &Add,
// More general combining of constants in the wide type.
// (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
Constant *NarrowC;
- if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) {
+ if (match(Op0,
+ m_OneUse(m_SExt(m_NSWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
Value *WideC = Builder.CreateSExt(NarrowC, Ty);
Value *NewC = Builder.CreateAdd(WideC, Op1C);
Value *WideX = Builder.CreateSExt(X, Ty);
return BinaryOperator::CreateAdd(WideX, NewC);
}
// (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
- if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) {
+ if (match(Op0,
+ m_OneUse(m_ZExt(m_NUWAddLike(m_Value(X), m_Constant(NarrowC)))))) {
Value *WideC = Builder.CreateZExt(NarrowC, Ty);
Value *NewC = Builder.CreateAdd(WideC, Op1C);
Value *WideX = Builder.CreateZExt(X, Ty);
@@ -2522,9 +2524,10 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
// sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1)
// --> (A < 0) ? -A : A
Value *IsNeg = Builder.CreateIsNeg(A);
- // Copy the nuw/nsw flags from the sub to the negate.
- Value *NegA = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
- I.hasNoSignedWrap());
+ // Copy the nsw flags from the sub to the negate.
+ Value *NegA = I.hasNoUnsignedWrap()
+ ? Constant::getNullValue(A->getType())
+ : Builder.CreateNeg(A, "", I.hasNoSignedWrap());
return SelectInst::Create(IsNeg, NegA, A);
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index bf8794bba885..c0cf1a7db726 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4250,10 +4250,11 @@ static Instruction *canonicalizeAbs(BinaryOperator &Xor,
// xor (add A, Op1), Op1 ; add -1 and flip bits if negative
// --> (A < 0) ? -A : A
Value *IsNeg = Builder.CreateIsNeg(A);
- // Copy the nuw/nsw flags from the add to the negate.
+ // Copy the nsw flags from the add to the negate.
auto *Add = cast<BinaryOperator>(Op0);
- Value *NegA = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
- Add->hasNoSignedWrap());
+ Value *NegA = Add->hasNoUnsignedWrap()
+ ? Constant::getNullValue(A->getType())
+ : Builder.CreateNeg(A, "", Add->hasNoSignedWrap());
return SelectInst::Create(IsNeg, NegA, A);
}
return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 426b548c074a..b1017f4d6bc1 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1795,7 +1795,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// We don't have a "nabs" intrinsic, so negate if needed based on the
// max/min operation.
if (IID == Intrinsic::smin || IID == Intrinsic::umax)
- Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison);
+ Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);
return replaceInstUsesWith(CI, Abs);
}
@@ -2093,8 +2093,9 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
bool IsSigned = IID == Intrinsic::sadd_with_overflow;
- bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
- : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
+ bool HasNWAdd = IsSigned
+ ? match(Arg0, m_NSWAddLike(m_Value(X), m_APInt(C0)))
+ : match(Arg0, m_NUWAddLike(m_Value(X), m_APInt(C0)));
if (HasNWAdd && match(Arg1, m_APInt(C1))) {
bool Overflow;
APInt NewC =
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 089a70c6e6cc..0652a8ba80b3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -734,19 +734,18 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
if (DestWidth == 1) {
Value *Zero = Constant::getNullValue(SrcTy);
- if (DestTy->isIntegerTy()) {
- // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
- // TODO: We canonicalize to more instructions here because we are probably
- // lacking equivalent analysis for trunc relative to icmp. There may also
- // be codegen concerns. If those trunc limitations were removed, we could
- // remove this transform.
- Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
- return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
- }
- // For vectors, we do not canonicalize all truncs to icmp, so optimize
- // patterns that would be covered within visitICmpInst.
Value *X;
+ const APInt *C1;
+ Constant *C2;
+ if (match(Src, m_OneUse(m_Shr(m_Shl(m_Power2(C1), m_Value(X)),
+ m_ImmConstant(C2))))) {
+ // trunc ((C1 << X) >> C2) to i1 --> X == (C2-cttz(C1)), where C1 is pow2
+ Constant *Log2C1 = ConstantInt::get(SrcTy, C1->exactLogBase2());
+ Constant *CmpC = ConstantExpr::getSub(C2, Log2C1);
+ return new ICmpInst(ICmpInst::ICMP_EQ, X, CmpC);
+ }
+
Constant *C;
if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
// trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
@@ -763,6 +762,14 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
Value *And = Builder.CreateAnd(X, Builder.CreateOr(MaskC, One));
return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
}
+
+ {
+ const APInt *C;
+ if (match(Src, m_Shl(m_APInt(C), m_Value(X))) && (*C)[0] == 1) {
+ // trunc (C << X) to i1 --> X == 0, where C is odd
+ return new ICmpInst(ICmpInst::Predicate::ICMP_EQ, X, Zero);
+ }
+ }
}
Value *A, *B;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 9d4c271f990d..8c698e52b5a0 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -105,7 +105,7 @@ static Value *foldMulSelectToNegate(BinaryOperator &I,
if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
m_Value(OtherOp)))) {
bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
- Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
+ Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
return Builder.CreateSelect(Cond, OtherOp, Neg);
}
// mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
@@ -113,7 +113,7 @@ static Value *foldMulSelectToNegate(BinaryOperator &I,
if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
m_Value(OtherOp)))) {
bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
- Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
+ Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
return Builder.CreateSelect(Cond, Neg, OtherOp);
}
@@ -452,9 +452,8 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
// mul Y, (sext X) -> select X, -Y, 0
if (match(&I, m_c_Mul(m_OneUse(m_SExt(m_Value(X))), m_Value(Y))) &&
X->getType()->isIntOrIntVectorTy(1))
- return SelectInst::Create(
- X, Builder.CreateNeg(Y, "", /*HasNUW=*/false, I.hasNoSignedWrap()),
- ConstantInt::getNullValue(Op0->getType()));
+ return SelectInst::Create(X, Builder.CreateNeg(Y, "", I.hasNoSignedWrap()),
+ ConstantInt::getNullValue(Op0->getType()));
Constant *ImmC;
if (match(Op1, m_ImmConstant(ImmC))) {
@@ -612,6 +611,18 @@ Instruction *InstCombinerImpl::foldPowiReassoc(BinaryOperator &I) {
Y->getType() == Z->getType())
return createPowiExpr(I, *this, X, Y, Z);
+ // powi(X, Y) / X --> powi(X, Y-1)
+ // This is legal when (Y - 1) can't wraparound, in which case reassoc and nnan
+ // are required.
+ // TODO: Multi-use may be also better off creating Powi(x,y-1)
+ if (I.hasAllowReassoc() && I.hasNoNaNs() &&
+ match(Op0, m_OneUse(m_AllowReassoc(m_Intrinsic<Intrinsic::powi>(
+ m_Specific(Op1), m_Value(Y))))) &&
+ willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
+ Constant *NegOne = ConstantInt::getAllOnesValue(Y->getType());
+ return createPowiExpr(I, *this, Op1, Y, NegOne);
+ }
+
return nullptr;
}
@@ -814,8 +825,19 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
if (match(Op1, m_SpecificFP(-1.0)))
return UnaryOperator::CreateFNegFMF(Op0, &I);
- // With no-nans: X * 0.0 --> copysign(0.0, X)
- if (I.hasNoNaNs() && match(Op1, m_PosZeroFP())) {
+ // With no-nans/no-infs:
+ // X * 0.0 --> copysign(0.0, X)
+ // X * -0.0 --> copysign(0.0, -X)
+ const APFloat *FPC;
+ if (match(Op1, m_APFloatAllowUndef(FPC)) && FPC->isZero() &&
+ ((I.hasNoInfs() &&
+ isKnownNeverNaN(Op0, /*Depth=*/0, SQ.getWithInstruction(&I))) ||
+ isKnownNeverNaN(&I, /*Depth=*/0, SQ.getWithInstruction(&I)))) {
+ if (FPC->isNegative())
+ Op0 = Builder.CreateFNegFMF(Op0, &I);
+ Op1 = Constant::replaceUndefsWith(
+ cast<Constant>(Op1),
+ ConstantFP::get(Op1->getType()->getScalarType(), *FPC));
CallInst *CopySign = Builder.CreateIntrinsic(Intrinsic::copysign,
{I.getType()}, {Op1, Op0}, &I);
return replaceInstUsesWith(I, CopySign);
@@ -1160,14 +1182,14 @@ Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
// We need a multiple of the divisor for a signed add constant, but
// unsigned is fine with any constant pair.
if (IsSigned &&
- match(Op0, m_NSWAdd(m_NSWMul(m_Value(X), m_SpecificInt(*C2)),
- m_APInt(C1))) &&
+ match(Op0, m_NSWAddLike(m_NSWMul(m_Value(X), m_SpecificInt(*C2)),
+ m_APInt(C1))) &&
isMultiple(*C1, *C2, Quotient, IsSigned)) {
return BinaryOperator::CreateNSWAdd(X, ConstantInt::get(Ty, Quotient));
}
if (!IsSigned &&
- match(Op0, m_NUWAdd(m_NUWMul(m_Value(X), m_SpecificInt(*C2)),
- m_APInt(C1)))) {
+ match(Op0, m_NUWAddLike(m_NUWMul(m_Value(X), m_SpecificInt(*C2)),
+ m_APInt(C1)))) {
return BinaryOperator::CreateNUWAdd(X,
ConstantInt::get(Ty, C1->udiv(*C2)));
}
@@ -1894,20 +1916,8 @@ Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
return replaceInstUsesWith(I, Pow);
}
- // powi(X, Y) / X --> powi(X, Y-1)
- // This is legal when (Y - 1) can't wraparound, in which case reassoc and nnan
- // are required.
- // TODO: Multi-use may be also better off creating Powi(x,y-1)
- if (I.hasAllowReassoc() && I.hasNoNaNs() &&
- match(Op0, m_OneUse(m_Intrinsic<Intrinsic::powi>(m_Specific(Op1),
- m_Value(Y)))) &&
- willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
- Constant *NegOne = ConstantInt::getAllOnesValue(Y->getType());
- Value *Y1 = Builder.CreateAdd(Y, NegOne);
- Type *Types[] = {Op1->getType(), Y1->getType()};
- Value *Pow = Builder.CreateIntrinsic(Intrinsic::powi, Types, {Op1, Y1}, &I);
- return replaceInstUsesWith(I, Pow);
- }
+ if (Instruction *FoldedPowi = foldPowiReassoc(I))
+ return FoldedPowi;
return nullptr;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index f73679f9461b..cd8e47e1b391 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -140,7 +140,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
// Integral constants can be freely negated.
if (match(V, m_AnyIntegralConstant()))
- return ConstantExpr::getNeg(cast<Constant>(V), /*HasNUW=*/false,
+ return ConstantExpr::getNeg(cast<Constant>(V),
/*HasNSW=*/false);
// If we have a non-instruction, then give up.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index eafd2889ec50..95aa2119e2d8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -437,7 +437,7 @@ Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
Value *A;
Constant *C, *C1;
if (match(Op0, m_Constant(C)) &&
- match(Op1, m_NUWAdd(m_Value(A), m_Constant(C1)))) {
+ match(Op1, m_NUWAddLike(m_Value(A), m_Constant(C1)))) {
Value *NewC = Builder.CreateBinOp(I.getOpcode(), C, C1);
BinaryOperator *NewShiftOp = BinaryOperator::Create(I.getOpcode(), NewC, A);
if (I.getOpcode() == Instruction::Shl) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 3c4c0f35eb6d..99f1f8eb34bb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -487,7 +487,9 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
// extelt (cmp X, Y), Index --> cmp (extelt X, Index), (extelt Y, Index)
Value *E0 = Builder.CreateExtractElement(X, Index);
Value *E1 = Builder.CreateExtractElement(Y, Index);
- return CmpInst::Create(cast<CmpInst>(SrcVec)->getOpcode(), Pred, E0, E1);
+ CmpInst *SrcCmpInst = cast<CmpInst>(SrcVec);
+ return CmpInst::CreateWithCopiedFlags(SrcCmpInst->getOpcode(), Pred, E0, E1,
+ SrcCmpInst);
}
if (auto *I = dyn_cast<Instruction>(SrcVec)) {
@@ -2135,7 +2137,8 @@ static Instruction *foldSelectShuffleOfSelectShuffle(ShuffleVectorInst &Shuf) {
return new ShuffleVectorInst(X, Y, NewMask);
}
-static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf) {
+static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf,
+ const SimplifyQuery &SQ) {
assert(Shuf.isSelect() && "Must have select-equivalent shuffle");
// Are we shuffling together some value and that same value after it has been
@@ -2159,6 +2162,19 @@ static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf) {
if (!IdC)
return nullptr;
+ Value *X = Op0IsBinop ? Op1 : Op0;
+
+ // Prevent folding in the case the non-binop operand might have NaN values.
+ // If X can have NaN elements then we have that the floating point math
+ // operation in the transformed code may not preserve the exact NaN
+ // bit-pattern -- e.g. `fadd sNaN, 0.0 -> qNaN`.
+ // This makes the transformation incorrect since the original program would
+ // have preserved the exact NaN bit-pattern.
+ // Avoid the folding if X can have NaN elements.
+ if (Shuf.getType()->getElementType()->isFloatingPointTy() &&
+ !isKnownNeverNaN(X, 0, SQ))
+ return nullptr;
+
// Shuffle identity constants into the lanes that return the original value.
// Example: shuf (mul X, {-1,-2,-3,-4}), X, {0,5,6,3} --> mul X, {-1,1,1,-4}
// Example: shuf X, (add X, {-1,-2,-3,-4}), {0,1,6,7} --> add X, {0,0,-3,-4}
@@ -2175,7 +2191,6 @@ static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf) {
// shuf (bop X, C), X, M --> bop X, C'
// shuf X, (bop X, C), M --> bop X, C'
- Value *X = Op0IsBinop ? Op1 : Op0;
Instruction *NewBO = BinaryOperator::Create(BOpcode, X, NewC);
NewBO->copyIRFlags(BO);
@@ -2241,7 +2256,8 @@ Instruction *InstCombinerImpl::foldSelectShuffle(ShuffleVectorInst &Shuf) {
if (Instruction *I = foldSelectShuffleOfSelectShuffle(Shuf))
return I;
- if (Instruction *I = foldSelectShuffleWith1Binop(Shuf))
+ if (Instruction *I = foldSelectShuffleWith1Binop(
+ Shuf, getSimplifyQuery().getWithInstruction(&Shuf)))
return I;
BinaryOperator *B0, *B1;
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 4bdeb6bbab85..d0d349c891a3 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -363,7 +363,7 @@ private:
Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, unsigned AllocaNo);
Value *getUARTag(IRBuilder<> &IRB);
- Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
+ Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB);
Value *applyTagMask(IRBuilder<> &IRB, Value *OldTag);
unsigned retagMask(unsigned AllocaNo);
@@ -422,6 +422,7 @@ private:
bool InstrumentLandingPads;
bool InstrumentWithCalls;
bool InstrumentStack;
+ bool InstrumentGlobals;
bool DetectUseAfterScope;
bool UsePageAliases;
bool UseMatchAllCallback;
@@ -639,11 +640,13 @@ void HWAddressSanitizer::initializeModule() {
// If we don't have personality function support, fall back to landing pads.
InstrumentLandingPads = optOr(ClInstrumentLandingPads, !NewRuntime);
+ InstrumentGlobals =
+ !CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
+
if (!CompileKernel) {
createHwasanCtorComdat();
- bool InstrumentGlobals = optOr(ClGlobals, NewRuntime);
- if (InstrumentGlobals && !UsePageAliases)
+ if (InstrumentGlobals)
instrumentGlobals();
bool InstrumentPersonalityFunctions =
@@ -787,6 +790,13 @@ bool HWAddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
if (SSI && SSI->stackAccessIsSafe(*Inst))
return true;
}
+
+ if (isa<GlobalVariable>(getUnderlyingObject(Ptr))) {
+ if (!InstrumentGlobals)
+ return true;
+ // TODO: Optimize inbound global accesses, like Asan `instrumentMop`.
+ }
+
return false;
}
@@ -1219,20 +1229,13 @@ Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
return UntaggedPtrLong;
}
-Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
- Module *M = IRB.GetInsertBlock()->getParent()->getParent();
- if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
- // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
- // in Bionic's libc/private/bionic_tls.h.
- Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
- return IRB.CreateConstGEP1_32(Int8Ty, IRB.CreateCall(ThreadPointerFunc),
- 0x30);
- }
- if (ThreadPtrGlobal)
- return ThreadPtrGlobal;
-
- return nullptr;
+Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB) {
+ // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
+ // in Bionic's libc/platform/bionic/tls_defines.h.
+ constexpr int SanitizerSlot = 6;
+ if (TargetTriple.isAArch64() && TargetTriple.isAndroid())
+ return memtag::getAndroidSlotPtr(IRB, SanitizerSlot);
+ return ThreadPtrGlobal;
}
Value *HWAddressSanitizer::getCachedFP(IRBuilder<> &IRB) {
@@ -1271,7 +1274,7 @@ void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
auto getThreadLongMaybeUntagged = [&]() {
if (!SlotPtr)
- SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
+ SlotPtr = getHwasanThreadSlotPtr(IRB);
if (!ThreadLong)
ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
// Extract the address field from ThreadLong. Unnecessary on AArch64 with
@@ -1307,6 +1310,22 @@ void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
// The use of AShr instead of LShr is due to
// https://bugs.llvm.org/show_bug.cgi?id=39030
// Runtime library makes sure not to use the highest bit.
+ //
+ // Mechanical proof of this address calculation can be found at:
+ // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/prove_hwasanwrap.smt2
+ //
+ // Example of the wrap case for N = 1
+ // Pointer: 0x01AAAAAAAAAAAFF8
+ // +
+ // 0x0000000000000008
+ // =
+ // 0x01AAAAAAAAAAB000
+ // &
+ // WrapMask: 0xFFFFFFFFFFFFF000
+ // =
+ // 0x01AAAAAAAAAAA000
+ //
+ // Then the WrapMask will be a no-op until the next wrap case.
Value *WrapMask = IRB.CreateXor(
IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
ConstantInt::get(IntptrTy, (uint64_t)-1));
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index de3bfb57b538..fb334e8292eb 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -497,7 +497,7 @@ static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
// Is X in [IntMin, 0]? NOTE: INT_MIN is fine!
if (Range.getSignedMax().isNonPositive()) {
IRBuilder<> B(II);
- Value *NegX = B.CreateNeg(X, II->getName(), /*HasNUW=*/false,
+ Value *NegX = B.CreateNeg(X, II->getName(),
/*HasNSW=*/IsIntMinPoison);
++NumAbs;
II->replaceAllUsesWith(NegX);
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 5a9ea420392f..bfc8bd5970bf 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -760,8 +760,6 @@ namespace {
bool isNoopIntrinsic(Instruction *I) {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
- case Intrinsic::allow_runtime_check:
- case Intrinsic::allow_ubsan_check:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_end:
diff --git a/llvm/lib/Transforms/Scalar/Float2Int.cpp b/llvm/lib/Transforms/Scalar/Float2Int.cpp
index ccca8bcc1a56..da4d39b4e3ed 100644
--- a/llvm/lib/Transforms/Scalar/Float2Int.cpp
+++ b/llvm/lib/Transforms/Scalar/Float2Int.cpp
@@ -311,7 +311,7 @@ void Float2IntPass::walkForwards() {
}
// If there is a valid transform to be done, do it.
-bool Float2IntPass::validateAndTransform() {
+bool Float2IntPass::validateAndTransform(const DataLayout &DL) {
bool MadeChange = false;
// Iterate over every disjoint partition of the def-use graph.
@@ -359,9 +359,7 @@ bool Float2IntPass::validateAndTransform() {
// The number of bits required is the maximum of the upper and
// lower limits, plus one so it can be signed.
- unsigned MinBW = std::max(R.getLower().getSignificantBits(),
- R.getUpper().getSignificantBits()) +
- 1;
+ unsigned MinBW = R.getMinSignedBits() + 1;
LLVM_DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n");
// If we've run off the realms of the exactly representable integers,
@@ -376,15 +374,23 @@ bool Float2IntPass::validateAndTransform() {
LLVM_DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n");
continue;
}
- if (MinBW > 64) {
- LLVM_DEBUG(
- dbgs() << "F2I: Value requires more than 64 bits to represent!\n");
- continue;
- }
- // OK, R is known to be representable. Now pick a type for it.
- // FIXME: Pick the smallest legal type that will fit.
- Type *Ty = (MinBW > 32) ? Type::getInt64Ty(*Ctx) : Type::getInt32Ty(*Ctx);
+ // OK, R is known to be representable.
+ // Pick the smallest legal type that will fit.
+ Type *Ty = DL.getSmallestLegalIntType(*Ctx, MinBW);
+ if (!Ty) {
+ // Every supported target supports 64-bit and 32-bit integers,
+ // so fall back to a 32- or 64-bit integer if the value fits.
+ if (MinBW <= 32) {
+ Ty = Type::getInt32Ty(*Ctx);
+ } else if (MinBW <= 64) {
+ Ty = Type::getInt64Ty(*Ctx);
+ } else {
+ LLVM_DEBUG(dbgs() << "F2I: Value requires more bits to represent than "
+ "the target supports!\n");
+ continue;
+ }
+ }
for (auto MI = ECs.member_begin(It), ME = ECs.member_end();
MI != ME; ++MI)
@@ -491,7 +497,8 @@ bool Float2IntPass::runImpl(Function &F, const DominatorTree &DT) {
walkBackwards();
walkForwards();
- bool Modified = validateAndTransform();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ bool Modified = validateAndTransform(DL);
if (Modified)
cleanup();
return Modified;
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
index b564f00eb9d1..261c1259c9c9 100644
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -951,6 +951,14 @@ void GVNHoist::makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
OtherGep = cast<GetElementPtrInst>(
cast<StoreInst>(OtherInst)->getPointerOperand());
ClonedGep->andIRFlags(OtherGep);
+
+ // Merge debug locations of GEPs, because the hoisted GEP replaces those
+ // in branches. When cloning, ClonedGep preserves the debug location of
+ // Gep, so Gep is skipped to avoid merging it twice.
+ if (OtherGep != Gep) {
+ ClonedGep->applyMergedLocation(ClonedGep->getDebugLoc(),
+ OtherGep->getDebugLoc());
+ }
}
// Replace uses of Gep with ClonedGep in Repl.
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index 1e0906717549..2bd13556c696 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -74,7 +74,7 @@ namespace {
struct BCEAtom {
BCEAtom() = default;
BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)
- : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(Offset) {}
+ : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(std::move(Offset)) {}
BCEAtom(const BCEAtom &) = delete;
BCEAtom &operator=(const BCEAtom &) = delete;
diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 436a85f62df6..f5c9aaa4f20b 100644
--- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -517,7 +517,7 @@ static bool doesNotRequireEntrySafepointBefore(CallBase *Call) {
switch (II->getIntrinsicID()) {
case Intrinsic::experimental_gc_statepoint:
case Intrinsic::experimental_patchpoint_void:
- case Intrinsic::experimental_patchpoint_i64:
+ case Intrinsic::experimental_patchpoint:
// These can wrap an actual call which may grow the stack by an unbounded
// amount or run forever.
return false;
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 519ff3221a3b..bc4b6de2f07f 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -509,8 +509,10 @@ void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
NewEntry->takeName(HeaderBB);
HeaderBB->setName("tailrecurse");
- BranchInst *BI = BranchInst::Create(HeaderBB, NewEntry);
- BI->setDebugLoc(CI->getDebugLoc());
+ BranchInst::Create(HeaderBB, NewEntry);
+ // If the new branch preserves the debug location of CI, it could result in
+ // misleading stepping, if CI is located in a conditional branch.
+ // So, here we don't give any debug location to the new branch.
// Move all fixed sized allocas from HeaderBB to NewEntry.
for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 3191751d92e1..6988292ac715 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -745,7 +745,7 @@ void CodeExtractor::severSplitPHINodesOfEntry(BasicBlock *&Header) {
/// and other with remaining incoming blocks; then first PHIs are placed in
/// outlined region.
void CodeExtractor::severSplitPHINodesOfExits(
- const SmallPtrSetImpl<BasicBlock *> &Exits) {
+ const SetVector<BasicBlock *> &Exits) {
for (BasicBlock *ExitBB : Exits) {
BasicBlock *NewBB = nullptr;
@@ -1751,7 +1751,7 @@ CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
// Calculate the exit blocks for the extracted region and the total exit
// weights for each of those blocks.
DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
- SmallPtrSet<BasicBlock *, 1> ExitBlocks;
+ SetVector<BasicBlock *> ExitBlocks;
for (BasicBlock *Block : Blocks) {
for (BasicBlock *Succ : successors(Block)) {
if (!Blocks.count(Succ)) {
diff --git a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
index bc6711711371..0f55af3b6edd 100644
--- a/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopRotationUtils.cpp
@@ -347,9 +347,19 @@ static void updateBranchWeights(BranchInst &PreHeaderBI, BranchInst &LoopBI,
// probabilities as if there are only 0-trip and 1-trip cases.
ExitWeight0 = OrigLoopExitWeight - OrigLoopBackedgeWeight;
}
+ } else {
+ // Theoretically, if the loop body must be executed at least once, the
+ // backedge count must be not less than exit count. However the branch
+ // weight collected by sampling-based PGO may not be very accurate due to
+ // sampling. Therefore this workaround is required here to avoid underflow
+ // of unsigned in following update of branch weight.
+ if (OrigLoopExitWeight > OrigLoopBackedgeWeight)
+ OrigLoopBackedgeWeight = OrigLoopExitWeight;
}
+ assert(OrigLoopExitWeight >= ExitWeight0 && "Bad branch weight");
ExitWeight1 = OrigLoopExitWeight - ExitWeight0;
EnterWeight = ExitWeight1;
+ assert(OrigLoopBackedgeWeight >= EnterWeight && "Bad branch weight");
LoopBackWeight = OrigLoopBackedgeWeight - EnterWeight;
} else if (OrigLoopExitWeight == 0) {
if (OrigLoopBackedgeWeight == 0) {
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index 8dd1002a6e4a..7b1eb70168d8 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -273,5 +273,15 @@ Value *getFP(IRBuilder<> &IRB) {
IRB.getIntPtrTy(M->getDataLayout()));
}
+Value *getAndroidSlotPtr(IRBuilder<> &IRB, int Slot) {
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ // Android reserves a set of fixed TLS slots for runtime components such as
+ // sanitizers. See Bionic's libc/platform/bionic/tls_defines.h.
+ Function *ThreadPointerFunc =
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ return IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
+ IRB.CreateCall(ThreadPointerFunc), 8 * Slot);
+}
+
} // namespace memtag
} // namespace llvm
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 3a3bcde7c3dc..74cffbc005c8 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -59,6 +59,10 @@ PoisonFlags::PoisonFlags(const Instruction *I) {
Disjoint = PDI->isDisjoint();
if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
NNeg = PNI->hasNonNeg();
+ if (auto *TI = dyn_cast<TruncInst>(I)) {
+ NUW = TI->hasNoUnsignedWrap();
+ NSW = TI->hasNoSignedWrap();
+ }
}
void PoisonFlags::apply(Instruction *I) {
@@ -72,6 +76,10 @@ void PoisonFlags::apply(Instruction *I) {
PDI->setIsDisjoint(Disjoint);
if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
PNI->setNonNeg(NNeg);
+ if (isa<TruncInst>(I)) {
+ I->setHasNoUnsignedWrap(NUW);
+ I->setHasNoSignedWrap(NSW);
+ }
}
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index e86705e89889..5d03b66b0ce3 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -68,6 +68,9 @@ class VPBuilder {
public:
VPBuilder() = default;
VPBuilder(VPBasicBlock *InsertBB) { setInsertPoint(InsertBB); }
+ VPBuilder(VPRecipeBase *InsertPt) {
+ setInsertPoint(InsertPt->getParent(), InsertPt->getIterator());
+ }
/// Clear the insertion point: created instructions will not be inserted into
/// a block.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 2163930b02c1..452c84f2dcf5 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7898,6 +7898,18 @@ void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
}
}
+iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
+VPRecipeBuilder::mapToVPValues(User::op_range Operands) {
+ std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
+ if (auto *I = dyn_cast<Instruction>(Op)) {
+ if (auto *R = Ingredient2Recipe.lookup(I))
+ return R->getVPSingleValue();
+ }
+ return Plan.getOrAddLiveIn(Op);
+ };
+ return map_range(Operands, Fn);
+}
+
VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
@@ -7922,7 +7934,7 @@ VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
if (OrigLoop->isLoopExiting(Src))
return EdgeMaskCache[Edge] = SrcMask;
- VPValue *EdgeMask = Plan.getVPValueOrAddLiveIn(BI->getCondition());
+ VPValue *EdgeMask = getVPValueOrAddLiveIn(BI->getCondition(), Plan);
assert(EdgeMask && "No Edge Mask found for condition");
if (BI->getSuccessor(0) != Dst)
@@ -7933,7 +7945,7 @@ VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
// 'select i1 SrcMask, i1 EdgeMask, i1 false'.
// The select version does not introduce new UB if SrcMask is false and
// EdgeMask is poison. Using 'and' here introduces undefined behavior.
- VPValue *False = Plan.getVPValueOrAddLiveIn(
+ VPValue *False = Plan.getOrAddLiveIn(
ConstantInt::getFalse(BI->getCondition()->getType()));
EdgeMask =
Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc());
@@ -8135,7 +8147,7 @@ VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
auto *Phi = cast<PHINode>(I->getOperand(0));
const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi);
- VPValue *Start = Plan.getVPValueOrAddLiveIn(II.getStartValue());
+ VPValue *Start = Plan.getOrAddLiveIn(II.getStartValue());
return createWidenInductionRecipes(Phi, I, Start, II, Plan, *PSE.getSE(),
*OrigLoop, Range);
}
@@ -8245,7 +8257,7 @@ VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
if (Legal->isMaskRequired(CI))
Mask = getBlockInMask(CI->getParent());
else
- Mask = Plan.getVPValueOrAddLiveIn(ConstantInt::getTrue(
+ Mask = Plan.getOrAddLiveIn(ConstantInt::getTrue(
IntegerType::getInt1Ty(Variant->getFunctionType()->getContext())));
Ops.insert(Ops.begin() + *MaskPos, Mask);
@@ -8289,7 +8301,7 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
SmallVector<VPValue *> Ops(Operands.begin(), Operands.end());
VPValue *Mask = getBlockInMask(I->getParent());
VPValue *One =
- Plan.getVPValueOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
+ Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
auto *SafeRHS =
new VPInstruction(Instruction::Select, {Mask, Ops[1], One},
I->getDebugLoc());
@@ -8383,7 +8395,7 @@ VPReplicateRecipe *VPRecipeBuilder::handleReplication(Instruction *I,
BlockInMask = getBlockInMask(I->getParent());
}
- auto *Recipe = new VPReplicateRecipe(I, Plan.mapToVPValues(I->operands()),
+ auto *Recipe = new VPReplicateRecipe(I, mapToVPValues(I->operands()),
IsUniform, BlockInMask);
return Recipe;
}
@@ -8399,10 +8411,6 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
if (Phi->getParent() != OrigLoop->getHeader())
return tryToBlend(Phi, Operands);
- // Always record recipes for header phis. Later first-order recurrence phis
- // can have earlier phis as incoming values.
- recordRecipeOf(Phi);
-
if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, Range)))
return Recipe;
@@ -8427,14 +8435,6 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
}
- // Record the incoming value from the backedge, so we can add the incoming
- // value from the backedge after all recipes have been created.
- auto *Inc = cast<Instruction>(
- Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
- auto RecipeIter = Ingredient2Recipe.find(Inc);
- if (RecipeIter == Ingredient2Recipe.end())
- recordRecipeOf(Inc);
-
PhisToFix.push_back(PhiRecipe);
return PhiRecipe;
}
@@ -8499,7 +8499,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, bool HasNUW,
DebugLoc DL) {
Value *StartIdx = ConstantInt::get(IdxTy, 0);
- auto *StartV = Plan.getVPValueOrAddLiveIn(StartIdx);
+ auto *StartV = Plan.getOrAddLiveIn(StartIdx);
// Add a VPCanonicalIVPHIRecipe starting at 0 to the header.
auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
@@ -8522,7 +8522,7 @@ static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, bool HasNUW,
// Add exit values to \p Plan. VPLiveOuts are added for each LCSSA phi in the
// original exit block.
static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB, Loop *OrigLoop,
- VPlan &Plan) {
+ VPRecipeBuilder &Builder, VPlan &Plan) {
BasicBlock *ExitBB = OrigLoop->getUniqueExitBlock();
BasicBlock *ExitingBB = OrigLoop->getExitingBlock();
// Only handle single-exit loops with unique exit blocks for now.
@@ -8533,7 +8533,7 @@ static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB, Loop *OrigLoop,
for (PHINode &ExitPhi : ExitBB->phis()) {
Value *IncomingValue =
ExitPhi.getIncomingValueForBlock(ExitingBB);
- VPValue *V = Plan.getVPValueOrAddLiveIn(IncomingValue);
+ VPValue *V = Builder.getVPValueOrAddLiveIn(IncomingValue, Plan);
Plan.addLiveOut(&ExitPhi, V);
}
}
@@ -8603,9 +8603,6 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
if (!getDecisionAndClampRange(applyIG, Range))
continue;
InterleaveGroups.insert(IG);
- for (unsigned i = 0; i < IG->getFactor(); i++)
- if (Instruction *Member = IG->getMember(i))
- RecipeBuilder.recordRecipeOf(Member);
};
// ---------------------------------------------------------------------------
@@ -8644,10 +8641,10 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
SmallVector<VPValue *, 4> Operands;
auto *Phi = dyn_cast<PHINode>(Instr);
if (Phi && Phi->getParent() == HeaderBB) {
- Operands.push_back(Plan->getVPValueOrAddLiveIn(
+ Operands.push_back(Plan->getOrAddLiveIn(
Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
} else {
- auto OpRange = Plan->mapToVPValues(Instr->operands());
+ auto OpRange = RecipeBuilder.mapToVPValues(Instr->operands());
Operands = {OpRange.begin(), OpRange.end()};
}
@@ -8662,10 +8659,6 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
RecipeBuilder.tryToCreateWidenRecipe(Instr, Operands, Range, VPBB);
if (!Recipe)
Recipe = RecipeBuilder.handleReplication(Instr, Range);
- for (auto *Def : Recipe->definedValues()) {
- auto *UV = Def->getUnderlyingValue();
- Plan->addVPValue(UV, Def);
- }
RecipeBuilder.setRecipe(Instr, Recipe);
if (isa<VPHeaderPHIRecipe>(Recipe)) {
@@ -8697,7 +8690,7 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
// and there is nothing to fix from vector loop; phis should have incoming
// from scalar loop only.
} else
- addUsersInExitBlock(HeaderVPBB, OrigLoop, *Plan);
+ addUsersInExitBlock(HeaderVPBB, OrigLoop, RecipeBuilder, *Plan);
assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
!Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
@@ -8759,16 +8752,12 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
continue;
Constant *CI = ConstantInt::get(Stride->getType(), ScevStride->getAPInt());
- auto *ConstVPV = Plan->getVPValueOrAddLiveIn(CI);
+ auto *ConstVPV = Plan->getOrAddLiveIn(CI);
// The versioned value may not be used in the loop directly, so just add a
// new live-in in those cases.
- Plan->getVPValueOrAddLiveIn(StrideV)->replaceAllUsesWith(ConstVPV);
+ Plan->getOrAddLiveIn(StrideV)->replaceAllUsesWith(ConstVPV);
}
- // From this point onwards, VPlan-to-VPlan transformations may change the plan
- // in ways that accessing values using original IR values is incorrect.
- Plan->disableValue2VPValue();
-
VPlanTransforms::dropPoisonGeneratingRecipes(*Plan, [this](BasicBlock *BB) {
return Legal->blockNeedsPredication(BB);
});
@@ -9122,42 +9111,11 @@ void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
"Not a pointer induction according to InductionDescriptor!");
assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
"Unexpected type.");
+ assert(!onlyScalarsGenerated(State.VF.isScalable()) &&
+ "Recipe should have been replaced");
auto *IVR = getParent()->getPlan()->getCanonicalIV();
PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0, /*IsScalar*/ true));
-
- if (onlyScalarsGenerated(State.VF.isScalable())) {
- // This is the normalized GEP that starts counting at zero.
- Value *PtrInd = State.Builder.CreateSExtOrTrunc(
- CanonicalIV, IndDesc.getStep()->getType());
- // Determine the number of scalars we need to generate for each unroll
- // iteration. If the instruction is uniform, we only need to generate the
- // first lane. Otherwise, we generate all VF values.
- bool IsUniform = vputils::onlyFirstLaneUsed(this);
- assert((IsUniform || !State.VF.isScalable()) &&
- "Cannot scalarize a scalable VF");
- unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
-
- for (unsigned Part = 0; Part < State.UF; ++Part) {
- Value *PartStart =
- createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part);
-
- for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
- Value *Idx = State.Builder.CreateAdd(
- PartStart, ConstantInt::get(PtrInd->getType(), Lane));
- Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx);
-
- Value *Step = State.get(getOperand(1), VPIteration(Part, Lane));
- Value *SclrGep = emitTransformedIndex(
- State.Builder, GlobalIdx, IndDesc.getStartValue(), Step,
- IndDesc.getKind(), IndDesc.getInductionBinOp());
- SclrGep->setName("next.gep");
- State.set(this, SclrGep, VPIteration(Part, Lane));
- }
- }
- return;
- }
-
Type *PhiType = IndDesc.getStep()->getType();
// Build a pointer phi
@@ -10067,7 +10025,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
EpilogILV.setTripCount(MainILV.getTripCount());
for (auto &R : make_early_inc_range(*BestEpiPlan.getPreheader())) {
auto *ExpandR = cast<VPExpandSCEVRecipe>(&R);
- auto *ExpandedVal = BestEpiPlan.getVPValueOrAddLiveIn(
+ auto *ExpandedVal = BestEpiPlan.getOrAddLiveIn(
ExpandedSCEVs.find(ExpandR->getSCEV())->second);
ExpandR->replaceAllUsesWith(ExpandedVal);
if (BestEpiPlan.getTripCount() == ExpandR)
@@ -10108,7 +10066,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
{EPI.MainLoopIterationCountCheck});
}
assert(ResumeV && "Must have a resume value");
- VPValue *StartVal = BestEpiPlan.getVPValueOrAddLiveIn(ResumeV);
+ VPValue *StartVal = BestEpiPlan.getOrAddLiveIn(ResumeV);
cast<VPHeaderPHIRecipe>(&R)->setStartValue(StartVal);
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index a52064e5417b..2bc0c5dcc606 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -458,8 +458,7 @@ static SmallBitVector isUndefVector(const Value *V,
/// ShuffleVectorInst/getShuffleCost?
static std::optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
- const auto *It =
- find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
+ const auto *It = find_if(VL, IsaPred<ExtractElementInst>);
if (It == VL.end())
return std::nullopt;
auto *EI0 = cast<ExtractElementInst>(*It);
@@ -2022,9 +2021,8 @@ public:
public:
/// Initialize with all the operands of the instruction vector \p RootVL.
- VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI,
- const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R)
- : TLI(TLI), DL(DL), SE(SE), R(R) {
+ VLOperands(ArrayRef<Value *> RootVL, const BoUpSLP &R)
+ : TLI(*R.TLI), DL(*R.DL), SE(*R.SE), R(R) {
// Append all the operands of RootVL.
appendOperandsOfVL(RootVL);
}
@@ -2506,10 +2504,10 @@ private:
/// Reorder commutative or alt operands to get better probability of
/// generating vectorized code.
- static void reorderInputsAccordingToOpcode(
- ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
- SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
- const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R);
+ static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
+ SmallVectorImpl<Value *> &Left,
+ SmallVectorImpl<Value *> &Right,
+ const BoUpSLP &R);
/// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
/// users of \p TE and collects the stores. It returns the map from the store
@@ -4696,12 +4694,8 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
// TODO: add analysis of other gather nodes with extractelement
// instructions and other values/instructions, not only undefs.
if ((TE.getOpcode() == Instruction::ExtractElement ||
- (all_of(TE.Scalars,
- [](Value *V) {
- return isa<UndefValue, ExtractElementInst>(V);
- }) &&
- any_of(TE.Scalars,
- [](Value *V) { return isa<ExtractElementInst>(V); }))) &&
+ (all_of(TE.Scalars, IsaPred<UndefValue, ExtractElementInst>) &&
+ any_of(TE.Scalars, IsaPred<ExtractElementInst>))) &&
all_of(TE.Scalars, [](Value *V) {
auto *EE = dyn_cast<ExtractElementInst>(V);
return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
@@ -4722,7 +4716,7 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
// might be transformed.
int Sz = TE.Scalars.size();
if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) &&
- count_if(TE.Scalars, UndefValue::classof) == Sz - 1) {
+ count_if(TE.Scalars, IsaPred<UndefValue>) == Sz - 1) {
const auto *It =
find_if(TE.Scalars, [](Value *V) { return !isConstant(V); });
if (It == TE.Scalars.begin())
@@ -6346,11 +6340,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize &&
!(S.getOpcode() && allSameBlock(VL))) {
assert(S.OpValue->getType()->isPointerTy() &&
- count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >=
- 2 &&
+ count_if(VL, IsaPred<GetElementPtrInst>) >= 2 &&
"Expected pointers only.");
// Reset S to make it GetElementPtr kind of node.
- const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
+ const auto *It = find_if(VL, IsaPred<GetElementPtrInst>);
assert(It != VL.end() && "Expected at least one GEP.");
S = getSameOpcode(*It, *TLI);
}
@@ -6626,7 +6619,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// so that each side is more likely to have the same opcode.
assert(P0 == CmpInst::getSwappedPredicate(P0) &&
"Commutative Predicate mismatch");
- reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this);
+ reorderInputsAccordingToOpcode(VL, Left, Right, *this);
} else {
// Collect operands - commute if it uses the swapped predicate.
for (Value *V : VL) {
@@ -6673,7 +6666,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
// have the same opcode.
if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
ValueList Left, Right;
- reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this);
+ reorderInputsAccordingToOpcode(VL, Left, Right, *this);
TE->setOperand(0, Left);
TE->setOperand(1, Right);
buildTree_rec(Left, Depth + 1, {TE, 0});
@@ -6810,8 +6803,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
if (!CI || all_of(VL, [](Value *V) {
return cast<CmpInst>(V)->isCommutative();
})) {
- reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE,
- *this);
+ reorderInputsAccordingToOpcode(VL, Left, Right, *this);
} else {
auto *MainCI = cast<CmpInst>(S.MainOp);
auto *AltCI = cast<CmpInst>(S.AltOp);
@@ -6895,17 +6887,12 @@ unsigned BoUpSLP::canMapToVector(Type *T) const {
bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
SmallVectorImpl<unsigned> &CurrentOrder,
bool ResizeAllowed) const {
- const auto *It = find_if(VL, [](Value *V) {
- return isa<ExtractElementInst, ExtractValueInst>(V);
- });
+ const auto *It = find_if(VL, IsaPred<ExtractElementInst, ExtractValueInst>);
assert(It != VL.end() && "Expected at least one extract instruction.");
auto *E0 = cast<Instruction>(*It);
- assert(all_of(VL,
- [](Value *V) {
- return isa<UndefValue, ExtractElementInst, ExtractValueInst>(
- V);
- }) &&
- "Invalid opcode");
+ assert(
+ all_of(VL, IsaPred<UndefValue, ExtractElementInst, ExtractValueInst>) &&
+ "Invalid opcode");
// Check if all of the extracts come from the same vector and from the
// correct offset.
Value *Vec = E0->getOperand(0);
@@ -7577,7 +7564,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
}
InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) {
- if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof))
+ if ((!Root && allConstant(VL)) || all_of(VL, IsaPred<UndefValue>))
return TTI::TCC_Free;
auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size());
InstructionCost GatherCost = 0;
@@ -7745,13 +7732,12 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
} else if (!Root && isSplat(VL)) {
// Found the broadcasting of the single scalar, calculate the cost as
// the broadcast.
- const auto *It =
- find_if(VL, [](Value *V) { return !isa<UndefValue>(V); });
+ const auto *It = find_if_not(VL, IsaPred<UndefValue>);
assert(It != VL.end() && "Expected at least one non-undef value.");
// Add broadcast for non-identity shuffle only.
bool NeedShuffle =
count(VL, *It) > 1 &&
- (VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof));
+ (VL.front() != *It || !all_of(VL.drop_front(), IsaPred<UndefValue>));
if (!NeedShuffle)
return TTI.getVectorInstrCost(Instruction::InsertElement, VecTy,
CostKind, std::distance(VL.begin(), It),
@@ -7759,7 +7745,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
SmallVector<int> ShuffleMask(VL.size(), PoisonMaskElem);
transform(VL, ShuffleMask.begin(), [](Value *V) {
- return isa<PoisonValue>(V) ? PoisonMaskElem : 0;
+ return isa<PoisonValue>(V) ? PoisonMaskElem : 0;
});
InstructionCost InsertCost = TTI.getVectorInstrCost(
Instruction::InsertElement, VecTy, CostKind, 0,
@@ -7770,7 +7756,7 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
/*SubTp=*/nullptr, /*Args=*/*It);
}
return GatherCost +
- (all_of(Gathers, UndefValue::classof)
+ (all_of(Gathers, IsaPred<UndefValue>)
? TTI::TCC_Free
: R.getGatherCost(Gathers, !Root && VL.equals(Gathers)));
};
@@ -8180,9 +8166,8 @@ public:
// Take credit for instruction that will become dead.
if (EE->hasOneUse() || !PrevNodeFound) {
Instruction *Ext = EE->user_back();
- if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) {
- return isa<GetElementPtrInst>(U);
- })) {
+ if (isa<SExtInst, ZExtInst>(Ext) &&
+ all_of(Ext->users(), IsaPred<GetElementPtrInst>)) {
// Use getExtractWithExtendCost() to calculate the cost of
// extractelement/ext pair.
Cost -=
@@ -8647,8 +8632,7 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
if (I->hasOneUse()) {
Instruction *Ext = I->user_back();
if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
- all_of(Ext->users(),
- [](User *U) { return isa<GetElementPtrInst>(U); })) {
+ all_of(Ext->users(), IsaPred<GetElementPtrInst>)) {
// Use getExtractWithExtendCost() to calculate the cost of
// extractelement/ext pair.
InstructionCost Cost = TTI->getExtractWithExtendCost(
@@ -9132,10 +9116,7 @@ bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
(allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
TE->Scalars.size() < Limit ||
((TE->getOpcode() == Instruction::ExtractElement ||
- all_of(TE->Scalars,
- [](Value *V) {
- return isa<ExtractElementInst, UndefValue>(V);
- })) &&
+ all_of(TE->Scalars, IsaPred<ExtractElementInst, UndefValue>)) &&
isFixedVectorShuffle(TE->Scalars, Mask)) ||
(TE->State == TreeEntry::NeedToGather &&
TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
@@ -9256,9 +9237,7 @@ bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
return (TE->State == TreeEntry::NeedToGather &&
TE->getOpcode() != Instruction::ExtractElement &&
- count_if(TE->Scalars,
- [](Value *V) { return isa<ExtractElementInst>(V); }) <=
- Limit) ||
+ count_if(TE->Scalars, IsaPred<ExtractElementInst>) <= Limit) ||
TE->getOpcode() == Instruction::PHI;
}))
return true;
@@ -9275,14 +9254,19 @@ bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
// Check if any of the gather node forms an insertelement buildvector
// somewhere.
- if (any_of(VectorizableTree, [](const std::unique_ptr<TreeEntry> &TE) {
+ bool IsAllowedSingleBVNode =
+ VectorizableTree.size() > 1 ||
+ (VectorizableTree.size() == 1 && VectorizableTree.front()->getOpcode() &&
+ VectorizableTree.front()->getOpcode() != Instruction::PHI &&
+ VectorizableTree.front()->getOpcode() != Instruction::GetElementPtr &&
+ allSameBlock(VectorizableTree.front()->Scalars));
+ if (any_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
return TE->State == TreeEntry::NeedToGather &&
- all_of(TE->Scalars, [](Value *V) {
+ all_of(TE->Scalars, [&](Value *V) {
return isa<ExtractElementInst, UndefValue>(V) ||
- (!V->hasNUsesOrMore(UsesLimit) &&
- any_of(V->users(), [](User *U) {
- return isa<InsertElementInst>(U);
- }));
+ (IsAllowedSingleBVNode &&
+ !V->hasNUsesOrMore(UsesLimit) &&
+ any_of(V->users(), IsaPred<InsertElementInst>));
});
}))
return false;
@@ -10279,7 +10263,7 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
}
}
- bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof);
+ bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, IsaPred<UndefValue>);
// Checks if the 2 PHIs are compatible in terms of high possibility to be
// vectorized.
auto AreCompatiblePHIs = [&](Value *V, Value *V1) {
@@ -10513,13 +10497,13 @@ InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL,
// Perform operand reordering on the instructions in VL and return the reordered
// operands in Left and Right.
-void BoUpSLP::reorderInputsAccordingToOpcode(
- ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
- SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
- const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) {
+void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
+ SmallVectorImpl<Value *> &Left,
+ SmallVectorImpl<Value *> &Right,
+ const BoUpSLP &R) {
if (VL.empty())
return;
- VLOperands Ops(VL, TLI, DL, SE, R);
+ VLOperands Ops(VL, R);
// Reorder the operands in place.
Ops.reorder();
Left = Ops.getVL(0);
@@ -11256,8 +11240,7 @@ Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
InstructionsState S = getSameOpcode(VL, *TLI);
// Special processing for GEPs bundle, which may include non-gep values.
if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
- const auto *It =
- find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
+ const auto *It = find_if(VL, IsaPred<GetElementPtrInst>);
if (It != VL.end())
S = getSameOpcode(*It, *TLI);
}
@@ -11427,7 +11410,7 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
unsigned NumParts = TTI->getNumberOfParts(VecTy);
if (NumParts == 0 || NumParts >= GatheredScalars.size())
NumParts = 1;
- if (!all_of(GatheredScalars, UndefValue::classof)) {
+ if (!all_of(GatheredScalars, IsaPred<UndefValue>)) {
// Check for gathered extracts.
bool Resized = false;
ExtractShuffles =
@@ -11752,7 +11735,7 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
GatheredScalars[I] = PoisonValue::get(ScalarTy);
}
// Generate constants for final shuffle and build a mask for them.
- if (!all_of(GatheredScalars, PoisonValue::classof)) {
+ if (!all_of(GatheredScalars, IsaPred<PoisonValue>)) {
SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem);
TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true);
Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size());
@@ -11919,7 +11902,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true);
if (VecTy != Vec->getType()) {
- assert((getOperandEntry(E, I)->State == TreeEntry::NeedToGather ||
+ assert((It != MinBWs.end() ||
+ getOperandEntry(E, I)->State == TreeEntry::NeedToGather ||
MinBWs.contains(getOperandEntry(E, I))) &&
"Expected item in MinBWs.");
Vec = Builder.CreateIntCast(Vec, VecTy, GetOperandSignedness(I));
@@ -12468,12 +12452,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1))
TysForDecl.push_back(
FixedVectorType::get(CI->getType(), E->Scalars.size()));
+ auto *CEI = cast<CallInst>(VL0);
for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
ValueList OpVL;
// Some intrinsics have scalar arguments. This argument should not be
// vectorized.
if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) {
- CallInst *CEI = cast<CallInst>(VL0);
ScalarArg = CEI->getArgOperand(I);
OpVecs.push_back(CEI->getArgOperand(I));
if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
@@ -12486,6 +12470,13 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
+ ScalarArg = CEI->getArgOperand(I);
+ if (cast<VectorType>(OpVec->getType())->getElementType() !=
+ ScalarArg->getType()) {
+ auto *CastTy = FixedVectorType::get(ScalarArg->getType(),
+ VecTy->getNumElements());
+ OpVec = Builder.CreateIntCast(OpVec, CastTy, GetOperandSignedness(I));
+ }
LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n");
OpVecs.push_back(OpVec);
if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
@@ -12607,8 +12598,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
},
Mask, &OpScalars, &AltScalars);
- propagateIRFlags(V0, OpScalars);
- propagateIRFlags(V1, AltScalars);
+ propagateIRFlags(V0, OpScalars, E->getMainOp(), !MinBWs.contains(E));
+ propagateIRFlags(V1, AltScalars, E->getAltOp(), !MinBWs.contains(E));
Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
if (auto *I = dyn_cast<Instruction>(V)) {
@@ -13913,26 +13904,29 @@ unsigned BoUpSLP::getVectorElementSize(Value *V) {
// that feed it. The type of the loaded value may indicate a more suitable
// width than V's type. We want to base the vector element size on the width
// of memory operations where possible.
- SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
+ SmallVector<std::tuple<Instruction *, BasicBlock *, unsigned>> Worklist;
SmallPtrSet<Instruction *, 16> Visited;
if (auto *I = dyn_cast<Instruction>(V)) {
- Worklist.emplace_back(I, I->getParent());
+ Worklist.emplace_back(I, I->getParent(), 0);
Visited.insert(I);
}
// Traverse the expression tree in bottom-up order looking for loads. If we
// encounter an instruction we don't yet handle, we give up.
auto Width = 0u;
+ Value *FirstNonBool = nullptr;
while (!Worklist.empty()) {
- Instruction *I;
- BasicBlock *Parent;
- std::tie(I, Parent) = Worklist.pop_back_val();
+ auto [I, Parent, Level] = Worklist.pop_back_val();
// We should only be looking at scalar instructions here. If the current
// instruction has a vector type, skip.
auto *Ty = I->getType();
if (isa<VectorType>(Ty))
continue;
+ if (Ty != Builder.getInt1Ty() && !FirstNonBool)
+ FirstNonBool = I;
+ if (Level > RecursionMaxDepth)
+ continue;
// If the current instruction is a load, update MaxWidth to reflect the
// width of the loaded value.
@@ -13945,11 +13939,16 @@ unsigned BoUpSLP::getVectorElementSize(Value *V) {
// user or the use is a PHI node, we add it to the worklist.
else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst,
BinaryOperator, UnaryOperator>(I)) {
- for (Use &U : I->operands())
+ for (Use &U : I->operands()) {
if (auto *J = dyn_cast<Instruction>(U.get()))
if (Visited.insert(J).second &&
- (isa<PHINode>(I) || J->getParent() == Parent))
- Worklist.emplace_back(J, J->getParent());
+ (isa<PHINode>(I) || J->getParent() == Parent)) {
+ Worklist.emplace_back(J, J->getParent(), Level + 1);
+ continue;
+ }
+ if (!FirstNonBool && U.get()->getType() != Builder.getInt1Ty())
+ FirstNonBool = U.get();
+ }
} else {
break;
}
@@ -13959,8 +13958,8 @@ unsigned BoUpSLP::getVectorElementSize(Value *V) {
// gave up for some reason, just return the width of V. Otherwise, return the
// maximum width we found.
if (!Width) {
- if (auto *CI = dyn_cast<CmpInst>(V))
- V = CI->getOperand(0);
+ if (V->getType() == Builder.getInt1Ty() && FirstNonBool)
+ V = FirstNonBool;
Width = DL->getTypeSizeInBits(V->getType());
}
@@ -13995,7 +13994,7 @@ bool BoUpSLP::collectValuesToDemote(
if (MultiNodeScalars.contains(V))
return false;
uint32_t OrigBitWidth = DL->getTypeSizeInBits(V->getType());
- if (OrigBitWidth < BitWidth) {
+ if (OrigBitWidth > BitWidth) {
APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
return true;
@@ -14078,12 +14077,14 @@ bool BoUpSLP::collectValuesToDemote(
MaxDepthLevel = 1;
if (IsProfitableToDemoteRoot)
IsProfitableToDemote = true;
+ (void)IsPotentiallyTruncated(V, BitWidth);
break;
case Instruction::ZExt:
case Instruction::SExt:
if (!IsTruncRoot)
MaxDepthLevel = 1;
IsProfitableToDemote = true;
+ (void)IsPotentiallyTruncated(V, BitWidth);
break;
// We can demote certain binary operations if we can demote both of their
@@ -14165,6 +14166,28 @@ bool BoUpSLP::collectValuesToDemote(
return false;
break;
}
+ case Instruction::UDiv:
+ case Instruction::URem: {
+ if (ITE->UserTreeIndices.size() > 1 && !IsPotentiallyTruncated(I, BitWidth))
+ return false;
+ // UDiv and URem can be truncated if all the truncated bits are zero.
+ if (!AttemptCheckBitwidth(
+ [&](unsigned BitWidth, unsigned OrigBitWidth) {
+ assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
+ APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
+ return MaskedValueIsZero(I->getOperand(0), Mask,
+ SimplifyQuery(*DL)) &&
+ MaskedValueIsZero(I->getOperand(1), Mask,
+ SimplifyQuery(*DL));
+ },
+ NeedToExit))
+ return false;
+ if (NeedToExit)
+ return true;
+ if (!ProcessOperands({I->getOperand(0), I->getOperand(1)}, NeedToExit))
+ return false;
+ break;
+ }
// We can demote selects if we can demote their true and false values.
case Instruction::Select: {
@@ -14237,6 +14260,7 @@ void BoUpSLP::computeMinimumValueSizes() {
// resize to the final type.
bool IsTruncRoot = false;
bool IsProfitableToDemoteRoot = !IsStoreOrInsertElt;
+ SmallVector<unsigned> RootDemotes;
if (NodeIdx != 0 &&
VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
(VectorizableTree[NodeIdx]->getOpcode() == Instruction::ZExt ||
@@ -14244,6 +14268,7 @@ void BoUpSLP::computeMinimumValueSizes() {
VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc)) {
assert(IsStoreOrInsertElt && "Expected store/insertelement seeded graph.");
IsTruncRoot = VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc;
+ RootDemotes.push_back(NodeIdx);
IsProfitableToDemoteRoot = true;
++NodeIdx;
}
@@ -14380,6 +14405,7 @@ void BoUpSLP::computeMinimumValueSizes() {
while (NodeIdx < VectorizableTree.size() &&
VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {
+ RootDemotes.push_back(NodeIdx);
++NodeIdx;
IsTruncRoot = true;
}
@@ -14395,14 +14421,35 @@ void BoUpSLP::computeMinimumValueSizes() {
unsigned MaxBitWidth = ComputeMaxBitWidth(
TreeRoot, VectorizableTree[NodeIdx]->getVectorFactor(), IsTopRoot,
IsProfitableToDemoteRoot, Opcode, Limit, IsTruncRoot);
+ if (ReductionBitWidth != 0 && (IsTopRoot || !RootDemotes.empty())) {
+ if (MaxBitWidth != 0 && ReductionBitWidth < MaxBitWidth)
+ ReductionBitWidth = bit_ceil(MaxBitWidth);
+ else if (MaxBitWidth == 0)
+ ReductionBitWidth = 0;
+ }
+
+ for (unsigned Idx : RootDemotes) {
+ Value *V = VectorizableTree[Idx]->Scalars.front();
+ uint32_t OrigBitWidth = DL->getTypeSizeInBits(V->getType());
+ if (OrigBitWidth > MaxBitWidth) {
+ APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, MaxBitWidth);
+ if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
+ ToDemote.push_back(V);
+ }
+ }
+ RootDemotes.clear();
IsTopRoot = false;
IsProfitableToDemoteRoot = true;
if (TruncNodes.empty()) {
NodeIdx = VectorizableTree.size();
} else {
- NodeIdx = *TruncNodes.begin() + 1;
- TruncNodes.erase(TruncNodes.begin());
+ unsigned NewIdx = 0;
+ do {
+ NewIdx = *TruncNodes.begin() + 1;
+ TruncNodes.erase(TruncNodes.begin());
+ } while (NewIdx <= NodeIdx && !TruncNodes.empty());
+ NodeIdx = NewIdx;
IsTruncRoot = true;
}
@@ -14440,7 +14487,7 @@ void BoUpSLP::computeMinimumValueSizes() {
return SIt != DemotedConsts.end() &&
is_contained(SIt->getSecond(), Idx);
}) ||
- all_of(CTE->Scalars, Constant::classof))
+ all_of(CTE->Scalars, IsaPred<Constant>))
MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned);
}
}
@@ -15188,12 +15235,10 @@ class HorizontalReduction {
static Value *createOp(IRBuilderBase &Builder, RecurKind RdxKind, Value *LHS,
Value *RHS, const Twine &Name,
const ReductionOpsListType &ReductionOps) {
- bool UseSelect =
- ReductionOps.size() == 2 ||
- // Logical or/and.
- (ReductionOps.size() == 1 && any_of(ReductionOps.front(), [](Value *V) {
- return isa<SelectInst>(V);
- }));
+ bool UseSelect = ReductionOps.size() == 2 ||
+ // Logical or/and.
+ (ReductionOps.size() == 1 &&
+ any_of(ReductionOps.front(), IsaPred<SelectInst>));
assert((!UseSelect || ReductionOps.size() != 2 ||
isa<SelectInst>(ReductionOps[1][0])) &&
"Expected cmp + select pairs for reduction");
@@ -15432,7 +15477,7 @@ public:
!hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
!isVectorizable(RdxKind, EdgeInst) ||
(R.isAnalyzedReductionRoot(EdgeInst) &&
- all_of(EdgeInst->operands(), Constant::classof))) {
+ all_of(EdgeInst->operands(), IsaPred<Constant>))) {
PossibleReducedVals.push_back(EdgeVal);
continue;
}
@@ -15803,7 +15848,9 @@ public:
RegMaxNumber * llvm::bit_floor(MaxVecRegSize / EltSize);
unsigned ReduxWidth = std::min<unsigned>(
- llvm::bit_floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
+ llvm::bit_floor(NumReducedVals),
+ std::clamp<unsigned>(MaxElts, RedValsMaxNumber,
+ RegMaxNumber * RedValsMaxNumber));
unsigned Start = 0;
unsigned Pos = Start;
// Restarts vectorization attempt with lower vector factor.
@@ -15945,7 +15992,7 @@ public:
LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost
<< " for reduction\n");
if (!Cost.isValid())
- return nullptr;
+ break;
if (Cost >= -SLPCostThreshold) {
V.getORE()->emit([&]() {
return OptimizationRemarkMissed(
@@ -16786,9 +16833,7 @@ bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
SmallVector<Value *, 16> BuildVectorOpds;
SmallVector<int> Mask;
if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
- (llvm::all_of(
- BuildVectorOpds,
- [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
+ (llvm::all_of(BuildVectorOpds, IsaPred<ExtractElementInst, UndefValue>) &&
isFixedVectorShuffle(BuildVectorOpds, Mask)))
return false;
@@ -17009,10 +17054,7 @@ bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts,
bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions,
BasicBlock *BB, BoUpSLP &R) {
- assert(all_of(Instructions,
- [](auto *I) {
- return isa<InsertElementInst, InsertValueInst>(I);
- }) &&
+ assert(all_of(Instructions, IsaPred<InsertElementInst, InsertValueInst>) &&
"This function only accepts Insert instructions");
bool OpsChanged = false;
SmallVector<WeakTrackingVH> PostponedInsts;
diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index 29a395c35731..605b47fa0a46 100644
--- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -52,9 +52,8 @@ class VPRecipeBuilder {
EdgeMaskCacheTy EdgeMaskCache;
BlockMaskCacheTy BlockMaskCache;
- // VPlan-VPlan transformations support: Hold a mapping from ingredients to
- // their recipe. To save on memory, only do so for selected ingredients,
- // marked by having a nullptr entry in this map.
+ // VPlan construction support: Hold a mapping from ingredients to
+ // their recipe.
DenseMap<Instruction *, VPRecipeBase *> Ingredient2Recipe;
/// Cross-iteration reduction & first-order recurrence phis for which we need
@@ -117,13 +116,10 @@ public:
ArrayRef<VPValue *> Operands,
VFRange &Range, VPBasicBlock *VPBB);
- /// Set the recipe created for given ingredient. This operation is a no-op for
- /// ingredients that were not marked using a nullptr entry in the map.
+ /// Set the recipe created for given ingredient.
void setRecipe(Instruction *I, VPRecipeBase *R) {
- if (!Ingredient2Recipe.count(I))
- return;
- assert(Ingredient2Recipe[I] == nullptr &&
- "Recipe already set for ingredient");
+ assert(!Ingredient2Recipe.contains(I) &&
+ "Cannot reset recipe for instruction.");
Ingredient2Recipe[I] = R;
}
@@ -146,14 +142,6 @@ public:
/// between SRC and DST.
VPValue *getEdgeMask(BasicBlock *Src, BasicBlock *Dst) const;
- /// Mark given ingredient for recording its recipe once one is created for
- /// it.
- void recordRecipeOf(Instruction *I) {
- assert((!Ingredient2Recipe.count(I) || Ingredient2Recipe[I] == nullptr) &&
- "Recipe already set for ingredient");
- Ingredient2Recipe[I] = nullptr;
- }
-
/// Return the recipe created for given ingredient.
VPRecipeBase *getRecipe(Instruction *I) {
assert(Ingredient2Recipe.count(I) &&
@@ -171,6 +159,19 @@ public:
/// Add the incoming values from the backedge to reduction & first-order
/// recurrence cross-iteration phis.
void fixHeaderPhis();
+
+ /// Returns a range mapping the values of the range \p Operands to their
+ /// corresponding VPValues.
+ iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
+ mapToVPValues(User::op_range Operands);
+
+ VPValue *getVPValueOrAddLiveIn(Value *V, VPlan &Plan) {
+ if (auto *I = dyn_cast<Instruction>(V)) {
+ if (auto *R = Ingredient2Recipe.lookup(I))
+ return R->getVPSingleValue();
+ }
+ return Plan.getOrAddLiveIn(V);
+ }
};
} // end namespace llvm
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 9768e4b7aa0a..9a8f53c8dbe4 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -812,7 +812,7 @@ void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
// needs to be changed from zero to the value after the main vector loop.
// FIXME: Improve modeling for canonical IV start values in the epilogue loop.
if (CanonicalIVStartValue) {
- VPValue *VPV = getVPValueOrAddLiveIn(CanonicalIVStartValue);
+ VPValue *VPV = getOrAddLiveIn(CanonicalIVStartValue);
auto *IV = getCanonicalIV();
assert(all_of(IV->users(),
[](const VPUser *U) {
@@ -860,11 +860,8 @@ void VPlan::execute(VPTransformState *State) {
Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
} else {
auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
- // TODO: Split off the case that all users of a pointer phi are scalar
- // from the VPWidenPointerInductionRecipe.
- if (WidenPhi->onlyScalarsGenerated(State->VF.isScalable()))
- continue;
-
+ assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
+ "recipe generating only scalars should have been replaced");
auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
Phi = cast<PHINode>(GEP->getPointerOperand());
}
@@ -1091,7 +1088,7 @@ VPlan *VPlan::duplicate() {
DenseMap<VPValue *, VPValue *> Old2NewVPValues;
for (VPValue *OldLiveIn : VPLiveInsToFree) {
Old2NewVPValues[OldLiveIn] =
- NewPlan->getVPValueOrAddLiveIn(OldLiveIn->getLiveInIRValue());
+ NewPlan->getOrAddLiveIn(OldLiveIn->getLiveInIRValue());
}
Old2NewVPValues[&VectorTripCount] = &NewPlan->VectorTripCount;
Old2NewVPValues[&VFxUF] = &NewPlan->VFxUF;
@@ -1102,7 +1099,7 @@ VPlan *VPlan::duplicate() {
assert(TripCount && "trip count must be set");
if (TripCount->isLiveIn())
Old2NewVPValues[TripCount] =
- NewPlan->getVPValueOrAddLiveIn(TripCount->getLiveInIRValue());
+ NewPlan->getOrAddLiveIn(TripCount->getLiveInIRValue());
// else NewTripCount will be created and inserted into Old2NewVPValues when
// TripCount is cloned. In any case NewPlan->TripCount is updated below.
@@ -1425,9 +1422,9 @@ VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
return Expanded;
VPValue *Expanded = nullptr;
if (auto *E = dyn_cast<SCEVConstant>(Expr))
- Expanded = Plan.getVPValueOrAddLiveIn(E->getValue());
+ Expanded = Plan.getOrAddLiveIn(E->getValue());
else if (auto *E = dyn_cast<SCEVUnknown>(Expr))
- Expanded = Plan.getVPValueOrAddLiveIn(E->getValue());
+ Expanded = Plan.getOrAddLiveIn(E->getValue());
else {
Expanded = new VPExpandSCEVRecipe(Expr, SE);
Plan.getPreheader()->appendRecipe(Expanded->getDefiningRecipe());
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index d77c7554d50e..6c90c793e66c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1127,6 +1127,12 @@ public:
return WrapFlags.HasNSW;
}
+ bool isDisjoint() const {
+ assert(OpType == OperationType::DisjointOp &&
+ "recipe cannot have a disjoing flag");
+ return DisjointFlags.IsDisjoint;
+ }
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void printFlags(raw_ostream &O) const;
#endif
@@ -1155,6 +1161,10 @@ public:
BranchOnCount,
BranchOnCond,
ComputeReductionResult,
+ // Add an offset in bytes (second operand) to a base pointer (first
+ // operand). Only generates scalar values (either for the first lane only or
+ // for all lanes, depending on its uses).
+ PtrAdd,
};
private:
@@ -1164,11 +1174,28 @@ private:
/// An optional name that can be used for the generated IR instruction.
const std::string Name;
- /// Utility method serving execute(): generates a single instance of the
- /// modeled instruction. \returns the generated value for \p Part.
- /// In some cases an existing value is returned rather than a generated
+ /// Returns true if this VPInstruction generates scalar values for all lanes.
+ /// Most VPInstructions generate a single value per part, either vector or
+ /// scalar. VPReplicateRecipe takes care of generating multiple (scalar)
+ /// values per all lanes, stemming from an original ingredient. This method
+ /// identifies the (rare) cases of VPInstructions that do so as well, w/o an
+ /// underlying ingredient.
+ bool doesGeneratePerAllLanes() const;
+
+ /// Returns true if we can generate a scalar for the first lane only if
+ /// needed.
+ bool canGenerateScalarForFirstLane() const;
+
+ /// Utility methods serving execute(): generates a single instance of the
+ /// modeled instruction for a given part. \returns the generated value for \p
+ /// Part. In some cases an existing value is returned rather than a generated
/// one.
- Value *generateInstruction(VPTransformState &State, unsigned Part);
+ Value *generatePerPart(VPTransformState &State, unsigned Part);
+
+ /// Utility methods serving execute(): generates a scalar single instance of
+ /// the modeled instruction for a given lane. \returns the scalar generated
+ /// value for lane \p Lane.
+ Value *generatePerLane(VPTransformState &State, const VPIteration &Lane);
#if !defined(NDEBUG)
/// Return true if the VPInstruction is a floating point math operation, i.e.
@@ -1265,6 +1292,7 @@ public:
default:
return false;
case VPInstruction::BranchOnCount:
+ case VPInstruction::CanonicalIVIncrementForPart:
return true;
};
llvm_unreachable("switch should return");
@@ -2490,12 +2518,6 @@ class VPDerivedIVRecipe : public VPSingleDefRecipe {
/// for floating point inductions.
const FPMathOperator *FPBinOp;
- VPDerivedIVRecipe(InductionDescriptor::InductionKind Kind,
- const FPMathOperator *FPBinOp, VPValue *Start,
- VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step)
- : VPSingleDefRecipe(VPDef::VPDerivedIVSC, {Start, CanonicalIV, Step}),
- Kind(Kind), FPBinOp(FPBinOp) {}
-
public:
VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPValue *Start,
VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step)
@@ -2504,6 +2526,12 @@ public:
dyn_cast_or_null<FPMathOperator>(IndDesc.getInductionBinOp()),
Start, CanonicalIV, Step) {}
+ VPDerivedIVRecipe(InductionDescriptor::InductionKind Kind,
+ const FPMathOperator *FPBinOp, VPValue *Start,
+ VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step)
+ : VPSingleDefRecipe(VPDef::VPDerivedIVSC, {Start, CanonicalIV, Step}),
+ Kind(Kind), FPBinOp(FPBinOp) {}
+
~VPDerivedIVRecipe() override = default;
VPRecipeBase *clone() override {
@@ -2872,10 +2900,6 @@ class VPlan {
/// definitions are VPValues that hold a pointer to their underlying IR.
SmallVector<VPValue *, 16> VPLiveInsToFree;
- /// Indicates whether it is safe use the Value2VPValue mapping or if the
- /// mapping cannot be used any longer, because it is stale.
- bool Value2VPValueEnabled = true;
-
/// Values used outside the plan.
MapVector<PHINode *, VPLiveOut *> LiveOuts;
@@ -2954,10 +2978,6 @@ public:
/// Returns VF * UF of the vector loop region.
VPValue &getVFxUF() { return VFxUF; }
- /// Mark the plan to indicate that using Value2VPValue is not safe any
- /// longer, because it may be stale.
- void disableValue2VPValue() { Value2VPValueEnabled = false; }
-
void addVF(ElementCount VF) { VFs.insert(VF); }
void setVF(ElementCount VF) {
@@ -2987,25 +3007,15 @@ public:
void setName(const Twine &newName) { Name = newName.str(); }
void addVPValue(Value *V, VPValue *VPV) {
- assert((Value2VPValueEnabled || VPV->isLiveIn()) &&
- "Value2VPValue mapping may be out of date!");
+ assert(VPV->isLiveIn() && "VPV must be a live-in.");
assert(V && "Trying to add a null Value to VPlan");
assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
Value2VPValue[V] = VPV;
}
- /// Returns the VPValue for \p V.
- VPValue *getVPValue(Value *V) {
- assert(V && "Trying to get the VPValue of a null Value");
- assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
- assert((Value2VPValueEnabled || Value2VPValue[V]->isLiveIn()) &&
- "Value2VPValue mapping may be out of date!");
- return Value2VPValue[V];
- }
-
- /// Gets the VPValue for \p V or adds a new live-in (if none exists yet) for
- /// \p V.
- VPValue *getVPValueOrAddLiveIn(Value *V) {
+ /// Gets the live-in VPValue for \p V or adds a new live-in (if none exists
+ /// yet) for \p V.
+ VPValue *getOrAddLiveIn(Value *V) {
assert(V && "Trying to get or add the VPValue of a null Value");
if (!Value2VPValue.count(V)) {
VPValue *VPV = new VPValue(V);
@@ -3013,7 +3023,10 @@ public:
addVPValue(V, VPV);
}
- return getVPValue(V);
+ assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
+ assert(Value2VPValue[V]->isLiveIn() &&
+ "Only live-ins should be in mapping");
+ return Value2VPValue[V];
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -3030,16 +3043,6 @@ public:
LLVM_DUMP_METHOD void dump() const;
#endif
- /// Returns a range mapping the values the range \p Operands to their
- /// corresponding VPValues.
- iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
- mapToVPValues(User::op_range Operands) {
- std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
- return getVPValueOrAddLiveIn(Op);
- };
- return map_range(Operands, Fn);
- }
-
/// Returns the VPRegionBlock of the vector loop.
VPRegionBlock *getVectorLoopRegion() {
return cast<VPRegionBlock>(getEntry()->getSingleSuccessor());
diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
index f55beac2047c..04e30312dc23 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -44,6 +44,9 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
CachedTypes[OtherV] = ResTy;
return ResTy;
}
+ case VPInstruction::PtrAdd:
+ // Return the type based on the pointer argument (i.e. first operand).
+ return inferScalarType(R->getOperand(0));
default:
break;
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
index 877b5d438115..b57ff2840a72 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
@@ -272,7 +272,7 @@ VPValue *PlainCFGBuilder::getOrCreateVPOperand(Value *IRVal) {
// A and B: Create VPValue and add it to the pool of external definitions and
// to the Value->VPValue map.
- VPValue *NewVPVal = Plan.getVPValueOrAddLiveIn(IRVal);
+ VPValue *NewVPVal = Plan.getOrAddLiveIn(IRVal);
IRDef2VPValue[IRVal] = NewVPVal;
return NewVPVal;
}
@@ -361,7 +361,7 @@ void PlainCFGBuilder::buildPlainCFG() {
for (auto &I : *ThePreheaderBB) {
if (I.getType()->isVoidTy())
continue;
- IRDef2VPValue[&I] = Plan.getVPValueOrAddLiveIn(&I);
+ IRDef2VPValue[&I] = Plan.getOrAddLiveIn(&I);
}
LoopBlocksRPO RPO(TheLoop);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
index aa2535906945..a03a408686ef 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
@@ -261,6 +261,11 @@ m_Mul(const Op0_t &Op0, const Op1_t &Op1) {
return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}
+template <typename Op0_t, typename Op1_t>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or>
+m_Or(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
+}
} // namespace VPlanPatternMatch
} // namespace llvm
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index d75e322a74cf..23d025cf33ea 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -127,6 +127,7 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPInstruction::Not:
case VPInstruction::CalculateTripCountMinusVF:
case VPInstruction::CanonicalIVIncrementForPart:
+ case VPInstruction::PtrAdd:
return false;
default:
return true;
@@ -270,10 +271,39 @@ VPInstruction::VPInstruction(unsigned Opcode,
assert(isFPMathOp() && "this op can't take fast-math flags");
}
-Value *VPInstruction::generateInstruction(VPTransformState &State,
- unsigned Part) {
+bool VPInstruction::doesGeneratePerAllLanes() const {
+ return Opcode == VPInstruction::PtrAdd && !vputils::onlyFirstLaneUsed(this);
+}
+
+bool VPInstruction::canGenerateScalarForFirstLane() const {
+ if (Instruction::isBinaryOp(getOpcode()))
+ return true;
+
+ switch (Opcode) {
+ case VPInstruction::BranchOnCond:
+ case VPInstruction::BranchOnCount:
+ case VPInstruction::CalculateTripCountMinusVF:
+ case VPInstruction::CanonicalIVIncrementForPart:
+ case VPInstruction::ComputeReductionResult:
+ case VPInstruction::PtrAdd:
+ return true;
+ default:
+ return false;
+ }
+}
+
+Value *VPInstruction::generatePerLane(VPTransformState &State,
+ const VPIteration &Lane) {
+ IRBuilderBase &Builder = State.Builder;
+
+ assert(getOpcode() == VPInstruction::PtrAdd &&
+ "only PtrAdd opcodes are supported for now");
+ return Builder.CreatePtrAdd(State.get(getOperand(0), Lane),
+ State.get(getOperand(1), Lane), Name);
+}
+
+Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) {
IRBuilderBase &Builder = State.Builder;
- Builder.SetCurrentDebugLocation(getDebugLoc());
if (Instruction::isBinaryOp(getOpcode())) {
bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
@@ -345,6 +375,9 @@ Value *VPInstruction::generateInstruction(VPTransformState &State,
return Builder.CreateVectorSplice(PartMinus1, V2, -1, Name);
}
case VPInstruction::CalculateTripCountMinusVF: {
+ if (Part != 0)
+ return State.get(this, 0, /*IsScalar*/ true);
+
Value *ScalarTC = State.get(getOperand(0), {0, 0});
Value *Step =
createStepForVF(Builder, ScalarTC->getType(), State.VF, State.UF);
@@ -487,6 +520,13 @@ Value *VPInstruction::generateInstruction(VPTransformState &State,
return ReducedPartRdx;
}
+ case VPInstruction::PtrAdd: {
+ assert(vputils::onlyFirstLaneUsed(this) &&
+ "can only generate first lane for PtrAdd");
+ Value *Ptr = State.get(getOperand(0), Part, /* IsScalar */ true);
+ Value *Addend = State.get(getOperand(1), Part, /* IsScalar */ true);
+ return Builder.CreatePtrAdd(Ptr, Addend, Name);
+ }
default:
llvm_unreachable("Unsupported opcode for instruction");
}
@@ -511,17 +551,33 @@ void VPInstruction::execute(VPTransformState &State) {
"Recipe not a FPMathOp but has fast-math flags?");
if (hasFastMathFlags())
State.Builder.setFastMathFlags(getFastMathFlags());
+ State.Builder.SetCurrentDebugLocation(getDebugLoc());
+ bool GeneratesPerFirstLaneOnly =
+ canGenerateScalarForFirstLane() &&
+ (vputils::onlyFirstLaneUsed(this) ||
+ getOpcode() == VPInstruction::ComputeReductionResult);
+ bool GeneratesPerAllLanes = doesGeneratePerAllLanes();
for (unsigned Part = 0; Part < State.UF; ++Part) {
- Value *GeneratedValue = generateInstruction(State, Part);
- if (!hasResult())
+ if (GeneratesPerAllLanes) {
+ for (unsigned Lane = 0, NumLanes = State.VF.getKnownMinValue();
+ Lane != NumLanes; ++Lane) {
+ Value *GeneratedValue = generatePerLane(State, VPIteration(Part, Lane));
+ assert(GeneratedValue && "generatePerLane must produce a value");
+ State.set(this, GeneratedValue, VPIteration(Part, Lane));
+ }
continue;
- assert(GeneratedValue && "generateInstruction must produce a value");
+ }
- bool IsVector = GeneratedValue->getType()->isVectorTy();
- State.set(this, GeneratedValue, Part, !IsVector);
- assert((IsVector || getOpcode() == VPInstruction::ComputeReductionResult ||
- State.VF.isScalar() || vputils::onlyFirstLaneUsed(this)) &&
- "scalar value but not only first lane used");
+ Value *GeneratedValue = generatePerPart(State, Part);
+ if (!hasResult())
+ continue;
+ assert(GeneratedValue && "generatePerPart must produce a value");
+ assert((GeneratedValue->getType()->isVectorTy() ==
+ !GeneratesPerFirstLaneOnly ||
+ State.VF.isScalar()) &&
+ "scalar value but not only first lane defined");
+ State.set(this, GeneratedValue, Part,
+ /*IsScalar*/ GeneratesPerFirstLaneOnly);
}
}
@@ -534,6 +590,7 @@ bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
default:
return false;
case Instruction::ICmp:
+ case VPInstruction::PtrAdd:
// TODO: Cover additional opcodes.
return vputils::onlyFirstLaneUsed(this);
case VPInstruction::ActiveLaneMask:
@@ -591,6 +648,9 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent,
case VPInstruction::ComputeReductionResult:
O << "compute-reduction-result";
break;
+ case VPInstruction::PtrAdd:
+ O << "ptradd";
+ break;
default:
O << Instruction::getOpcodeName(getOpcode());
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index a91ccefe4b6d..6f881d4328f6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -48,15 +48,14 @@ void VPlanTransforms::VPInstructionsToVPRecipes(
VPRecipeBase *NewRecipe = nullptr;
if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
- if (const auto *II = GetIntOrFpInductionDescriptor(Phi)) {
- VPValue *Start = Plan->getVPValueOrAddLiveIn(II->getStartValue());
- VPValue *Step =
- vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
- NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, *II);
- } else {
- Plan->addVPValue(Phi, VPPhi);
+ const auto *II = GetIntOrFpInductionDescriptor(Phi);
+ if (!II)
continue;
- }
+
+ VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
+ VPValue *Step =
+ vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
+ NewRecipe = new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, *II);
} else {
assert(isa<VPInstruction>(&Ingredient) &&
"only VPInstructions expected here");
@@ -499,15 +498,18 @@ static void removeDeadRecipes(VPlan &Plan) {
}
}
-static VPValue *createScalarIVSteps(VPlan &Plan, const InductionDescriptor &ID,
+static VPValue *createScalarIVSteps(VPlan &Plan,
+ InductionDescriptor::InductionKind Kind,
+ Instruction::BinaryOps InductionOpcode,
+ FPMathOperator *FPBinOp,
ScalarEvolution &SE, Instruction *TruncI,
VPValue *StartV, VPValue *Step,
VPBasicBlock::iterator IP) {
VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
VPSingleDefRecipe *BaseIV = CanonicalIV;
- if (!CanonicalIV->isCanonical(ID.getKind(), StartV, Step)) {
- BaseIV = new VPDerivedIVRecipe(ID, StartV, CanonicalIV, Step);
+ if (!CanonicalIV->isCanonical(Kind, StartV, Step)) {
+ BaseIV = new VPDerivedIVRecipe(Kind, FPBinOp, StartV, CanonicalIV, Step);
HeaderVPBB->insert(BaseIV, IP);
}
@@ -537,21 +539,56 @@ static VPValue *createScalarIVSteps(VPlan &Plan, const InductionDescriptor &ID,
VecPreheader->appendRecipe(Step->getDefiningRecipe());
}
- VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(ID, BaseIV, Step);
+ VPScalarIVStepsRecipe *Steps = new VPScalarIVStepsRecipe(
+ BaseIV, Step, InductionOpcode,
+ FPBinOp ? FPBinOp->getFastMathFlags() : FastMathFlags());
HeaderVPBB->insert(Steps, IP);
return Steps;
}
-/// If any user of a VPWidenIntOrFpInductionRecipe needs scalar values,
-/// provide them by building scalar steps off of the canonical scalar IV and
-/// update the original IV's users. This is an optional optimization to reduce
-/// the needs of vector extracts.
-static void optimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
+/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
+/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
+/// VPWidenPointerInductionRecipe will generate vectors only. If some users
+/// require vectors while other require scalars, the scalar uses need to extract
+/// the scalars from the generated vectors (Note that this is different to how
+/// int/fp inductions are handled). Also optimize VPWidenIntOrFpInductionRecipe,
+/// if any of its users needs scalar values, by providing them scalar steps
+/// built on the canonical scalar IV and update the original IV's users. This is
+/// an optional optimization to reduce the needs of vector extracts.
+static void legalizeAndOptimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
SmallVector<VPRecipeBase *> ToRemove;
VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
VPBasicBlock::iterator InsertPt = HeaderVPBB->getFirstNonPhi();
for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
+ // Replace wide pointer inductions which have only their scalars used by
+ // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
+ if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
+ if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
+ continue;
+
+ const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
+ VPValue *StartV =
+ Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
+ VPValue *StepV = PtrIV->getOperand(1);
+ VPRecipeBase *Steps =
+ createScalarIVSteps(Plan, InductionDescriptor::IK_IntInduction,
+ Instruction::Add, nullptr, SE, nullptr, StartV,
+ StepV, InsertPt)
+ ->getDefiningRecipe();
+
+ auto *Recipe =
+ new VPInstruction(VPInstruction::PtrAdd,
+ {PtrIV->getStartValue(), Steps->getVPSingleValue()},
+ PtrIV->getDebugLoc(), "next.gep");
+
+ Recipe->insertAfter(Steps);
+ PtrIV->replaceAllUsesWith(Recipe);
+ continue;
+ }
+
+ // Replace widened induction with scalar steps for users that only use
+ // scalars.
auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
if (!WideIV)
continue;
@@ -561,9 +598,11 @@ static void optimizeInductions(VPlan &Plan, ScalarEvolution &SE) {
continue;
const InductionDescriptor &ID = WideIV->getInductionDescriptor();
- VPValue *Steps = createScalarIVSteps(Plan, ID, SE, WideIV->getTruncInst(),
- WideIV->getStartValue(),
- WideIV->getStepValue(), InsertPt);
+ VPValue *Steps = createScalarIVSteps(
+ Plan, ID.getKind(), ID.getInductionOpcode(),
+ dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()), SE,
+ WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
+ InsertPt);
// Update scalar users of IV to use Step instead.
if (!HasOnlyVectorVFs)
@@ -624,9 +663,9 @@ void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
return;
LLVMContext &Ctx = SE.getContext();
- auto *BOC = new VPInstruction(
- VPInstruction::BranchOnCond,
- {Plan.getVPValueOrAddLiveIn(ConstantInt::getTrue(Ctx))});
+ auto *BOC =
+ new VPInstruction(VPInstruction::BranchOnCond,
+ {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))});
Term->eraseFromParent();
ExitingVPBB->appendRecipe(BOC);
Plan.setVF(BestVF);
@@ -1026,7 +1065,7 @@ void VPlanTransforms::optimize(VPlan &Plan, ScalarEvolution &SE) {
removeRedundantInductionCasts(Plan);
simplifyRecipes(Plan, SE.getContext());
- optimizeInductions(Plan, SE);
+ legalizeAndOptimizeInductions(Plan, SE);
removeDeadRecipes(Plan);
createAndOptimizeReplicateRegions(Plan);
@@ -1216,7 +1255,24 @@ void VPlanTransforms::dropPoisonGeneratingRecipes(
// load/store. If the underlying instruction has poison-generating flags,
// drop them directly.
if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
- RecWithFlags->dropPoisonGeneratingFlags();
+ VPValue *A, *B;
+ using namespace llvm::VPlanPatternMatch;
+ // Dropping disjoint from an OR may yield incorrect results, as some
+ // analysis may have converted it to an Add implicitly (e.g. SCEV used
+ // for dependence analysis). Instead, replace it with an equivalent Add.
+ // This is possible as all users of the disjoint OR only access lanes
+ // where the operands are disjoint or poison otherwise.
+ if (match(RecWithFlags, m_Or(m_VPValue(A), m_VPValue(B))) &&
+ RecWithFlags->isDisjoint()) {
+ VPBuilder Builder(RecWithFlags);
+ VPInstruction *New = Builder.createOverflowingOp(
+ Instruction::Add, {A, B}, {false, false},
+ RecWithFlags->getDebugLoc());
+ RecWithFlags->replaceAllUsesWith(New);
+ RecWithFlags->eraseFromParent();
+ CurRec = New;
+ } else
+ RecWithFlags->dropPoisonGeneratingFlags();
} else {
Instruction *Instr = dyn_cast_or_null<Instruction>(
CurRec->getVPSingleValue()->getUnderlyingValue());
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 23494314f132..7e86137f23f3 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -66,8 +66,8 @@ class VectorCombine {
public:
VectorCombine(Function &F, const TargetTransformInfo &TTI,
const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
- bool TryEarlyFoldsOnly)
- : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC),
+ const DataLayout *DL, bool TryEarlyFoldsOnly)
+ : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC), DL(DL),
TryEarlyFoldsOnly(TryEarlyFoldsOnly) {}
bool run();
@@ -79,6 +79,7 @@ private:
const DominatorTree &DT;
AAResults &AA;
AssumptionCache &AC;
+ const DataLayout *DL;
/// If true, only perform beneficial early IR transforms. Do not introduce new
/// vector operations.
@@ -181,7 +182,6 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
// We use minimal alignment (maximum flexibility) because we only care about
// the dereferenceable region. When calculating cost and creating a new op,
// we may use a larger value based on alignment attributes.
- const DataLayout &DL = I.getModule()->getDataLayout();
Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
@@ -189,15 +189,15 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
unsigned OffsetEltIndex = 0;
Align Alignment = Load->getAlign();
- if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &AC,
+ if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), *DL, Load, &AC,
&DT)) {
// It is not safe to load directly from the pointer, but we can still peek
// through gep offsets and check if it safe to load from a base address with
// updated alignment. If it is, we can shuffle the element(s) into place
// after loading.
- unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
+ unsigned OffsetBitWidth = DL->getIndexTypeSizeInBits(SrcPtr->getType());
APInt Offset(OffsetBitWidth, 0);
- SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+ SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
// We want to shuffle the result down from a high element of a vector, so
// the offset must be positive.
@@ -215,7 +215,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
if (OffsetEltIndex >= MinVecNumElts)
return false;
- if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &AC,
+ if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), *DL, Load, &AC,
&DT))
return false;
@@ -227,7 +227,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
// Original pattern: insertelt undef, load [free casts of] PtrOp, 0
// Use the greater of the alignment on the load or its source pointer.
- Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
+ Alignment = std::max(SrcPtr->getPointerAlignment(*DL), Alignment);
Type *LoadTy = Load->getType();
unsigned AS = Load->getPointerAddressSpace();
InstructionCost OldCost =
@@ -298,14 +298,13 @@ bool VectorCombine::widenSubvectorLoad(Instruction &I) {
// the dereferenceable region. When calculating cost and creating a new op,
// we may use a larger value based on alignment attributes.
auto *Ty = cast<FixedVectorType>(I.getType());
- const DataLayout &DL = I.getModule()->getDataLayout();
Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
Align Alignment = Load->getAlign();
- if (!isSafeToLoadUnconditionally(SrcPtr, Ty, Align(1), DL, Load, &AC, &DT))
+ if (!isSafeToLoadUnconditionally(SrcPtr, Ty, Align(1), *DL, Load, &AC, &DT))
return false;
- Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
+ Alignment = std::max(SrcPtr->getPointerAlignment(*DL), Alignment);
Type *LoadTy = Load->getType();
unsigned AS = Load->getPointerAddressSpace();
@@ -854,7 +853,6 @@ bool VectorCombine::scalarizeVPIntrinsic(Instruction &I) {
// Scalarize the intrinsic
ElementCount EC = cast<VectorType>(Op0->getType())->getElementCount();
Value *EVL = VPI.getArgOperand(3);
- const DataLayout &DL = VPI.getModule()->getDataLayout();
// If the VP op might introduce UB or poison, we can scalarize it provided
// that we know the EVL > 0: If the EVL is zero, then the original VP op
@@ -867,7 +865,7 @@ bool VectorCombine::scalarizeVPIntrinsic(Instruction &I) {
else
SafeToSpeculate = isSafeToSpeculativelyExecuteWithOpcode(
*FunctionalOpcode, &VPI, nullptr, &AC, &DT);
- if (!SafeToSpeculate && !isKnownNonZero(EVL, DL, 0, &AC, &VPI, &DT))
+ if (!SafeToSpeculate && !isKnownNonZero(EVL, *DL, 0, &AC, &VPI, &DT))
return false;
Value *ScalarVal =
@@ -1246,12 +1244,11 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {
if (auto *Load = dyn_cast<LoadInst>(Source)) {
auto VecTy = cast<VectorType>(SI->getValueOperand()->getType());
- const DataLayout &DL = I.getModule()->getDataLayout();
Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
// Don't optimize for atomic/volatile load or store. Ensure memory is not
// modified between, vector type matches store size, and index is inbounds.
if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
- !DL.typeSizeEqualsStoreSize(Load->getType()->getScalarType()) ||
+ !DL->typeSizeEqualsStoreSize(Load->getType()->getScalarType()) ||
SrcAddr != SI->getPointerOperand()->stripPointerCasts())
return false;
@@ -1270,7 +1267,7 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {
NSI->copyMetadata(*SI);
Align ScalarOpAlignment = computeAlignmentAfterScalarization(
std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
- DL);
+ *DL);
NSI->setAlignment(ScalarOpAlignment);
replaceValue(I, *NSI);
eraseInstruction(I);
@@ -1288,8 +1285,7 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
auto *VecTy = cast<VectorType>(I.getType());
auto *LI = cast<LoadInst>(&I);
- const DataLayout &DL = I.getModule()->getDataLayout();
- if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(VecTy->getScalarType()))
+ if (LI->isVolatile() || !DL->typeSizeEqualsStoreSize(VecTy->getScalarType()))
return false;
InstructionCost OriginalCost =
@@ -1367,7 +1363,7 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
VecTy->getElementType(), GEP, EI->getName() + ".scalar"));
Align ScalarOpAlignment = computeAlignmentAfterScalarization(
- LI->getAlign(), VecTy->getElementType(), Idx, DL);
+ LI->getAlign(), VecTy->getElementType(), Idx, *DL);
NewLoad->setAlignment(ScalarOpAlignment);
replaceValue(*EI, *NewLoad);
@@ -2042,7 +2038,8 @@ PreservedAnalyses VectorCombinePass::run(Function &F,
TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
AAResults &AA = FAM.getResult<AAManager>(F);
- VectorCombine Combiner(F, TTI, DT, AA, AC, TryEarlyFoldsOnly);
+ const DataLayout *DL = &F.getParent()->getDataLayout();
+ VectorCombine Combiner(F, TTI, DT, AA, AC, DL, TryEarlyFoldsOnly);
if (!Combiner.run())
return PreservedAnalyses::all();
PreservedAnalyses PA;
diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt
index 623c43d564cc..8159d7f8a0a1 100644
--- a/llvm/runtimes/CMakeLists.txt
+++ b/llvm/runtimes/CMakeLists.txt
@@ -435,7 +435,7 @@ if(runtimes)
list(APPEND extra_deps "flang-new")
endif()
foreach(dep opt llvm-link llvm-extract clang clang-offload-packager)
- if(TARGET ${dep} AND OPENMP_ENABLE_LIBOMPTARGET)
+ if(TARGET ${dep})
list(APPEND extra_deps ${dep})
endif()
endforeach()
@@ -531,7 +531,7 @@ if(runtimes)
check_apple_target(${name} runtime)
runtime_register_target(${name}
- DEPENDS ${builtins_dep_name} ${hdrgen_deps}
+ DEPENDS ${builtins_dep_name} ${extra_deps}
CMAKE_ARGS -DLLVM_DEFAULT_TARGET_TRIPLE=${name} ${libc_cmake_args}
EXTRA_ARGS TARGET_TRIPLE ${name})
endforeach()
diff --git a/llvm/test/Analysis/CostModel/RISCV/cast.ll b/llvm/test/Analysis/CostModel/RISCV/cast.ll
index bd26c19c2f2c..14da9a3f79d7 100644
--- a/llvm/test/Analysis/CostModel/RISCV/cast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/cast.ll
@@ -16,74 +16,74 @@ define void @sext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = sext <2 x i1> undef to <2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = sext <4 x i8> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = sext <4 x i8> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = sext <4 x i16> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = sext <4 x i1> undef to <4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = sext <4 x i1> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = sext <4 x i1> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = sext <8 x i8> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = sext <8 x i1> undef to <8 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = sext <8 x i1> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = sext <16 x i1> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = sext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = sext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = sext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -96,73 +96,73 @@ define void @sext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = sext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = sext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = sext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = sext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = sext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = sext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = sext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = sext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = sext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = sext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = sext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i8_nxv64i64 = sext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 69 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i1_nxv64i64 = sext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = sext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = sext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = sext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = sext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -179,74 +179,74 @@ define void @sext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = sext <2 x i1> undef to <2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = sext <4 x i8> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = sext <4 x i8> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = sext <4 x i8> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = sext <4 x i16> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = sext <4 x i16> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = sext <4 x i32> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = sext <4 x i1> undef to <4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = sext <4 x i1> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = sext <4 x i1> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = sext <4 x i1> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = sext <8 x i8> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = sext <8 x i8> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = sext <8 x i8> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = sext <8 x i16> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = sext <8 x i16> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = sext <8 x i32> undef to <8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = sext <8 x i1> undef to <8 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = sext <8 x i1> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = sext <8 x i1> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = sext <8 x i1> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = sext <16 x i8> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = sext <16 x i8> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = sext <16 x i8> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = sext <16 x i16> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = sext <16 x i16> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = sext <16 x i32> undef to <16 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = sext <16 x i1> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = sext <16 x i1> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = sext <16 x i1> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = sext <16 x i1> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = sext <32 x i8> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = sext <32 x i8> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = sext <32 x i8> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = sext <32 x i16> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = sext <32 x i16> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = sext <32 x i32> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = sext <32 x i1> undef to <32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = sext <32 x i1> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = sext <32 x i1> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = sext <32 x i1> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = sext <64 x i8> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = sext <64 x i8> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = sext <64 x i8> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = sext <64 x i16> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = sext <64 x i16> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = sext <64 x i32> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = sext <64 x i1> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = sext <64 x i1> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = sext <64 x i1> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = sext <64 x i1> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = sext <128 x i8> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = sext <128 x i8> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = sext <128 x i8> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = sext <128 x i16> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = sext <128 x i16> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = sext <128 x i32> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = sext <128 x i1> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = sext <128 x i1> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = sext <128 x i1> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = sext <128 x i1> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = sext <256 x i8> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = sext <256 x i8> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = sext <256 x i8> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = sext <256 x i16> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = sext <256 x i16> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = sext <256 x i32> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = sext <256 x i1> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = sext <256 x i1> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = sext <256 x i1> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = sext <256 x i1> undef to <256 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = sext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = sext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = sext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -259,73 +259,73 @@ define void @sext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = sext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = sext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = sext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = sext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = sext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = sext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = sext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = sext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = sext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = sext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = sext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = sext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = sext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = sext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = sext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = sext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = sext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = sext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = sext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = sext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = sext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = sext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = sext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = sext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = sext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = sext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = sext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = sext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i8_nxv64i64 = sext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %nxv64i1_nxv64i64 = sext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = sext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = sext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = sext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = sext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = sext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = sext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = sext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = sext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = sext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = sext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = sext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = sext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = sext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = sext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = sext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = sext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = sext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = sext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = sext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = sext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = sext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = sext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = sext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = sext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = sext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i8_nxv64i64 = sext <vscale x 64 x i8> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = sext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv64i16_nxv64i64 = sext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv64i32_nxv64i64 = sext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = sext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = sext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = sext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %nxv64i1_nxv64i64 = sext <vscale x 64 x i1> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = sext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = sext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = sext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = sext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = sext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = sext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = sext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = sext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = sext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = sext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -522,74 +522,74 @@ define void @zext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = zext <2 x i1> undef to <2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = zext <4 x i8> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = zext <4 x i8> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = zext <4 x i16> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = zext <4 x i1> undef to <4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = zext <4 x i1> undef to <4 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = zext <4 x i1> undef to <4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = zext <8 x i8> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = zext <8 x i1> undef to <8 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = zext <8 x i1> undef to <8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = zext <16 x i1> undef to <16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = zext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = zext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = zext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -602,73 +602,73 @@ define void @zext() {
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = zext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = zext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = zext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = zext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = zext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = zext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = zext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = zext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = zext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = zext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = zext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i8_nxv64i64 = zext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV32-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 69 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV32-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv64i1_nxv64i64 = zext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV32-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = zext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = zext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = zext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV32-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV32-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV32-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV32-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV32-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = zext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV32-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
@@ -685,74 +685,74 @@ define void @zext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i1_v2i64 = zext <2 x i1> undef to <2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i16 = zext <4 x i8> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i32 = zext <4 x i8> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_v4i64 = zext <4 x i8> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i32 = zext <4 x i16> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i16_v4i64 = zext <4 x i16> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i32_v4i64 = zext <4 x i32> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i8 = zext <4 x i1> undef to <4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i16 = zext <4 x i1> undef to <4 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i32 = zext <4 x i1> undef to <4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_v4i64 = zext <4 x i1> undef to <4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i16 = zext <8 x i8> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_v8i32 = zext <8 x i8> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i8_v8i64 = zext <8 x i8> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i16_v8i32 = zext <8 x i16> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i16_v8i64 = zext <8 x i16> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i32_v8i64 = zext <8 x i32> undef to <8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i8 = zext <8 x i1> undef to <8 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i16 = zext <8 x i1> undef to <8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_v8i32 = zext <8 x i1> undef to <8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i1_v8i64 = zext <8 x i1> undef to <8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i8_v16i16 = zext <16 x i8> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i8_v16i32 = zext <16 x i8> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_v16i64 = zext <16 x i8> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i16_v16i32 = zext <16 x i16> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i16_v16i64 = zext <16 x i16> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i32_v16i64 = zext <16 x i32> undef to <16 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i8 = zext <16 x i1> undef to <16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_v16i16 = zext <16 x i1> undef to <16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_v16i32 = zext <16 x i1> undef to <16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i1_v16i64 = zext <16 x i1> undef to <16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i8_v32i16 = zext <32 x i8> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i8_v32i32 = zext <32 x i8> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i8_v32i64 = zext <32 x i8> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i16_v32i32 = zext <32 x i16> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i16_v32i64 = zext <32 x i16> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v32i32_v32i64 = zext <32 x i32> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_v32i8 = zext <32 x i1> undef to <32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_v32i16 = zext <32 x i1> undef to <32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v32i1_v32i32 = zext <32 x i1> undef to <32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v32i1_v32i64 = zext <32 x i1> undef to <32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i8_v64i16 = zext <64 x i8> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i8_v64i32 = zext <64 x i8> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i8_v64i64 = zext <64 x i8> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v64i16_v64i32 = zext <64 x i16> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v64i16_v64i64 = zext <64 x i16> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v64i32_v64i64 = zext <64 x i32> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v64i1_v64i8 = zext <64 x i1> undef to <64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v64i1_v64i16 = zext <64 x i1> undef to <64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v64i1_v64i32 = zext <64 x i1> undef to <64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v64i1_v64i64 = zext <64 x i1> undef to <64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %v128i8_v128i16 = zext <128 x i8> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %v128i8_v128i32 = zext <128 x i8> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %v128i8_v128i64 = zext <128 x i8> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v128i16_v128i32 = zext <128 x i16> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v128i16_v128i64 = zext <128 x i16> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v128i32_v128i64 = zext <128 x i32> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v128i1_v128i8 = zext <128 x i1> undef to <128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %v128i1_v128i16 = zext <128 x i1> undef to <128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %v128i1_v128i32 = zext <128 x i1> undef to <128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %v128i1_v128i64 = zext <128 x i1> undef to <128 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v256i8_v256i16 = zext <256 x i8> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %v256i8_v256i32 = zext <256 x i8> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 142 for instruction: %v256i8_v256i64 = zext <256 x i8> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v256i16_v256i32 = zext <256 x i16> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 140 for instruction: %v256i16_v256i64 = zext <256 x i16> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v256i32_v256i64 = zext <256 x i32> undef to <256 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v256i1_v256i8 = zext <256 x i1> undef to <256 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %v256i1_v256i16 = zext <256 x i1> undef to <256 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v256i1_v256i32 = zext <256 x i1> undef to <256 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 270 for instruction: %v256i1_v256i64 = zext <256 x i1> undef to <256 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i16 = zext <vscale x 1 x i8> undef to <vscale x 1 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i32 = zext <vscale x 1 x i8> undef to <vscale x 1 x i32>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv1i8_nxv1i64 = zext <vscale x 1 x i8> undef to <vscale x 1 x i64>
@@ -765,73 +765,73 @@ define void @zext() {
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv1i1_nxv1i64 = zext <vscale x 1 x i1> undef to <vscale x 1 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i16 = zext <vscale x 2 x i8> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i32 = zext <vscale x 2 x i8> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i8_nxv2i64 = zext <vscale x 2 x i8> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i32 = zext <vscale x 2 x i16> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i16_nxv2i64 = zext <vscale x 2 x i16> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i32_nxv2i64 = zext <vscale x 2 x i32> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i8 = zext <vscale x 2 x i1> undef to <vscale x 2 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i16 = zext <vscale x 2 x i1> undef to <vscale x 2 x i16>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i32 = zext <vscale x 2 x i1> undef to <vscale x 2 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_nxv2i64 = zext <vscale x 2 x i1> undef to <vscale x 2 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i16 = zext <vscale x 4 x i8> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i8_nxv4i32 = zext <vscale x 4 x i8> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i8_nxv4i64 = zext <vscale x 4 x i8> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i16_nxv4i32 = zext <vscale x 4 x i16> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i16_nxv4i64 = zext <vscale x 4 x i16> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i32_nxv4i64 = zext <vscale x 4 x i32> undef to <vscale x 4 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i8 = zext <vscale x 4 x i1> undef to <vscale x 4 x i8>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i16 = zext <vscale x 4 x i1> undef to <vscale x 4 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_nxv4i32 = zext <vscale x 4 x i1> undef to <vscale x 4 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv4i1_nxv4i64 = zext <vscale x 4 x i1> undef to <vscale x 4 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i8_nxv8i16 = zext <vscale x 8 x i8> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i8_nxv8i32 = zext <vscale x 8 x i8> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i8_nxv8i64 = zext <vscale x 8 x i8> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i16_nxv8i32 = zext <vscale x 8 x i16> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i16_nxv8i64 = zext <vscale x 8 x i16> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i32_nxv8i64 = zext <vscale x 8 x i32> undef to <vscale x 8 x i64>
; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i8 = zext <vscale x 8 x i1> undef to <vscale x 8 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %nxv64i8_nxv64i64 = zext <vscale x 64 x i8> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
-; RV64-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %nxv64i1_nxv64i64 = zext <vscale x 64 x i1> undef to <vscale x 64 x i64>
-; RV64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_nxv8i16 = zext <vscale x 8 x i1> undef to <vscale x 8 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv8i1_nxv8i32 = zext <vscale x 8 x i1> undef to <vscale x 8 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv8i1_nxv8i64 = zext <vscale x 8 x i1> undef to <vscale x 8 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i8_nxv16i16 = zext <vscale x 16 x i8> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i8_nxv16i32 = zext <vscale x 16 x i8> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i8_nxv16i64 = zext <vscale x 16 x i8> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i16_nxv16i32 = zext <vscale x 16 x i16> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i16_nxv16i64 = zext <vscale x 16 x i16> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv16i32_nxv16i64 = zext <vscale x 16 x i32> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_nxv16i8 = zext <vscale x 16 x i1> undef to <vscale x 16 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_nxv16i16 = zext <vscale x 16 x i1> undef to <vscale x 16 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv16i1_nxv16i32 = zext <vscale x 16 x i1> undef to <vscale x 16 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv16i1_nxv16i64 = zext <vscale x 16 x i1> undef to <vscale x 16 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i8_nxv32i16 = zext <vscale x 32 x i8> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i8_nxv32i32 = zext <vscale x 32 x i8> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i8_nxv32i64 = zext <vscale x 32 x i8> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv32i16_nxv32i32 = zext <vscale x 32 x i16> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv32i16_nxv32i64 = zext <vscale x 32 x i16> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv32i32_nxv32i64 = zext <vscale x 32 x i32> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv32i1_nxv32i8 = zext <vscale x 32 x i1> undef to <vscale x 32 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv32i1_nxv32i16 = zext <vscale x 32 x i1> undef to <vscale x 32 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv32i1_nxv32i32 = zext <vscale x 32 x i1> undef to <vscale x 32 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv32i1_nxv32i64 = zext <vscale x 32 x i1> undef to <vscale x 32 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %nxv64i8_nxv64i16 = zext <vscale x 64 x i8> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %nxv64i8_nxv64i32 = zext <vscale x 64 x i8> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 71 for instruction: %nxv64i8_nxv64i64 = zext <vscale x 64 x i8> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv64i16_nxv64i32 = zext <vscale x 64 x i16> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv64i16_nxv64i64 = zext <vscale x 64 x i16> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv64i32_nxv64i64 = zext <vscale x 64 x i32> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %nxv64i1_nxv64i8 = zext <vscale x 64 x i1> undef to <vscale x 64 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %nxv64i1_nxv64i16 = zext <vscale x 64 x i1> undef to <vscale x 64 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 67 for instruction: %nxv64i1_nxv64i32 = zext <vscale x 64 x i1> undef to <vscale x 64 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 135 for instruction: %nxv64i1_nxv64i64 = zext <vscale x 64 x i1> undef to <vscale x 64 x i64>
+; RV64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %nxv128i8_nxv128i16 = zext <vscale x 128 x i8> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %nxv128i8_nxv128i32 = zext <vscale x 128 x i8> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i8_nxv128i128 = zext <vscale x 128 x i8> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %nxv128i16_nxv128i32 = zext <vscale x 128 x i16> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i16_nxv128i128 = zext <vscale x 128 x i16> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i32_nxv128i128 = zext <vscale x 128 x i32> undef to <vscale x 128 x i128>
-; RV64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
-; RV64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
-; RV64-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
+; RV64-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %nxv128i1_nxv128i8 = zext <vscale x 128 x i1> undef to <vscale x 128 x i8>
+; RV64-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %nxv128i1_nxv128i16 = zext <vscale x 128 x i1> undef to <vscale x 128 x i16>
+; RV64-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %nxv128i1_nxv128i32 = zext <vscale x 128 x i1> undef to <vscale x 128 x i32>
; RV64-NEXT: Cost Model: Invalid cost for instruction: %nxv128i1_nxv128i128 = zext <vscale x 128 x i1> undef to <vscale x 128 x i128>
; RV64-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
index 1618c3833a97..f91f13b2d9ec 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-fmaximum.ll
@@ -6,23 +6,37 @@
define float @reduce_fmaximum_f32(float %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %2 = call fast float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %3 = call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %4 = call fast float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %5 = call fast float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float undef
;
; SIZE-LABEL: 'reduce_fmaximum_f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call fast float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call fast float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call fast float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float undef
;
%V2 = call float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
@@ -32,6 +46,13 @@ define float @reduce_fmaximum_f32(float %arg) {
%V32 = call float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
%V64 = call float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
%V128 = call float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v2f32(<2 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v16f32(<16 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v32f32(<32 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v64f32(<64 x float> undef)
+call fast float @llvm.vector.reduce.fmaximum.v128f32(<128 x float> undef)
ret float undef
}
declare float @llvm.vector.reduce.fmaximum.v2f32(<2 x float>)
@@ -44,21 +65,33 @@ declare float @llvm.vector.reduce.fmaximum.v128f32(<128 x float>)
define double @reduce_fmaximum_f64(double %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %2 = call fast double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %3 = call fast double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %4 = call fast double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %5 = call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %6 = call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double undef
;
; SIZE-LABEL: 'reduce_fmaximum_f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call fast double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call fast double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call fast double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call fast double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double undef
;
%V2 = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
@@ -67,6 +100,12 @@ define double @reduce_fmaximum_f64(double %arg) {
%V16 = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
%V32 = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
%V64 = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> undef)
+call fast double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> undef)
ret double undef
}
declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
index 35b18645b1f2..86b84025ad54 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-fminimum.ll
@@ -6,23 +6,23 @@
define float @reduce_fmaximum_f32(float %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret float undef
;
; SIZE-LABEL: 'reduce_fmaximum_f32'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call float @llvm.vector.reduce.fminimum.v16f32(<16 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call float @llvm.vector.reduce.fminimum.v32f32(<32 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call float @llvm.vector.reduce.fminimum.v64f32(<64 x float> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V128 = call float @llvm.vector.reduce.fminimum.v128f32(<128 x float> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret float undef
;
%V2 = call float @llvm.vector.reduce.fminimum.v2f32(<2 x float> undef)
@@ -44,21 +44,21 @@ declare float @llvm.vector.reduce.fminimum.v128f32(<128 x float>)
define double @reduce_fmaximum_f64(double %arg) {
; CHECK-LABEL: 'reduce_fmaximum_f64'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 17 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret double undef
;
; SIZE-LABEL: 'reduce_fmaximum_f64'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4 = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8 = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16 = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V32 = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> undef)
+; SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V64 = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> undef)
; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret double undef
;
%V2 = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> undef)
diff --git a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
index 80efe912c869..30cb32ce4eaf 100644
--- a/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/reduce-scalable-int.ll
@@ -1141,7 +1141,7 @@ define signext i32 @vreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
define signext i32 @vwreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vwreduce_add_nxv4i16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = sext <vscale x 4 x i16> %v to <vscale x 4 x i32>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = sext <vscale x 4 x i16> %v to <vscale x 4 x i32>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
@@ -1157,7 +1157,7 @@ define signext i32 @vwreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
define signext i32 @vwreduce_uadd_nxv4i16(<vscale x 4 x i16> %v) {
; CHECK-LABEL: 'vwreduce_uadd_nxv4i16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = zext <vscale x 4 x i16> %v to <vscale x 4 x i32>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = zext <vscale x 4 x i16> %v to <vscale x 4 x i32>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 %red
;
@@ -1445,7 +1445,7 @@ define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
define i64 @vwreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vwreduce_add_nxv2i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1461,7 +1461,7 @@ define i64 @vwreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
define i64 @vwreduce_uadd_nxv2i32(<vscale x 2 x i32> %v) {
; CHECK-LABEL: 'vwreduce_uadd_nxv2i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1597,7 +1597,7 @@ define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
define i64 @vwreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vwreduce_add_nxv4i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = sext <vscale x 4 x i32> %v to <vscale x 4 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e = sext <vscale x 4 x i32> %v to <vscale x 4 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
@@ -1613,7 +1613,7 @@ define i64 @vwreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
define i64 @vwreduce_uadd_nxv4i32(<vscale x 4 x i32> %v) {
; CHECK-LABEL: 'vwreduce_uadd_nxv4i32'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = zext <vscale x 4 x i32> %v to <vscale x 4 x i64>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e = zext <vscale x 4 x i32> %v to <vscale x 4 x i64>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i64 %red
;
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll
index 225bad6da591..aa7a90bece33 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-extractelement.ll
@@ -12,12 +12,12 @@ define void @extractelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -66,12 +66,12 @@ define void @extractelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -120,12 +120,12 @@ define void @extractelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
@@ -177,12 +177,12 @@ define void @extractelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -231,12 +231,12 @@ define void @extractelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -285,12 +285,12 @@ define void @extractelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
@@ -341,13 +341,13 @@ define void @extractelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i1_0 = extractelement <2 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -395,13 +395,13 @@ define void @extractelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_1 = extractelement <2 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -449,13 +449,13 @@ define void @extractelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_x = extractelement <2 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
@@ -506,13 +506,13 @@ define void @extractelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i1_0 = extractelement <2 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i1_0 = extractelement <4 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i1_0 = extractelement <8 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = extractelement <16 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_0 = extractelement <32 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv2i1_0 = extractelement <vscale x 2 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv4i1_0 = extractelement <vscale x 4 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv8i1_0 = extractelement <vscale x 8 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = extractelement <vscale x 16 x i1> undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv32i1_0 = extractelement <vscale x 32 x i1> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = extractelement <2 x i8> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = extractelement <4 x i8> undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = extractelement <8 x i8> undef, i32 0
@@ -560,13 +560,13 @@ define void @extractelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_1 = extractelement <2 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_1 = extractelement <4 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_1 = extractelement <8 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = extractelement <16 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_1 = extractelement <32 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_1 = extractelement <vscale x 2 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_1 = extractelement <vscale x 4 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_1 = extractelement <vscale x 8 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = extractelement <vscale x 16 x i1> undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_1 = extractelement <vscale x 32 x i1> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = extractelement <2 x i8> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = extractelement <4 x i8> undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = extractelement <8 x i8> undef, i32 1
@@ -614,13 +614,13 @@ define void @extractelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i1_x = extractelement <2 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i1_x = extractelement <4 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i1_x = extractelement <8 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_x = extractelement <16 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v32i1_x = extractelement <32 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv2i1_x = extractelement <vscale x 2 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv4i1_x = extractelement <vscale x 4 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv8i1_x = extractelement <vscale x 8 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_x = extractelement <vscale x 16 x i1> undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %nxv32i1_x = extractelement <vscale x 32 x i1> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_x = extractelement <2 x i8> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_x = extractelement <4 x i8> undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_x = extractelement <8 x i8> undef, i32 %x
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll
index 5387c8dc3594..6e1ae0216f76 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-insertelement.ll
@@ -12,12 +12,12 @@ define void @insertelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV32V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV32V-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV32V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -66,12 +66,12 @@ define void @insertelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV32V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV32V-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV32V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -120,12 +120,12 @@ define void @insertelement_int(i32 %x) {
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV32V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV32V-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV32V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
@@ -177,12 +177,12 @@ define void @insertelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV64V-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV64V-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV64V-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -231,12 +231,12 @@ define void @insertelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV64V-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV64V-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV64V-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -285,12 +285,12 @@ define void @insertelement_int(i32 %x) {
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV64V-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV64V-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV64V-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
@@ -341,13 +341,13 @@ define void @insertelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v2i1_0 = insertelement <2 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -395,13 +395,13 @@ define void @insertelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2i1_1 = insertelement <2 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -449,13 +449,13 @@ define void @insertelement_int(i32 %x) {
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v2i1_x = insertelement <2 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV32ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
@@ -506,13 +506,13 @@ define void @insertelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v2i1_0 = insertelement <2 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v4i1_0 = insertelement <4 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8i1_0 = insertelement <8 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_0 = insertelement <16 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v32i1_0 = insertelement <32 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv2i1_0 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv4i1_0 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv8i1_0 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_0 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 0
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %nxv32i1_0 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i8_0 = insertelement <2 x i8> undef, i8 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i8_0 = insertelement <4 x i8> undef, i8 undef, i32 0
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i8_0 = insertelement <8 x i8> undef, i8 undef, i32 0
@@ -560,13 +560,13 @@ define void @insertelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2i1_1 = insertelement <2 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i1_1 = insertelement <4 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i1_1 = insertelement <8 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i1_1 = insertelement <16 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v32i1_1 = insertelement <32 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv2i1_1 = insertelement <vscale x 2 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv4i1_1 = insertelement <vscale x 4 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv8i1_1 = insertelement <vscale x 8 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %nxv16i1_1 = insertelement <vscale x 16 x i1> undef, i1 undef, i32 1
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %nxv32i1_1 = insertelement <vscale x 32 x i1> undef, i1 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i8_1 = insertelement <2 x i8> undef, i8 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i8_1 = insertelement <4 x i8> undef, i8 undef, i32 1
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8i8_1 = insertelement <8 x i8> undef, i8 undef, i32 1
@@ -614,13 +614,13 @@ define void @insertelement_int(i32 %x) {
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v2i1_x = insertelement <2 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v4i1_x = insertelement <4 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i1_x = insertelement <8 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16i1_x = insertelement <16 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %v32i1_x = insertelement <32 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv2i1_x = insertelement <vscale x 2 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv4i1_x = insertelement <vscale x 4 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv8i1_x = insertelement <vscale x 8 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
-; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %nxv16i1_x = insertelement <vscale x 16 x i1> undef, i1 undef, i32 %x
+; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %nxv32i1_x = insertelement <vscale x 32 x i1> undef, i1 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2i8_x = insertelement <2 x i8> undef, i8 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i8_x = insertelement <4 x i8> undef, i8 undef, i32 %x
; RV64ZVE64X-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v8i8_x = insertelement <8 x i8> undef, i8 undef, i32 %x
diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
index 46bf3152ac5b..b763198e98ba 100644
--- a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll
@@ -197,7 +197,7 @@ define void @broadcast_fixed() #0{
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %41 = shufflevector <32 x i1> undef, <32 x i1> undef, <32 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %42 = shufflevector <64 x i1> undef, <64 x i1> undef, <64 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %43 = shufflevector <128 x i1> undef, <128 x i1> undef, <128 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %ins1 = insertelement <128 x i1> poison, i1 poison, i32 0
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %ins1 = insertelement <128 x i1> poison, i1 poison, i32 0
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %44 = shufflevector <128 x i1> %ins1, <128 x i1> poison, <128 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ins2 = insertelement <2 x i8> poison, i8 3, i32 0
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %45 = shufflevector <2 x i8> %ins2, <2 x i8> undef, <2 x i32> zeroinitializer
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
index 333ec6dcaf48..26c85e83b53a 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
@@ -197,78 +197,38 @@ bb:
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1) %gep)
+; CHECK: DIVERGENT: %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1) %addr)
define amdgpu_kernel void @global_load_tr_b64_v2i32(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1) %gep)
+ %tmp0 = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1) %addr)
store <2 x i32> %tmp0, ptr addrspace(1) %out, align 8
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1) %gep)
+; CHECK: DIVERGENT: %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1) %addr)
define amdgpu_kernel void @global_load_tr_b128_v8i16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1) %gep)
+ %tmp0 = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1) %addr)
store <8 x i16> %tmp0, ptr addrspace(1) %out, align 16
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v8f16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1) %gep)
- store <8 x half> %tmp0, ptr addrspace(1) %out, align 16
- ret void
-}
-
-; CHECK: DIVERGENT: %tmp0 = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v8bf16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1) %gep)
- store <8 x bfloat> %tmp0, ptr addrspace(1) %out, align 16
- ret void
-}
-
-; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1) %gep)
+; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1) %addr)
define amdgpu_kernel void @global_load_tr_b64_i32(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1) %gep)
+ %tmp0 = call i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1) %addr)
store i32 %tmp0, ptr addrspace(1) %out, align 4
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v4i16_(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
+; CHECK: DIVERGENT: %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1) %addr)
+define amdgpu_kernel void @global_load_tr_b128_v4i16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1) %gep)
+ %tmp0 = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1) %addr)
store <4 x i16> %tmp0, ptr addrspace(1) %out, align 8
ret void
}
-; CHECK: DIVERGENT: %tmp0 = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v4f16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1) %gep)
- store <4 x half> %tmp0, ptr addrspace(1) %out, align 8
- ret void
-}
-
-; CHECK: DIVERGENT: %tmp0 = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1) %gep)
-define amdgpu_kernel void @global_load_tr_b128_v4bf16(ptr addrspace(1) %addr, ptr addrspace(1) %out) {
-bb:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %tmp0 = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1) %gep)
- store <4 x bfloat> %tmp0, ptr addrspace(1) %out, align 8
- ret void
-}
-
declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
declare i32 @llvm.amdgcn.permlane16(i32, i32, i32, i32, i1, i1) #1
declare i32 @llvm.amdgcn.permlanex16(i32, i32, i32, i32, i1, i1) #1
@@ -296,14 +256,10 @@ declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.fp8.bf8(<2 x i32>, <4 x i32
declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf8.fp8(<2 x i32>, <4 x i32>, <8 x float>, i16)
declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf8.bf8(<2 x i32>, <4 x i32>, <8 x float>, i16)
-declare <2 x i32> @llvm.amdgcn.global.load.tr.v2i32(ptr addrspace(1))
-declare <8 x i16> @llvm.amdgcn.global.load.tr.v8i16(ptr addrspace(1))
-declare <8 x half> @llvm.amdgcn.global.load.tr.v8f16(ptr addrspace(1))
-declare <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16(ptr addrspace(1))
-declare i32 @llvm.amdgcn.global.load.tr.i32(ptr addrspace(1))
-declare <4 x i16> @llvm.amdgcn.global.load.tr.v4i16(ptr addrspace(1))
-declare <4 x half> @llvm.amdgcn.global.load.tr.v4f16(ptr addrspace(1))
-declare <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16(ptr addrspace(1))
+declare <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1))
+declare <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1))
+declare i32 @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1))
+declare <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1))
attributes #0 = { nounwind convergent }
attributes #1 = { nounwind readnone convergent }
diff --git a/llvm/test/Assembler/flags.ll b/llvm/test/Assembler/flags.ll
index 04bddd02f50c..d75b0cb0ea82 100644
--- a/llvm/test/Assembler/flags.ll
+++ b/llvm/test/Assembler/flags.ll
@@ -261,3 +261,51 @@ define i64 @test_or(i64 %a, i64 %b) {
%res = or disjoint i64 %a, %b
ret i64 %res
}
+
+define i32 @test_trunc_signed(i64 %a) {
+; CHECK: %res = trunc nsw i64 %a to i32
+ %res = trunc nsw i64 %a to i32
+ ret i32 %res
+}
+
+define i32 @test_trunc_unsigned(i64 %a) {
+; CHECK: %res = trunc nuw i64 %a to i32
+ %res = trunc nuw i64 %a to i32
+ ret i32 %res
+}
+
+define i32 @test_trunc_both(i64 %a) {
+; CHECK: %res = trunc nuw nsw i64 %a to i32
+ %res = trunc nuw nsw i64 %a to i32
+ ret i32 %res
+}
+
+define i32 @test_trunc_both_reversed(i64 %a) {
+; CHECK: %res = trunc nuw nsw i64 %a to i32
+ %res = trunc nsw nuw i64 %a to i32
+ ret i32 %res
+}
+
+define <2 x i32> @test_trunc_signed_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nsw <2 x i64> %a to <2 x i32>
+ %res = trunc nsw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i32> @test_trunc_unsigned_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nuw <2 x i64> %a to <2 x i32>
+ %res = trunc nuw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i32> @test_trunc_both_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nuw nsw <2 x i64> %a to <2 x i32>
+ %res = trunc nuw nsw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
+
+define <2 x i32> @test_trunc_both_reversed_vector(<2 x i64> %a) {
+; CHECK: %res = trunc nuw nsw <2 x i64> %a to <2 x i32>
+ %res = trunc nsw nuw <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %res
+}
diff --git a/llvm/test/Bindings/OCaml/core.ml b/llvm/test/Bindings/OCaml/core.ml
index a9abc9d17fe4..64bfa8ee412d 100644
--- a/llvm/test/Bindings/OCaml/core.ml
+++ b/llvm/test/Bindings/OCaml/core.ml
@@ -252,7 +252,7 @@ let test_constants () =
group "constant arithmetic";
(* CHECK: @const_neg = global i64 sub
* CHECK: @const_nsw_neg = global i64 sub nsw
- * CHECK: @const_nuw_neg = global i64 sub nuw
+ * CHECK: @const_nuw_neg = global i64 sub
* CHECK: @const_not = global i64 xor
* CHECK: @const_add = global i64 add
* CHECK: @const_nsw_add = global i64 add nsw
diff --git a/llvm/test/Bindings/OCaml/debuginfo.ml b/llvm/test/Bindings/OCaml/debuginfo.ml
index d469d4715b00..f95800dfcb02 100644
--- a/llvm/test/Bindings/OCaml/debuginfo.ml
+++ b/llvm/test/Bindings/OCaml/debuginfo.ml
@@ -39,6 +39,8 @@ let prepare_target llmod =
let new_module () =
let m = Llvm.create_module context module_name in
let () = prepare_target m in
+ let () = Llvm_debuginfo.set_is_new_dbg_info_format m true in
+ insist (Llvm_debuginfo.is_new_dbg_info_format m);
m
let test_get_module () =
@@ -285,8 +287,8 @@ let test_variables f dibuilder file_di fun_di =
~var_info:auto_var ~expr:(Llvm_debuginfo.dibuild_expression dibuilder [||])
~location ~instr:entry_term
in
- let () = Printf.printf "%s\n" (Llvm.string_of_llvalue vdi) in
- (* CHECK: call void @llvm.dbg.declare(metadata ptr %my_alloca, metadata {{![0-9]+}}, metadata !DIExpression()), !dbg {{\![0-9]+}}
+ let () = Printf.printf "%s\n" (Llvm.string_of_lldbgrecord vdi) in
+ (* CHECK: dbg_declare(ptr %my_alloca, ![[#]], !DIExpression(), ![[#]])
*)
let arg0 = (Llvm.params f).(0) in
let arg_var = Llvm_debuginfo.dibuild_create_parameter_variable dibuilder ~scope:fun_di
@@ -297,8 +299,8 @@ let test_variables f dibuilder file_di fun_di =
~var_info:arg_var ~expr:(Llvm_debuginfo.dibuild_expression dibuilder [||])
~location ~instr:entry_term
in
- let () = Printf.printf "%s\n" (Llvm.string_of_llvalue argdi) in
- (* CHECK: call void @llvm.dbg.declare(metadata i32 %0, metadata {{![0-9]+}}, metadata !DIExpression()), !dbg {{\![0-9]+}}
+ let () = Printf.printf "%s\n" (Llvm.string_of_lldbgrecord argdi) in
+ (* CHECK: dbg_declare(i32 %0, ![[#]], !DIExpression(), ![[#]])
*)
()
diff --git a/llvm/test/Bitcode/compatibility-3.6.ll b/llvm/test/Bitcode/compatibility-3.6.ll
index b1f4abf7b8c5..2190e2fbccf2 100644
--- a/llvm/test/Bitcode/compatibility-3.6.ll
+++ b/llvm/test/Bitcode/compatibility-3.6.ll
@@ -1061,16 +1061,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1178,11 +1178,11 @@ define void @intrinsics.codegen() {
; CHECK: attributes #27 = { uwtable }
; CHECK: attributes #28 = { "cpu"="cortex-a8" }
; CHECK: attributes #29 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #30 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #31 = { nounwind memory(argmem: read) }
-; CHECK: attributes #32 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #33 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #34 = { nocallback nounwind }
+; CHECK: attributes #30 = { nounwind memory(argmem: read) }
+; CHECK: attributes #31 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #32 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #33 = { nocallback nounwind }
+; CHECK: attributes #34 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #36 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-3.7.ll b/llvm/test/Bitcode/compatibility-3.7.ll
index 91e55f6eda59..7e59b5c1be6e 100644
--- a/llvm/test/Bitcode/compatibility-3.7.ll
+++ b/llvm/test/Bitcode/compatibility-3.7.ll
@@ -1092,16 +1092,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1241,11 +1241,11 @@ define void @misc.metadata() {
; CHECK: attributes #30 = { uwtable }
; CHECK: attributes #31 = { "cpu"="cortex-a8" }
; CHECK: attributes #32 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #33 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #34 = { nounwind memory(argmem: read) }
-; CHECK: attributes #35 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #37 = { nocallback nounwind }
+; CHECK: attributes #33 = { nounwind memory(argmem: read) }
+; CHECK: attributes #34 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #36 = { nocallback nounwind }
+; CHECK: attributes #37 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #39 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-3.8.ll b/llvm/test/Bitcode/compatibility-3.8.ll
index aa4d8b14968c..ebd1f2fff8c9 100644
--- a/llvm/test/Bitcode/compatibility-3.8.ll
+++ b/llvm/test/Bitcode/compatibility-3.8.ll
@@ -1247,16 +1247,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1551,11 +1551,11 @@ normal:
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #42 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-3.9.ll b/llvm/test/Bitcode/compatibility-3.9.ll
index e3c84f6e6007..c34f04ceb0de 100644
--- a/llvm/test/Bitcode/compatibility-3.9.ll
+++ b/llvm/test/Bitcode/compatibility-3.9.ll
@@ -1318,16 +1318,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1624,11 +1624,11 @@ declare void @f.writeonly() writeonly
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #43 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-4.0.ll b/llvm/test/Bitcode/compatibility-4.0.ll
index 06cb842059a4..05bffda1d117 100644
--- a/llvm/test/Bitcode/compatibility-4.0.ll
+++ b/llvm/test/Bitcode/compatibility-4.0.ll
@@ -1318,16 +1318,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1649,11 +1649,11 @@ define i8** @constexpr() {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #43 = { builtin }
diff --git a/llvm/test/Bitcode/compatibility-5.0.ll b/llvm/test/Bitcode/compatibility-5.0.ll
index f9ae558917cd..0c872289c62b 100644
--- a/llvm/test/Bitcode/compatibility-5.0.ll
+++ b/llvm/test/Bitcode/compatibility-5.0.ll
@@ -1330,16 +1330,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1664,11 +1664,11 @@ define i8** @constexpr() {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { speculatable }
; CHECK: attributes #43 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
diff --git a/llvm/test/Bitcode/compatibility-6.0.ll b/llvm/test/Bitcode/compatibility-6.0.ll
index 1458e1b639ad..44c680885be3 100644
--- a/llvm/test/Bitcode/compatibility-6.0.ll
+++ b/llvm/test/Bitcode/compatibility-6.0.ll
@@ -1340,16 +1340,16 @@ define void @instructions.va_arg(i8* %v, ...) {
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- ; CHECK: call void @llvm.va_start(ptr %ap2)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap2)
va_arg i8* %ap2, i32
; CHECK: va_arg ptr %ap2, i32
call void @llvm.va_copy(i8* %v, i8* %ap2)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap2)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap2)
call void @llvm.va_end(i8* %ap2)
- ; CHECK: call void @llvm.va_end(ptr %ap2)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap2)
ret void
}
@@ -1674,11 +1674,11 @@ define i8** @constexpr() {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #41 = { memory(write) }
; CHECK: attributes #42 = { speculatable }
; CHECK: attributes #43 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll
index fa8b0520567a..b374924516d6 100644
--- a/llvm/test/Bitcode/compatibility.ll
+++ b/llvm/test/Bitcode/compatibility.ll
@@ -1648,16 +1648,16 @@ define void @instructions.va_arg(ptr %v, ...) {
%ap = alloca ptr
call void @llvm.va_start(ptr %ap)
- ; CHECK: call void @llvm.va_start(ptr %ap)
+ ; CHECK: call void @llvm.va_start.p0(ptr %ap)
va_arg ptr %ap, i32
; CHECK: va_arg ptr %ap, i32
call void @llvm.va_copy(ptr %v, ptr %ap)
- ; CHECK: call void @llvm.va_copy(ptr %v, ptr %ap)
+ ; CHECK: call void @llvm.va_copy.p0(ptr %v, ptr %ap)
call void @llvm.va_end(ptr %ap)
- ; CHECK: call void @llvm.va_end(ptr %ap)
+ ; CHECK: call void @llvm.va_end.p0(ptr %ap)
ret void
}
@@ -2091,12 +2091,12 @@ define float @nofpclass_callsites(float %arg) {
; CHECK: attributes #33 = { memory(inaccessiblemem: readwrite) }
; CHECK: attributes #34 = { memory(argmem: readwrite, inaccessiblemem: readwrite) }
; CHECK: attributes #35 = { nocallback nofree nosync nounwind willreturn memory(none) }
-; CHECK: attributes #36 = { nocallback nofree nosync nounwind willreturn }
-; CHECK: attributes #37 = { nounwind memory(argmem: read) }
-; CHECK: attributes #38 = { nounwind memory(argmem: readwrite) }
-; CHECK: attributes #39 = { nocallback nofree nosync nounwind willreturn memory(read) }
-; CHECK: attributes #40 = { nocallback nounwind }
-; CHECK: attributes #41 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
+; CHECK: attributes #36 = { nounwind memory(argmem: read) }
+; CHECK: attributes #37 = { nounwind memory(argmem: readwrite) }
+; CHECK: attributes #38 = { nocallback nofree nosync nounwind willreturn memory(read) }
+; CHECK: attributes #39 = { nocallback nounwind }
+; CHECK: attributes #40 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite, inaccessiblemem: readwrite) }
+; CHECK: attributes #41 = { nocallback nofree nosync nounwind willreturn }
; CHECK: attributes #42 = { memory(write) }
; CHECK: attributes #43 = { speculatable }
; CHECK: attributes #44 = { strictfp }
diff --git a/llvm/test/Bitcode/flags.ll b/llvm/test/Bitcode/flags.ll
index e3fc827d865d..96995ec570c9 100644
--- a/llvm/test/Bitcode/flags.ll
+++ b/llvm/test/Bitcode/flags.ll
@@ -20,17 +20,34 @@ second: ; preds = %first
%ll = zext i32 %s to i64
%jj = or disjoint i32 %a, 0
%oo = or i32 %a, 0
+ %tu = trunc nuw i32 %a to i16
+ %ts = trunc nsw i32 %a to i16
+ %tus = trunc nuw nsw i32 %a to i16
+ %t = trunc i32 %a to i16
+ %tuv = trunc nuw <2 x i32> %aa to <2 x i16>
+ %tsv = trunc nsw <2 x i32> %aa to <2 x i16>
+ %tusv = trunc nuw nsw <2 x i32> %aa to <2 x i16>
+ %tv = trunc <2 x i32> %aa to <2 x i16>
unreachable
-first: ; preds = %entry
- %a = bitcast i32 0 to i32 ; <i32> [#uses=8]
- %uu = add nuw i32 %a, 0 ; <i32> [#uses=0]
- %ss = add nsw i32 %a, 0 ; <i32> [#uses=0]
- %uuss = add nuw nsw i32 %a, 0 ; <i32> [#uses=0]
- %zz = add i32 %a, 0 ; <i32> [#uses=0]
+first: ; preds = %entry
+ %aa = bitcast <2 x i32> <i32 0, i32 0> to <2 x i32>
+ %a = bitcast i32 0 to i32 ; <i32> [#uses=8]
+ %uu = add nuw i32 %a, 0 ; <i32> [#uses=0]
+ %ss = add nsw i32 %a, 0 ; <i32> [#uses=0]
+ %uuss = add nuw nsw i32 %a, 0 ; <i32> [#uses=0]
+ %zz = add i32 %a, 0 ; <i32> [#uses=0]
%kk = zext nneg i32 %a to i64
%rr = zext i32 %ss to i64
%mm = or disjoint i32 %a, 0
%nn = or i32 %a, 0
+ %tuu = trunc nuw i32 %a to i16
+ %tss = trunc nsw i32 %a to i16
+ %tuss = trunc nuw nsw i32 %a to i16
+ %tt = trunc i32 %a to i16
+ %ttuv = trunc nuw <2 x i32> %aa to <2 x i16>
+ %ttsv = trunc nsw <2 x i32> %aa to <2 x i16>
+ %ttusv = trunc nuw nsw <2 x i32> %aa to <2 x i16>
+ %ttv = trunc <2 x i32> %aa to <2 x i16>
br label %second
}
diff --git a/llvm/test/Bitcode/thinlto-function-summary.ll b/llvm/test/Bitcode/thinlto-function-summary.ll
index 799759ebcac1..13c6611843d6 100644
--- a/llvm/test/Bitcode/thinlto-function-summary.ll
+++ b/llvm/test/Bitcode/thinlto-function-summary.ll
@@ -13,9 +13,9 @@
; "variadic"
; BC-NEXT: <FUNCTION op0=46 op1=8
; "llvm.va_start"
-; BC-NEXT: <FUNCTION op0=54 op1=13
+; BC-NEXT: <FUNCTION op0=54 op1=16
; "f"
-; BC-NEXT: <ALIAS op0=67 op1=1
+; BC-NEXT: <ALIAS op0=70 op1=1
; BC: <GLOBALVAL_SUMMARY_BLOCK
; BC-NEXT: <VERSION
; BC-NEXT: <FLAGS
@@ -26,7 +26,7 @@
; BC-NEXT: <ALIAS {{.*}} op0=6 op1=0 op2=3
; BC-NEXT: </GLOBALVAL_SUMMARY_BLOCK
; BC: <STRTAB_BLOCK
-; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicllvm.va_startf{{.*}}'
+; BC-NEXT: blob data = 'hfoobaranon.{{................................}}.0variadicllvm.va_start.p{{[0-9]+}}f{{.*}}'
; RUN: opt -passes=name-anon-globals -module-summary < %s | llvm-dis | FileCheck %s
diff --git a/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll b/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
index fad7b8ea6a58..fd3f500de791 100644
--- a/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
+++ b/llvm/test/Bitcode/variableArgumentIntrinsic.3.2.ll
@@ -10,7 +10,7 @@ define i32 @varArgIntrinsic(i32 %X, ...) {
%ap = alloca i8*
%ap2 = bitcast i8** %ap to i8*
-; CHECK: call void @llvm.va_start(ptr %ap2)
+; CHECK: call void @llvm.va_start.p0(ptr %ap2)
call void @llvm.va_start(i8* %ap2)
; CHECK-NEXT: %tmp = va_arg ptr %ap, i32
@@ -19,12 +19,12 @@ define i32 @varArgIntrinsic(i32 %X, ...) {
%aq = alloca i8*
%aq2 = bitcast i8** %aq to i8*
-; CHECK: call void @llvm.va_copy(ptr %aq2, ptr %ap2)
+; CHECK: call void @llvm.va_copy.p0(ptr %aq2, ptr %ap2)
call void @llvm.va_copy(i8* %aq2, i8* %ap2)
-; CHECK-NEXT: call void @llvm.va_end(ptr %aq2)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr %aq2)
call void @llvm.va_end(i8* %aq2)
-; CHECK-NEXT: call void @llvm.va_end(ptr %ap2)
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr %ap2)
call void @llvm.va_end(i8* %ap2)
ret i32 %tmp
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index 458c2cb76d9e..7163da0dc024 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -512,9 +512,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB6_2
; CHECK-NOLSE-O0-NEXT: LBB6_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB6_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-NOLSE-O0-NEXT: b LBB6_5
@@ -540,9 +540,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-OUTLINE-O0-NEXT: mvn w1, w8
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-OUTLINE-O0-NEXT: b LBB6_2
@@ -582,9 +582,9 @@ define i32 @fetch_and_nand(ptr %p) #0 {
; CHECK-LSE-O0-NEXT: mvn w10, w9
; CHECK-LSE-O0-NEXT: mov x9, x8
; CHECK-LSE-O0-NEXT: casl w9, w10, [x11]
-; CHECK-LSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: subs w8, w9, w8
; CHECK-LSE-O0-NEXT: cset w8, eq
+; CHECK-LSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-LSE-O0-NEXT: tbz w8, #0, LBB6_1
; CHECK-LSE-O0-NEXT: b LBB6_2
@@ -649,9 +649,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB7_2
; CHECK-NOLSE-O0-NEXT: LBB7_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB7_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-NOLSE-O0-NEXT: b LBB7_5
@@ -677,9 +677,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-OUTLINE-O0-NEXT: mvn x1, x8
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #8] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #8] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #24] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-OUTLINE-O0-NEXT: b LBB7_2
@@ -719,9 +719,9 @@ define i64 @fetch_and_nand_64(ptr %p) #0 {
; CHECK-LSE-O0-NEXT: mvn x10, x9
; CHECK-LSE-O0-NEXT: mov x9, x8
; CHECK-LSE-O0-NEXT: casal x9, x10, [x11]
-; CHECK-LSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: subs x8, x9, x8
; CHECK-LSE-O0-NEXT: cset w8, eq
+; CHECK-LSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-LSE-O0-NEXT: tbz w8, #0, LBB7_1
; CHECK-LSE-O0-NEXT: b LBB7_2
@@ -782,9 +782,9 @@ define i32 @fetch_and_or(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB8_2
; CHECK-NOLSE-O0-NEXT: LBB8_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB8_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB8_1
; CHECK-NOLSE-O0-NEXT: b LBB8_5
@@ -855,9 +855,9 @@ define i64 @fetch_and_or_64(ptr %p) #0 {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB9_2
; CHECK-NOLSE-O0-NEXT: LBB9_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB9_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp, #8] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB9_1
; CHECK-NOLSE-O0-NEXT: b LBB9_5
@@ -4005,9 +4005,9 @@ define i32 @atomicrmw_add_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB47_2
; CHECK-NOLSE-O0-NEXT: LBB47_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB47_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB47_1
; CHECK-NOLSE-O0-NEXT: b LBB47_5
@@ -4097,9 +4097,9 @@ define i32 @atomicrmw_xchg_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB48_2
; CHECK-NOLSE-O0-NEXT: LBB48_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB48_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB48_1
; CHECK-NOLSE-O0-NEXT: b LBB48_5
@@ -4190,9 +4190,9 @@ define i32 @atomicrmw_sub_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB49_2
; CHECK-NOLSE-O0-NEXT: LBB49_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB49_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB49_1
; CHECK-NOLSE-O0-NEXT: b LBB49_5
@@ -4287,9 +4287,9 @@ define i32 @atomicrmw_and_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB50_2
; CHECK-NOLSE-O0-NEXT: LBB50_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB50_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB50_1
; CHECK-NOLSE-O0-NEXT: b LBB50_5
@@ -4384,9 +4384,9 @@ define i32 @atomicrmw_or_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB51_2
; CHECK-NOLSE-O0-NEXT: LBB51_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB51_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB51_1
; CHECK-NOLSE-O0-NEXT: b LBB51_5
@@ -4477,9 +4477,9 @@ define i32 @atomicrmw_xor_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB52_2
; CHECK-NOLSE-O0-NEXT: LBB52_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB52_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB52_1
; CHECK-NOLSE-O0-NEXT: b LBB52_5
@@ -4572,9 +4572,9 @@ define i32 @atomicrmw_min_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB53_2
; CHECK-NOLSE-O0-NEXT: LBB53_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB53_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB53_1
; CHECK-NOLSE-O0-NEXT: b LBB53_5
@@ -4605,9 +4605,9 @@ define i32 @atomicrmw_min_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, le
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_acq
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB53_1
; CHECK-OUTLINE-O0-NEXT: b LBB53_2
@@ -4686,9 +4686,9 @@ define i32 @atomicrmw_max_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB54_2
; CHECK-NOLSE-O0-NEXT: LBB54_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB54_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB54_1
; CHECK-NOLSE-O0-NEXT: b LBB54_5
@@ -4719,9 +4719,9 @@ define i32 @atomicrmw_max_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, gt
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB54_1
; CHECK-OUTLINE-O0-NEXT: b LBB54_2
@@ -4800,9 +4800,9 @@ define i32 @atomicrmw_umin_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB55_2
; CHECK-NOLSE-O0-NEXT: LBB55_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB55_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB55_1
; CHECK-NOLSE-O0-NEXT: b LBB55_5
@@ -4833,9 +4833,9 @@ define i32 @atomicrmw_umin_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, ls
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB55_1
; CHECK-OUTLINE-O0-NEXT: b LBB55_2
@@ -4914,9 +4914,9 @@ define i32 @atomicrmw_umax_i32(ptr %ptr, i32 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB56_2
; CHECK-NOLSE-O0-NEXT: LBB56_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB56_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs w8, w9, w8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str w9, [sp, #12] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str w9, [sp, #28] ; 4-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB56_1
; CHECK-NOLSE-O0-NEXT: b LBB56_5
@@ -4947,9 +4947,9 @@ define i32 @atomicrmw_umax_i32(ptr %ptr, i32 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel w1, w0, w8, hi
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas4_relax
; CHECK-OUTLINE-O0-NEXT: ldr w8, [sp, #8] ; 4-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs w8, w0, w8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #12] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str w0, [sp, #28] ; 4-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB56_1
; CHECK-OUTLINE-O0-NEXT: b LBB56_2
@@ -5026,9 +5026,9 @@ define i64 @atomicrmw_add_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB57_2
; CHECK-NOLSE-O0-NEXT: LBB57_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB57_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB57_1
; CHECK-NOLSE-O0-NEXT: b LBB57_5
@@ -5117,9 +5117,9 @@ define i64 @atomicrmw_xchg_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB58_2
; CHECK-NOLSE-O0-NEXT: LBB58_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB58_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB58_1
; CHECK-NOLSE-O0-NEXT: b LBB58_5
@@ -5210,9 +5210,9 @@ define i64 @atomicrmw_sub_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB59_2
; CHECK-NOLSE-O0-NEXT: LBB59_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB59_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB59_1
; CHECK-NOLSE-O0-NEXT: b LBB59_5
@@ -5307,9 +5307,9 @@ define i64 @atomicrmw_and_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB60_2
; CHECK-NOLSE-O0-NEXT: LBB60_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB60_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB60_1
; CHECK-NOLSE-O0-NEXT: b LBB60_5
@@ -5404,9 +5404,9 @@ define i64 @atomicrmw_or_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB61_2
; CHECK-NOLSE-O0-NEXT: LBB61_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB61_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB61_1
; CHECK-NOLSE-O0-NEXT: b LBB61_5
@@ -5497,9 +5497,9 @@ define i64 @atomicrmw_xor_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB62_2
; CHECK-NOLSE-O0-NEXT: LBB62_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB62_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB62_1
; CHECK-NOLSE-O0-NEXT: b LBB62_5
@@ -5592,9 +5592,9 @@ define i64 @atomicrmw_min_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB63_2
; CHECK-NOLSE-O0-NEXT: LBB63_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB63_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB63_1
; CHECK-NOLSE-O0-NEXT: b LBB63_5
@@ -5625,9 +5625,9 @@ define i64 @atomicrmw_min_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, le
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB63_1
; CHECK-OUTLINE-O0-NEXT: b LBB63_2
@@ -5706,9 +5706,9 @@ define i64 @atomicrmw_max_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB64_2
; CHECK-NOLSE-O0-NEXT: LBB64_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB64_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB64_1
; CHECK-NOLSE-O0-NEXT: b LBB64_5
@@ -5739,9 +5739,9 @@ define i64 @atomicrmw_max_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, gt
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB64_1
; CHECK-OUTLINE-O0-NEXT: b LBB64_2
@@ -5820,9 +5820,9 @@ define i64 @atomicrmw_umin_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB65_2
; CHECK-NOLSE-O0-NEXT: LBB65_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB65_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB65_1
; CHECK-NOLSE-O0-NEXT: b LBB65_5
@@ -5853,9 +5853,9 @@ define i64 @atomicrmw_umin_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, ls
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_acq_rel
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB65_1
; CHECK-OUTLINE-O0-NEXT: b LBB65_2
@@ -5934,9 +5934,9 @@ define i64 @atomicrmw_umax_i64(ptr %ptr, i64 %rhs) {
; CHECK-NOLSE-O0-NEXT: cbnz w10, LBB66_2
; CHECK-NOLSE-O0-NEXT: LBB66_4: ; %atomicrmw.start
; CHECK-NOLSE-O0-NEXT: ; in Loop: Header=BB66_1 Depth=1
-; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: subs x8, x9, x8
; CHECK-NOLSE-O0-NEXT: cset w8, eq
+; CHECK-NOLSE-O0-NEXT: str x9, [sp] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: str x9, [sp, #24] ; 8-byte Folded Spill
; CHECK-NOLSE-O0-NEXT: tbz w8, #0, LBB66_1
; CHECK-NOLSE-O0-NEXT: b LBB66_5
@@ -5967,9 +5967,9 @@ define i64 @atomicrmw_umax_i64(ptr %ptr, i64 %rhs) {
; CHECK-OUTLINE-O0-NEXT: csel x1, x0, x8, hi
; CHECK-OUTLINE-O0-NEXT: bl ___aarch64_cas8_relax
; CHECK-OUTLINE-O0-NEXT: ldr x8, [sp, #8] ; 8-byte Folded Reload
-; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: subs x8, x0, x8
; CHECK-OUTLINE-O0-NEXT: cset w8, eq
+; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #16] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: str x0, [sp, #40] ; 8-byte Folded Spill
; CHECK-OUTLINE-O0-NEXT: tbz w8, #0, LBB66_1
; CHECK-OUTLINE-O0-NEXT: b LBB66_2
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
index 6fced31a622d..ec66892b98fc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-overflow.mir
@@ -92,3 +92,87 @@ body: |
$w1 = COPY %o_wide
RET_ReallyLR implicit $w0
...
+---
+name: add_multiuse
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_multiuse
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: %const:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w1 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w2 = COPY %const(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %const:_(s32) = G_CONSTANT i32 0
+ %add:_(s32), %o:_(s1) = G_SADDO %0, %const
+ %o_wide:_(s32) = G_ZEXT %o(s1)
+ $w0 = COPY %add(s32)
+ $w1 = COPY %add(s32)
+ $w2 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_vector
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_vector
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $w3
+ ; CHECK-NEXT: %bv0:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: %bv1:_(<4 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32), [[COPY2]](s32), [[COPY3]](s32)
+ ; CHECK-NEXT: %add:_(<4 x s32>), %o:_(<4 x s1>) = G_UADDO %bv0, %bv1
+ ; CHECK-NEXT: %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %add(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %o_wide(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %3:_(s32) = COPY $w3
+ %bv0:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32)
+ %bv1:_(<4 x s32>) = G_BUILD_VECTOR %2:_(s32), %3:_(s32), %2:_(s32), %3:_(s32)
+ %add:_(<4 x s32>), %o:_(<4 x s1>) = G_UADDO %bv0, %bv1
+ %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ $q0 = COPY %add(<4 x s32>)
+ $q1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
+---
+name: add_splat_vector
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_splat_vector
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %bv0:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: %o:_(<4 x s1>) = G_BUILD_VECTOR [[C]](s1), [[C]](s1), [[C]](s1), [[C]](s1)
+ ; CHECK-NEXT: %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ ; CHECK-NEXT: $q0 = COPY %bv0(<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY %o_wide(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = COPY $w1
+ %2:_(s32) = COPY $w2
+ %3:_(s32) = COPY $w3
+ %const:_(s32) = G_CONSTANT i32 0
+ %bv0:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32)
+ %bv1:_(<4 x s32>) = G_BUILD_VECTOR %const:_(s32), %const:_(s32), %const:_(s32), %const:_(s32)
+ %add:_(<4 x s32>), %o:_(<4 x s1>) = G_SADDO %bv0, %bv1
+ %o_wide:_(<4 x s32>) = G_ZEXT %o(<4 x s1>)
+ $q0 = COPY %add(<4 x s32>)
+ $q1 = COPY %o_wide
+ RET_ReallyLR implicit $w0
+...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
index 8aea944b55c2..ceef0c49a45e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
@@ -65,22 +65,17 @@ define <8 x i16> @combine_vec_udiv_nonuniform(<8 x i16> %x) {
; GISEL-NEXT: ushl v1.8h, v0.8h, v1.8h
; GISEL-NEXT: umull2 v3.4s, v1.8h, v2.8h
; GISEL-NEXT: umull v1.4s, v1.4h, v2.4h
-; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI1_1]
+; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI1_1]
; GISEL-NEXT: adrp x8, .LCPI1_0
-; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
-; GISEL-NEXT: umull2 v4.4s, v2.8h, v3.8h
-; GISEL-NEXT: umull v2.4s, v2.4h, v3.4h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI1_0]
-; GISEL-NEXT: adrp x8, .LCPI1_4
-; GISEL-NEXT: uzp2 v2.8h, v2.8h, v4.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI1_4]
-; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
-; GISEL-NEXT: neg v2.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
+; GISEL-NEXT: sub v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: umull2 v3.4s, v0.8h, v2.8h
+; GISEL-NEXT: umull v0.4s, v0.4h, v2.4h
+; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI1_0]
+; GISEL-NEXT: uzp2 v0.8h, v0.8h, v3.8h
+; GISEL-NEXT: add v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: neg v1.8h, v2.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 23, i16 34, i16 -23, i16 56, i16 128, i16 -1, i16 -256, i16 -32768>
ret <8 x i16> %1
@@ -107,21 +102,16 @@ define <8 x i16> @combine_vec_udiv_nonuniform2(<8 x i16> %x) {
; GISEL-NEXT: adrp x8, .LCPI2_2
; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_2]
; GISEL-NEXT: adrp x8, .LCPI2_1
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI2_1]
+; GISEL-NEXT: neg v1.8h, v1.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_1]
; GISEL-NEXT: adrp x8, .LCPI2_0
+; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
+; GISEL-NEXT: umull v0.4s, v0.4h, v1.4h
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI2_0]
; GISEL-NEXT: neg v1.8h, v1.8h
-; GISEL-NEXT: ushl v1.8h, v0.8h, v1.8h
-; GISEL-NEXT: umull2 v3.4s, v1.8h, v2.8h
-; GISEL-NEXT: umull v1.4s, v1.4h, v2.4h
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI2_0]
-; GISEL-NEXT: adrp x8, .LCPI2_3
-; GISEL-NEXT: neg v2.8h, v2.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI2_3]
-; GISEL-NEXT: uzp2 v1.8h, v1.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: uzp2 v0.8h, v0.8h, v2.8h
+; GISEL-NEXT: ushl v0.8h, v0.8h, v1.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 -34, i16 35, i16 36, i16 -37, i16 38, i16 -39, i16 40, i16 -41>
ret <8 x i16> %1
@@ -145,21 +135,16 @@ define <8 x i16> @combine_vec_udiv_nonuniform3(<8 x i16> %x) {
; GISEL-LABEL: combine_vec_udiv_nonuniform3:
; GISEL: // %bb.0:
; GISEL-NEXT: adrp x8, .LCPI3_1
-; GISEL-NEXT: movi v3.8h, #1
; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI3_1]
; GISEL-NEXT: adrp x8, .LCPI3_0
; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
; GISEL-NEXT: umull v1.4s, v0.4h, v1.4h
; GISEL-NEXT: uzp2 v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
-; GISEL-NEXT: usra v1.8h, v2.8h, #1
-; GISEL-NEXT: ldr q2, [x8, :lo12:.LCPI3_0]
-; GISEL-NEXT: adrp x8, .LCPI3_2
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI3_2]
-; GISEL-NEXT: neg v2.8h, v2.8h
-; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
-; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
+; GISEL-NEXT: sub v0.8h, v0.8h, v1.8h
+; GISEL-NEXT: usra v1.8h, v0.8h, #1
+; GISEL-NEXT: ldr q0, [x8, :lo12:.LCPI3_0]
+; GISEL-NEXT: neg v0.8h, v0.8h
+; GISEL-NEXT: ushl v0.8h, v1.8h, v0.8h
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %x, <i16 7, i16 23, i16 25, i16 27, i16 31, i16 47, i16 63, i16 127>
ret <8 x i16> %1
@@ -184,19 +169,19 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
;
; GISEL-LABEL: combine_vec_udiv_nonuniform4:
; GISEL: // %bb.0:
+; GISEL-NEXT: adrp x8, .LCPI4_2
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI4_2]
; GISEL-NEXT: adrp x8, .LCPI4_1
-; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI4_1]
+; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI4_1]
; GISEL-NEXT: adrp x8, .LCPI4_0
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI4_0]
-; GISEL-NEXT: adrp x8, .LCPI4_2
; GISEL-NEXT: umull2 v2.8h, v0.16b, v1.16b
; GISEL-NEXT: umull v1.8h, v0.8b, v1.8b
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI4_2]
+; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI4_0]
; GISEL-NEXT: uzp2 v1.16b, v1.16b, v2.16b
; GISEL-NEXT: neg v2.16b, v3.16b
-; GISEL-NEXT: movi v3.16b, #1
+; GISEL-NEXT: shl v3.16b, v4.16b, #7
; GISEL-NEXT: ushl v1.16b, v1.16b, v2.16b
-; GISEL-NEXT: cmeq v2.16b, v4.16b, v3.16b
+; GISEL-NEXT: sshr v2.16b, v3.16b, #7
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%div = udiv <16 x i8> %x, <i8 -64, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -232,10 +217,10 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
;
; GISEL-LABEL: pr38477:
; GISEL: // %bb.0:
+; GISEL-NEXT: adrp x8, .LCPI5_3
+; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI5_3]
; GISEL-NEXT: adrp x8, .LCPI5_2
-; GISEL-NEXT: ldr q1, [x8, :lo12:.LCPI5_2]
-; GISEL-NEXT: adrp x8, .LCPI5_1
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_1]
+; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_2]
; GISEL-NEXT: adrp x8, .LCPI5_0
; GISEL-NEXT: umull2 v2.4s, v0.8h, v1.8h
; GISEL-NEXT: umull v1.4s, v0.4h, v1.4h
@@ -243,15 +228,16 @@ define <8 x i16> @pr38477(<8 x i16> %a0) {
; GISEL-NEXT: sub v2.8h, v0.8h, v1.8h
; GISEL-NEXT: umull2 v4.4s, v2.8h, v3.8h
; GISEL-NEXT: umull v2.4s, v2.4h, v3.4h
-; GISEL-NEXT: ldr q3, [x8, :lo12:.LCPI5_0]
-; GISEL-NEXT: adrp x8, .LCPI5_3
+; GISEL-NEXT: ldr d3, [x8, :lo12:.LCPI5_0]
+; GISEL-NEXT: adrp x8, .LCPI5_1
+; GISEL-NEXT: ushll v3.8h, v3.8b, #0
; GISEL-NEXT: uzp2 v2.8h, v2.8h, v4.8h
-; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI5_3]
+; GISEL-NEXT: ldr q4, [x8, :lo12:.LCPI5_1]
+; GISEL-NEXT: shl v3.8h, v3.8h, #15
; GISEL-NEXT: add v1.8h, v2.8h, v1.8h
-; GISEL-NEXT: neg v2.8h, v3.8h
-; GISEL-NEXT: movi v3.8h, #1
+; GISEL-NEXT: neg v2.8h, v4.8h
; GISEL-NEXT: ushl v1.8h, v1.8h, v2.8h
-; GISEL-NEXT: cmeq v2.8h, v4.8h, v3.8h
+; GISEL-NEXT: sshr v2.8h, v3.8h, #15
; GISEL-NEXT: bif v0.16b, v1.16b, v2.16b
; GISEL-NEXT: ret
%1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
index ee33b9c50cbe..02233b9f498b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
@@ -6,7 +6,9 @@ body: |
bb.1:
liveins: $w0
; CHECK-LABEL: name: udiv_by_scalar_const
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 818089009
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
@@ -68,44 +70,32 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 23
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 34
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -23
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 56
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 128
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -256
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3855
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 8195
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 512
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32767
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32639
- ; CHECK-NEXT: [[C21:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C15]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C11]](s16), [[C13]](s16), [[C16]](s16), [[C17]](s16), [[C18]](s16), [[C20]](s16), [[C21]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C7]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C10]](s16), [[C12]](s16), [[C14]](s16), [[C8]](s16), [[C8]](s16), [[C19]](s16), [[C19]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR1]](<8 x s16>)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR2]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3855
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 8195
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 512
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32767
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+ ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32639
+ ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C8]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C2]](s16), [[C4]](s16), [[C6]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16), [[C13]](s16), [[C14]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C5]](s16), [[C7]](s16), [[C1]](s16), [[C1]](s16), [[C12]](s16), [[C12]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<8 x s16>)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR3]]
+ ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR2]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UMULH1]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR4]](<8 x s16>)
- ; CHECK-NEXT: [[C22:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16), [[C22]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR5]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR3]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 23
@@ -136,38 +126,26 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -34
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 35
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 36
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -37
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 38
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -39
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 40
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -41
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 16393
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -5617
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 -7281
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32749
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 -10347
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 8197
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13107
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32747
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16), [[C10]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C12]](s16), [[C14]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16), [[C19]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C11]](s16), [[C13]](s16), [[C13]](s16), [[C16]](s16), [[C13]](s16), [[C11]](s16), [[C13]](s16), [[C16]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR1]](<8 x s16>)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR2]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[UMULH]], [[BUILD_VECTOR3]](<8 x s16>)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR4]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 16393
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 13
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -5617
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -7281
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32749
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 15
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 -10347
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 8197
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 -13107
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32747
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16), [[C2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C4]](s16), [[C6]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C3]](s16), [[C5]](s16), [[C5]](s16), [[C8]](s16), [[C5]](s16), [[C3]](s16), [[C5]](s16), [[C8]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<8 x s16>)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[LSHR]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[UMULH]], [[BUILD_VECTOR2]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 -34
@@ -198,39 +176,28 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 7
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 23
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 27
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 31
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 47
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 63
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 127
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 18351
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 12137
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 23705
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 1041
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 517
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C12]](s16), [[C13]](s16), [[C14]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C9]](s16), [[C11]](s16), [[C11]](s16), [[C11]](s16), [[C11]](s16), [[C16]](s16), [[C16]](s16), [[C19]](s16)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9363
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 25645
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 18351
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 12137
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 23705
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 5
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 1041
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 517
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C3]](s16), [[C8]](s16), [[C8]](s16), [[C11]](s16)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[SUB]], [[BUILD_VECTOR3]](<8 x s16>)
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[SUB]], [[BUILD_VECTOR2]](<8 x s16>)
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[LSHR]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR2]](<8 x s16>)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR3]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR1]]
- ; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR1]](<8 x s16>)
+ ; CHECK-NEXT: $q0 = COPY [[LSHR1]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%2:_(s16) = G_CONSTANT i16 7
@@ -261,19 +228,17 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -64
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s8) = G_CONSTANT i8 -85
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C3]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C4]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<16 x s8>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<16 x s8>) = G_LSHR [[UMULH]], [[BUILD_VECTOR2]](<16 x s8>)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<16 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<16 x s8>), [[BUILD_VECTOR3]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<16 x s8>) = G_SELECT [[ICMP]](<16 x s1>), [[COPY]], [[LSHR]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 -85
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<16 x s8>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<16 x s8>) = G_LSHR [[UMULH]], [[BUILD_VECTOR1]](<16 x s8>)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s1>) = G_BUILD_VECTOR [[C3]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1), [[C4]](s1)
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<16 x s8>) = G_SELECT [[BUILD_VECTOR2]](<16 x s1>), [[COPY]], [[LSHR]]
; CHECK-NEXT: $q0 = COPY [[SELECT]](<16 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<16 x s8>) = COPY $q0
@@ -299,39 +264,31 @@ body: |
; CHECK: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 119
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 73
- ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -111
- ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -3
- ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 118
- ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 32
- ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 31
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C2]](s16), [[C3]](s16), [[C4]](s16), [[C5]](s16), [[C6]](s16), [[C7]](s16)
- ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 4957
- ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
- ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
- ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 -8079
- ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s16) = G_CONSTANT i16 4103
- ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
- ; CHECK-NEXT: [[C15:%[0-9]+]]:_(s16) = G_CONSTANT i16 16385
- ; CHECK-NEXT: [[C16:%[0-9]+]]:_(s16) = G_CONSTANT i16 14
- ; CHECK-NEXT: [[C17:%[0-9]+]]:_(s16) = G_CONSTANT i16 -29991
- ; CHECK-NEXT: [[C18:%[0-9]+]]:_(s16) = G_CONSTANT i16 2048
- ; CHECK-NEXT: [[C19:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
- ; CHECK-NEXT: [[C20:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C9]](s16), [[C12]](s16), [[C13]](s16), [[C15]](s16), [[C17]](s16), [[C18]](s16), [[C19]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C10]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C8]](s16), [[C10]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C8]](s16), [[C11]](s16), [[C11]](s16), [[C14]](s16), [[C16]](s16), [[C11]](s16), [[C8]](s16), [[C20]](s16)
- ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 4957
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 6
+ ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -8079
+ ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 4103
+ ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 12
+ ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 16385
+ ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 14
+ ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s16) = G_CONSTANT i16 -29991
+ ; CHECK-NEXT: [[C10:%[0-9]+]]:_(s16) = G_CONSTANT i16 2048
+ ; CHECK-NEXT: [[C11:%[0-9]+]]:_(s16) = G_CONSTANT i16 2115
+ ; CHECK-NEXT: [[C12:%[0-9]+]]:_(s16) = G_CONSTANT i16 4
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C1]](s16), [[C4]](s16), [[C5]](s16), [[C7]](s16), [[C9]](s16), [[C10]](s16), [[C11]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C2]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C3]](s16), [[C3]](s16), [[C6]](s16), [[C8]](s16), [[C3]](s16), [[C]](s16), [[C12]](s16)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[COPY]], [[BUILD_VECTOR]]
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<8 x s16>) = G_SUB [[COPY]], [[UMULH]]
- ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR2]]
+ ; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(<8 x s16>) = G_UMULH [[SUB]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UMULH1]], [[UMULH]]
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR3]](<8 x s16>)
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16), [[C]](s16)
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<8 x s1>) = G_ICMP intpred(eq), [[BUILD_VECTOR]](<8 x s16>), [[BUILD_VECTOR4]]
- ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[ICMP]](<8 x s1>), [[COPY]], [[LSHR]]
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<8 x s16>) = G_LSHR [[ADD]], [[BUILD_VECTOR2]](<8 x s16>)
+ ; CHECK-NEXT: [[C13:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK-NEXT: [[C14:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s1>) = G_BUILD_VECTOR [[C13]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1), [[C14]](s1)
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<8 x s16>) = G_SELECT [[BUILD_VECTOR3]](<8 x s1>), [[COPY]], [[LSHR]]
; CHECK-NEXT: $q0 = COPY [[SELECT]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll
new file mode 100644
index 000000000000..32c742316b87
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-nneg-disjoint.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
+
+define i32 @call_nneg(i16 %a) {
+ ; CHECK-LABEL: name: call_nneg
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: %2:_(s32) = nneg G_ZEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: $w0 = COPY %2(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = zext nneg i16 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_not_nneg(i16 %a) {
+ ; CHECK-LABEL: name: call_not_nneg
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = zext i16 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_disjoint(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_disjoint
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: %2:_(s32) = disjoint G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY %2(s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = or disjoint i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @call_add(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_add
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = nsw G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = add nsw i32 %a, %b
+ ret i32 %result
+}
+
+define i32 @call_not_disjoint(i32 %a, i32 %b) {
+ ; CHECK-LABEL: name: call_not_disjoint
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $w0, $w1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $w0 = COPY [[OR]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = or i32 %a, %b
+ ret i32 %result
+}
+
+define <2 x i64> @call_not_disjoint_vector(<2 x i64> %a, <2 x i64> %b) {
+ ; CHECK-LABEL: name: call_not_disjoint_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $q0 = COPY [[OR]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = or <2 x i64> %a, %b
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_disjoint_vector(<2 x i64> %a, <2 x i64> %b) {
+ ; CHECK-LABEL: name: call_disjoint_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0, $q1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+ ; CHECK-NEXT: %2:_(<2 x s64>) = disjoint G_OR [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: $q0 = COPY %2(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = or disjoint <2 x i64> %a, %b
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_nneg_vector(<2 x i32> %a) {
+ ; CHECK-LABEL: name: call_nneg_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: %1:_(<2 x s64>) = nneg G_ZEXT [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY %1(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = zext nneg <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_not_nneg_vector(<2 x i32> %a) {
+ ; CHECK-LABEL: name: call_not_nneg_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<2 x s64>) = G_ZEXT [[COPY]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[ZEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %result = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll
new file mode 100644
index 000000000000..d87e9c4b1855
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-trunc.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu -global-isel -stop-after=irtranslator %s -o - | FileCheck %s
+
+define i32 @call_trunc_no_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_no_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_nsw_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_nsw_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nsw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nsw i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_nuw_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_nuw_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nuw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nuw i64 %a to i32
+ ret i32 %result
+}
+
+define i32 @call_trunc_all_flags(i64 %a) {
+ ; CHECK-LABEL: name: call_trunc_all_flags
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = nuw nsw G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+entry:
+ %result = trunc nsw nuw i64 %a to i32
+ ret i32 %result
+}
+
+define <2 x i64> @call_trunc_noop_signed_vector(<2 x i64> %a) {
+ ; CHECK-LABEL: name: call_trunc_noop_signed_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = nsw G_TRUNC [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[TRUNC]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[SEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %truncate = trunc nsw <2 x i64> %a to <2 x i32>
+ %result = sext <2 x i32> %truncate to <2 x i64>
+ ret <2 x i64> %result
+}
+
+define <2 x i64> @call_trunc_noop_unsigned_vector(<2 x i64> %a) {
+ ; CHECK-LABEL: name: call_trunc_noop_unsigned_vector
+ ; CHECK: bb.1.entry:
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s32>) = nuw G_TRUNC [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<2 x s64>) = G_ZEXT [[TRUNC]](<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY [[ZEXT]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+entry:
+ %truncate = trunc nuw <2 x i64> %a to <2 x i32>
+ %result = zext <2 x i32> %truncate to <2 x i64>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
index fe9427d2678a..edae903fae84 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unreachable.ll
@@ -6,7 +6,7 @@ declare void @llvm.trap()
define void @unreachable() {
; CHECK-LABEL: name: unreachable
; CHECK: bb.1 (%ir-block.0):
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
unreachable
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
index 3123e304116f..0d429ae38402 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-abs.mir
@@ -8,11 +8,12 @@ body: |
bb.0:
; CHECK-LABEL: name: abs_s32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ASHR]]
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR]]
- ; CHECK-NEXT: $w0 = COPY [[XOR]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[COPY]], [[SUB]]
+ ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
+ ;
; CHECK-CSSC-LABEL: name: abs_s32
; CHECK-CSSC: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-CSSC-NEXT: [[ABS:%[0-9]+]]:_(s32) = G_ABS [[COPY]]
@@ -28,11 +29,12 @@ body: |
bb.0:
; CHECK-LABEL: name: abs_s64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s64)
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[ASHR]]
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR]]
- ; CHECK-NEXT: $x0 = COPY [[XOR]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY]]
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s32), [[COPY]], [[SUB]]
+ ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
+ ;
; CHECK-CSSC-LABEL: name: abs_s64
; CHECK-CSSC: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-CSSC-NEXT: [[ABS:%[0-9]+]]:_(s64) = G_ABS [[COPY]]
@@ -55,6 +57,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<4 x s16>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s16
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -82,6 +85,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<8 x s16>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v8s16
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
@@ -109,6 +113,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<2 x s32>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v2s32
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -136,6 +141,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<4 x s32>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s32
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
@@ -163,6 +169,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<8 x s8>) = G_ABS [[COPY]]
; CHECK-NEXT: $d0 = COPY [[ABS]](<8 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
+ ;
; CHECK-CSSC-LABEL: name: abs_v4s8
; CHECK-CSSC: liveins: $d0
; CHECK-CSSC-NEXT: {{ $}}
@@ -190,6 +197,7 @@ body: |
; CHECK-NEXT: [[ABS:%[0-9]+]]:_(<16 x s8>) = G_ABS [[COPY]]
; CHECK-NEXT: $q0 = COPY [[ABS]](<16 x s8>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
+ ;
; CHECK-CSSC-LABEL: name: abs_v16s8
; CHECK-CSSC: liveins: $q0
; CHECK-CSSC-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
index c9556e27c634..a63d8b9c1377 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
@@ -121,10 +121,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY $h1
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
- ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[ANYEXT]](<4 x s32>)
+ ; CHECK-NEXT: $d0 = COPY [[UV]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s16) = COPY $h0
%1:_(s16) = COPY $h1
@@ -141,8 +142,8 @@ body: |
; CHECK-LABEL: name: widen_v2s8
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
- ; CHECK-NEXT: %3:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[DEF]](s32)
- ; CHECK-NEXT: $d0 = COPY %3(<2 x s32>)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s8) = G_IMPLICIT_DEF
%1:_(s8) = G_IMPLICIT_DEF
@@ -157,12 +158,14 @@ name: widen_v4s8
body: |
bb.0:
; CHECK-LABEL: name: widen_v4s8
- ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[DEF]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[COPY2]](s16), [[DEF]](s16)
- ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF1]](s8), [[DEF2]](s8), [[DEF3]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
+ ; CHECK-NEXT: $d0 = COPY [[UV]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR
%0:_(s8) = G_IMPLICIT_DEF
%1:_(s8) = G_IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
index 6a6e0b63b103..26230efbbe86 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-128.mir
@@ -12,22 +12,6 @@ body: |
liveins: $x0, $x1, $x2, $x3, $x4
- ; CHECK-LABEL: name: compare_swap_128
- ; CHECK: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
- ; CHECK: [[COPY:%[0-9]+]]:gpr64(p0) = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
- ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
- ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
- ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
- ; CHECK: [[COPY5:%[0-9]+]]:gpr64(s64) = COPY [[COPY1]](s64)
- ; CHECK: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
- ; CHECK: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
- ; CHECK: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
- ; CHECK: early-clobber %13:gpr64(s64), early-clobber %14:gpr64(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire 16)
- ; CHECK: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
- ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
- ; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16)
- ; CHECK: RET_ReallyLR
; CHECK-NOLSE-LABEL: name: compare_swap_128
; CHECK-NOLSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
; CHECK-NOLSE-NEXT: {{ $}}
@@ -40,11 +24,13 @@ body: |
; CHECK-NOLSE-NEXT: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
; CHECK-NOLSE-NEXT: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
; CHECK-NOLSE-NEXT: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
- ; CHECK-NOLSE-NEXT: early-clobber %13:gpr64common(s64), early-clobber %14:gpr64common(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
- ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
- ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
- ; CHECK-NOLSE-NEXT: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
+ ; CHECK-NOLSE-NEXT: early-clobber %14:gpr64common(s64), early-clobber %15:gpr64common(s64), early-clobber %17:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
+ ; CHECK-NOLSE-NEXT: [[COPY9:%[0-9]+]]:gpr64 = COPY %17
+ ; CHECK-NOLSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %14(s64), %15(s64)
+ ; CHECK-NOLSE-NEXT: [[COPY10:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
+ ; CHECK-NOLSE-NEXT: G_STORE [[COPY10]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-NOLSE-NEXT: RET_ReallyLR
+ ;
; CHECK-LSE-LABEL: name: compare_swap_128
; CHECK-LSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
; CHECK-LSE-NEXT: {{ $}}
@@ -59,7 +45,8 @@ body: |
; CHECK-LSE-NEXT: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 0
; CHECK-LSE-NEXT: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 64
; CHECK-LSE-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[EXTRACT]](s64), [[EXTRACT1]](s64)
- ; CHECK-LSE-NEXT: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
+ ; CHECK-LSE-NEXT: [[COPY5:%[0-9]+]]:_(s128) = COPY [[MV]](s128)
+ ; CHECK-LSE-NEXT: G_STORE [[COPY5]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-LSE-NEXT: RET_ReallyLR
%0:_(p0) = COPY $x0
%3:_(s64) = COPY $x1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
index 3c010789a2b7..05e6212af062 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
@@ -16,13 +16,16 @@ body: |
liveins: $x0
; CHECK-LABEL: name: cmpxchg_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[C]]
- ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[ATOMIC_CMPXCHG]], [[ICMP]]
- ; CHECK: $w0 = COPY [[MUL]](s32)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[C]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[ICMP]]
+ ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_CONSTANT i32 1
@@ -40,14 +43,17 @@ body: |
liveins: $x0
; CHECK-LABEL: name: cmpxchg_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[C]]
- ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
- ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ATOMIC_CMPXCHG]], [[ANYEXT]]
- ; CHECK: $x0 = COPY [[MUL]](s64)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[C]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[ANYEXT]]
+ ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
%0:_(p0) = COPY $x0
%1:_(s64) = G_CONSTANT i64 0
%2:_(s64) = G_CONSTANT i64 1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
index d2352be81503..27f2f0bafa95 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ctpop-no-implicit-float.mir
@@ -37,6 +37,7 @@ body: |
; CHECK-NEXT: %ctpop:_(s32) = G_LSHR [[MUL]], [[C7]](s64)
; CHECK-NEXT: $w0 = COPY %ctpop(s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
+ ;
; CHECK-CSSC-LABEL: name: s32
; CHECK-CSSC: liveins: $w0
; CHECK-CSSC-NEXT: {{ $}}
@@ -77,11 +78,12 @@ body: |
; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C5]]
; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; CHECK-NEXT: %ctpop:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
; CHECK-NEXT: $x0 = COPY %ctpop(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
+ ;
; CHECK-CSSC-LABEL: name: s64
; CHECK-CSSC: liveins: $x0
; CHECK-CSSC-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index 5662de4cbdca..f7550ceb2799 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -48,7 +48,7 @@ define void @bar() personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: $x0 = COPY [[LOAD]](p0)
; CHECK-NEXT: BL @_Unwind_Resume, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
%exn.slot = alloca ptr
%ehselector.slot = alloca i32
%1 = invoke i32 @foo(i32 42) to label %continue unwind label %cleanup
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
index e12353c7ef5b..d3db2432e84c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir
@@ -235,31 +235,32 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[DEF2]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[BUILD_VECTOR]], [[C2]](s16), [[C1]](s64)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[UV]], [[C2]](s16), [[C1]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
+ ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<16 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 16, 16, 16, 1, 16, 16, 16, 2, 16, 16, 16, undef, undef, undef, undef)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[SHUF]](<16 x s8>)
; CHECK-NEXT: [[UITOFP:%[0-9]+]]:_(<4 x s32>) = G_UITOFP [[BITCAST]](<4 x s32>)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
- ; CHECK-NEXT: G_STORE [[UV4]](s32), [[COPY]](p0) :: (store (s32), align 16)
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[UV6]](s32), [[COPY]](p0) :: (store (s32), align 16)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; CHECK-NEXT: G_STORE [[UV5]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
+ ; CHECK-NEXT: G_STORE [[UV7]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4)
; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
- ; CHECK-NEXT: G_STORE [[UV6]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
+ ; CHECK-NEXT: G_STORE [[UV8]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8)
; CHECK-NEXT: G_BR %bb.1
bb.1:
liveins: $w1, $w2, $w3, $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
index 63a26dcfea47..e49a94c12ed4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
@@ -293,41 +293,44 @@ body: |
; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), %w0(s32), [[C]]
; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ICMP2]], 1
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[DEF1]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY]](s16), [[COPY1]](s16), [[COPY2]](s16), [[DEF1]](s16)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SEXT_INREG]](s32)
- ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[BUILD_VECTOR]], [[TRUNC]](s16), [[C1]](s64)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s16)
- ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
- ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8)
- ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8), [[DEF2]](s8)
+ ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s16>) = G_INSERT_VECTOR_ELT [[UV]], [[TRUNC]](s16), [[C1]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[IVEC]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s16)
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[UV3]](s16)
+ ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[UV4]](s16)
+ ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[UV5]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<8 x s8>) = G_SHUFFLE_VECTOR [[BUILD_VECTOR1]](<8 x s8>), [[BUILD_VECTOR2]], shufflemask(0, 0, 0, 0, undef, undef, undef, undef)
- ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(<4 x s8>), [[UV5:%[0-9]+]]:_(<4 x s8>) = G_UNMERGE_VALUES [[SHUF]](<8 x s8>)
- ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s16) = COPY [[C2]](s16)
- ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[C2]](s16)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[UV4]](<4 x s8>)
- ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[ANYEXT]], [[BUILD_VECTOR3]]
+ ; CHECK-NEXT: [[UV6:%[0-9]+]]:_(<4 x s8>), [[UV7:%[0-9]+]]:_(<4 x s8>) = G_UNMERGE_VALUES [[SHUF]](<8 x s8>)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; CHECK-NEXT: [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UV6]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[UV8]](s8), [[UV9]](s8), [[UV10]](s8), [[UV11]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR3]](<8 x s8>)
+ ; CHECK-NEXT: [[UV12:%[0-9]+]]:_(<4 x s16>), [[UV13:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT1]](<8 x s16>)
+ ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR4]](<8 x s8>)
+ ; CHECK-NEXT: [[UV14:%[0-9]+]]:_(<4 x s16>), [[UV15:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT2]](<8 x s16>)
+ ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[UV12]], [[UV14]]
; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[ICMP]](<4 x s32>)
- ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[UV4]](<4 x s8>)
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC5]], [[ANYEXT1]]
+ ; CHECK-NEXT: [[UV16:%[0-9]+]]:_(s8), [[UV17:%[0-9]+]]:_(s8), [[UV18:%[0-9]+]]:_(s8), [[UV19:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UV6]](<4 x s8>)
+ ; CHECK-NEXT: [[BUILD_VECTOR5:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[UV16]](s8), [[UV17]](s8), [[UV18]](s8), [[UV19]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8), [[DEF1]](s8)
+ ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR5]](<8 x s8>)
+ ; CHECK-NEXT: [[UV20:%[0-9]+]]:_(<4 x s16>), [[UV21:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT3]](<8 x s16>)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC5]], [[UV20]]
; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[ICMP1]](<4 x s32>)
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s16>) = G_AND [[TRUNC6]], [[XOR]]
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[AND]], [[AND1]]
- ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[OR]](<4 x s16>)
+ ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[OR]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; CHECK-NEXT: [[BUILD_VECTOR4:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
- ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_AND [[ANYEXT2]], [[BUILD_VECTOR4]]
+ ; CHECK-NEXT: [[BUILD_VECTOR6:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
+ ; CHECK-NEXT: %zext_select:_(<4 x s32>) = G_AND [[ANYEXT4]], [[BUILD_VECTOR6]]
; CHECK-NEXT: $q0 = COPY %zext_select(<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
%w0:_(s32) = COPY $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
index 42a8f51002f2..f7efaeaa5070 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll
@@ -1,16 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel-abort=2 -global-isel -o - %s | FileCheck %s
+; RUN: llc -global-isel -o - %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-macosx11.0.0"
declare i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32>) #0
-; This test currently falls back but ensures we don't crash.
-
define i32 @bar() {
; CHECK-LABEL: bar:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: movi.2d v0, #0000000000000000
+; CHECK-NEXT: mov b1, v0[1]
+; CHECK-NEXT: mov b2, v0[2]
+; CHECK-NEXT: mov b3, v0[3]
+; CHECK-NEXT: mov.h v0[1], v1[0]
+; CHECK-NEXT: mov.h v2[1], v3[0]
+; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: ushll.4s v1, v2, #0
+; CHECK-NEXT: mov.d v0[1], v1[0]
+; CHECK-NEXT: movi.4s v1, #1
+; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: addv.4s s0, v0
; CHECK-NEXT: fmov w0, s0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
index ed40a2ff7ea7..e729f027baa7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xtn.mir
@@ -541,17 +541,13 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[DEF]](s32), [[DEF]](s32)
- ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR1]](<4 x s32>)
- ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[TRUNC]](<4 x s16>), [[TRUNC1]](<4 x s16>)
- ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(<8 x s8>) = G_TRUNC [[CONCAT_VECTORS]](<8 x s16>)
- ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(<2 x s8>), [[UV3:%[0-9]+]]:_(<2 x s8>), [[UV4:%[0-9]+]]:_(<2 x s8>), [[UV5:%[0-9]+]]:_(<2 x s8>) = G_UNMERGE_VALUES [[TRUNC2]](<8 x s8>)
- ; CHECK-NEXT: [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x s8>) = G_CONCAT_VECTORS [[UV2]](<2 x s8>), [[UV2]](<2 x s8>)
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[CONCAT_VECTORS1]](<4 x s8>)
- ; CHECK-NEXT: $d0 = COPY [[ANYEXT]](<4 x s16>)
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[UV1]](s32)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s8>) = G_BUILD_VECTOR [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC]](s8), [[TRUNC1]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<8 x s16>) = G_ANYEXT [[BUILD_VECTOR]](<8 x s8>)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(<4 x s16>), [[UV3:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[ANYEXT]](<8 x s16>)
+ ; CHECK-NEXT: $d0 = COPY [[UV2]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:_(<2 x s32>) = COPY $d0
%1:_(<2 x s8>) = G_TRUNC %0(<2 x s32>)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index c9e5f8924f8a..ac3c47c8001d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -752,6 +752,15 @@
# DEBUG-NEXT: G_BZERO (opcode {{[0-9]+}}): 2 type indices, 1 imm index
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: G_TRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_DEBUGTRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: G_UBSANTRAP (opcode {{[0-9]+}}): 0 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_VECREDUCE_SEQ_FADD (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 0cf9602adbb0..499c08fa4966 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -40,11 +40,12 @@ body: |
; CHECK-LABEL: name: ldrxrox_breg_oreg
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $x0 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x0 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -65,11 +66,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrdrox_breg_oreg
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d0 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d0 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -78,6 +80,9 @@ body: |
RET_ReallyLR implicit $d0
...
---
+# This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
+# the G_LOAD
+
name: more_than_one_use
alignment: 4
legalized: true
@@ -87,18 +92,17 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1
- ; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
- ; the G_LOAD
; CHECK-LABEL: name: more_than_one_use
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
- ; CHECK: $x0 = COPY [[ADDXrr1]]
- ; CHECK: RET_ReallyLR implicit $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXrr1]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -121,11 +125,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_shl
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -148,11 +153,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_shl
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -175,11 +181,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_rhs
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
@@ -202,11 +209,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_rhs
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %0, %1(s64)
@@ -229,11 +237,12 @@ body: |
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: ldrxrox_mul_lhs
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $x2 = COPY [[LDRXroX]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -256,11 +265,12 @@ body: |
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: ldrdrox_mul_lhs
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 8
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -272,6 +282,9 @@ body: |
...
---
+# Show that we don't get a shifted load from a mul when we don't have a
+# power of 2. (The bit isn't set on the load.)
+
name: mul_not_pow_2
alignment: 4
legalized: true
@@ -280,19 +293,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't get a shifted load from a mul when we don't have a
- ; power of 2. (The bit isn't set on the load.)
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_not_pow_2
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
- ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
- ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+ ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 7
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -304,6 +316,9 @@ body: |
...
---
+# Show that we don't get a shifted load from a mul when we don't have
+# the right power of 2. (The bit isn't set on the load.)
+
name: mul_wrong_pow_2
alignment: 4
legalized: true
@@ -312,19 +327,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't get a shifted load from a mul when we don't have
- ; the right power of 2. (The bit isn't set on the load.)
liveins: $x0, $x1, $d2
; CHECK-LABEL: name: mul_wrong_pow_2
; CHECK: liveins: $x0, $x1, $d2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
- ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
- ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: $d2 = COPY [[LDRDroX]]
- ; CHECK: RET_ReallyLR implicit $d2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+ ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 16
%2:gpr(s64) = G_MUL %1, %0(s64)
@@ -336,6 +350,9 @@ body: |
...
---
+# Show that we can still fall back to the register-register addressing
+# mode when we fail to pull in the shift.
+
name: more_than_one_use_shl_1
alignment: 4
legalized: true
@@ -344,19 +361,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we can still fall back to the register-register addressing
- ; mode when we fail to pull in the shift.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_1
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -370,6 +386,9 @@ body: |
...
---
+# Show that when the GEP is used outside a memory op, we don't do any
+# folding at all.
+
name: more_than_one_use_shl_2
alignment: 4
legalized: true
@@ -378,22 +397,21 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when the GEP is used outside a memory op, we don't do any
- ; folding at all.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_2
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
- ; CHECK: $x2 = COPY [[ADDXrr2]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -409,6 +427,9 @@ body: |
...
---
+# Show that when we have a fastpath for shift-left, we perform the folding
+# if it has more than one use.
+
name: more_than_one_use_shl_lsl_fast
alignment: 4
legalized: true
@@ -417,18 +438,17 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when we have a fastpath for shift-left, we perform the folding
- ; if it has more than one use.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -442,6 +462,9 @@ body: |
...
---
+# Show that we don't fold into multiple memory ops when we don't have a
+# fastpath for shift-left.
+
name: more_than_one_use_shl_lsl_slow
alignment: 4
legalized: true
@@ -450,19 +473,18 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that we don't fold into multiple memory ops when we don't have a
- ; fastpath for shift-left.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
- ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
- ; CHECK: $x2 = COPY [[ADDXrr]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
+ ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -476,6 +498,9 @@ body: |
...
---
+# Show that when we're optimizing for size, we'll do the folding no matter
+# what.
+
name: more_than_one_use_shl_minsize
alignment: 4
legalized: true
@@ -484,22 +509,21 @@ tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
- ; Show that when we're optimizing for size, we'll do the folding no matter
- ; what.
liveins: $x0, $x1, $x2
; CHECK-LABEL: name: more_than_one_use_shl_minsize
; CHECK: liveins: $x0, $x1, $x2
- ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
- ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
- ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
- ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
- ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
- ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
- ; CHECK: $x2 = COPY [[ADDXrr1]]
- ; CHECK: RET_ReallyLR implicit $x2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
+ ; CHECK-NEXT: $x2 = COPY [[ADDXrr1]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 3
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -525,11 +549,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldrwrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
- ; CHECK: $w2 = COPY [[LDRWroX]]
- ; CHECK: RET_ReallyLR implicit $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $w2 = COPY [[LDRWroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -549,11 +574,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrsrox
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
- ; CHECK: $s2 = COPY [[LDRSroX]]
- ; CHECK: RET_ReallyLR implicit $h2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $s2 = COPY [[LDRSroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -573,11 +599,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldrhrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
- ; CHECK: $h2 = COPY [[LDRHroX]]
- ; CHECK: RET_ReallyLR implicit $h2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
+ ; CHECK-NEXT: $h2 = COPY [[LDRHroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -597,11 +624,12 @@ body: |
liveins: $x0, $x1
; CHECK-LABEL: name: ldbbrox
; CHECK: liveins: $x0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
- ; CHECK: $w2 = COPY [[LDRBBroX]]
- ; CHECK: RET_ReallyLR implicit $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
+ ; CHECK-NEXT: $w2 = COPY [[LDRBBroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
@@ -621,11 +649,12 @@ body: |
liveins: $d0, $x1
; CHECK-LABEL: name: ldrqrox
; CHECK: liveins: $d0, $x1
- ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
- ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
- ; CHECK: $q0 = COPY [[LDRQroX]]
- ; CHECK: RET_ReallyLR implicit $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
+ ; CHECK-NEXT: $q0 = COPY [[LDRQroX]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
index ad66fa5623e3..25ecce4dd92b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
@@ -26,7 +26,7 @@ body: |
; CHECK-LABEL: name: foo
; CHECK: BRK 1
; CHECK: RET_ReallyLR
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
RET_ReallyLR
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
index bcdd77ac5757..b3613f52c4ec 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
# RUN: llc -O0 -mtriple=aarch64-apple-ios -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=IOS
# RUN: llc -O0 -mtriple=aarch64-linux-gnu -relocation-model=pic -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LINUX-PIC
@@ -26,40 +27,35 @@
...
---
-# CHECK-LABEL: name: frame_index
name: frame_index
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64sp, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
-
stack:
- { id: 0, name: ptr0, offset: 0, size: 8, alignment: 8 }
-
-# CHECK: body:
-# CHECK: %0:gpr64sp = ADDXri %stack.0.ptr0, 0, 0
body: |
bb.0:
- %0(p0) = G_FRAME_INDEX %stack.0.ptr0
+ ; CHECK-LABEL: name: frame_index
+ ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0.ptr0, 0, 0
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ %0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr0
$x0 = COPY %0(p0)
...
---
---
-# CHECK-LABEL: name: ptr_mask
name: ptr_mask
legalized: true
regBankSelected: true
-
-# CHECK: body:
-# CHECK: %2:gpr64sp = ANDXri %0, 8060
body: |
bb.0:
liveins: $x0
+ ; CHECK-LABEL: name: ptr_mask
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[COPY]], 8060
+ ; CHECK-NEXT: $x0 = COPY [[ANDXri]]
%0:gpr(p0) = COPY $x0
%const:gpr(s64) = G_CONSTANT i64 -8
%1:gpr(p0) = G_PTRMASK %0, %const
@@ -68,180 +64,171 @@ body: |
---
# Global defined in the same linkage unit so no GOT is needed
-# CHECK-LABEL: name: global_local
name: global_local
legalized: true
regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0:gpr64common = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
-# LINUX-PIC: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_local
body: |
bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_local
+ ; IOS-LABEL: name: global_local
+ ; IOS: [[MOVaddr:%[0-9]+]]:gpr64common = MOVaddr target-flags(aarch64-page) @var_local, target-flags(aarch64-pageoff, aarch64-nc) @var_local
+ ; IOS-NEXT: $x0 = COPY [[MOVaddr]]
+ ;
+ ; LINUX-PIC-LABEL: name: global_local
+ ; LINUX-PIC: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @var_local
+ ; LINUX-PIC-NEXT: $x0 = COPY [[LOADgot]]
+ %0:gpr(p0) = G_GLOBAL_VALUE @var_local
$x0 = COPY %0(p0)
...
---
-# CHECK-LABEL: name: global_got
name: global_got
legalized: true
regBankSelected: true
-registers:
- - { id: 0, class: gpr }
-
-# CHECK: body:
-# IOS: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_got
-# LINUX-PIC: %0:gpr64common = LOADgot target-flags(aarch64-got) @var_got
body: |
bb.0:
- %0(p0) = G_GLOBAL_VALUE @var_got
+ ; CHECK-LABEL: name: global_got
+ ; CHECK: [[LOADgot:%[0-9]+]]:gpr64common = LOADgot target-flags(aarch64-got) @var_got
+ ; CHECK-NEXT: $x0 = COPY [[LOADgot]]
+ %0:gpr(p0) = G_GLOBAL_VALUE @var_got
$x0 = COPY %0(p0)
...
---
-# CHECK-LABEL: name: icmp
name: icmp
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
- - { id: 8, class: gpr }
- - { id: 9, class: gpr }
- - { id: 10, class: gpr }
- - { id: 11, class: gpr }
-
-# CHECK: body:
-# CHECK: SUBSWrr %0, %0, implicit-def $nzcv
-# CHECK: %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-
-# CHECK: SUBSXrr %2, %2, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-
-# CHECK: SUBSXrr %4, %4, implicit-def $nzcv
-# CHECK: %5:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-
body: |
bb.0:
liveins: $w0, $x0
- %0(s32) = COPY $w0
- %1(s32) = G_ICMP intpred(eq), %0, %0
- %6(s8) = G_TRUNC %1(s32)
- %9(s32) = G_ANYEXT %6
+ ; CHECK-LABEL: name: icmp
+ ; CHECK: liveins: $w0, $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[CSINCWr]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY2]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[CSINCWr1]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY4]], [[COPY4]], implicit-def $nzcv
+ ; CHECK-NEXT: [[CSINCWr2:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[CSINCWr2]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY5]]
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_ICMP intpred(eq), %0, %0
+ %6:gpr(s8) = G_TRUNC %1(s32)
+ %9:gpr(s32) = G_ANYEXT %6
$w0 = COPY %9(s32)
- %2(s64) = COPY $x0
- %3(s32) = G_ICMP intpred(uge), %2, %2
- %7(s8) = G_TRUNC %3(s32)
- %10(s32) = G_ANYEXT %7
+ %2:gpr(s64) = COPY $x0
+ %3:gpr(s32) = G_ICMP intpred(uge), %2, %2
+ %7:gpr(s8) = G_TRUNC %3(s32)
+ %10:gpr(s32) = G_ANYEXT %7
$w0 = COPY %10(s32)
- %4(p0) = COPY $x0
- %5(s32) = G_ICMP intpred(ne), %4, %4
- %8(s8) = G_TRUNC %5(s32)
- %11(s32) = G_ANYEXT %8
+ %4:gpr(p0) = COPY $x0
+ %5:gpr(s32) = G_ICMP intpred(ne), %4, %4
+ %8:gpr(s8) = G_TRUNC %5(s32)
+ %11:gpr(s32) = G_ANYEXT %8
$w0 = COPY %11(s32)
...
---
-# CHECK-LABEL: name: fcmp
name: fcmp
legalized: true
regBankSelected: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: fpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
-
-# CHECK: body:
-# CHECK: nofpexcept FCMPSrr %0, %0, implicit-def $nzcv
-# CHECK: [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
-# CHECK: [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-# CHECK: %1:gpr32 = ORRWrr [[TST_MI]], [[TST_GT]]
-
-# CHECK: nofpexcept FCMPDrr %2, %2, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
-
body: |
bb.0:
liveins: $w0, $x0
- %0(s32) = COPY $s0
- %1(s32) = G_FCMP floatpred(one), %0, %0
- %4(s8) = G_TRUNC %1(s32)
- %6(s32) = G_ANYEXT %4
- $w0 = COPY %6(s32)
+ ; CHECK-LABEL: name: fcmp
+ ; CHECK: liveins: $w0, $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY]], [[COPY]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
+ ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+ ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[ORRWrr]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY2]], [[COPY2]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr2:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32all = COPY [[CSINCWr2]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY4]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr3:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 15, implicit $nzcv
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr32all = COPY [[CSINCWr3]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY5]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY6]], [[COPY6]], implicit-def $nzcv, implicit $fpcr
+ ; CHECK-NEXT: [[CSINCWr4:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 14, implicit $nzcv
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gpr32all = COPY [[CSINCWr4]]
+ ; CHECK-NEXT: $w0 = COPY [[COPY7]]
+ %0:fpr(s32) = COPY $s0
+ %1:gpr(s32) = G_FCMP floatpred(one), %0, %0
+ %2:gpr(s8) = G_TRUNC %1(s32)
+ %3:gpr(s32) = G_ANYEXT %2
+ $w0 = COPY %3(s32)
- %2(s64) = COPY $d0
- %3(s32) = G_FCMP floatpred(uge), %2, %2
- %5(s8) = G_TRUNC %3(s32)
- %7(s32) = G_ANYEXT %5
+ %4:fpr(s64) = COPY $d0
+ %5:gpr(s32) = G_FCMP floatpred(uge), %4, %4
+ %6:gpr(s8) = G_TRUNC %5(s32)
+ %7:gpr(s32) = G_ANYEXT %6
$w0 = COPY %7(s32)
+ %8:fpr(s32) = COPY $s0
+ %9:gpr(s32) = G_FCMP floatpred(true), %8, %8
+ %10:gpr(s8) = G_TRUNC %9(s32)
+ %11:gpr(s32) = G_ANYEXT %10
+ $w0 = COPY %11(s32)
+
+ %12:fpr(s64) = COPY $d0
+ %13:gpr(s32) = G_FCMP floatpred(false), %12, %12
+ %14:gpr(s8) = G_TRUNC %13(s32)
+ %15:gpr(s32) = G_ANYEXT %14
+ $w0 = COPY %15(s32)
+
...
---
-# CHECK-LABEL: name: phi
name: phi
legalized: true
regBankSelected: true
tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: fpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: fpr }
- - { id: 1, class: gpr }
- - { id: 2, class: fpr }
-
-# CHECK: body:
-# CHECK: bb.1:
-# CHECK: %2:fpr32 = PHI %0, %bb.0, %2, %bb.1
-
body: |
+ ; CHECK-LABEL: name: phi
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $s0, $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:fpr32 = PHI [[COPY]], %bb.0, [[PHI]], %bb.1
+ ; CHECK-NEXT: TBNZW [[COPY1]], 0, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $s0 = COPY [[PHI]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $s0
bb.0:
liveins: $s0, $w0
successors: %bb.1
- %0(s32) = COPY $s0
+ %0:fpr(s32) = COPY $s0
%3:gpr(s32) = COPY $w0
bb.1:
successors: %bb.1, %bb.2
- %2(s32) = PHI %0, %bb.0, %2, %bb.1
+ %2:fpr(s32) = PHI %0, %bb.0, %2, %bb.1
G_BRCOND %3, %bb.1
bb.2:
@@ -250,60 +237,46 @@ body: |
...
---
-# CHECK-LABEL: name: select
name: select
legalized: true
regBankSelected: true
tracksRegLiveness: true
-
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 2, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 3, class: gpr32, preferred-register: '' }
-# CHECK-NEXT: - { id: 4, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 5, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 6, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 7, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 8, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 9, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 10, class: gpr32, preferred-register: '' }
-registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
- - { id: 3, class: gpr }
- - { id: 4, class: gpr }
- - { id: 5, class: gpr }
- - { id: 6, class: gpr }
- - { id: 7, class: gpr }
- - { id: 8, class: gpr }
- - { id: 9, class: gpr }
-
-# CHECK: body:
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %3:gpr32 = CSELWr %1, %2, 1, implicit $nzcv
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %6:gpr64 = CSELXr %4, %5, 1, implicit $nzcv
-# CHECK: ANDSWri %10, 0, implicit-def $nzcv
-# CHECK: %9:gpr64 = CSELXr %7, %8, 1, implicit $nzcv
body: |
bb.0:
liveins: $w0, $w1, $w2
+ ; CHECK-LABEL: name: select
+ ; CHECK: liveins: $w0, $w1, $w2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
+ ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[COPY1]], [[COPY2]], 1, implicit $nzcv
+ ; CHECK-NEXT: $w0 = COPY [[CSELWr]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ANDSWri1:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[COPY3]], [[COPY4]], 1, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[CSELXr]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[ANDSWri2:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
+ ; CHECK-NEXT: [[CSELXr1:%[0-9]+]]:gpr64 = CSELXr [[COPY5]], [[COPY6]], 1, implicit $nzcv
+ ; CHECK-NEXT: $x0 = COPY [[CSELXr1]]
%10:gpr(s32) = COPY $w0
- %1(s32) = COPY $w1
- %2(s32) = COPY $w2
- %3(s32) = G_SELECT %10, %1, %2
+ %1:gpr(s32) = COPY $w1
+ %2:gpr(s32) = COPY $w2
+ %3:gpr(s32) = G_SELECT %10, %1, %2
$w0 = COPY %3(s32)
- %4(s64) = COPY $x0
- %5(s64) = COPY $x1
- %6(s64) = G_SELECT %10, %4, %5
+ %4:gpr(s64) = COPY $x0
+ %5:gpr(s64) = COPY $x1
+ %6:gpr(s64) = G_SELECT %10, %4, %5
$x0 = COPY %6(s64)
- %7(p0) = COPY $x0
- %8(p0) = COPY $x1
- %9(p0) = G_SELECT %10, %7, %8
+ %7:gpr(p0) = COPY $x0
+ %8:gpr(p0) = COPY $x1
+ %9:gpr(p0) = G_SELECT %10, %7, %8
$x0 = COPY %9(p0)
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
index f4366fb7888e..b242c68e3b07 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/uaddo-8-16-bits.mir
@@ -26,7 +26,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -48,7 +48,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s8)
@@ -80,7 +80,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -102,7 +102,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -134,7 +134,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: liveins: $x2
@@ -165,7 +165,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
liveins: $x2
@@ -206,7 +206,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -228,7 +228,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ANYEXT %6(s16)
@@ -261,7 +261,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -284,7 +284,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -317,7 +317,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
@@ -340,7 +340,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%8:_(s32) = G_ZEXT %6(s16)
@@ -377,7 +377,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: liveins: $x2
@@ -410,7 +410,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
liveins: $x2
@@ -512,7 +512,7 @@ body: |
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s16)
@@ -544,7 +544,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %6(s16)
@@ -577,7 +577,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -601,7 +601,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %7(s8)
@@ -634,7 +634,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -658,7 +658,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%9:_(s32) = G_ZEXT %7(s8)
@@ -692,7 +692,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO]](s8)
@@ -717,7 +717,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%10:_(s32) = G_ZEXT %8(s8)
@@ -783,7 +783,7 @@ body: |
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors:
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDO]](s16)
@@ -804,7 +804,7 @@ body: |
bb.2:
successors:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.3:
%6:_(s32) = G_ANYEXT %4(s16)
@@ -839,7 +839,7 @@ body: |
; CHECK-NEXT: RET_ReallyLR implicit $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; CHECK-NEXT: G_TRAP
bb.1:
successors: %bb.2(0x7ffff800), %bb.3(0x00000800)
liveins: $w0, $w1
@@ -860,6 +860,6 @@ body: |
RET_ReallyLR implicit $w0
bb.3:
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
index 58299696e78f..d4d803a91cfa 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sme2-asm.ll
@@ -3,7 +3,7 @@
define void @UphPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr_p8to15 = COPY %0
; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_p8to15 */, %1
; CHECK: RET_ReallyLR
@@ -17,7 +17,7 @@ entry:
define void @UpaPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr = COPY %0
; CHECK: INLINEASM &"ld1w {z0.s,z1.s,z2.s,z3.s}, $0/z, [x10]", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR */, %1
; CHECK: RET_ReallyLR
@@ -31,7 +31,7 @@ entry:
define void @UplPNR(target("aarch64.svcount") %predcnt) {
entry:
; CHECK: %0:ppr = COPY $p0
-; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store unknown-size into %ir.predcnt.addr, align 2)
+; CHECK: STR_PXI %0, %stack.0.predcnt.addr, 0 :: (store (<vscale x 1 x s16>) into %ir.predcnt.addr)
; CHECK: %1:pnr_3b = COPY %0
; CHECK: INLINEASM &"fadd z0.h, $0/m, z0.h, #0.5", 1 /* sideeffect attdialect */, {{[0-9]+}} /* reguse:PNR_3b */, %1
; CHECK: RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index dbc5417e2313..61a4f64ac2bf 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -3,8 +3,7 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve < %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SVE
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-; CHECK-GI: warning: Instruction selection used fallback path for smull_zext_v4i16_v4i32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
+; CHECK-GI: warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
@@ -189,13 +188,49 @@ define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(ptr %A, ptr %B) nounwind {
}
define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
-; CHECK-LABEL: smull_zext_v4i16_v4i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ldr s0, [x0]
-; CHECK-NEXT: ldr d1, [x1]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-NEON-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-NEON: // %bb.0:
+; CHECK-NEON-NEXT: ldr s0, [x0]
+; CHECK-NEON-NEXT: ldr d1, [x1]
+; CHECK-NEON-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-NEON-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-NEON-NEXT: ret
+;
+; CHECK-SVE-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-SVE: // %bb.0:
+; CHECK-SVE-NEXT: ldr s0, [x0]
+; CHECK-SVE-NEXT: ldr d1, [x1]
+; CHECK-SVE-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-SVE-NEXT: smull v0.4s, v0.4h, v1.4h
+; CHECK-SVE-NEXT: ret
+;
+; CHECK-GI-LABEL: smull_zext_v4i16_v4i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr w8, [x0]
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: fmov w9, s1
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: fmov s2, w10
+; CHECK-GI-NEXT: fmov s3, w11
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v2.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ldr d2, [x1]
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: sshll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: ret
%load.A = load <4 x i8>, ptr %A
%load.B = load <4 x i16>, ptr %B
%zext.A = zext <4 x i8> %load.A to <4 x i32>
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index cf9ed4d5f0e1..573f921e638c 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -20,7 +20,7 @@ entry:
define i8 @test2(i32 %a) {
; CHECK-LABEL: test2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #135
+; CHECK-NEXT: mov w8, #135 // =0x87
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: cmp w8, #1024
; CHECK-NEXT: cset w0, eq
@@ -37,7 +37,7 @@ entry:
define i8 @test3(i32 %a) {
; CHECK-LABEL: test3:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #1024
+; CHECK-NEXT: mov w8, #1024 // =0x400
; CHECK-NEXT: movk w8, #33, lsl #16
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: cmp w8, #1024
@@ -84,7 +84,7 @@ entry:
define i8 @test6(i64 %a) {
; CHECK-LABEL: test6:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #135
+; CHECK-NEXT: mov w8, #135 // =0x87
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
; CHECK-NEXT: cset w0, eq
@@ -101,7 +101,7 @@ entry:
define i8 @test7(i64 %a) {
; CHECK-LABEL: test7:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #1024
+; CHECK-NEXT: mov w8, #1024 // =0x400
; CHECK-NEXT: movk w8, #33, lsl #16
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
@@ -175,7 +175,7 @@ define i32 @test9(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
; CHECK-NEXT: cmp w2, #1
; CHECK-NEXT: b.lt .LBB8_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
-; CHECK-NEXT: mov w9, #1024
+; CHECK-NEXT: mov w9, #1024 // =0x400
; CHECK-NEXT: mov w8, w2
; CHECK-NEXT: movk w9, #32, lsl #16
; CHECK-NEXT: .LBB8_2: // %for.body
@@ -226,7 +226,7 @@ define void @test10(ptr nocapture %x, ptr nocapture readonly %y, ptr nocapture %
; CHECK-LABEL: test10:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr w8, [x1]
-; CHECK-NEXT: mov w9, #1024
+; CHECK-NEXT: mov w9, #1024 // =0x400
; CHECK-NEXT: movk w9, #32, lsl #16
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: str w8, [x0]
@@ -253,7 +253,7 @@ entry:
define i8 @test11(i64 %a) {
; CHECK-LABEL: test11:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #-1610612736
+; CHECK-NEXT: mov w8, #-1610612736 // =0xa0000000
; CHECK-NEXT: and x8, x0, x8
; CHECK-NEXT: cmp x8, #1024
; CHECK-NEXT: cset w0, eq
diff --git a/llvm/test/CodeGen/AArch64/abs.ll b/llvm/test/CodeGen/AArch64/abs.ll
index e00f70b94e3b..78c1ff7b9937 100644
--- a/llvm/test/CodeGen/AArch64/abs.ll
+++ b/llvm/test/CodeGen/AArch64/abs.ll
@@ -15,9 +15,8 @@ define i8 @abs_i8(i8 %a){
; CHECK-GI-LABEL: abs_i8:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: asr w8, w8, #7
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i8 @llvm.abs.i8(i8 %a, i1 0)
@@ -36,9 +35,8 @@ define i16 @abs_i16(i16 %a){
; CHECK-GI-LABEL: abs_i16:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: sxth w8, w0
-; CHECK-GI-NEXT: asr w8, w8, #15
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i16 @llvm.abs.i16(i16 %a, i1 0)
@@ -55,9 +53,8 @@ define i32 @abs_i32(i32 %a){
;
; CHECK-GI-LABEL: abs_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: asr w8, w0, #31
-; CHECK-GI-NEXT: add w9, w0, w8
-; CHECK-GI-NEXT: eor w0, w9, w8
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cneg w0, w0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i32 @llvm.abs.i32(i32 %a, i1 0)
@@ -74,9 +71,8 @@ define i64 @abs_i64(i64 %a){
;
; CHECK-GI-LABEL: abs_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: asr x8, x0, #63
-; CHECK-GI-NEXT: add x9, x0, x8
-; CHECK-GI-NEXT: eor x0, x9, x8
+; CHECK-GI-NEXT: cmp x0, #0
+; CHECK-GI-NEXT: cneg x0, x0, le
; CHECK-GI-NEXT: ret
entry:
%res = call i64 @llvm.abs.i64(i64 %a, i1 0)
@@ -248,9 +244,9 @@ define <1 x i32> @abs_v1i32(<1 x i32> %a){
; CHECK-GI-LABEL: abs_v1i32:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: asr w9, w8, #31
-; CHECK-GI-NEXT: add w8, w8, w9
-; CHECK-GI-NEXT: eor w8, w8, w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cneg w8, w9, le
; CHECK-GI-NEXT: fmov s0, w8
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
index 9a4e01a29ecb..7244ac949ab8 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
@@ -14,12 +14,12 @@ define void @array_1D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
@@ -81,18 +81,18 @@ define void @array_2D(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #5, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #4, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #5, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #4, mul vl]
-; CHECK-NEXT: st1d { z3.d }, p0, [sp, #3, mul vl]
-; CHECK-NEXT: st1d { z5.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z4.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #5, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z3.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1d { z4.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z5.d }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #5, mul vl]
+; CHECK-NEXT: st1d { z3.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: st1d { z5.d }, p0, [sp, #3, mul vl]
+; CHECK-NEXT: st1d { z4.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #6
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
index 7292d52aaf47..f03a6f018d34 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -13,12 +13,12 @@ define void @test(ptr %addr) #0 {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0]
-; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
-; CHECK-NEXT: st1d { z1.d }, p0, [sp, #1, mul vl]
-; CHECK-NEXT: st1d { z2.d }, p0, [sp]
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1d { z2.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: st1d { z0.d }, p0, [sp]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp, #2, mul vl]
+; CHECK-NEXT: st1d { z2.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/allow-check.ll b/llvm/test/CodeGen/AArch64/allow-check.ll
index c315b216e222..9e4a47357906 100644
--- a/llvm/test/CodeGen/AArch64/allow-check.ll
+++ b/llvm/test/CodeGen/AArch64/allow-check.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s
-; RUN: llc < %s -mtriple=aarch64 -fast-isel | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -fast-isel=1 | FileCheck %s
target triple = "aarch64-linux"
diff --git a/llvm/test/CodeGen/AArch64/and-sink.ll b/llvm/test/CodeGen/AArch64/and-sink.ll
index 4d085869de24..f298a55dab72 100644
--- a/llvm/test/CodeGen/AArch64/and-sink.ll
+++ b/llvm/test/CodeGen/AArch64/and-sink.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -cgpp-huge-func=0 -mtriple=aarch64-linux %s | FileCheck --check-prefix=CHECK-CGP %s
@@ -9,9 +10,18 @@
; Test that and is sunk into cmp block to form tbz.
define dso_local i32 @and_sink1(i32 %a, i1 %c) {
; CHECK-LABEL: and_sink1:
-; CHECK: tbz w1, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: tbnz {{w[0-9]+}}, #2
+; CHECK: // %bb.0:
+; CHECK-NEXT: tbz w1, #0, .LBB0_3
+; CHECK-NEXT: // %bb.1: // %bb0
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: str wzr, [x8, :lo12:A]
+; CHECK-NEXT: tbnz w0, #2, .LBB0_3
+; CHECK-NEXT: // %bb.2:
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_3: // %bb2
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink1(
; CHECK-CGP-NOT: and i32
@@ -35,12 +45,30 @@ bb2:
; Test that both 'and' and cmp get sunk to form tbz.
define dso_local i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
; CHECK-LABEL: and_sink2:
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: tbz w1, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:B]
-; CHECK: tbz w2, #0
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:C]
-; CHECK: tbnz {{w[0-9]+}}, #2
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: adrp x9, A
+; CHECK-NEXT: str wzr, [x9, :lo12:A]
+; CHECK-NEXT: tbz w1, #0, .LBB1_5
+; CHECK-NEXT: // %bb.1: // %bb0.preheader
+; CHECK-NEXT: adrp x8, B
+; CHECK-NEXT: adrp x9, C
+; CHECK-NEXT: .LBB1_2: // %bb0
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: str wzr, [x8, :lo12:B]
+; CHECK-NEXT: tbz w2, #0, .LBB1_6
+; CHECK-NEXT: // %bb.3: // %bb1
+; CHECK-NEXT: // in Loop: Header=BB1_2 Depth=1
+; CHECK-NEXT: str wzr, [x9, :lo12:C]
+; CHECK-NEXT: tbnz w0, #2, .LBB1_2
+; CHECK-NEXT: // %bb.4:
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: .LBB1_5: // %common.ret
+; CHECK-NEXT: mov w0, w8
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB1_6:
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink2(
; CHECK-CGP-NOT: and i32
@@ -71,10 +99,16 @@ bb3:
; Test that 'and' is not sunk since cbz is a better alternative.
define dso_local i32 @and_sink3(i32 %a) {
; CHECK-LABEL: and_sink3:
-; CHECK: and [[REG:w[0-9]+]], w0, #0x3
-; CHECK: [[LOOP:.L[A-Z0-9_]+]]:
-; CHECK: str wzr, [x{{[0-9]+}}, :lo12:A]
-; CHECK: cbz [[REG]], [[LOOP]]
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: and w9, w0, #0x3
+; CHECK-NEXT: .LBB2_1: // %bb0
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: str wzr, [x8, :lo12:A]
+; CHECK-NEXT: cbz w9, .LBB2_1
+; CHECK-NEXT: // %bb.2: // %bb2
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
; CHECK-CGP-LABEL: @and_sink3(
; CHECK-CGP-NEXT: and i32
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
index 225d4c602f18..cb6586718450 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -debug-entry-values -mtriple=arm64-apple-darwin | FileCheck %s
-; Stackmap Header: no constants - 6 callsites
+; Stackmap Header: no constants - 18 callsites
; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
@@ -8,11 +8,11 @@
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 0
; Num Functions
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 18
; Num LargeConstants
; CHECK-NEXT: .long 0
; Num Callsites
-; CHECK-NEXT: .long 8
+; CHECK-NEXT: .long 18
; Functions and stack size
; CHECK-NEXT: .quad _test
@@ -39,6 +39,36 @@
; CHECK-NEXT: .quad _patchpoint_spillargs
; CHECK-NEXT: .quad 128
; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_i32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_i64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_p0
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f16
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_f64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v16i8
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v4i32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v4f32
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad _generic_test_v2f64
+; CHECK-NEXT: .quad 16
+; CHECK-NEXT: .quad 1
; test
@@ -457,5 +487,194 @@ entry:
ret i64 %result
}
+; generic_test_i32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_i32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define i32 @generic_test_i32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc i32 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i32(i64 14, i32 20, ptr null, i32 0)
+ ret i32 %ret
+}
+
+; generic_test_i64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_i64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define i64 @generic_test_i64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 14, i32 20, ptr null, i32 0)
+ ret i64 %ret
+}
+
+; generic_test_p0
+; CHECK-LABEL: .long L{{.*}}-_generic_test_p0
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define ptr @generic_test_p0() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc ptr (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.p0(i64 14, i32 20, ptr null, i32 0)
+ ret ptr %ret
+}
+
+; generic_test_f16
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f16
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 2
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define half @generic_test_f16() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc half (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f16(i64 14, i32 20, ptr null, i32 0)
+ ret half %ret
+}
+
+; generic_test_f32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 4
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define float @generic_test_f32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc float (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f32(i64 14, i32 20, ptr null, i32 0)
+ ret float %ret
+}
+
+; generic_test_f64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_f64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 8
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define double @generic_test_f64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc double (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f64(i64 14, i32 20, ptr null, i32 0)
+ ret double %ret
+}
+
+; generic_test_v16i8
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v16i8
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <16 x i8> @generic_test_v16i8() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <16 x i8> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v16i8(i64 14, i32 20, ptr null, i32 0)
+ ret <16 x i8> %ret
+}
+
+; generic_test_v4i32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v4i32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <4 x i32> @generic_test_v4i32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <4 x i32> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4i32(i64 14, i32 20, ptr null, i32 0)
+ ret <4 x i32> %ret
+}
+
+; generic_test_v4f32
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v4f32
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <4 x float> @generic_test_v4f32() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <4 x float> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4f32(i64 14, i32 20, ptr null, i32 0)
+ ret <4 x float> %ret
+}
+
+; generic_test_v2f64
+; CHECK-LABEL: .long L{{.*}}-_generic_test_v2f64
+; CHECK-NEXT: .short 0
+; 1 location
+; CHECK-NEXT: .short 1
+; Loc 0: Register <-- this is the return register
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 16
+; CHECK-NEXT: .short {{[0-9]+}}
+; CHECK-NEXT: .short 0
+; CHECK-NEXT: .long 0
+define <2 x double> @generic_test_v2f64() nounwind ssp uwtable {
+entry:
+ %ret = call anyregcc <2 x double> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v2f64(i64 14, i32 20, ptr null, i32 0)
+ ret <2 x double> %ret
+}
+
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i32 @llvm.experimental.patchpoint.i32(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare ptr @llvm.experimental.patchpoint.p0(i64, i32, ptr, i32, ...)
+declare half @llvm.experimental.patchpoint.f16(i64, i32, ptr, i32, ...)
+declare float @llvm.experimental.patchpoint.f32(i64, i32, ptr, i32, ...)
+declare double @llvm.experimental.patchpoint.f64(i64, i32, ptr, i32, ...)
+declare <16 x i8> @llvm.experimental.patchpoint.v16i8(i64, i32, ptr, i32, ...)
+declare <4 x i32> @llvm.experimental.patchpoint.v4i32(i64, i32, ptr, i32, ...)
+declare <4 x float> @llvm.experimental.patchpoint.v4f32(i64, i32, ptr, i32, ...)
+declare <2 x double> @llvm.experimental.patchpoint.v2f64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
index a1e069368527..bc399c8d4ff0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-SDAG
-; RUN: llc < %s -global-isel -global-isel-abort=2 -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-GISEL
+; RUN: llc < %s -global-isel -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast -aarch64-enable-sink-fold=true | FileCheck %s --check-prefix=CHECK-GISEL
define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) {
; CHECK-SDAG-LABEL: test_varidx_extract_v8s8:
@@ -29,20 +29,20 @@ define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) {
; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16
; CHECK-GISEL-NEXT: mov w9, w0
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GISEL-NEXT: mov b1, v0.b[1]
; CHECK-GISEL-NEXT: add x8, sp, #8
-; CHECK-GISEL-NEXT: str d0, [sp, #8]
; CHECK-GISEL-NEXT: and x9, x9, #0x7
-; CHECK-GISEL-NEXT: mov b2, v0.b[1]
+; CHECK-GISEL-NEXT: str d0, [sp, #8]
; CHECK-GISEL-NEXT: mov b3, v0.b[2]
; CHECK-GISEL-NEXT: lsl x10, x9, #1
; CHECK-GISEL-NEXT: mov b0, v0.b[3]
; CHECK-GISEL-NEXT: sub x9, x10, x9
-; CHECK-GISEL-NEXT: ldrb w8, [x8, x9]
-; CHECK-GISEL-NEXT: fmov s1, w8
-; CHECK-GISEL-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-GISEL-NEXT: mov v1.h[2], v3.h[0]
-; CHECK-GISEL-NEXT: mov v1.h[3], v0.h[0]
-; CHECK-GISEL-NEXT: fmov d0, d1
+; CHECK-GISEL-NEXT: ldr b2, [x8, x9]
+; CHECK-GISEL-NEXT: mov v2.b[1], v1.b[0]
+; CHECK-GISEL-NEXT: mov v2.b[2], v3.b[0]
+; CHECK-GISEL-NEXT: mov v2.b[3], v0.b[0]
+; CHECK-GISEL-NEXT: ushll v0.8h, v2.8b, #0
+; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
%tmp = extractelement <8 x i8> %x, i32 %idx
@@ -176,17 +176,15 @@ define <2 x i16> @test_varidx_extract_v4s16(<4 x i16> %x, i32 %idx) {
; CHECK-GISEL: // %bb.0:
; CHECK-GISEL-NEXT: sub sp, sp, #16
; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GISEL-NEXT: mov w9, w0
-; CHECK-GISEL-NEXT: mov w8, #2 // =0x2
-; CHECK-GISEL-NEXT: add x10, sp, #8
-; CHECK-GISEL-NEXT: and x9, x9, #0x3
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GISEL-NEXT: mov w9, w0
+; CHECK-GISEL-NEXT: mov h1, v0.h[1]
+; CHECK-GISEL-NEXT: add x8, sp, #8
; CHECK-GISEL-NEXT: str d0, [sp, #8]
-; CHECK-GISEL-NEXT: madd x8, x9, x8, x10
-; CHECK-GISEL-NEXT: umov w9, v0.h[1]
-; CHECK-GISEL-NEXT: fmov s1, w9
-; CHECK-GISEL-NEXT: ldr h0, [x8]
-; CHECK-GISEL-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GISEL-NEXT: and x9, x9, #0x3
+; CHECK-GISEL-NEXT: ldr h0, [x8, x9, lsl #1]
+; CHECK-GISEL-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GISEL-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GISEL-NEXT: add sp, sp, #16
; CHECK-GISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
index c58f4b102909..f948d78723e9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-patchpoint.ll
@@ -79,6 +79,145 @@ entry:
ret void
}
+; Test register allocation for an i32 result value of patchpoint.
+define i32 @generic_patchpoint_i32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_i32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in w0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i32 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i32(i64 5, i32 4, ptr null, i32 0)
+ ret i32 %result
+}
+
+; Test register allocation for an i64 result value of patchpoint.
+define i64 @generic_patchpoint_i64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_i64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in x0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 4, ptr null, i32 0)
+ ret i64 %result
+}
+
+; Test register allocation for a ptr result value of patchpoint.
+define ptr @generic_patchpoint_p0() {
+entry:
+; CHECK-LABEL: generic_patchpoint_p0:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in x0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call ptr (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.p0(i64 5, i32 4, ptr null, i32 0)
+ ret ptr %result
+}
+
+; Test register allocation for a half result value of patchpoint.
+define half @generic_patchpoint_f16() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f16:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in h0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call half (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f16(i64 5, i32 4, ptr null, i32 0)
+ ret half %result
+}
+
+; Test register allocation for a float result value of patchpoint.
+define float @generic_patchpoint_f32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in s0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call float (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f32(i64 5, i32 4, ptr null, i32 0)
+ ret float %result
+}
+
+; Test register allocation for a double result value of patchpoint.
+define double @generic_patchpoint_f64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_f64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in d0.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call double (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.f64(i64 5, i32 4, ptr null, i32 0)
+ ret double %result
+}
+
+; Test register allocation for a <16 x i8> result value of patchpoint.
+define <16 x i8> @generic_patchpoint_v16i8() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v16i8:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.16b.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <16 x i8> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v16i8(i64 5, i32 4, ptr null, i32 0)
+ ret <16 x i8> %result
+}
+
+; Test register allocation for a <4 x i32> result value of patchpoint.
+define <4 x i32> @generic_patchpoint_v4i32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v4i32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.4s.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <4 x i32> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4i32(i64 5, i32 4, ptr null, i32 0)
+ ret <4 x i32> %result
+}
+
+; Test register allocation for a <4 x float> result value of patchpoint.
+define <4 x float> @generic_patchpoint_v4f32() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v4f32:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.4s.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <4 x float> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v4f32(i64 5, i32 4, ptr null, i32 0)
+ ret <4 x float> %result
+}
+
+; Test register allocation for a <2 x double> result value of patchpoint.
+define <2 x double> @generic_patchpoint_v2f64() {
+entry:
+; CHECK-LABEL: generic_patchpoint_v2f64:
+; CHECK: Ltmp
+; CHECK-NEXT: nop
+; The return value is already in v0.2d.
+; CHECK-NEXT: ldp
+; CHECK-NEXT: ret
+ %result = tail call <2 x double> (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.v2f64(i64 5, i32 4, ptr null, i32 0)
+ ret <2 x double> %result
+}
+
declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
+declare i32 @llvm.experimental.patchpoint.i32(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+declare ptr @llvm.experimental.patchpoint.p0(i64, i32, ptr, i32, ...)
+declare half @llvm.experimental.patchpoint.f16(i64, i32, ptr, i32, ...)
+declare float @llvm.experimental.patchpoint.f32(i64, i32, ptr, i32, ...)
+declare double @llvm.experimental.patchpoint.f64(i64, i32, ptr, i32, ...)
+declare <16 x i8> @llvm.experimental.patchpoint.v16i8(i64, i32, ptr, i32, ...)
+declare <4 x i32> @llvm.experimental.patchpoint.v4i32(i64, i32, ptr, i32, ...)
+declare <4 x float> @llvm.experimental.patchpoint.v4f32(i64, i32, ptr, i32, ...)
+declare <2 x double> @llvm.experimental.patchpoint.v2f64(i64, i32, ptr, i32, ...)
diff --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
index 77c70668b65a..0ec2d763685e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -2643,8 +2643,7 @@ define i8 @pr60530() {
;
; GISEL-LABEL: pr60530:
; GISEL: // %bb.0:
-; GISEL-NEXT: mov w8, #1 // =0x1
-; GISEL-NEXT: sbfx w0, w8, #0, #1
+; GISEL-NEXT: mov w0, #255 // =0xff
; GISEL-NEXT: ret
%1 = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 1)
%2 = extractvalue { i8, i1 } %1, 1
diff --git a/llvm/test/CodeGen/AArch64/bitcast.ll b/llvm/test/CodeGen/AArch64/bitcast.ll
index b7c02d6855a9..e0851fd8739e 100644
--- a/llvm/test/CodeGen/AArch64/bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast.ll
@@ -1,14 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; PR23065: SCALAR_TO_VECTOR implies the top elements 1 to N-1 of the N-element vector are undefined.
-; CHECK-GI: warning: Instruction selection used fallback path for bitcast_i32_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_i32_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v2i16_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for bitcast_v4i8_v2i16
-
define <4 x i16> @foo1(<2 x i32> %a) {
; CHECK-SD-LABEL: foo1:
; CHECK-SD: // %bb.0:
@@ -80,12 +75,26 @@ define i32 @bitcast_v4i8_i32(<4 x i8> %a, <4 x i8> %b){
}
define <4 x i8> @bitcast_i32_v4i8(i32 %a, i32 %b){
-; CHECK-LABEL: bitcast_i32_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_i32_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: zip1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_i32_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add i32 %a, %b
%d = bitcast i32 %c to <4 x i8>
ret <4 x i8> %d
@@ -119,13 +128,23 @@ define i32 @bitcast_v2i16_i32(<2 x i16> %a, <2 x i16> %b){
}
define <2 x i16> @bitcast_i32_v2i16(i32 %a, i32 %b){
-; CHECK-LABEL: bitcast_i32_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_i32_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: fmov s0, w8
+; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_i32_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add i32 %a, %b
%d = bitcast i32 %c to <2 x i16>
ret <2 x i16> %d
@@ -382,40 +401,72 @@ define <8 x i16> @bitcast_v16i8_v8i16(<16 x i8> %a, <16 x i8> %b){
; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
define <4 x i8> @bitcast_v2i16_v4i8(<2 x i16> %a, <2 x i16> %b){
-; CHECK-LABEL: bitcast_v2i16_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: strh w9, [sp, #12]
-; CHECK-NEXT: strh w8, [sp, #14]
-; CHECK-NEXT: ldr s0, [sp, #12]
-; CHECK-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v2i16_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: mov w8, v0.s[1]
+; CHECK-SD-NEXT: fmov w9, s0
+; CHECK-SD-NEXT: strh w9, [sp, #12]
+; CHECK-SD-NEXT: strh w8, [sp, #14]
+; CHECK-SD-NEXT: ldr s0, [sp, #12]
+; CHECK-SD-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v2i16_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add <2 x i16> %a, %b
%d = bitcast <2 x i16> %c to <4 x i8>
ret <4 x i8> %d
}
define <2 x i16> @bitcast_v4i8_v2i16(<4 x i8> %a, <4 x i8> %b){
-; CHECK-LABEL: bitcast_v4i8_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: add x8, sp, #12
-; CHECK-NEXT: uzp1 v0.8b, v0.8b, v0.8b
-; CHECK-NEXT: str s0, [sp, #12]
-; CHECK-NEXT: ld1 { v0.h }[0], [x8]
-; CHECK-NEXT: orr x8, x8, #0x2
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bitcast_v4i8_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #16
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: add x8, sp, #12
+; CHECK-SD-NEXT: uzp1 v0.8b, v0.8b, v0.8b
+; CHECK-SD-NEXT: str s0, [sp, #12]
+; CHECK-SD-NEXT: ld1 { v0.h }[0], [x8]
+; CHECK-SD-NEXT: orr x8, x8, #0x2
+; CHECK-SD-NEXT: ld1 { v0.h }[2], [x8]
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bitcast_v4i8_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov h3, v0.h[3]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[3], v3.h[0]
+; CHECK-GI-NEXT: xtn v0.8b, v0.8h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%c = add <4 x i8> %a, %b
%d = bitcast <4 x i8> %c to <2 x i16>
ret <2 x i16> %d
diff --git a/llvm/test/CodeGen/AArch64/bswap.ll b/llvm/test/CodeGen/AArch64/bswap.ll
index f4221accfcbc..071613b9cc01 100644
--- a/llvm/test/CodeGen/AArch64/bswap.ll
+++ b/llvm/test/CodeGen/AArch64/bswap.ll
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI: warning: Instruction selection used fallback path for bswap_v2i16
+; RUN: llc -mtriple=aarch64 -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; ====== Scalar Tests =====
define i16 @bswap_i16(i16 %a){
@@ -103,11 +101,23 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
define <2 x i16> @bswap_v2i16(<2 x i16> %a){
-; CHECK-LABEL: bswap_v2i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: rev32 v0.8b, v0.8b
-; CHECK-NEXT: ushr v0.2s, v0.2s, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: bswap_v2i16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: rev32 v0.8b, v0.8b
+; CHECK-SD-NEXT: ushr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: bswap_v2i16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: rev16 v0.8b, v0.8b
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
entry:
%res = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %a)
ret <2 x i16> %res
diff --git a/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir b/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
index 9040937d027d..1592c86e267f 100644
--- a/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
+++ b/llvm/test/CodeGen/AArch64/clear-dead-implicit-def-impdef.mir
@@ -3,6 +3,8 @@
---
name: func
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0:
liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6
diff --git a/llvm/test/CodeGen/AArch64/dllexport.ll b/llvm/test/CodeGen/AArch64/dllexport.ll
index 81ba674a0ded..580fb5fd9e79 100644
--- a/llvm/test/CodeGen/AArch64/dllexport.ll
+++ b/llvm/test/CodeGen/AArch64/dllexport.ll
@@ -1,5 +1,7 @@
; RUN: llc -mtriple aarch64-windows-gnu -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-GNU
; RUN: llc -mtriple aarch64-windows-msvc -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MSVC
+; RUN: llc -mtriple arm64ec-windows-gnu -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-GNU-EC
+; RUN: llc -mtriple arm64ec-windows-msvc -filetype asm -o - %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MSVC-EC
define void @f() {
ret void
@@ -71,3 +73,40 @@ define weak_odr dllexport void @l() {
; CHECK-MSVC: .ascii " /EXPORT:s"
; CHECK-MSVC: .ascii " /EXPORT:t"
; CHECK-MSVC: .ascii " /EXPORT:u"
+
+; CHECK-GNU-EC-NOT: -export:f
+; CHECK-GNU-EC-NOT: -export:#f,EXPORTAS,f
+; CHECK-GNU-EC: .ascii " -export:#g,EXPORTAS,g
+; CHECK-GNU-EC: .ascii " -export:#h,EXPORTAS,h
+; CHECK-GNU-EC-NOT: -export:i
+; CHECK-GNU-EC-NOT: -export:#i,EXPORTAS,i
+; CHECK-GNU-EC: .ascii " -export:#j,EXPORTAS,j"
+; CHECK-GNU-EC: .ascii " -export:#k,EXPORTAS,k"
+; CHECK-GNU-EC: .ascii " -export:#l,EXPORTAS,l"
+; CHECK-GNU-EC: .ascii " -export:m,data"
+; CHECK-GNU-EC: .ascii " -export:n,data"
+; CHECK-GNU-EC: .ascii " -export:o,data"
+; CHECK-GNU-EC: .ascii " -export:p,data"
+; CHECK-GNU-EC: .ascii " -export:q,data"
+; CHECK-GNU-EC: .ascii " -export:r"
+; CHECK-GNU-EC: .ascii " -export:s"
+; CHECK-GNU-EC: .ascii " -export:t"
+; CHECK-GNU-EC: .ascii " -export:u"
+; CHECK-MSVC-EC-NOT: /EXPORT:f
+; CHECK-MSVC-EC-NOT: /EXPORT:#f,EXPORTAS,f
+; CHECK-MSVC-EC: .ascii " /EXPORT:#g,EXPORTAS,g"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#h,EXPORTAS,h"
+; CHECK-MSVC-EC-NOT: /EXPORT:i
+; CHECK-MSVC-EC-NOT: /EXPORT:#i,EXPORTAS,i
+; CHECK-MSVC-EC: .ascii " /EXPORT:#j,EXPORTAS,j"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#k,EXPORTAS,k"
+; CHECK-MSVC-EC: .ascii " /EXPORT:#l,EXPORTAS,l"
+; CHECK-MSVC-EC: .ascii " /EXPORT:m,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:n,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:o,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:p,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:q,DATA"
+; CHECK-MSVC-EC: .ascii " /EXPORT:r"
+; CHECK-MSVC-EC: .ascii " /EXPORT:s"
+; CHECK-MSVC-EC: .ascii " /EXPORT:t"
+; CHECK-MSVC-EC: .ascii " /EXPORT:u"
diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll
index 7af01b53dae7..01585d02adcb 100644
--- a/llvm/test/CodeGen/AArch64/fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/fptoi.ll
@@ -1,13 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
-; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
-
-; CHECK-GI-FP16: warning: Instruction selection used fallback path for fptos_v2f16_v2i16
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i16
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptos_v2f16_v2i8
-; CHECK-GI-FP16-NEXT: warning: Instruction selection used fallback path for fptou_v2f16_v2i8
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
define i64 @fptos_f64_i64(double %a) {
; CHECK-LABEL: fptos_f64_i64:
@@ -5184,8 +5179,13 @@ define <2 x i16> @fptos_v2f16_v2i16(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptos_v2f16_v2i16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5212,8 +5212,13 @@ define <2 x i16> @fptou_v2f16_v2i16(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptou_v2f16_v2i16:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5658,8 +5663,13 @@ define <2 x i8> @fptos_v2f16_v2i8(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptos_v2f16_v2i8:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzs v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5686,8 +5696,13 @@ define <2 x i8> @fptou_v2f16_v2i8(<2 x half> %a) {
;
; CHECK-GI-FP16-LABEL: fptou_v2f16_v2i8:
; CHECK-GI-FP16: // %bb.0: // %entry
-; CHECK-GI-FP16-NEXT: fcvtl v0.4s, v0.4h
-; CHECK-GI-FP16-NEXT: fcvtzs v0.4s, v0.4s
+; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: fcvtzu v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/hadd-combine.ll b/llvm/test/CodeGen/AArch64/hadd-combine.ll
index e12502980790..491bf40ea4aa 100644
--- a/llvm/test/CodeGen/AArch64/hadd-combine.ll
+++ b/llvm/test/CodeGen/AArch64/hadd-combine.ll
@@ -341,6 +341,18 @@ define <8 x i16> @sub_fixedwidth_v4i32(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %res
}
+define <8 x i16> @srhadd_fixedwidth_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: srhadd_fixedwidth_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: srhadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %or = or <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = sub <8 x i16> %or, %srl
+ ret <8 x i16> %res
+}
+
define <8 x i16> @rhaddu_base(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: rhaddu_base:
; CHECK: // %bb.0:
@@ -879,6 +891,18 @@ define <8 x i16> @uhadd_fixedwidth_v4i32(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %res
}
+define <8 x i16> @shadd_fixedwidth_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; CHECK-LABEL: shadd_fixedwidth_v8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: shadd v0.8h, v0.8h, v1.8h
+; CHECK-NEXT: ret
+ %and = and <8 x i16> %a0, %a1
+ %xor = xor <8 x i16> %a0, %a1
+ %srl = ashr <8 x i16> %xor, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %res = add <8 x i16> %and, %srl
+ ret <8 x i16> %res
+}
+
declare <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8>, <8 x i8>)
declare <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16>, <4 x i16>)
declare <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir b/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
index aa94a03786f5..47aa34e3c011 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
+++ b/llvm/test/CodeGen/AArch64/implicit-def-remat-requires-impdef-check.mir
@@ -22,6 +22,7 @@
name: inst_stores_to_dead_spill_implicit_def_impdef
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
@@ -59,6 +60,7 @@ body: |
name: inst_stores_to_dead_spill_movimm_impdef
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
diff --git a/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir b/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
index e5395b20afd4..a5d74ef75f0a 100644
--- a/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
+++ b/llvm/test/CodeGen/AArch64/implicit-def-with-impdef-greedy-assert.mir
@@ -4,6 +4,8 @@
---
name: widget
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
jumpTable:
kind: label-difference32
entries:
diff --git a/llvm/test/CodeGen/AArch64/insert-subvector.ll b/llvm/test/CodeGen/AArch64/insert-subvector.ll
index d7656e1cd341..6828fa9f1508 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector.ll
@@ -374,18 +374,115 @@ define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, ptr %a) {
ret <16 x i8> %s2
}
+define <8 x i8> @load_v8i8_2_1(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[0], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_15(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_15:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: adrp x8, .LCPI33_0
+; CHECK-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT: tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 0, i32 1, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_2(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 0, i32 1, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_3(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[2], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 14, i32 15>
+ ret <8 x i8> %s2
+}
+
+define <8 x i8> @load_v8i8_2_4(float %tmp, <8 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v8i8_2_4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr h2, [x0]
+; CHECK-NEXT: mov v0.h[3], v2.h[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 1>
+ ret <8 x i8> %s2
+}
+
+define <4 x i8> @load_v4i8_2_1(float %tmp, <4 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v4i8_2_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT: zip1 v0.8b, v0.8b, v0.8b
+; CHECK-NEXT: mov v0.s[1], v1.s[1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %s2 = shufflevector <4 x i8> %s1, <4 x i8> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x i8> %s2
+}
+
+define <4 x i8> @load_v4i8_2_2(float %tmp, <4 x i8> %b, ptr %a) {
+; CHECK-LABEL: load_v4i8_2_2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr h0, [x0]
+; CHECK-NEXT: zip1 v2.8b, v0.8b, v0.8b
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %l = load <2 x i8>, ptr %a
+ %s1 = shufflevector <2 x i8> %l, <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %s2 = shufflevector <4 x i8> %s1, <4 x i8> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
+ ret <4 x i8> %s2
+}
+
; i16
define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[0], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[0], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -396,14 +493,10 @@ define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_15:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
; CHECK-NEXT: // kill: def $q1 killed $q1 def $q0_q1
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: adrp x8, .LCPI33_0
-; CHECK-NEXT: ld1 { v2.h }[2], [x9]
-; CHECK-NEXT: xtn v0.4h, v2.4s
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT: adrp x8, .LCPI40_0
+; CHECK-NEXT: ldr s0, [x0]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI40_0]
; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
@@ -415,13 +508,8 @@ define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[1], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -432,13 +520,8 @@ define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_3:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[2], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[2], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -449,13 +532,8 @@ define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v8i16_2_4:
; CHECK: // %bb.0:
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: add x9, x0, #2
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: ld1 { v0.h }[2], [x9]
-; CHECK-NEXT: xtn v2.4h, v0.4s
; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.s[3], v2.s[0]
+; CHECK-NEXT: ld1 { v0.s }[3], [x0]
; CHECK-NEXT: ret
%l = load <2 x i16>, ptr %a
%s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -466,11 +544,8 @@ define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v4i16_2_1:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1 { v0.h }[0], [x0]
-; CHECK-NEXT: add x8, x0, #2
+; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: uzp1 v0.4h, v0.4h, v0.4h
; CHECK-NEXT: mov v0.s[1], v1.s[1]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
@@ -483,11 +558,8 @@ define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, ptr %a) {
; CHECK-LABEL: load_v4i16_2_2:
; CHECK: // %bb.0:
-; CHECK-NEXT: ld1 { v0.h }[0], [x0]
-; CHECK-NEXT: add x8, x0, #2
-; CHECK-NEXT: ld1 { v0.h }[2], [x8]
-; CHECK-NEXT: uzp1 v2.4h, v0.4h, v0.4h
; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ldr s2, [x0]
; CHECK-NEXT: mov v0.s[1], v2.s[0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index ceac37f91238..f5a7b5dc9f49 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -4,13 +4,6 @@
; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
-; CHECK-GI: warning: Instruction selection used fallback path for stofp_v3i8_v3f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f32
-; CHECK-GI-NOFP16-NEXT: warning: Instruction selection used fallback path for stofp_v3i8_v3f16
-; CHECK-GI-NOFP16-NEXT: warning: Instruction selection used fallback path for utofp_v3i8_v3f16
-
define double @stofp_i64_f64(i64 %a) {
; CHECK-LABEL: stofp_i64_f64:
; CHECK: // %bb.0: // %entry
@@ -1754,47 +1747,109 @@ entry:
}
define <3 x double> @stofp_v3i8_v3f64(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3f64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: fmov s1, w2
-; CHECK-NEXT: mov v0.s[1], w1
-; CHECK-NEXT: shl v1.2s, v1.2s, #24
-; CHECK-NEXT: sshr v1.2s, v1.2s, #24
-; CHECK-NEXT: shl v0.2s, v0.2s, #24
-; CHECK-NEXT: sshll v1.2d, v1.2s, #0
-; CHECK-NEXT: sshr v0.2s, v0.2s, #24
-; CHECK-NEXT: scvtf v2.2d, v1.2d
-; CHECK-NEXT: sshll v0.2d, v0.2s, #0
-; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
-; CHECK-NEXT: scvtf v0.2d, v0.2d
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: stofp_v3i8_v3f64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: fmov s1, w2
+; CHECK-SD-NEXT: mov v0.s[1], w1
+; CHECK-SD-NEXT: shl v1.2s, v1.2s, #24
+; CHECK-SD-NEXT: sshr v1.2s, v1.2s, #24
+; CHECK-SD-NEXT: shl v0.2s, v0.2s, #24
+; CHECK-SD-NEXT: sshll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT: sshr v0.2s, v0.2s, #24
+; CHECK-SD-NEXT: scvtf v2.2d, v1.2d
+; CHECK-SD-NEXT: sshll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-SD-NEXT: scvtf v0.2d, v0.2d
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: stofp_v3i8_v3f64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: smov x8, v0.s[0]
+; CHECK-GI-NEXT: smov x9, v0.s[1]
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: fmov d1, x8
+; CHECK-GI-NEXT: smov x8, v0.s[0]
+; CHECK-GI-NEXT: mov v1.d[1], x9
+; CHECK-GI-NEXT: smov x9, v0.s[1]
+; CHECK-GI-NEXT: fmov d2, x8
+; CHECK-GI-NEXT: scvtf v0.2d, v1.2d
+; CHECK-GI-NEXT: mov v2.d[1], x9
+; CHECK-GI-NEXT: mov d1, v0.d[1]
+; CHECK-GI-NEXT: scvtf v2.2d, v2.2d
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-GI-NEXT: ret
entry:
%c = sitofp <3 x i8> %a to <3 x double>
ret <3 x double> %c
}
define <3 x double> @utofp_v3i8_v3f64(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3f64:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: movi d1, #0x0000ff000000ff
-; CHECK-NEXT: fmov s2, w2
-; CHECK-NEXT: mov v0.s[1], w1
-; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT: and v1.8b, v2.8b, v1.8b
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: ushll v1.2d, v1.2s, #0
-; CHECK-NEXT: ucvtf v0.2d, v0.2d
-; CHECK-NEXT: ucvtf v2.2d, v1.2d
-; CHECK-NEXT: // kill: def $d2 killed $d2 killed $q2
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: utofp_v3i8_v3f64:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: movi d1, #0x0000ff000000ff
+; CHECK-SD-NEXT: fmov s2, w2
+; CHECK-SD-NEXT: mov v0.s[1], w1
+; CHECK-SD-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT: and v1.8b, v2.8b, v1.8b
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: ushll v1.2d, v1.2s, #0
+; CHECK-SD-NEXT: ucvtf v0.2d, v0.2d
+; CHECK-SD-NEXT: ucvtf v2.2d, v1.2d
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-SD-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: utofp_v3i8_v3f64:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov w8, v0.s[0]
+; CHECK-GI-NEXT: mov w9, v0.s[1]
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: fmov d1, x8
+; CHECK-GI-NEXT: mov w8, v0.s[0]
+; CHECK-GI-NEXT: mov v1.d[1], x9
+; CHECK-GI-NEXT: mov w9, v0.s[1]
+; CHECK-GI-NEXT: fmov d2, x8
+; CHECK-GI-NEXT: ucvtf v0.2d, v1.2d
+; CHECK-GI-NEXT: mov v2.d[1], x9
+; CHECK-GI-NEXT: mov d1, v0.d[1]
+; CHECK-GI-NEXT: ucvtf v2.2d, v2.2d
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d2 killed $d2 killed $q2
+; CHECK-GI-NEXT: ret
entry:
%c = uitofp <3 x i8> %a to <3 x double>
ret <3 x double> %c
@@ -3372,31 +3427,71 @@ entry:
}
define <3 x float> @stofp_v3i8_v3f32(<3 x i8> %a) {
-; CHECK-LABEL: stofp_v3i8_v3f32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-NEXT: scvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: stofp_v3i8_v3f32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: scvtf v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: stofp_v3i8_v3f32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: scvtf v0.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%c = sitofp <3 x i8> %a to <3 x float>
ret <3 x float> %c
}
define <3 x float> @utofp_v3i8_v3f32(<3 x i8> %a) {
-; CHECK-LABEL: utofp_v3i8_v3f32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-NEXT: ucvtf v0.4s, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: utofp_v3i8_v3f32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w1
+; CHECK-SD-NEXT: mov v0.h[2], w2
+; CHECK-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-SD-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: utofp_v3i8_v3f32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fmov s0, w0
+; CHECK-GI-NEXT: fmov s1, w1
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: fmov s1, w2
+; CHECK-GI-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-GI-NEXT: ucvtf v0.4s, v0.4s
+; CHECK-GI-NEXT: ret
entry:
%c = uitofp <3 x i8> %a to <3 x float>
ret <3 x float> %c
@@ -5582,12 +5677,18 @@ define <2 x half> @utofp_v2i8_v2f16(<2 x i8> %a) {
; CHECK-GI-FP16-LABEL: utofp_v2i8_v2f16:
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-GI-FP16-NEXT: mov w8, v0.s[1]
-; CHECK-GI-FP16-NEXT: fmov w9, s0
-; CHECK-GI-FP16-NEXT: and w9, w9, #0xff
-; CHECK-GI-FP16-NEXT: and w8, w8, #0xff
-; CHECK-GI-FP16-NEXT: ucvtf h0, w9
-; CHECK-GI-FP16-NEXT: ucvtf h1, w8
+; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-FP16-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-FP16-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: movi d1, #0x0000ff000000ff
+; CHECK-GI-FP16-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-FP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
+; CHECK-GI-FP16-NEXT: mov h1, v0.h[1]
; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
; CHECK-GI-FP16-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-FP16-NEXT: ret
@@ -5622,11 +5723,20 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-NOFP16-LABEL: stofp_v3i8_v3f16:
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fmov s0, w0
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w1
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w2
+; CHECK-GI-NOFP16-NEXT: fmov s1, w1
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: fmov s1, w2
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
; CHECK-GI-NOFP16-NEXT: shl v0.4h, v0.4h, #8
; CHECK-GI-NOFP16-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NOFP16-NEXT: mov v1.h[1], v3.h[0]
; CHECK-GI-NOFP16-NEXT: sshll v0.4s, v0.4h, #0
+; CHECK-GI-NOFP16-NEXT: sshll v1.4s, v1.4h, #0
+; CHECK-GI-NOFP16-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NOFP16-NEXT: scvtf v0.4s, v0.4s
; CHECK-GI-NOFP16-NEXT: fcvtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
@@ -5635,11 +5745,10 @@ define <3 x half> @stofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: fmov s0, w0
; CHECK-GI-FP16-NEXT: fmov s1, w1
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.b[1], v1.b[0]
; CHECK-GI-FP16-NEXT: fmov s1, w2
-; CHECK-GI-FP16-NEXT: mov v0.h[2], v1.h[0]
-; CHECK-GI-FP16-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-GI-FP16-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-FP16-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-FP16-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-GI-FP16-NEXT: scvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ret
entry:
@@ -5671,10 +5780,20 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-NOFP16-LABEL: utofp_v3i8_v3f16:
; CHECK-GI-NOFP16: // %bb.0: // %entry
; CHECK-GI-NOFP16-NEXT: fmov s0, w0
-; CHECK-GI-NOFP16-NEXT: mov v0.h[1], w1
-; CHECK-GI-NOFP16-NEXT: mov v0.h[2], w2
-; CHECK-GI-NOFP16-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-GI-NOFP16-NEXT: fmov s1, w1
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: fmov s1, w2
+; CHECK-GI-NOFP16-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-GI-NOFP16-NEXT: movi d1, #0xff00ff00ff00ff
+; CHECK-GI-NOFP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NOFP16-NEXT: mov s1, v0.s[1]
+; CHECK-GI-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NOFP16-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NOFP16-NEXT: mov v1.h[1], v3.h[0]
; CHECK-GI-NOFP16-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NOFP16-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NOFP16-NEXT: mov v0.d[1], v1.d[0]
; CHECK-GI-NOFP16-NEXT: ucvtf v0.4s, v0.4s
; CHECK-GI-NOFP16-NEXT: fcvtn v0.4h, v0.4s
; CHECK-GI-NOFP16-NEXT: ret
@@ -5683,11 +5802,10 @@ define <3 x half> @utofp_v3i8_v3f16(<3 x i8> %a) {
; CHECK-GI-FP16: // %bb.0: // %entry
; CHECK-GI-FP16-NEXT: fmov s0, w0
; CHECK-GI-FP16-NEXT: fmov s1, w1
-; CHECK-GI-FP16-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-FP16-NEXT: mov v0.b[1], v1.b[0]
; CHECK-GI-FP16-NEXT: fmov s1, w2
-; CHECK-GI-FP16-NEXT: mov v0.h[2], v1.h[0]
-; CHECK-GI-FP16-NEXT: movi d1, #0xff00ff00ff00ff
-; CHECK-GI-FP16-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-GI-FP16-NEXT: mov v0.b[2], v1.b[0]
+; CHECK-GI-FP16-NEXT: ushll v0.8h, v0.8b, #0
; CHECK-GI-FP16-NEXT: ucvtf v0.4h, v0.4h
; CHECK-GI-FP16-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/load.ll b/llvm/test/CodeGen/AArch64/load.ll
index 39143e5c53ff..c3c0ec5e3d9d 100644
--- a/llvm/test/CodeGen/AArch64/load.ll
+++ b/llvm/test/CodeGen/AArch64/load.ll
@@ -159,7 +159,8 @@ define <2 x i16> @load_v2i16(ptr %ptr){
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: ldr h0, [x0]
; CHECK-GI-NEXT: ldr h1, [x0, #2]
-; CHECK-GI-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-GI-NEXT: ret
%a = load <2 x i16>, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
index 23cf1dcda839..5b379c2bd562 100644
--- a/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
+++ b/llvm/test/CodeGen/AArch64/machine-cp-sub-reg.mir
@@ -10,7 +10,6 @@ body: |
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $w0
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: $x8 = ORRXrs $xzr, $x0, 0, implicit $w0
; CHECK-NEXT: $w8 = ORRWrs $wzr, $w0, 0, implicit-def $x8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
diff --git a/llvm/test/CodeGen/AArch64/misched-bundle.mir b/llvm/test/CodeGen/AArch64/misched-bundle.mir
new file mode 100644
index 000000000000..a947c04a4229
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/misched-bundle.mir
@@ -0,0 +1,195 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a510 -run-pass=machine-scheduler -debug-only=machine-scheduler %s -o - 2>&1 | FileCheck %s
+# REQUIRES: asserts
+
+# CHECK: SU(0): renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 4
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(6): Out Latency=1
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z0
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(1): renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 4
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Out Latency=1
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z1
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(2): renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 7
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(6): Data Latency=3 Reg=$z2
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(3): renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z3
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(4): renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z4
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(5): renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size, align 1)
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 3
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 3
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z5
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(6): $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0(tied-def 0), killed renamable $z1, killed renamable $z2
+# CHECK-NEXT: # preds left : 4
+# CHECK-NEXT: # succs left : 2
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 4
+# CHECK-NEXT: Depth : 3
+# CHECK-NEXT: Height : 4
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(2): Data Latency=3 Reg=$z2
+# CHECK-NEXT: SU(1): Data Latency=3 Reg=$z1
+# CHECK-NEXT: SU(0): Out Latency=1
+# CHECK-NEXT: SU(0): Data Latency=3 Reg=$z0
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(8): Data Latency=4 Reg=$z0
+# CHECK-NEXT: SU(7): Anti Latency=0
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(7): BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit killed $z4, implicit killed $z3
+# CHECK-NEXT: # preds left : 5
+# CHECK-NEXT: # succs left : 1
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 3
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(6): Anti Latency=0
+# CHECK-NEXT: SU(5): Data Latency=0 Reg=$z5
+# CHECK-NEXT: SU(4): Data Latency=0 Reg=$z4
+# CHECK-NEXT: SU(3): Data Latency=0 Reg=$z3
+# CHECK-NEXT: SU(1): Out Latency=1
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(9): Data Latency=0 Reg=$z1
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(8): ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size, align 1)
+# CHECK-NEXT: # preds left : 7
+# CHECK-NEXT: # succs left : 1
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 7
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(6): Data Latency=4 Reg=$z0
+# CHECK-NEXT: SU(5): Ord Latency=0 Memory
+# CHECK-NEXT: SU(4): Ord Latency=0 Memory
+# CHECK-NEXT: SU(3): Ord Latency=0 Memory
+# CHECK-NEXT: SU(2): Ord Latency=0 Memory
+# CHECK-NEXT: SU(1): Ord Latency=0 Memory
+# CHECK-NEXT: SU(0): Ord Latency=0 Memory
+# CHECK-NEXT: Successors:
+# CHECK-NEXT: SU(9): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: SU(9): ST1H killed renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size, align 1)
+# CHECK-NEXT: # preds left : 8
+# CHECK-NEXT: # succs left : 0
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 1
+# CHECK-NEXT: Depth : 7
+# CHECK-NEXT: Height : 0
+# CHECK-NEXT: Predecessors:
+# CHECK-NEXT: SU(8): Ord Latency=0 Memory
+# CHECK-NEXT: SU(7): Data Latency=0 Reg=$z1
+# CHECK-NEXT: SU(5): Ord Latency=0 Memory
+# CHECK-NEXT: SU(4): Ord Latency=0 Memory
+# CHECK-NEXT: SU(3): Ord Latency=0 Memory
+# CHECK-NEXT: SU(2): Ord Latency=0 Memory
+# CHECK-NEXT: SU(1): Ord Latency=0 Memory
+# CHECK-NEXT: SU(0): Ord Latency=0 Memory
+# CHECK-NEXT: Single Issue : false;
+# CHECK-NEXT: ExitSU: RET_ReallyLR
+# CHECK-NEXT: # preds left : 0
+# CHECK-NEXT: # succs left : 0
+# CHECK-NEXT: # rdefs left : 0
+# CHECK-NEXT: Latency : 0
+# CHECK-NEXT: Depth : 0
+# CHECK-NEXT: Height : 0
+
+---
+name: test
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $p0, $x0, $x1, $x2, $x10, $x11, $x12, $x13
+
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $p0, $x0, $x1, $x2, $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0, renamable $z1, killed renamable $z2
+ ; CHECK-NEXT: renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size, align 1)
+ ; CHECK-NEXT: ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size, align 1)
+ ; CHECK-NEXT: BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit $z4, implicit $z3 {
+ ; CHECK-NEXT: $z1 = MOVPRFX_ZZ $z5
+ ; CHECK-NEXT: $z1 = FMLA_ZPmZZ_H renamable $p0, internal $z1, renamable $z4, renamable $z3
+ ; CHECK-NEXT: }
+ ; CHECK-NEXT: ST1H renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size, align 1)
+ ; CHECK-NEXT: RET_ReallyLR
+
+ renamable $z0 = LD1H renamable $p0, renamable $x1, renamable $x10 :: (load unknown-size)
+ renamable $z1 = LD1H renamable $p0, renamable $x2, renamable $x10 :: (load unknown-size)
+ renamable $z2 = LD1H renamable $p0, renamable $x0, renamable $x10 :: (load unknown-size)
+ renamable $z3 = LD1H renamable $p0, renamable $x11, renamable $x10 :: (load unknown-size)
+ renamable $z4 = LD1H renamable $p0, renamable $x12, renamable $x10 :: (load unknown-size)
+ renamable $z5 = LD1H renamable $p0, renamable $x13, renamable $x10 :: (load unknown-size)
+ $z0 = FMAD_ZPmZZ_H renamable $p0, killed $z0, killed renamable $z1, killed renamable $z2
+ BUNDLE implicit-def $z1, implicit-def $q1, implicit-def $d1, implicit-def $s1, implicit-def $h1, implicit-def $b1, implicit $z5, implicit $p0, implicit killed $z4, implicit killed $z3 {
+ $z1 = MOVPRFX_ZZ $z5
+ $z1 = FMLA_ZPmZZ_H renamable $p0, internal killed $z1, killed renamable $z4, killed renamable $z3
+ }
+ ST1H killed renamable $z0, renamable $p0, renamable $x0, renamable $x10 :: (store unknown-size)
+ ST1H killed renamable $z1, renamable $p0, renamable $x13, renamable $x10 :: (store unknown-size)
+ RET_ReallyLR
+
+...
diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
index 01620652301e..57f220f621cf 100644
--- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -1117,8 +1117,15 @@ define <4 x i16> @vselect_constant_cond_zero_v4i16(<4 x i16> %a) {
;
; CHECK-GI-LABEL: vselect_constant_cond_zero_v4i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI84_0
-; CHECK-GI-NEXT: ldr d1, [x8, :lo12:.LCPI84_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: mov v3.16b, v1.16b
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v1.8h, v3.8b, #0
; CHECK-GI-NEXT: shl v1.4h, v1.4h, #15
; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #15
; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b
@@ -1137,8 +1144,16 @@ define <4 x i32> @vselect_constant_cond_zero_v4i32(<4 x i32> %a) {
;
; CHECK-GI-LABEL: vselect_constant_cond_zero_v4i32:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI85_0
-; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI85_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: mov v3.16b, v1.16b
+; CHECK-GI-NEXT: mov v3.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v2.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v2.d[0]
; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31
; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31
; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b
@@ -1181,8 +1196,15 @@ define <4 x i16> @vselect_constant_cond_v4i16(<4 x i16> %a, <4 x i16> %b) {
;
; CHECK-GI-LABEL: vselect_constant_cond_v4i16:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI87_0
-; CHECK-GI-NEXT: ldr d2, [x8, :lo12:.LCPI87_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: mov v4.16b, v2.16b
+; CHECK-GI-NEXT: mov v4.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v4.b[2], v3.b[0]
+; CHECK-GI-NEXT: mov v4.b[3], v2.b[0]
+; CHECK-GI-NEXT: ushll v2.8h, v4.8b, #0
; CHECK-GI-NEXT: shl v2.4h, v2.4h, #15
; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #15
; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b
@@ -1201,8 +1223,16 @@ define <4 x i32> @vselect_constant_cond_v4i32(<4 x i32> %a, <4 x i32> %b) {
;
; CHECK-GI-LABEL: vselect_constant_cond_v4i32:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI88_0
-; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI88_0]
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: mov v4.16b, v2.16b
+; CHECK-GI-NEXT: mov v4.h[1], v3.h[0]
+; CHECK-GI-NEXT: mov v3.h[1], v2.h[0]
+; CHECK-GI-NEXT: ushll v2.4s, v4.4h, #0
+; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
+; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31
; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31
; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 632b6b326250..dbb5dfebd44a 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -2870,6 +2870,107 @@ define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
ret <2 x i64> %tmp4
}
+define <2 x i32> @fcmal2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-SD-LABEL: fcmal2xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal2xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v0.2s, #1
+; CHECK-GI-NEXT: shl v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-SD-LABEL: fcmal4xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal4xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v0.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmal2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-SD-LABEL: fcmal2xdouble:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0xffffffffffffffff
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmal2xdouble:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: adrp x8, .LCPI221_0
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI221_0]
+; CHECK-GI-NEXT: shl v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp true <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmnv2xfloat(<2 x float> %A, <2 x float> %B) {
+; CHECK-LABEL: fcmnv2xfloat:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: ret
+ %tmp3 = fcmp false <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) {
+; CHECK-SD-LABEL: fcmnv4xfloat:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: fcmnv4xfloat:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v1.h[1], v0.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v0.h[0]
+; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
+; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31
+; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT: ret
+ %tmp3 = fcmp false <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmnv2xdouble(<2 x double> %A, <2 x double> %B) {
+; CHECK-LABEL: fcmnv2xdouble:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: ret
+ %tmp3 = fcmp false <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
; CHECK-LABEL: fcmoeqz2xfloat:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/overflow.ll b/llvm/test/CodeGen/AArch64/overflow.ll
index 1fd60c030979..977141f2b84f 100644
--- a/llvm/test/CodeGen/AArch64/overflow.ll
+++ b/llvm/test/CodeGen/AArch64/overflow.ll
@@ -64,21 +64,10 @@ entry:
}
define i32 @saddo.select.i64(i32 %v1, i32 %v2, i1 %v3, i64 %v4, i64 %v5) {
-; SDAG-LABEL: saddo.select.i64:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, w1
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: saddo.select.i64:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #13 // =0xd
-; GISEL-NEXT: and x9, x3, #0xc
-; GISEL-NEXT: and x8, x4, x8
-; GISEL-NEXT: cmn x9, x8
-; GISEL-NEXT: cset w8, vs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w1, ne
-; GISEL-NEXT: ret
+; CHECK-LABEL: saddo.select.i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
entry:
%lhs = and i64 %v4, 12
%rhs = and i64 %v5, 13
@@ -89,22 +78,10 @@ entry:
}
define i32 @uaddo.select.i64(i32 %v1, i32 %v2, i1 %v3, i64 %v4, i64 %v5) {
-; SDAG-LABEL: uaddo.select.i64:
-; SDAG: // %bb.0: // %entry
-; SDAG-NEXT: mov w0, w1
-; SDAG-NEXT: ret
-;
-; GISEL-LABEL: uaddo.select.i64:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: mov w8, #9 // =0x9
-; GISEL-NEXT: mov w9, #10 // =0xa
-; GISEL-NEXT: and x8, x3, x8
-; GISEL-NEXT: and x9, x4, x9
-; GISEL-NEXT: cmn x8, x9
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w1, ne
-; GISEL-NEXT: ret
+; CHECK-LABEL: uaddo.select.i64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
entry:
%lhs = and i64 %v4, 9
%rhs = and i64 %v5, 10
diff --git a/llvm/test/CodeGen/AArch64/peephole-movd.mir b/llvm/test/CodeGen/AArch64/peephole-movd.mir
new file mode 100644
index 000000000000..bd7f0ab3f044
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/peephole-movd.mir
@@ -0,0 +1,60 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-mi-peephole-opt -o - -mtriple=aarch64-unknown-linux -verify-machineinstrs %s | FileCheck %s
+
+---
+name: remove_kill_flags
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $w0
+ ; CHECK-LABEL: name: remove_kill_flags
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+ ; CHECK-NEXT: [[UQSHLv8i8_shift:%[0-9]+]]:fpr64 = UQSHLv8i8_shift killed [[COPY]], 1
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr128 = SUBREG_TO_REG 0, [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[TBLv8i8One:%[0-9]+]]:fpr64 = TBLv8i8One killed [[SUBREG_TO_REG]], [[UQSHLv8i8_shift]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:fpr128 = MOVIv2d_ns 0
+ %1:fpr64 = COPY %0.dsub:fpr128
+ %2:fpr64 = UQSHLv8i8_shift killed %1:fpr64, 1
+ %3:fpr64 = FMOVDr %2:fpr64
+ %4:fpr128 = SUBREG_TO_REG 0, killed %3:fpr64, %subreg.dsub
+ %5:fpr64 = TBLv8i8One killed %4:fpr128, %2:fpr64
+ %7:fpr128 = IMPLICIT_DEF
+ %6:fpr128 = INSERT_SUBREG %7:fpr128, killed %2:fpr64, %subreg.dsub
+ RET_ReallyLR implicit $w0
+...
+---
+name: remove_kill_flags2
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $w0
+ ; CHECK-LABEL: name: remove_kill_flags2
+ ; CHECK: liveins: $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+ ; CHECK-NEXT: [[UQSHLv8i8_shift:%[0-9]+]]:fpr64 = UQSHLv8i8_shift killed [[COPY]], 1
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr128 = SUBREG_TO_REG 0, [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[UQSHLv8i8_shift]], %subreg.dsub
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %0:fpr128 = MOVIv2d_ns 0
+ %1:fpr64 = COPY %0.dsub:fpr128
+ %2:fpr64 = UQSHLv8i8_shift killed %1:fpr64, 1
+ %3:fpr64 = FMOVDr %2:fpr64
+ %4:fpr128 = SUBREG_TO_REG 0, %3:fpr64, %subreg.dsub
+ %7:fpr128 = IMPLICIT_DEF
+ %6:fpr128 = INSERT_SUBREG %7:fpr128, killed %2:fpr64, %subreg.dsub
+ %9:fpr128 = IMPLICIT_DEF
+ %8:fpr128 = INSERT_SUBREG %9:fpr128, killed %3:fpr64, %subreg.dsub
+ RET_ReallyLR implicit $w0
+...
+
diff --git a/llvm/test/CodeGen/AArch64/pr86717.ll b/llvm/test/CodeGen/AArch64/pr86717.ll
new file mode 100644
index 000000000000..aa8be954be72
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr86717.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+
+define <16 x i8> @f(i32 %0) {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: sub w8, w8, w0
+; CHECK-NEXT: bfxil x9, x8, #0, #4
+; CHECK-NEXT: mov w8, #3 // =0x3
+; CHECK-NEXT: str q0, [sp]
+; CHECK-NEXT: strb w8, [x9]
+; CHECK-NEXT: ldr q0, [sp], #16
+; CHECK-NEXT: ret
+ %2 = sub nuw i32 1, %0
+ %3 = insertelement <16 x i8> zeroinitializer, i8 3, i32 %2
+ ret <16 x i8> %3
+}
diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index 932b230726a3..934ff44900c0 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -147,10 +147,10 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
; CHECK-NEXT: mov v19.16b, v23.16b
; CHECK-NEXT: mov v3.d[1], x20
; CHECK-NEXT: mov v23.16b, v27.16b
-; CHECK-NEXT: mov v27.16b, v9.16b
-; CHECK-NEXT: mul x15, x4, x5
; CHECK-NEXT: add v27.2d, v9.2d, v1.2d
+; CHECK-NEXT: mul x15, x4, x5
; CHECK-NEXT: str q11, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT: mov v11.16b, v15.16b
; CHECK-NEXT: mov v4.d[1], x22
; CHECK-NEXT: add v19.2d, v19.2d, v1.2d
; CHECK-NEXT: add v7.2d, v7.2d, v1.2d
@@ -171,9 +171,7 @@ define dso_local void @run_test() local_unnamed_addr uwtable {
; CHECK-NEXT: mov v10.16b, v26.16b
; CHECK-NEXT: mov v14.d[1], x13
; CHECK-NEXT: mov v22.16b, v31.16b
-; CHECK-NEXT: mov v20.16b, v8.16b
; CHECK-NEXT: ldp q26, q31, [sp] // 32-byte Folded Reload
-; CHECK-NEXT: mov v11.16b, v15.16b
; CHECK-NEXT: mov v0.d[1], x12
; CHECK-NEXT: add v13.2d, v13.2d, v14.2d
; CHECK-NEXT: add v31.2d, v31.2d, v14.2d
diff --git a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
index bb9546af8bb7..8be63b04d8ce 100644
--- a/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
+++ b/llvm/test/CodeGen/AArch64/setcc_knownbits.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i1 @load_bv_v4i8(i1 zeroext %a) {
; CHECK-LABEL: load_bv_v4i8:
@@ -11,18 +12,31 @@ define i1 @load_bv_v4i8(i1 zeroext %a) {
}
define noundef i1 @logger(i32 noundef %logLevel, ptr %ea, ptr %pll) {
-; CHECK-LABEL: logger:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr w8, [x2]
-; CHECK-NEXT: cmp w8, w0
-; CHECK-NEXT: b.ls .LBB1_2
-; CHECK-NEXT: // %bb.1:
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_2: // %land.rhs
-; CHECK-NEXT: ldr x8, [x1]
-; CHECK-NEXT: ldrb w0, [x8]
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: logger:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: ldr w8, [x2]
+; CHECK-SD-NEXT: cmp w8, w0
+; CHECK-SD-NEXT: b.ls .LBB1_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB1_2: // %land.rhs
+; CHECK-SD-NEXT: ldr x8, [x1]
+; CHECK-SD-NEXT: ldrb w0, [x8]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: logger:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: ldr w8, [x2]
+; CHECK-GI-NEXT: cmp w8, w0
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.hi .LBB1_2
+; CHECK-GI-NEXT: // %bb.1: // %land.rhs
+; CHECK-GI-NEXT: ldr x8, [x1]
+; CHECK-GI-NEXT: ldrb w8, [x8]
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: .LBB1_2: // %land.end
+; CHECK-GI-NEXT: ret
entry:
%0 = load i32, ptr %pll, align 4
%cmp.not = icmp ugt i32 %0, %logLevel
@@ -44,12 +58,18 @@ land.end: ; preds = %land.rhs, %entry
declare i64 @llvm.ctlz.i64(i64 %in, i1)
define i1 @lshr_ctlz_undef_cmpeq_one_i64(i64 %in) {
-; CHECK-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: clz x8, x0
-; CHECK-NEXT: lsr x0, x8, #6
-; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: clz x8, x0
+; CHECK-SD-NEXT: lsr x0, x8, #6
+; CHECK-SD-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_ctlz_undef_cmpeq_one_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: clz x8, x0
+; CHECK-GI-NEXT: lsr w0, w8, #6
+; CHECK-GI-NEXT: ret
%ctlz = call i64 @llvm.ctlz.i64(i64 %in, i1 -1)
%lshr = lshr i64 %ctlz, 6
%icmp = icmp eq i64 %lshr, 1
@@ -57,17 +77,30 @@ define i1 @lshr_ctlz_undef_cmpeq_one_i64(i64 %in) {
}
define i32 @PR17487(i1 %tobool) {
-; CHECK-LABEL: PR17487:
-; CHECK: // %bb.0:
-; CHECK-NEXT: dup v0.2s, w0
-; CHECK-NEXT: mov w8, #1 // =0x1
-; CHECK-NEXT: dup v1.2d, x8
-; CHECK-NEXT: ushll v0.2d, v0.2s, #0
-; CHECK-NEXT: bic v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: mov x8, v0.d[1]
-; CHECK-NEXT: cmp x8, #1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: PR17487:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v0.2s, w0
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: dup v1.2d, x8
+; CHECK-SD-NEXT: ushll v0.2d, v0.2s, #0
+; CHECK-SD-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: mov x8, v0.d[1]
+; CHECK-SD-NEXT: cmp x8, #1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: PR17487:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: mov v0.d[1], x0
+; CHECK-GI-NEXT: adrp x8, .LCPI3_0
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-GI-NEXT: bic v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov d0, v0.d[1]
+; CHECK-GI-NEXT: fmov x8, d0
+; CHECK-GI-NEXT: cmp x8, #1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
%tmp = insertelement <2 x i1> undef, i1 %tobool, i32 1
%tmp1 = zext <2 x i1> %tmp to <2 x i64>
%tmp2 = xor <2 x i64> %tmp1, <i64 1, i64 1>
diff --git a/llvm/test/CodeGen/AArch64/shift.ll b/llvm/test/CodeGen/AArch64/shift.ll
index 5287839ee7b7..9c8d3e0f07de 100644
--- a/llvm/test/CodeGen/AArch64/shift.ll
+++ b/llvm/test/CodeGen/AArch64/shift.ll
@@ -1,13 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
-
-; CHECK-GI: warning: Instruction selection used fallback path for shl_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for shl_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ashr_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ashr_v2i16
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lshr_v4i8
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for lshr_v2i16
+; RUN: llc -mtriple=aarch64 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i1 @shl_i1(i1 %0, i1 %1){
; CHECK-SD-LABEL: shl_i1:
@@ -530,11 +523,38 @@ define <2 x i64> @lshr_v2i64(<2 x i64> %0, <2 x i64> %1){
; ===== Vector Larger/Smaller than Legal =====
define <4 x i8> @shl_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: shl_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shl_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shl_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v0.h[1]
+; CHECK-GI-NEXT: mov h3, v1.h[1]
+; CHECK-GI-NEXT: mov h4, v0.h[2]
+; CHECK-GI-NEXT: mov h5, v0.h[3]
+; CHECK-GI-NEXT: mov h6, v1.h[3]
+; CHECK-GI-NEXT: mov v0.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v1.h[2]
+; CHECK-GI-NEXT: mov v1.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v6.b[0]
+; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = shl <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -556,12 +576,27 @@ define <32 x i8> @shl_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @shl_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: shl_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shl_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: ushl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shl_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v0.s[1]
+; CHECK-GI-NEXT: mov s3, v1.s[1]
+; CHECK-GI-NEXT: mov v0.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = shl <2 x i16> %0, %1
ret <2 x i16> %3
}
@@ -633,14 +668,42 @@ define <4 x i64> @shl_v4i64(<4 x i64> %0, <4 x i64> %1){
}
define <4 x i8> @ashr_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: ashr_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: shl v0.4h, v0.4h, #8
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: sshr v0.4h, v0.4h, #8
-; CHECK-NEXT: neg v1.4h, v1.4h
-; CHECK-NEXT: sshl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: ashr_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT: neg v1.4h, v1.4h
+; CHECK-SD-NEXT: sshl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: ashr_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h3, v0.h[1]
+; CHECK-GI-NEXT: mov h4, v1.h[2]
+; CHECK-GI-NEXT: mov h5, v1.h[3]
+; CHECK-GI-NEXT: mov h6, v0.h[3]
+; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: neg v1.8b, v1.8b
+; CHECK-GI-NEXT: sshl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = ashr <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -658,15 +721,31 @@ define <32 x i8> @ashr_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @ashr_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: ashr_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: shl v0.2s, v0.2s, #16
-; CHECK-NEXT: sshr v0.2s, v0.2s, #16
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: neg v1.2s, v1.2s
-; CHECK-NEXT: sshl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: ashr_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: shl v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: sshr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: neg v1.2s, v1.2s
+; CHECK-SD-NEXT: sshl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: ashr_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v1.s[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s3, v0.s[1]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: neg v1.4h, v1.4h
+; CHECK-GI-NEXT: sshl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = ashr <2 x i16> %0, %1
ret <2 x i16> %3
}
@@ -727,13 +806,41 @@ define <4 x i64> @ashr_v4i64(<4 x i64> %0, <4 x i64> %1){
}
define <4 x i8> @lshr_v4i8(<4 x i8> %0, <4 x i8> %1){
-; CHECK-LABEL: lshr_v4i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: bic v1.4h, #255, lsl #8
-; CHECK-NEXT: bic v0.4h, #255, lsl #8
-; CHECK-NEXT: neg v1.4h, v1.4h
-; CHECK-NEXT: ushl v0.4h, v0.4h, v1.4h
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_v4i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: bic v1.4h, #255, lsl #8
+; CHECK-SD-NEXT: bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT: neg v1.4h, v1.4h
+; CHECK-SD-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_v4i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov h2, v1.h[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h3, v0.h[1]
+; CHECK-GI-NEXT: mov h4, v1.h[2]
+; CHECK-GI-NEXT: mov h5, v1.h[3]
+; CHECK-GI-NEXT: mov h6, v0.h[3]
+; CHECK-GI-NEXT: mov v1.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov h2, v0.h[2]
+; CHECK-GI-NEXT: mov v0.b[1], v3.b[0]
+; CHECK-GI-NEXT: mov v1.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v1.b[3], v5.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v6.b[0]
+; CHECK-GI-NEXT: neg v1.8b, v1.8b
+; CHECK-GI-NEXT: ushl v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT: mov b1, v0.b[1]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b3, v0.b[3]
+; CHECK-GI-NEXT: mov v0.b[1], v1.b[0]
+; CHECK-GI-NEXT: mov v0.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v0.b[3], v3.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = lshr <4 x i8> %0, %1
ret <4 x i8> %3
}
@@ -751,14 +858,30 @@ define <32 x i8> @lshr_v32i8(<32 x i8> %0, <32 x i8> %1){
}
define <2 x i16> @lshr_v2i16(<2 x i16> %0, <2 x i16> %1){
-; CHECK-LABEL: lshr_v2i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi d2, #0x00ffff0000ffff
-; CHECK-NEXT: and v1.8b, v1.8b, v2.8b
-; CHECK-NEXT: and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT: neg v1.2s, v1.2s
-; CHECK-NEXT: ushl v0.2s, v0.2s, v1.2s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: lshr_v2i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi d2, #0x00ffff0000ffff
+; CHECK-SD-NEXT: and v1.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT: and v0.8b, v0.8b, v2.8b
+; CHECK-SD-NEXT: neg v1.2s, v1.2s
+; CHECK-SD-NEXT: ushl v0.2s, v0.2s, v1.2s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: lshr_v2i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
+; CHECK-GI-NEXT: mov s2, v1.s[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov s3, v0.s[1]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: neg v1.4h, v1.4h
+; CHECK-GI-NEXT: ushl v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mov h1, v0.h[1]
+; CHECK-GI-NEXT: mov v0.h[1], v1.h[0]
+; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: ret
%3 = lshr <2 x i16> %0, %1
ret <2 x i16> %3
}
diff --git a/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll b/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll
new file mode 100644
index 000000000000..cd5046a9a647
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-avoid-coalescing-locally-streaming.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mattr=+sme -stop-after=finalize-isel < %s | FileCheck %s --check-prefix=CHECK-COALESCER-BARRIER
+; RUN: llc -mattr=+sme -stop-after=virtregrewriter < %s | FileCheck %s --check-prefix=CHECK-REGALLOC
+
+target triple = "aarch64"
+
+define void @dont_coalesce_args(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_args
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: liveins: $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: {{ $}}
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: [[DEF:%[0-9]+]]:zpr = IMPLICIT_DEF
+ ; CHECK-COALESCER-BARRIER-NEXT: [[INSERT_SUBREG:%[0-9]+]]:zpr = INSERT_SUBREG [[DEF]], [[COALESCER_BARRIER_FPR128_]], %subreg.zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: $z0 = COPY [[INSERT_SUBREG]]
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_args
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: liveins: $q0
+ ; CHECK-REGALLOC-NEXT: {{ $}}
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL killed renamable $q0, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR
+ %sa = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %a, i64 0)
+ call void @scalable_args(<vscale x 2 x i64> %sa)
+ ret void
+}
+
+define <2 x i64> @dont_coalesce_res() "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_res
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_res, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $z0
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:zpr = COPY $z0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY [[COPY]].zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY1]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def $q0, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: $q0 = COPY [[COALESCER_BARRIER_FPR128_]]
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR implicit $q0
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_res
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_res, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL renamable $q0, implicit killed $z0
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def dead $q0, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR implicit $q0
+ %sa = call <vscale x 2 x i64> @scalable_res()
+ %res = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %sa, i64 0)
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @dont_coalesce_arg_that_is_also_res(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
+ ; CHECK-COALESCER-BARRIER-LABEL: name: dont_coalesce_arg_that_is_also_res
+ ; CHECK-COALESCER-BARRIER: bb.0 (%ir-block.0):
+ ; CHECK-COALESCER-BARRIER-NEXT: liveins: $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: {{ $}}
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COPY]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: [[DEF:%[0-9]+]]:zpr = IMPLICIT_DEF
+ ; CHECK-COALESCER-BARRIER-NEXT: [[INSERT_SUBREG:%[0-9]+]]:zpr = INSERT_SUBREG [[DEF]], [[COALESCER_BARRIER_FPR128_]], %subreg.zsub
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: $z0 = COPY [[INSERT_SUBREG]]
+ ; CHECK-COALESCER-BARRIER-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-COALESCER-BARRIER-NEXT: [[COALESCER_BARRIER_FPR128_1:%[0-9]+]]:fpr128 = COALESCER_BARRIER_FPR128 [[COALESCER_BARRIER_FPR128_]]
+ ; CHECK-COALESCER-BARRIER-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def $q0, implicit $vg, implicit-def $vg
+ ; CHECK-COALESCER-BARRIER-NEXT: $q0 = COPY [[COALESCER_BARRIER_FPR128_1]]
+ ; CHECK-COALESCER-BARRIER-NEXT: RET_ReallyLR implicit $q0
+ ;
+ ; CHECK-REGALLOC-LABEL: name: dont_coalesce_arg_that_is_also_res
+ ; CHECK-REGALLOC: bb.0 (%ir-block.0):
+ ; CHECK-REGALLOC-NEXT: liveins: $q0
+ ; CHECK-REGALLOC-NEXT: {{ $}}
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 1, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = KILL killed renamable $q0, implicit-def $z0
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: BL @scalable_args, csr_aarch64_sve_aapcs, implicit-def dead $lr, implicit $sp, implicit $z0, implicit-def $sp
+ ; CHECK-REGALLOC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: renamable $q0 = COALESCER_BARRIER_FPR128 killed renamable $q0
+ ; CHECK-REGALLOC-NEXT: STRQui killed renamable $q0, %stack.0, 0 :: (store (s128) into %stack.0)
+ ; CHECK-REGALLOC-NEXT: MSRpstatesvcrImm1 1, 0, csr_aarch64_smstartstop, implicit-def dead $nzcv, implicit-def dead $q0, implicit $vg, implicit-def $vg
+ ; CHECK-REGALLOC-NEXT: $q0 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-REGALLOC-NEXT: RET_ReallyLR implicit $q0
+ %sa = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> %a, i64 0)
+ call void @scalable_args(<vscale x 2 x i64> %sa)
+ ret <2 x i64> %a
+}
+
+declare void @scalable_args(<vscale x 2 x i64>) "aarch64_pstate_sm_enabled"
+declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
+
+declare <vscale x 2 x i64> @scalable_res() "aarch64_pstate_sm_enabled"
+declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
index d67573384ca9..6e262cc0786e 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-body-streaming-compatible-interface.ll
@@ -8,27 +8,31 @@ declare void @streaming_compatible_callee() "aarch64_pstate_sm_compatible";
define float @sm_body_sm_compatible_simple() "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" nounwind {
; CHECK-LABEL: sm_body_sm_compatible_simple:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: bl __arm_sme_state
; CHECK-NEXT: and x8, x0, #0x1
; CHECK-NEXT: tbnz w8, #0, .LBB0_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: smstart sm
; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: fmov s0, wzr
+; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
; CHECK-NEXT: tbnz w8, #0, .LBB0_4
; CHECK-NEXT: // %bb.3:
; CHECK-NEXT: smstop sm
; CHECK-NEXT: .LBB0_4:
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: fmov s0, wzr
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
ret float zeroinitializer
}
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-body.ll b/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
index 93875549cffc..08dec228d2f7 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-body.ll
@@ -87,29 +87,27 @@ if.end:
define <2 x i64> @locally_streaming_caller_no_callee(<2 x i64> %a) "aarch64_pstate_sm_body" nounwind {
; CHECK-LABEL: locally_streaming_caller_no_callee:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
-; CHECK-NEXT: addsvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: smstart sm
; CHECK-NEXT: index z0.d, #0, #1
-; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: add z0.d, z0.d, z1.d
; CHECK-NEXT: add z0.d, z0.d, #41 // =0x29
-; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: smstop sm
-; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: addsvl sp, sp, #1
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%add = add <2 x i64> %a, <i64 41, i64 42>;
diff --git a/llvm/test/CodeGen/AArch64/srem-vec-crash.ll b/llvm/test/CodeGen/AArch64/srem-vec-crash.ll
new file mode 100644
index 000000000000..0fce8de30d4d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/srem-vec-crash.ll
@@ -0,0 +1,15 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s
+
+define i32 @pr84830(i1 %arg) {
+; CHECK-LABEL: pr84830:
+; CHECK: // %bb.0: // %bb
+; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: ret
+bb:
+ %new0 = srem i1 %arg, true
+ %last = zext i1 %new0 to i32
+ %i = icmp ne i32 %last, 0
+ %i1 = select i1 %i, i32 0, i32 1
+ ret i32 %i1
+}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
index 81349620fb77..5d1c91e434dc 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
@@ -1,17 +1,15 @@
; Test that storage for allocas with disjoint lifetimes is reused with stack
; tagging.
-; RUN: opt -S -aarch64-stack-tagging -stack-tagging-use-stack-safety=0 %s -o - | \
-; RUN: llc --mattr=+mte -no-stack-coloring=false -o - | \
+; RUN: llc --mattr=+mte -no-stack-coloring=false -stack-tagging-use-stack-safety=0 -o - %s | \
; RUN: FileCheck %s --check-prefix=COLOR
-; RUN: opt -S -aarch64-stack-tagging %s -stack-tagging-use-stack-safety=0 -o - | \
-; RUN: llc --mattr=+mte -no-stack-coloring=true -o - | \
+; RUN: llc --mattr=+mte -no-stack-coloring=true -stack-tagging-use-stack-safety=0 -o - %s | \
; RUN: FileCheck %s --check-prefix=NOCOLOR
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64"
-; COLOR: sub sp, sp, #208
+; COLOR: sub sp, sp, #192
; NOCOLOR: sub sp, sp, #336
define i32 @myCall_w2(i32 %in) sanitize_memtag {
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index 255c6dedbd6e..1a76f8cf87ff 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -1090,18 +1090,29 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB39_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB39_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1109,20 +1120,31 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB39_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB39_2
+; GFX90A-NEXT: .LBB39_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB39_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB39_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst
@@ -1132,26 +1154,47 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB40_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB40_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB40_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB40_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1161,18 +1204,29 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB41_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB41_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1180,20 +1234,31 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB41_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB41_2
+; GFX90A-NEXT: .LBB41_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB41_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB41_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst
@@ -1203,26 +1268,47 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(1) %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB42_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB42_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB42_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB42_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1394,37 +1480,59 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrspace(1) %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB49_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB49_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB49_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB49_2
+; GFX90A-NEXT: .LBB49_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB49_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: v_mov_b32_e32 v2, 0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB49_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1866,23 +1974,44 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB65_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB65_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB65_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB65_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -1892,23 +2021,44 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3) %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB66_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB66_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB66_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB66_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -1918,44 +2068,66 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrspace(3) %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: s_mov_b32 s4, s3
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB67_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, s0
-; GFX90A-NEXT: ds_read_b64 v[0:1], v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, s0
+; GFX90A-NEXT: ds_read_b64 v[2:3], v4
; GFX90A-NEXT: s_mov_b64 s[0:1], 0
-; GFX90A-NEXT: .LBB67_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB67_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX90A-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX90A-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[6:7], v[6:7] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execnz .LBB67_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB67_2
+; GFX90A-NEXT: .LBB67_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: s_mov_b32 s4, s3
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s4, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB67_3
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_mov_b32_e32 v2, s0
-; GFX940-NEXT: ds_read_b64 v[0:1], v2
+; GFX940-NEXT: v_mov_b32_e32 v4, s0
+; GFX940-NEXT: ds_read_b64 v[2:3], v4
; GFX940-NEXT: s_mov_b64 s[0:1], 0
-; GFX940-NEXT: .LBB67_1: ; %atomicrmw.start
+; GFX940-NEXT: .LBB67_2: ; %atomicrmw.start
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX940-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX940-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX940-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execnz .LBB67_1
-; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_cbranch_execnz .LBB67_2
+; GFX940-NEXT: .LBB67_3:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
index e288d9d5ab3c..eafd1e15e2cb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-atomic-cmpxchg-with-success.mir
@@ -16,7 +16,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -40,7 +41,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p0), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32))
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p0) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = COPY $vgpr3
@@ -63,7 +65,8 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s32), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s32), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s32), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ATOMIC_CMPXCHG]](s32)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s32), implicit [[ICMP]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
@@ -87,7 +90,8 @@ body: |
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY2]](s64), [[COPY1]](s64)
; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 1)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_ATOMIC_CMPXCHG]](s64), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[AMDGPU_ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[AMDGPU_ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[ICMP]](s1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = COPY $vgpr4_vgpr5
@@ -110,7 +114,8 @@ body: |
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr3_vgpr4
; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store syncscope("agent-one-as") monotonic monotonic (s64), addrspace 3)
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ATOMIC_CMPXCHG]](s64), [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ATOMIC_CMPXCHG]](s64), implicit [[ICMP]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ATOMIC_CMPXCHG]](s64)
+ ; CHECK-NEXT: S_ENDPGM 0, implicit [[COPY3]](s64), implicit [[ICMP]](s1)
%0:_(p3) = COPY $vgpr0
%1:_(s64) = COPY $vgpr1_vgpr2
%2:_(s64) = COPY $vgpr3_vgpr4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
index e9f8180aae66..fed277d7d10d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
@@ -64,9 +64,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s32)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[CTLZ_ZERO_UNDEF]], [[C]]
- ; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[CTLZ_ZERO_UNDEF]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s16) = G_CTLZ_ZERO_UNDEF %0
%2:_(s32) = G_ZEXT %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
index dba20e128237..eb86a981c9f1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
@@ -86,8 +86,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +118,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +174,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +363,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +407,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
index 93d00714158b..80b3166108ad 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
@@ -955,15 +955,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX8-LABEL: name: saddsat_s64
@@ -980,15 +981,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX9-LABEL: name: saddsat_s64
@@ -1005,15 +1007,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -1043,15 +1046,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX6-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX6-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1060,13 +1064,14 @@ body: |
; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX6-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX6-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX6-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX6-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1086,15 +1091,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1103,13 +1109,14 @@ body: |
; GFX8-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX8-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX8-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX8-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1129,15 +1136,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9-NEXT: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
@@ -1146,13 +1154,14 @@ body: |
; GFX9-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX9-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
; GFX9-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX9-NEXT: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
- ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
index 57b1ab9b194e..220450c5e4ec 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
@@ -86,8 +86,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0 = COPY [[SUB]](s32)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -117,8 +118,9 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
; CHECK-NEXT: $vgpr2 = COPY [[ZEXT]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -172,11 +174,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY [[BITCAST2]](<2 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND2]](s32), [[AND3]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](<2 x s16>)
; CHECK-NEXT: $vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
@@ -360,13 +363,14 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR2]](s1)
; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR3]](s1)
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<4 x s16>) = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C3]]
; CHECK-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C3]]
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32), [[AND6]](s32), [[AND7]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY5]](<4 x s16>)
; CHECK-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = COPY [[BUILD_VECTOR]](<4 x s32>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr1_vgpr2
@@ -403,11 +407,12 @@ body: |
; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR1]](s1)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[BUILD_VECTOR]](<2 x s32>)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C1]]
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
- ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](<2 x s32>)
; CHECK-NEXT: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
index 33a8cda8e84b..49fb6e9bdaf3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
@@ -955,15 +955,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX6-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX8-LABEL: name: ssubsat_s64
@@ -980,15 +981,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX8-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
;
; GFX9-LABEL: name: ssubsat_s64
@@ -1005,15 +1007,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
; GFX9-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
@@ -1043,15 +1046,16 @@ body: |
; GFX6-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX6-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX6-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX6-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX6-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX6-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX6-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX6-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX6-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX6-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1060,13 +1064,14 @@ body: |
; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX6-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX6-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX6-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX6-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX6-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1086,15 +1091,16 @@ body: |
; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX8-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1103,13 +1109,14 @@ body: |
; GFX8-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX8-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX8-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX8-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX8-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
@@ -1129,15 +1136,16 @@ body: |
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
; GFX9-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
- ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY2]], [[C1]](s32)
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
; GFX9-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
- ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[COPY2]]
; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9-NEXT: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
@@ -1146,13 +1154,14 @@ body: |
; GFX9-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
; GFX9-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
; GFX9-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
- ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
+ ; GFX9-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C1]](s32)
; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
; GFX9-NEXT: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
; GFX9-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
; GFX9-NEXT: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
- ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[COPY3]]
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
index b4bc64812b53..305eca792cfb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-trap.mir
@@ -24,7 +24,7 @@ body: |
bb.0:
%0:_(s8) = G_CONSTANT i8 0
%1:_(p1) = G_CONSTANT i64 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
bb.1:
G_STORE %0, %1 :: (store 1, addrspace 1)
@@ -55,7 +55,7 @@ body: |
; GCN-NEXT: S_ENDPGM 0
bb.0:
%0:_(s8) = G_CONSTANT i8 0
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
%1:_(p1) = G_CONSTANT i64 0
bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
index 623360f6b1d9..de46037e96e8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.interp.inreg.ll
@@ -147,6 +147,34 @@ main_body:
ret half %res
}
+define amdgpu_ps half @v_interp_rtz_f16(float inreg %i, float inreg %j, i32 inreg %m0) #0 {
+; GCN-LABEL: v_interp_rtz_f16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_mov_b32 s3, exec_lo
+; GCN-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GCN-NEXT: s_mov_b32 m0, s2
+; GCN-NEXT: lds_param_load v1, attr0.x wait_vdst:15
+; GCN-NEXT: s_mov_b32 exec_lo, s3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v3, v1, v0, v1 wait_exp:0
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v0, v1, v0, v1 op_sel:[1,0,1,0] wait_exp:7
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v3, v1, v2, v3 wait_exp:7
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v0, v1, v2, v0 op_sel:[1,0,0,0] wait_exp:7
+; GCN-NEXT: v_add_f16_e32 v0, v3, v0
+; GCN-NEXT: ; return to shader part epilog
+main_body:
+ %p0 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %m0)
+ %l_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 0)
+ %l_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %l_p0, i1 0)
+ %h_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 1)
+ %h_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %h_p0, i1 1)
+ %res = fadd half %l_p1, %h_p1
+ ret half %res
+}
+
define amdgpu_ps half @v_interp_f16_imm_params(float inreg %i, float inreg %j) #0 {
; GCN-LABEL: v_interp_f16_imm_params:
; GCN: ; %bb.0: ; %main_body
@@ -172,6 +200,8 @@ declare float @llvm.amdgcn.interp.inreg.p10(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p2(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p10.f16(float, float, float, i1) #0
declare half @llvm.amdgcn.interp.inreg.p2.f16(float, float, float, i1) #0
+declare float @llvm.amdgcn.interp.p10.rtz.f16(float, float, float, i1) #0
+declare half @llvm.amdgcn.interp.p2.rtz.f16(float, float, float, i1) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.f16(i32, i32, float, float, float, float, i1, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
index 6eed92ba1d71..6d4aa3b04d76 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.sbfe.ll
@@ -670,36 +670,19 @@ define amdgpu_kernel void @bfe_sext_in_reg_i24(ptr addrspace(1) %out, ptr addrsp
define amdgpu_kernel void @simplify_demanded_bfe_sdiv(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; GFX6-LABEL: simplify_demanded_bfe_sdiv:
; GFX6: ; %bb.0:
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, 2.0
-; GFX6-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
-; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_load_dword s0, s[6:7], 0x0
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: v_mul_lo_u32 v1, v0, -2
-; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_bfe_i32 s0, s0, 0x100001
-; GFX6-NEXT: s_ashr_i32 s2, s0, 31
-; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
-; GFX6-NEXT: s_add_i32 s0, s0, s2
-; GFX6-NEXT: s_xor_b32 s0, s0, s2
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v0
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
-; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_subrev_i32_e64 v2, s[0:1], 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 2, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s2, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s2, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_bfe_i32 s3, s3, 0x100001
+; GFX6-NEXT: s_ashr_i32 s4, s3, 31
+; GFX6-NEXT: s_lshr_b32 s4, s4, 31
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_ashr_i32 s3, s3, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s3
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
%src = load i32, ptr addrspace(1) %in, align 4
%bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
index 686b849ff58f..06bd45a45cce 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.format.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX12 %s
+; Note that TFE instructions don't have the result initialization to zero due to stopping before finalize-isel - which is where that's inserted
define amdgpu_ps float @struct_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX8-LABEL: name: struct_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
index 9edc24554911..1e3f94a5e39c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.buffer.load.format.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s
+; Note that TFE instructions don't have the result initialization to zero due to stopping before finalize-isel - which is where that's inserted
define amdgpu_ps float @struct_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: name: struct_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
index 1061f0003bd4..2c2f8e914447 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i32.ll
@@ -279,125 +279,27 @@ define i32 @v_sdiv_i32_pow2k_denom(i32 %num) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, 0x45800000
-; CHECK-NEXT: v_mov_b32_e32 v3, 0xfffff000
-; CHECK-NEXT: v_mov_b32_e32 v4, 0x1000
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 20, v1
; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
-; CHECK-NEXT: v_cvt_u32_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_lo_u32 v3, v2, v3
-; CHECK-NEXT: v_mul_hi_u32 v3, v2, v3
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: v_mul_hi_u32 v2, v0, v2
-; CHECK-NEXT: v_lshlrev_b32_e32 v3, 12, v2
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v2
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[4:5]
-; CHECK-NEXT: v_subrev_i32_e32 v3, vcc, 0x1000, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[4:5]
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v2
-; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v1
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i32 %num, 4096
ret i32 %result
}
define <2 x i32> @v_sdiv_v2i32_pow2k_denom(<2 x i32> %num) {
-; GISEL-LABEL: v_sdiv_v2i32_pow2k_denom:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v0
-; GISEL-NEXT: v_mov_b32_e32 v3, 0x1000
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; GISEL-NEXT: v_mov_b32_e32 v5, 0xfffff000
-; GISEL-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v6
-; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x4f7ffffe, v4
-; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
-; GISEL-NEXT: v_cvt_u32_f32_e32 v4, v4
-; GISEL-NEXT: v_mul_lo_u32 v5, v4, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v4, v5
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GISEL-NEXT: v_mul_hi_u32 v5, v0, v4
-; GISEL-NEXT: v_mul_hi_u32 v4, v1, v4
-; GISEL-NEXT: v_lshlrev_b32_e32 v7, 12, v5
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v5
-; GISEL-NEXT: v_lshlrev_b32_e32 v9, 12, v4
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, 1, v4
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
-; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[4:5]
-; GISEL-NEXT: v_sub_i32_e32 v7, vcc, v0, v3
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[6:7]
-; GISEL-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v7, vcc, 1, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v0, v5, v7, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v8, vcc
-; GISEL-NEXT: v_xor_b32_e32 v0, v0, v2
-; GISEL-NEXT: v_xor_b32_e32 v1, v1, v6
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
-; GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; CGP-LABEL: v_sdiv_v2i32_pow2k_denom:
-; CGP: ; %bb.0:
-; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT: v_ashrrev_i32_e32 v2, 31, v0
-; CGP-NEXT: v_rcp_iflag_f32_e32 v3, 0x45800000
-; CGP-NEXT: v_mov_b32_e32 v4, 0xfffff000
-; CGP-NEXT: v_mov_b32_e32 v5, 0x1000
-; CGP-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v6
-; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
-; CGP-NEXT: v_cvt_u32_f32_e32 v3, v3
-; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
-; CGP-NEXT: v_mul_lo_u32 v4, v3, v4
-; CGP-NEXT: v_mul_hi_u32 v4, v3, v4
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; CGP-NEXT: v_mul_hi_u32 v4, v0, v3
-; CGP-NEXT: v_mul_hi_u32 v3, v1, v3
-; CGP-NEXT: v_lshlrev_b32_e32 v7, 12, v4
-; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; CGP-NEXT: v_lshlrev_b32_e32 v9, 12, v3
-; CGP-NEXT: v_add_i32_e32 v10, vcc, 1, v3
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v9
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; CGP-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[4:5]
-; CGP-NEXT: v_sub_i32_e32 v7, vcc, v0, v5
-; CGP-NEXT: v_cmp_ge_u32_e64 s[6:7], v1, v5
-; CGP-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[6:7]
-; CGP-NEXT: v_subrev_i32_e32 v8, vcc, 0x1000, v1
-; CGP-NEXT: v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; CGP-NEXT: v_add_i32_e32 v7, vcc, 1, v4
-; CGP-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[6:7]
-; CGP-NEXT: v_add_i32_e32 v8, vcc, 1, v3
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5
-; CGP-NEXT: v_cndmask_b32_e32 v1, v3, v8, vcc
-; CGP-NEXT: v_xor_b32_e32 v0, v0, v2
-; CGP-NEXT: v_xor_b32_e32 v1, v1, v6
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
-; CGP-NEXT: s_setpc_b64 s[30:31]
+; CHECK-LABEL: v_sdiv_v2i32_pow2k_denom:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v3, 31, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 20, v3
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 12, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i32> %num, <i32 4096, i32 4096>
ret <2 x i32> %result
}
@@ -884,3 +786,24 @@ define <2 x i32> @v_sdiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
%result = sdiv <2 x i32> %num.mask, %den.mask
ret <2 x i32> %result
}
+
+define i32 @v_sdiv_i32_exact(i32 %num) {
+; CHECK-LABEL: v_sdiv_i32_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact i32 %num, 4096
+ ret i32 %result
+}
+
+define <2 x i32> @v_sdiv_v2i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: v_sdiv_v2i32_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; CHECK-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 84906c01a469..377fa24cb475 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -999,126 +999,11 @@ define i64 @v_sdiv_i64_pow2k_denom(i64 %num) {
; CHECK-LABEL: v_sdiv_i64_pow2k_denom:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cvt_f32_u32_e32 v2, 0x1000
-; CHECK-NEXT: v_cvt_f32_ubyte0_e32 v3, 0
-; CHECK-NEXT: v_mov_b32_e32 v6, 0xfffff000
-; CHECK-NEXT: v_mac_f32_e32 v2, 0x4f800000, v3
-; CHECK-NEXT: v_rcp_iflag_f32_e32 v2, v2
-; CHECK-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
-; CHECK-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
-; CHECK-NEXT: v_trunc_f32_e32 v4, v3
-; CHECK-NEXT: v_mac_f32_e32 v2, 0xcf800000, v4
-; CHECK-NEXT: v_cvt_u32_f32_e32 v5, v2
-; CHECK-NEXT: v_cvt_u32_f32_e32 v7, v4
-; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
-; CHECK-NEXT: v_mul_hi_u32 v8, v5, v2
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT: v_mul_lo_u32 v4, v7, v2
-; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
-; CHECK-NEXT: v_mul_lo_u32 v9, v5, v3
-; CHECK-NEXT: v_mul_lo_u32 v10, v7, v3
-; CHECK-NEXT: v_mul_hi_u32 v11, v5, v3
-; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v9
-; CHECK-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v10, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v4, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v9, v4
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v11
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v8, vcc, v10, v8
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, v8, v4
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v3, v4
-; CHECK-NEXT: v_add_i32_e32 v5, vcc, v5, v2
-; CHECK-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
-; CHECK-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v6, v5, 0
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v7, v[3:4]
-; CHECK-NEXT: v_ashrrev_i32_e32 v6, 31, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT: v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v1, v6, vcc
-; CHECK-NEXT: v_xor_b32_e32 v4, v0, v6
-; CHECK-NEXT: v_mul_lo_u32 v0, v7, v2
-; CHECK-NEXT: v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT: v_xor_b32_e32 v9, v1, v6
-; CHECK-NEXT: v_mul_hi_u32 v1, v5, v2
-; CHECK-NEXT: v_mul_hi_u32 v2, v7, v2
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v1, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT: v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v1, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT: v_mul_hi_u32 v3, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT: v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT: v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT: v_mul_hi_u32 v7, v4, v0
-; CHECK-NEXT: v_mul_hi_u32 v0, v9, v0
-; CHECK-NEXT: v_mov_b32_e32 v5, 0x1000
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v2, v7
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_mul_lo_u32 v7, v9, v1
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v7, v3
-; CHECK-NEXT: v_add_i32_e32 v7, vcc, v0, v2
-; CHECK-NEXT: v_mul_hi_u32 v8, v9, v1
-; CHECK-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v7, 0
-; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT: v_add_i32_e32 v3, vcc, v8, v2
-; CHECK-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v5, v3, v[1:2]
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v4, v0
-; CHECK-NEXT: v_subb_u32_e64 v2, s[4:5], v9, v1, vcc
-; CHECK-NEXT: v_sub_i32_e64 v1, s[4:5], v9, v1
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[4:5]
-; CHECK-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
-; CHECK-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cndmask_b32_e64 v2, -1, v4, s[4:5]
-; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v7
-; CHECK-NEXT: v_addc_u32_e32 v8, vcc, 0, v3, vcc
-; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CHECK-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
-; CHECK-NEXT: v_add_i32_e32 v1, vcc, 1, v4
-; CHECK-NEXT: v_addc_u32_e32 v5, vcc, 0, v8, vcc
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v1, v8, v5, vcc
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; CHECK-NEXT: v_cndmask_b32_e32 v0, v7, v0, vcc
-; CHECK-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; CHECK-NEXT: v_xor_b32_e32 v0, v0, v6
-; CHECK-NEXT: v_xor_b32_e32 v1, v1, v6
-; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc
+; CHECK-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 20, v2
+; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
; CHECK-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 %num, 4096
ret i64 %result
@@ -1128,473 +1013,31 @@ define <2 x i64> @v_sdiv_v2i64_pow2k_denom(<2 x i64> %num) {
; GISEL-LABEL: v_sdiv_v2i64_pow2k_denom:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; GISEL-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
-; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
-; GISEL-NEXT: s_subb_u32 s7, 0, 0
-; GISEL-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; GISEL-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; GISEL-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; GISEL-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; GISEL-NEXT: v_trunc_f32_e32 v7, v5
-; GISEL-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; GISEL-NEXT: v_cvt_u32_f32_e32 v6, v4
-; GISEL-NEXT: v_cvt_u32_f32_e32 v7, v7
-; GISEL-NEXT: v_mad_u64_u32 v[4:5], s[4:5], s6, v6, 0
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[5:6]
-; GISEL-NEXT: v_mul_lo_u32 v5, v7, v4
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
-; GISEL-NEXT: v_mul_hi_u32 v9, v6, v4
-; GISEL-NEXT: v_mul_hi_u32 v4, v7, v4
-; GISEL-NEXT: v_mul_lo_u32 v10, v6, v8
-; GISEL-NEXT: v_mul_lo_u32 v11, v7, v8
-; GISEL-NEXT: v_mul_hi_u32 v12, v6, v8
-; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v10
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v5, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v10, v5
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v11, v4
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v9, v10
-; GISEL-NEXT: v_add_i32_e32 v4, vcc, v4, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v9, v5
-; GISEL-NEXT: v_add_i32_e32 v5, vcc, v8, v5
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v6, v4
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v11, 0
-; GISEL-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GISEL-NEXT: v_mov_b32_e32 v4, v9
-; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s6, v5, v[4:5]
; GISEL-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; GISEL-NEXT: v_lshrrev_b32_e32 v4, 20, v4
+; GISEL-NEXT: v_ashrrev_i32_e32 v5, 31, v3
; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; GISEL-NEXT: v_mad_u64_u32 v[9:10], s[4:5], s7, v11, v[9:10]
-; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; GISEL-NEXT: v_xor_b32_e32 v10, v0, v4
-; GISEL-NEXT: v_mul_lo_u32 v0, v5, v8
-; GISEL-NEXT: v_mul_lo_u32 v12, v11, v9
-; GISEL-NEXT: v_xor_b32_e32 v13, v1, v4
-; GISEL-NEXT: v_mul_hi_u32 v1, v11, v8
-; GISEL-NEXT: v_mul_hi_u32 v8, v5, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v1, v5, v9
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v12, v0
-; GISEL-NEXT: v_mul_hi_u32 v12, v11, v9
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v8
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v12
-; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v12
-; GISEL-NEXT: v_mul_hi_u32 v9, v5, v9
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v9, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
-; GISEL-NEXT: v_addc_u32_e32 v1, vcc, v5, v1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v8, v13, v0
-; GISEL-NEXT: v_mul_lo_u32 v9, v10, v1
-; GISEL-NEXT: v_mul_hi_u32 v11, v10, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
-; GISEL-NEXT: v_mov_b32_e32 v5, 0x1000
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v8, v11
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v11, v13, v1
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
-; GISEL-NEXT: v_mul_hi_u32 v9, v10, v1
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v11, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v9
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v9, vcc, v11, v9
-; GISEL-NEXT: v_add_i32_e32 v11, vcc, v0, v8
-; GISEL-NEXT: v_mul_hi_u32 v12, v13, v1
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, v11, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v9, v8
-; GISEL-NEXT: v_add_i32_e32 v12, vcc, v12, v8
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v5, v12, v[1:2]
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v10, v0
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], 0, v11, v[8:9]
-; GISEL-NEXT: s_sub_u32 s6, 0, 0x1000
-; GISEL-NEXT: s_subb_u32 s7, 0, 0
-; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], v13, v8, vcc
-; GISEL-NEXT: v_sub_i32_e64 v8, s[4:5], v13, v8
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; GISEL-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v8, vcc
-; GISEL-NEXT: v_sub_i32_e32 v8, vcc, v0, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v10, -1, v9, s[4:5]
-; GISEL-NEXT: v_subbrev_u32_e32 v9, vcc, 0, v1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, 1, v11
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v6, 0
-; GISEL-NEXT: v_addc_u32_e32 v14, vcc, 0, v12, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v8, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v9
-; GISEL-NEXT: v_cndmask_b32_e32 v15, -1, v8, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s6, v7, v[1:2]
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, 1, v13
-; GISEL-NEXT: v_mad_u64_u32 v[8:9], s[4:5], s7, v6, v[8:9]
-; GISEL-NEXT: v_addc_u32_e32 v16, vcc, 0, v14, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
-; GISEL-NEXT: v_cndmask_b32_e32 v9, v13, v1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v1, v7, v0
-; GISEL-NEXT: v_mul_lo_u32 v13, v6, v8
-; GISEL-NEXT: v_mul_hi_u32 v15, v6, v0
-; GISEL-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc
-; GISEL-NEXT: v_mul_hi_u32 v0, v7, v0
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v1, v15
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v15, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; GISEL-NEXT: v_mul_hi_u32 v13, v6, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v15, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v13
-; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v15, v13
-; GISEL-NEXT: v_mul_hi_u32 v8, v7, v8
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; GISEL-NEXT: v_add_i32_e32 v1, vcc, v8, v1
-; GISEL-NEXT: v_add_i32_e32 v8, vcc, v6, v0
-; GISEL-NEXT: v_addc_u32_e32 v13, vcc, v7, v1, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[4:5], s6, v8, 0
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GISEL-NEXT: v_cndmask_b32_e32 v9, v11, v9, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s6, v13, v[1:2]
-; GISEL-NEXT: v_xor_b32_e32 v1, v9, v4
-; GISEL-NEXT: v_ashrrev_i32_e32 v9, 31, v3
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], s7, v8, v[6:7]
-; GISEL-NEXT: v_cndmask_b32_e32 v10, v12, v14, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v9
-; GISEL-NEXT: v_addc_u32_e32 v3, vcc, v3, v9, vcc
-; GISEL-NEXT: v_xor_b32_e32 v11, v2, v9
-; GISEL-NEXT: v_mul_lo_u32 v2, v13, v0
-; GISEL-NEXT: v_mul_lo_u32 v7, v8, v6
-; GISEL-NEXT: v_xor_b32_e32 v12, v3, v9
-; GISEL-NEXT: v_mul_hi_u32 v3, v8, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v13, v0
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v3, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v7, v2
-; GISEL-NEXT: v_mul_hi_u32 v7, v8, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v3, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GISEL-NEXT: v_mul_hi_u32 v6, v13, v6
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; GISEL-NEXT: v_add_i32_e32 v2, vcc, v6, v2
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v8, v0
-; GISEL-NEXT: v_addc_u32_e32 v2, vcc, v13, v2, vcc
-; GISEL-NEXT: v_mul_lo_u32 v3, v12, v0
-; GISEL-NEXT: v_mul_lo_u32 v6, v11, v2
-; GISEL-NEXT: v_mul_hi_u32 v7, v11, v0
-; GISEL-NEXT: v_mul_hi_u32 v0, v12, v0
-; GISEL-NEXT: v_xor_b32_e32 v8, v10, v4
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v3, v7
-; GISEL-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GISEL-NEXT: v_mul_lo_u32 v7, v12, v2
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, v6, v3
-; GISEL-NEXT: v_mul_hi_u32 v6, v11, v2
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v7, v0
-; GISEL-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; GISEL-NEXT: v_add_i32_e32 v10, vcc, v0, v3
-; GISEL-NEXT: v_mul_hi_u32 v7, v12, v2
-; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v10, 0
-; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_add_i32_e32 v0, vcc, v6, v0
-; GISEL-NEXT: v_add_i32_e32 v13, vcc, v7, v0
-; GISEL-NEXT: v_mov_b32_e32 v0, v3
-; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v5, v13, v[0:1]
-; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v1, v4
-; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v8, v4, vcc
-; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[4:5], 0, v10, v[6:7]
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v11, v2
-; GISEL-NEXT: v_subb_u32_e64 v4, s[4:5], v12, v3, vcc
-; GISEL-NEXT: v_sub_i32_e64 v3, s[4:5], v12, v3
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v5
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; GISEL-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4
-; GISEL-NEXT: v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v4, -1, v6, s[4:5]
-; GISEL-NEXT: v_add_i32_e32 v6, vcc, 1, v10
-; GISEL-NEXT: v_addc_u32_e32 v7, vcc, 0, v13, vcc
-; GISEL-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5
-; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
-; GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; GISEL-NEXT: v_add_i32_e32 v3, vcc, 1, v6
-; GISEL-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc
-; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
-; GISEL-NEXT: v_xor_b32_e32 v2, v2, v9
-; GISEL-NEXT: v_xor_b32_e32 v3, v3, v9
-; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v2, v9
-; GISEL-NEXT: v_subb_u32_e32 v3, vcc, v3, v9, vcc
+; GISEL-NEXT: v_lshrrev_b32_e32 v5, 20, v5
+; GISEL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GISEL-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GISEL-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GISEL-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; GISEL-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; GISEL-NEXT: s_setpc_b64 s[30:31]
;
; CGP-LABEL: v_sdiv_v2i64_pow2k_denom:
; CGP: ; %bb.0:
; CGP-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT: v_cvt_f32_u32_e32 v4, 0x1000
-; CGP-NEXT: v_cvt_f32_ubyte0_e32 v5, 0
-; CGP-NEXT: v_mov_b32_e32 v6, 0xfffff000
-; CGP-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5
-; CGP-NEXT: v_rcp_iflag_f32_e32 v4, v4
-; CGP-NEXT: v_mul_f32_e32 v4, 0x5f7ffffc, v4
-; CGP-NEXT: v_mul_f32_e32 v5, 0x2f800000, v4
-; CGP-NEXT: v_trunc_f32_e32 v7, v5
-; CGP-NEXT: v_mac_f32_e32 v4, 0xcf800000, v7
-; CGP-NEXT: v_cvt_u32_f32_e32 v8, v4
-; CGP-NEXT: v_cvt_u32_f32_e32 v9, v7
-; CGP-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v6, v8, 0
-; CGP-NEXT: v_mov_b32_e32 v7, v5
-; CGP-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v6, v9, v[7:8]
-; CGP-NEXT: v_mul_hi_u32 v12, v9, v4
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], -1, v8, v[10:11]
-; CGP-NEXT: v_mul_lo_u32 v10, v9, v4
-; CGP-NEXT: v_mul_hi_u32 v11, v8, v4
-; CGP-NEXT: v_mul_lo_u32 v4, v8, v13
-; CGP-NEXT: v_mul_lo_u32 v7, v9, v13
-; CGP-NEXT: v_mul_hi_u32 v14, v8, v13
-; CGP-NEXT: v_mul_hi_u32 v13, v9, v13
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v10, v4
-; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v4, v11
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v15, v4
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v12
-; CGP-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v7, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v14, vcc, v15, v14
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v7, v4
-; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v14, v7
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v13, v7
-; CGP-NEXT: v_add_i32_e32 v16, vcc, v8, v4
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v6, v16, 0
-; CGP-NEXT: v_addc_u32_e32 v17, vcc, v9, v7, vcc
-; CGP-NEXT: v_mov_b32_e32 v4, v14
-; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], v6, v17, v[4:5]
-; CGP-NEXT: v_ashrrev_i32_e32 v7, 31, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT: v_mad_u64_u32 v[14:15], s[4:5], -1, v16, v[14:15]
-; CGP-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CGP-NEXT: v_xor_b32_e32 v15, v0, v7
-; CGP-NEXT: v_mul_lo_u32 v0, v17, v13
-; CGP-NEXT: v_mul_lo_u32 v4, v16, v14
-; CGP-NEXT: v_xor_b32_e32 v18, v1, v7
-; CGP-NEXT: v_mul_hi_u32 v1, v16, v13
-; CGP-NEXT: v_mul_hi_u32 v13, v17, v13
+; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v1
+; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v1, v17, v14
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v4, v0
-; CGP-NEXT: v_mul_hi_u32 v4, v16, v14
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v13
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v4
-; CGP-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, v13, v4
-; CGP-NEXT: v_mul_hi_u32 v13, v17, v14
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v1, v0
-; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v4, v1
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v13, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
-; CGP-NEXT: v_addc_u32_e32 v1, vcc, v17, v1, vcc
-; CGP-NEXT: v_mul_lo_u32 v13, v18, v0
-; CGP-NEXT: v_mul_lo_u32 v14, v15, v1
-; CGP-NEXT: v_mul_hi_u32 v16, v15, v0
-; CGP-NEXT: v_mul_hi_u32 v0, v18, v0
-; CGP-NEXT: v_mov_b32_e32 v4, 0x1000
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v13, v16
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v16, v18, v1
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
-; CGP-NEXT: v_mul_hi_u32 v14, v15, v1
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v16, v0
-; CGP-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v14
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v14, vcc, v16, v14
-; CGP-NEXT: v_add_i32_e32 v16, vcc, v0, v13
-; CGP-NEXT: v_mul_hi_u32 v17, v18, v1
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, v16, 0
-; CGP-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v13, vcc, v14, v13
-; CGP-NEXT: v_add_i32_e32 v17, vcc, v17, v13
-; CGP-NEXT: v_mad_u64_u32 v[13:14], s[4:5], v4, v17, v[1:2]
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v15, v0
-; CGP-NEXT: v_subb_u32_e64 v1, s[4:5], v18, v13, vcc
-; CGP-NEXT: v_sub_i32_e64 v13, s[4:5], v18, v13
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v4
-; CGP-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[4:5]
-; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1
-; CGP-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v13, vcc
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT: v_subbrev_u32_e32 v13, vcc, 0, v1, vcc
-; CGP-NEXT: v_add_i32_e32 v15, vcc, 1, v16
-; CGP-NEXT: v_addc_u32_e32 v18, vcc, 0, v17, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4
-; CGP-NEXT: v_mov_b32_e32 v0, v5
-; CGP-NEXT: v_cndmask_b32_e64 v14, -1, v14, s[4:5]
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v9, v[0:1]
-; CGP-NEXT: v_cndmask_b32_e64 v19, 0, -1, vcc
-; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v13
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], -1, v8, v[0:1]
-; CGP-NEXT: v_cndmask_b32_e32 v5, -1, v19, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, 1, v15
-; CGP-NEXT: v_mul_lo_u32 v19, v8, v0
-; CGP-NEXT: v_addc_u32_e32 v13, vcc, 0, v18, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v5, v15, v1, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v13, v18, v13, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v19
-; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v1, v11
-; CGP-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v11, v9, v0
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; CGP-NEXT: v_mul_hi_u32 v10, v8, v0
-; CGP-NEXT: v_add_i32_e32 v11, vcc, v11, v12
-; CGP-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
-; CGP-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v11, vcc, v12, v11
-; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
-; CGP-NEXT: v_add_i32_e32 v1, vcc, v10, v1
-; CGP-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v10, vcc, v11, v10
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v10
-; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v1
-; CGP-NEXT: v_addc_u32_e32 v9, vcc, v9, v0, vcc
-; CGP-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v8, 0
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; CGP-NEXT: v_cndmask_b32_e32 v5, v16, v5, vcc
-; CGP-NEXT: v_xor_b32_e32 v11, v5, v7
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v6, v9, v[1:2]
-; CGP-NEXT: v_cndmask_b32_e32 v10, v17, v13, vcc
-; CGP-NEXT: v_xor_b32_e32 v1, v10, v7
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], -1, v8, v[5:6]
-; CGP-NEXT: v_ashrrev_i32_e32 v10, 31, v3
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v10
-; CGP-NEXT: v_addc_u32_e32 v3, vcc, v3, v10, vcc
-; CGP-NEXT: v_xor_b32_e32 v12, v2, v10
-; CGP-NEXT: v_mul_lo_u32 v2, v9, v0
-; CGP-NEXT: v_mul_lo_u32 v6, v8, v5
-; CGP-NEXT: v_xor_b32_e32 v13, v3, v10
-; CGP-NEXT: v_mul_hi_u32 v3, v8, v0
-; CGP-NEXT: v_mul_hi_u32 v0, v9, v0
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v3, v9, v5
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v6, v2
-; CGP-NEXT: v_mul_hi_u32 v6, v8, v5
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v3, v0
-; CGP-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT: v_mul_hi_u32 v5, v9, v5
-; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v3, v2
-; CGP-NEXT: v_add_i32_e32 v2, vcc, v5, v2
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v8, v0
-; CGP-NEXT: v_addc_u32_e32 v2, vcc, v9, v2, vcc
-; CGP-NEXT: v_mul_lo_u32 v5, v13, v3
-; CGP-NEXT: v_mul_lo_u32 v6, v12, v2
-; CGP-NEXT: v_sub_i32_e32 v0, vcc, v11, v7
-; CGP-NEXT: v_subb_u32_e32 v1, vcc, v1, v7, vcc
-; CGP-NEXT: v_mul_hi_u32 v7, v12, v3
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT: v_mul_lo_u32 v7, v13, v2
-; CGP-NEXT: v_mul_hi_u32 v3, v13, v3
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT: v_mul_hi_u32 v6, v12, v2
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v7, v3
-; CGP-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT: v_add_i32_e32 v7, vcc, v3, v5
-; CGP-NEXT: v_mul_hi_u32 v8, v13, v2
-; CGP-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, v7, 0
-; CGP-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT: v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT: v_add_i32_e32 v8, vcc, v8, v5
-; CGP-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v4, v8, v[3:4]
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v12, v2
-; CGP-NEXT: v_subb_u32_e64 v3, s[4:5], v13, v5, vcc
-; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v13, v5
-; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; CGP-NEXT: v_cmp_ge_u32_e64 s[4:5], v2, v4
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
-; CGP-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[4:5]
-; CGP-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
-; CGP-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; CGP-NEXT: v_cndmask_b32_e64 v3, -1, v6, s[4:5]
-; CGP-NEXT: v_add_i32_e32 v6, vcc, 1, v7
-; CGP-NEXT: v_addc_u32_e32 v9, vcc, 0, v8, vcc
-; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4
-; CGP-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
-; CGP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
-; CGP-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc
-; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v6
-; CGP-NEXT: v_addc_u32_e32 v5, vcc, 0, v9, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; CGP-NEXT: v_cndmask_b32_e32 v2, v6, v4, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v4, v9, v5, vcc
-; CGP-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; CGP-NEXT: v_cndmask_b32_e32 v2, v7, v2, vcc
-; CGP-NEXT: v_cndmask_b32_e32 v3, v8, v4, vcc
-; CGP-NEXT: v_xor_b32_e32 v2, v2, v10
-; CGP-NEXT: v_xor_b32_e32 v3, v3, v10
-; CGP-NEXT: v_sub_i32_e32 v2, vcc, v2, v10
-; CGP-NEXT: v_subb_u32_e32 v3, vcc, v3, v10, vcc
+; CGP-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; CGP-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CGP-NEXT: v_lshrrev_b32_e32 v4, 20, v4
+; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; CGP-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; CGP-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CGP-NEXT: v_ashr_i64 v[2:3], v[2:3], 12
; CGP-NEXT: s_setpc_b64 s[30:31]
%result = sdiv <2 x i64> %num, <i64 4096, i64 4096>
ret <2 x i64> %result
@@ -3398,3 +2841,24 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
%result = sdiv <2 x i64> %num.mask, %den.mask
ret <2 x i64> %result
}
+
+define i64 @v_sdiv_i64_exact(i64 %num) {
+; CHECK-LABEL: v_sdiv_i64_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact i64 %num, 4096
+ ret i64 %result
+}
+
+define <2 x i64> @v_sdiv_v2i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: v_sdiv_v2i64_exact:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; CHECK-NEXT: v_ashr_i64 v[2:3], v[2:3], 10
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir b/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir
new file mode 100644
index 000000000000..cba114c3568a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add_sub_u64_pseudos.mir
@@ -0,0 +1,68 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=finalize-isel -o - %s | FileCheck -check-prefix=GFX11 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass=finalize-isel -o - %s | FileCheck -check-prefix=GFX12 %s
+
+---
+name: reg_ops
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: reg_ops
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[DEF1]].sub0
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[DEF1]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY2]], implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY1]], [[COPY3]], implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: reg_ops
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[DEF1:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 [[DEF]], [[DEF1]]
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = IMPLICIT_DEF
+ %2:sreg_64 = S_ADD_U64_PSEUDO %0, %1, implicit-def $scc
+...
+
+---
+name: lhs_imm
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: lhs_imm
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 6565, [[COPY]], implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 0, [[COPY1]], implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: lhs_imm
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 6565, [[DEF]]
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = S_ADD_U64_PSEUDO 6565, %0, implicit-def $scc
+...
+
+---
+name: rhs_imm
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX11-LABEL: name: rhs_imm
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[DEF]].sub1
+ ; GFX11-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], 6565, implicit-def $scc
+ ; GFX11-NEXT: [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY1]], 0, implicit-def $scc, implicit $scc
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+ ;
+ ; GFX12-LABEL: name: rhs_imm
+ ; GFX12: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+ ; GFX12-NEXT: [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 [[DEF]], 6565
+ %0:sreg_64 = IMPLICIT_DEF
+ %1:sreg_64 = S_ADD_U64_PSEUDO %0, 6565, implicit-def $scc
+...
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
index 66034af5c351..cff9ce050667 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
@@ -233,9 +233,9 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
; AKF_HSA: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/allow-check.ll b/llvm/test/CodeGen/AMDGPU/allow-check.ll
index db6cdc30493d..d4f5621ce26a 100644
--- a/llvm/test/CodeGen/AMDGPU/allow-check.ll
+++ b/llvm/test/CodeGen/AMDGPU/allow-check.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d | FileCheck %s
-; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel | FileCheck %s
-; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -fast-isel | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=amdgcn-amd-mesa3d -global-isel=0 -fast-isel=1 | FileCheck %s
define i1 @test_runtime() local_unnamed_addr {
; CHECK-LABEL: test_runtime:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
new file mode 100644
index 000000000000..33b1cc65dc56
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
@@ -0,0 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals all --version 4
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -passes=amdgpu-attributor %s | FileCheck %s
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg_def() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[DEF:%.*]] = call i32 asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ %def = call i32 asm sideeffect "; def $0", "=a"()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[DEF:%.*]] = call i64 asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ %def = call i64 asm sideeffect "; def $0", "={a[0:1]}"()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_virtreg_second_arg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_second_arg(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "v,a"(i32 poison, i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_non_agpr_asm() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_non_agpr_asm(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "v"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a0}"(i32 poison)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_asm_physreg_tuple() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_tuple(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a[0:1]}"(i64 poison)
+ ret void
+}
+
+define void @func_uses_asm_virtreg_agpr() {
+; CHECK-LABEL: define void @func_uses_asm_virtreg_agpr(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ ret void
+}
+
+define void @func_uses_asm_physreg_agpr() {
+; CHECK-LABEL: define void @func_uses_asm_physreg_agpr(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a0}"(i32 poison)
+ ret void
+}
+
+define void @func_uses_asm_physreg_agpr_tuple() {
+; CHECK-LABEL: define void @func_uses_asm_physreg_agpr_tuple(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "{a[0:1]}"(i64 poison)
+ ret void
+}
+
+declare void @unknown()
+
+define amdgpu_kernel void @kernel_calls_extern() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern(
+; CHECK-SAME: ) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: call void @unknown()
+; CHECK-NEXT: ret void
+;
+ call void @unknown()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_extern_marked_callsite() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern_marked_callsite(
+; CHECK-SAME: ) #[[ATTR4]] {
+; CHECK-NEXT: call void @unknown() #[[ATTR9:[0-9]+]]
+; CHECK-NEXT: ret void
+;
+ call void @unknown() #0
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_indirect(ptr %indirect) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect(
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: call void [[INDIRECT]]()
+; CHECK-NEXT: ret void
+;
+ call void %indirect()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(ptr %indirect) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: call void [[INDIRECT]]() #[[ATTR9]]
+; CHECK-NEXT: ret void
+;
+ call void %indirect() #0
+ ret void
+}
+
+define amdgpu_kernel void @kernel_transitively_uses_agpr_asm() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_transitively_uses_agpr_asm(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void @func_uses_asm_physreg_agpr()
+; CHECK-NEXT: ret void
+;
+ call void @func_uses_asm_physreg_agpr()
+ ret void
+}
+
+define void @empty() {
+; CHECK-LABEL: define void @empty(
+; CHECK-SAME: ) #[[ATTR5:[0-9]+]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define void @also_empty() {
+; CHECK-LABEL: define void @also_empty(
+; CHECK-SAME: ) #[[ATTR5]] {
+; CHECK-NEXT: ret void
+;
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_empty() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_empty(
+; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: ret void
+;
+ call void @empty()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_non_agpr_and_agpr() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_non_agpr_and_agpr(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: call void @empty()
+; CHECK-NEXT: call void @func_uses_asm_physreg_agpr()
+; CHECK-NEXT: ret void
+;
+ call void @empty()
+ call void @func_uses_asm_physreg_agpr()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_generic_intrinsic(ptr %ptr0, ptr %ptr1, i64 %size) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_generic_intrinsic(
+; CHECK-SAME: ptr [[PTR0:%.*]], ptr [[PTR1:%.*]], i64 [[SIZE:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[PTR0]], ptr [[PTR1]], i64 [[SIZE]], i1 false)
+; CHECK-NEXT: ret void
+;
+ call void @llvm.memcpy.p0.p0.i64(ptr %ptr0, ptr %ptr1, i64 %size, i1 false)
+ ret void
+}
+
+declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg)
+
+define amdgpu_kernel void @kernel_calls_mfma.f32.32x32x1f32(ptr addrspace(1) %out, float %a, float %b, <32 x float> %c) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_mfma.f32.32x32x1f32(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], float [[A:%.*]], float [[B:%.*]], <32 x float> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[RESULT:%.*]] = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float [[A]], float [[B]], <32 x float> [[C]], i32 0, i32 0, i32 0)
+; CHECK-NEXT: store <32 x float> [[RESULT]], ptr addrspace(1) [[OUT]], align 128
+; CHECK-NEXT: ret void
+;
+ %result = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %a, float %b, <32 x float> %c, i32 0, i32 0, i32 0)
+ store <32 x float> %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @kernel_calls_workitem_id_x(ptr addrspace(1) %out) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_workitem_id_x(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[RESULT:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
+; CHECK-NEXT: ret void
+;
+ %result = call i32 @llvm.amdgcn.workitem.id.x()
+ store i32 %result, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) {
+; CHECK-LABEL: define amdgpu_kernel void @indirect_calls_none_agpr(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @empty, ptr @also_empty
+; CHECK-NEXT: call void [[FPTR]]()
+; CHECK-NEXT: ret void
+;
+ %fptr = select i1 %cond, ptr @empty, ptr @also_empty
+ call void %fptr()
+ ret void
+}
+
+
+attributes #0 = { "amdgpu-no-agpr" }
+;.
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6:[0-9]+]] = { convergent nocallback nofree nosync nounwind willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR7:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index d9001656f308..2ad28b8dd6ec 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -10668,3 +10668,111 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
store <2 x i64> %r, ptr addrspace(1) %out
ret void
}
+
+define <2 x i32> @v_sdiv_i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: @v_sdiv_i32_exact(
+; CHECK: %1 = extractelement <2 x i32> %num, i64 0
+; CHECK-NEXT: %2 = sdiv exact i32 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i32> poison, i32 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i32> %num, i64 1
+; CHECK-NEXT: %5 = sdiv exact i32 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i32> %3, i32 %5, i64 1
+; CHECK-NEXT: ret <2 x i32> %6
+;
+; GFX6-LABEL: v_sdiv_i32_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sdiv_i32_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 12, v0
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 10, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
+
+define <2 x i64> @v_sdiv_i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: @v_sdiv_i64_exact(
+; CHECK: %1 = extractelement <2 x i64> %num, i64 0
+; CHECK-NEXT: %2 = sdiv exact i64 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i64> poison, i64 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i64> %num, i64 1
+; CHECK-NEXT: %5 = sdiv exact i64 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i64> %3, i64 %5, i64 1
+; CHECK-NEXT: ret <2 x i64> %6
+;
+; GFX6-LABEL: v_sdiv_i64_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[0:1], 12
+; GFX6-NEXT: v_ashr_i64 v[2:3], v[2:3], 10
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sdiv_i64_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], 12, v[0:1]
+; GFX9-NEXT: v_ashrrev_i64 v[2:3], 10, v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = sdiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
+
+define <2 x i32> @v_udiv_i32_exact(<2 x i32> %num) {
+; CHECK-LABEL: @v_udiv_i32_exact(
+; CHECK: %1 = extractelement <2 x i32> %num, i64 0
+; CHECK-NEXT: %2 = udiv exact i32 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i32> poison, i32 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i32> %num, i64 1
+; CHECK-NEXT: %5 = udiv exact i32 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i32> %3, i32 %5, i64 1
+; CHECK-NEXT: ret <2 x i32> %6
+;
+; GFX6-LABEL: v_udiv_i32_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 12, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 10, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_udiv_i32_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 12, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = udiv exact <2 x i32> %num, <i32 4096, i32 1024>
+ ret <2 x i32> %result
+}
+
+define <2 x i64> @v_udiv_i64_exact(<2 x i64> %num) {
+; CHECK-LABEL: @v_udiv_i64_exact(
+; CHECK: %1 = extractelement <2 x i64> %num, i64 0
+; CHECK-NEXT: %2 = udiv exact i64 %1, 4096
+; CHECK-NEXT: %3 = insertelement <2 x i64> poison, i64 %2, i64 0
+; CHECK-NEXT: %4 = extractelement <2 x i64> %num, i64 1
+; CHECK-NEXT: %5 = udiv exact i64 %4, 1024
+; CHECK-NEXT: %6 = insertelement <2 x i64> %3, i64 %5, i64 1
+; CHECK-NEXT: ret <2 x i64> %6
+;
+; GFX6-LABEL: v_udiv_i64_exact:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[0:1], 12
+; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], 10
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_udiv_i64_exact:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], 12, v[0:1]
+; GFX9-NEXT: v_lshrrev_b64 v[2:3], 10, v[2:3]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %result = udiv exact <2 x i64> %num, <i64 4096, i64 1024>
+ ret <2 x i64> %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index 942f459ea6b8..8ddaf243db92 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -808,7 +808,7 @@ define float @test_pown_fast_f32_nobuiltin(float %x, i32 %y) {
; CHECK-LABEL: define float @test_pown_fast_f32_nobuiltin
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z4pownfi(float [[X]], i32 [[Y]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z4pownfi(float [[X]], i32 [[Y]]) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -820,11 +820,11 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
; CHECK-LABEL: define float @test_pown_fast_f32_strictfp
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]])
-; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]])
-; CHECK-NEXT: [[POWNI2F:%.*]] = sitofp i32 [[Y]] to float
-; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast float [[__LOG2]], [[POWNI2F]]
-; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]])
+; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR0]]
+; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR0]]
+; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR0]]
; CHECK-NEXT: [[__YEVEN:%.*]] = shl i32 [[Y]], 31
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[X]] to i32
; CHECK-NEXT: [[__POW_SIGN:%.*]] = and i32 [[__YEVEN]], [[TMP0]]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
index 2ffa647d1869..2e64a3456c24 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
@@ -896,7 +896,7 @@ define float @test_rootn_f32__y_neg2__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]]) #[[ATTR0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index af0eb23d8e99..3d4ae84d9c69 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -1025,33 +1025,33 @@ attributes #6 = { "enqueued-block" }
; AKF_HSA: attributes #[[ATTR8]] = { "amdgpu-calls" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR19]] = { nounwind sanitize_address "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR20]] = { nounwind sanitize_address "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR22]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR19]] = { nounwind sanitize_address "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR20]] = { nounwind sanitize_address "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR21]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR22]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR23:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR24:[0-9]+]] = { "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR25]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR25]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "enqueued-block" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR26]] = { "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR27]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR27]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR28]] = { nounwind }
; ATTRIBUTOR_HSA: attributes #[[ATTR29]] = { "enqueued-block" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index 9a9c28ac632f..43cdf85ed381 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -643,19 +643,19 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-stack-objects" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
; AKF_HSA: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
index 6c5e58c74033..547ff69592ca 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
@@ -393,17 +393,18 @@ define amdgpu_kernel void @use_get_local_size_z(ptr addrspace(1) %ptr) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
+;.
; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
index 1ebd864e7e03..29704959fc17 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
@@ -477,7 +477,6 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1032-NEXT: s_cbranch_execz .LBB1_3
; GFX1032-NEXT: ; %bb.2:
; GFX1032-NEXT: v_mov_b32_e32 v0, s11
-; GFX1032-NEXT: s_mov_b32 s10, s11
; GFX1032-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
; GFX1032-NEXT: .LBB1_3:
; GFX1032-NEXT: s_waitcnt_depctr 0xffe3
@@ -615,7 +614,6 @@ define amdgpu_ps void @add_i32_varying(ptr addrspace(8) inreg %out, ptr addrspac
; GFX1132-NEXT: s_cbranch_execz .LBB1_3
; GFX1132-NEXT: ; %bb.2:
; GFX1132-NEXT: v_mov_b32_e32 v0, s11
-; GFX1132-NEXT: s_mov_b32 s10, s11
; GFX1132-NEXT: buffer_atomic_add_u32 v0, off, s[4:7], 0 glc
; GFX1132-NEXT: .LBB1_3:
; GFX1132-NEXT: s_or_b32 exec_lo, exec_lo, s9
diff --git a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
index 6beccce9400e..1c8725f52f7e 100644
--- a/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
+++ b/llvm/test/CodeGen/AMDGPU/convergence-tokens.ll
@@ -4,7 +4,7 @@
; CHECK-LABEL: name: basic_call
; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
-; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, [[TOKEN]], csr_amdgpu, {{.*}}
+; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
; DEADMI: {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
; GISEL: {{.*}} G_SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
define i32 @basic_call(i32 %src) #0 {
@@ -92,15 +92,9 @@ define i32 @nested(i32 %src) #0 {
ret i32 %sum
}
-; COM: FIXME: Tokens on tail-call have not been implemented for SelectionDAG
-; COM: yet; the corresponding checks have been commented out.
-;
; CHECK-LABEL: name: tail_call_void_func_void
-; GISEL: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
-; COM: CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
-; COM: ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @external_void_func_void, [[TOKEN]], csr_amdgpu, {{.*}}
-; COM: DEADMI: {{.*}} SI_CALL {{.*}}, @external_void_func_void, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
-; GISEL: {{.*}} SI_TCRETURN {{.*}}, @external_void_func_void, 0, csr_amdgpu, implicit [[TOKEN]]
+; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
+; CHECK: {{.*}} SI_TCRETURN {{.*}}, @external_void_func_void, 0, csr_amdgpu, {{.*}}implicit [[TOKEN]]
define void @tail_call_void_func_void() #0 {
%t1 = call token @llvm.experimental.convergence.entry()
tail call void @external_void_func_void() [ "convergencectrl"(token %t1) ]
diff --git a/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir b/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
index 895185cb41a3..577d38e65668 100644
--- a/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/copy-vgpr-clobber-spill-vgpr.mir
@@ -333,7 +333,7 @@
ret void
}
- attributes #0 = { "amdgpu-waves-per-eu"="4,4" }
+ attributes #0 = { "amdgpu-waves-per-eu"="4,4" "amdgpu-no-agpr" }
...
---
diff --git a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
index 0c034192869b..386f9cd3f9ce 100644
--- a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
@@ -35,6 +35,6 @@ define amdgpu_kernel void @test_direct_indirect_call() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR1]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 2f3d5d9d140c..cf99b5d80e13 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -1,10 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s
-; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-SDAG-O0 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9 %s
+; RUN: llc -O0 -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0 %s
-; FIXME: GlobalISel missing the power-of-2 cases in legalization. https://github.com/llvm/llvm-project/issues/80671
-; xUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9,GFX9 %s
-; xUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-O0,GFX9-O0 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-G %s
+; RUN: llc -O0 -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GFX9-G-O0 %s
define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-LABEL: v_sdiv_i128_vv:
@@ -1223,6 +1222,1158 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_sdiv_i128_vv:
+; GFX9-G: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v16, 31, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v16, v0
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v16, v1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v10, vcc, v0, v16
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v16, v2
+; GFX9-G-NEXT: v_subb_co_u32_e32 v11, vcc, v1, v16, vcc
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v17, 31, v7
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v16, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v2, v16, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v13, vcc, v3, v16, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v17, v4
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v17, v5
+; GFX9-G-NEXT: v_sub_co_u32_e32 v18, vcc, v0, v17
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v17, v6
+; GFX9-G-NEXT: v_subb_co_u32_e32 v19, vcc, v1, v17, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v17, v7
+; GFX9-G-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v17, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v17, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v0, v18, v4
+; GFX9-G-NEXT: v_or_b32_e32 v1, v19, v5
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v0, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v1, v11, v13
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v18
+; GFX9-G-NEXT: v_ffbh_u32_e32 v0, v19
+; GFX9-G-NEXT: v_add_u32_e32 v1, 32, v1
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v4
+; GFX9-G-NEXT: v_min_u32_e32 v0, v0, v1
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v5
+; GFX9-G-NEXT: v_add_u32_e32 v2, 32, v2
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[4:5]
+; GFX9-G-NEXT: v_add_u32_e32 v0, 64, v0
+; GFX9-G-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v10
+; GFX9-G-NEXT: v_cndmask_b32_e64 v0, v1, v0, s[6:7]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v1, v11
+; GFX9-G-NEXT: v_add_u32_e32 v2, 32, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v3, v12
+; GFX9-G-NEXT: v_min_u32_e32 v1, v1, v2
+; GFX9-G-NEXT: v_ffbh_u32_e32 v2, v13
+; GFX9-G-NEXT: v_add_u32_e32 v3, 32, v3
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[12:13]
+; GFX9-G-NEXT: v_add_u32_e32 v1, 64, v1
+; GFX9-G-NEXT: v_min_u32_e32 v2, v2, v3
+; GFX9-G-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[6:7]
+; GFX9-G-NEXT: v_sub_co_u32_e64 v0, s[6:7], v0, v1
+; GFX9-G-NEXT: v_subb_co_u32_e64 v1, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v6, 0x7f
+; GFX9-G-NEXT: v_subb_co_u32_e64 v2, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-NEXT: v_subb_co_u32_e64 v3, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_cmp_gt_u64_e64 s[6:7], v[0:1], v[6:7]
+; GFX9-G-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_lt_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: v_or_b32_e32 v15, v1, v3
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, v7, v6, s[6:7]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[4:5]
+; GFX9-G-NEXT: v_or_b32_e32 v20, v7, v6
+; GFX9-G-NEXT: v_xor_b32_e32 v6, 0x7f, v0
+; GFX9-G-NEXT: v_or_b32_e32 v14, v6, v2
+; GFX9-G-NEXT: v_and_b32_e32 v6, 1, v20
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-G-NEXT: v_cndmask_b32_e64 v6, v10, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v7, v11, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v12, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v13, 0, vcc
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v14, 0, 1, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v14, v20, v14
+; GFX9-G-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GFX9-G-NEXT: s_cbranch_execz .LBB0_6
+; GFX9-G-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-G-NEXT: v_add_co_u32_e32 v20, vcc, 1, v0
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v1, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v22, vcc, 0, v2, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v3, vcc
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v8, vcc, 0x7f, v0
+; GFX9-G-NEXT: v_sub_u32_e32 v0, 64, v8
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v0, v[10:11]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], v8, v[12:13]
+; GFX9-G-NEXT: v_subrev_u32_e32 v9, 64, v8
+; GFX9-G-NEXT: v_lshlrev_b64 v[6:7], v8, v[10:11]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], v9, v[10:11]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v8
+; GFX9-G-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v7, 0, v7, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-G-NEXT: v_cmp_eq_u32_e32 vcc, 0, v8
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v0, v12, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v1, v13, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-G-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-G-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-G-NEXT: s_xor_b64 s[12:13], exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-G-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-G-NEXT: v_sub_u32_e32 v2, 64, v20
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v20, v[10:11]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], v2, v[12:13]
+; GFX9-G-NEXT: v_subrev_u32_e32 v24, 64, v20
+; GFX9-G-NEXT: v_lshrrev_b64 v[14:15], v20, v[12:13]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-G-NEXT: v_lshrrev_b64 v[0:1], v24, v[12:13]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v20
+; GFX9-G-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v14, 0, v14, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v15, 0, v15, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v24, vcc, -1, v18
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v20
+; GFX9-G-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v19, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v12, v0, v10, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v13, v1, v11, s[4:5]
+; GFX9-G-NEXT: v_addc_co_u32_e32 v26, vcc, -1, v4, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-NEXT: v_addc_co_u32_e32 v27, vcc, -1, v5, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, 0
+; GFX9-G-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-G-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-G-NEXT: .LBB0_3: ; %udiv-do-while
+; GFX9-G-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v10, 31, v7
+; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[12:13]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v12, 31, v9
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v2, v12
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v14, 31, v13
+; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v24, v2
+; GFX9-G-NEXT: v_or_b32_e32 v0, v0, v14
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v25, v3, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v26, v0, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v12, vcc, v27, v1, vcc
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v28, 31, v12
+; GFX9-G-NEXT: v_and_b32_e32 v12, v28, v18
+; GFX9-G-NEXT: v_sub_co_u32_e32 v12, vcc, v2, v12
+; GFX9-G-NEXT: v_and_b32_e32 v2, v28, v19
+; GFX9-G-NEXT: v_subb_co_u32_e32 v13, vcc, v3, v2, vcc
+; GFX9-G-NEXT: v_and_b32_e32 v2, v28, v4
+; GFX9-G-NEXT: v_subb_co_u32_e32 v14, vcc, v0, v2, vcc
+; GFX9-G-NEXT: v_and_b32_e32 v0, v28, v5
+; GFX9-G-NEXT: v_subb_co_u32_e32 v15, vcc, v1, v0, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v20, vcc, -1, v20
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, -1, v21, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v22, vcc, -1, v22, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v23, vcc
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v0, v20, v22
+; GFX9-G-NEXT: v_or_b32_e32 v1, v21, v23
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v10
+; GFX9-G-NEXT: v_and_b32_e32 v10, 1, v28
+; GFX9-G-NEXT: v_mov_b32_e32 v0, v10
+; GFX9-G-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v1, v11
+; GFX9-G-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execnz .LBB0_3
+; GFX9-G-NEXT: ; %bb.4: ; %Flow
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: .LBB0_5: ; %Flow2
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[12:13]
+; GFX9-G-NEXT: v_lshlrev_b64 v[2:3], 1, v[6:7]
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v4, 31, v7
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v4
+; GFX9-G-NEXT: v_or_b32_e32 v6, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v7, v1, v3
+; GFX9-G-NEXT: .LBB0_6: ; %Flow3
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-G-NEXT: v_xor_b32_e32 v3, v17, v16
+; GFX9-G-NEXT: v_xor_b32_e32 v0, v6, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v1, v7, v3
+; GFX9-G-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v3
+; GFX9-G-NEXT: v_xor_b32_e32 v2, v8, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-G-NEXT: v_xor_b32_e32 v4, v9, v3
+; GFX9-G-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v3, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v3, vcc, v4, v3, vcc
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_sdiv_i128_vv:
+; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14_vgpr15_vgpr16 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v1
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v12, v3, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v10, v1, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v16
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v11, v3, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr1 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v9, v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v1, v12, v1
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v4, v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v10, v3
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v2, v10, v2
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v1, s[6:7], v1, v12
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[6:7], v4, v12, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v6, s[6:7], v3, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v5, s[6:7], v2, v10, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v5, v11, v5
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v8, v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v14
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v9, v7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v9, v6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v5, s[6:7], v5, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v15, s[6:7], v8, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v14, s[6:7], v7, v9, s[6:7]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v13, s[6:7], v6, v9, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v13
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v13, v11, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v11, v11, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v11, v9, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v9, v9, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[6:7], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s16, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v8, v8, v9
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v8
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s15, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s11, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s14, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v6, s[8:9], v5, v6
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s16
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[8:9], v5, v7, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s14
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s10
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s13
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[12:13], v[10:11], v[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[10:11]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v7, s6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v9
+; GFX9-G-O0-NEXT: v_or_b32_e64 v8, v7, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[6:7], v[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v1, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v3, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v5
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s4, 0
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s5, 1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_3
+; GFX9-G-O0-NEXT: s_branch .LBB0_8
+; GFX9-G-O0-NEXT: .LBB0_1: ; %Flow
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v0, 2
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v0, 3
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_5
+; GFX9-G-O0-NEXT: .LBB0_3: ; %Flow2
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v4, 0
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v4, 1
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_9
+; GFX9-G-O0-NEXT: .LBB0_4: ; %udiv-loop-exit
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_3
+; GFX9-G-O0-NEXT: .LBB0_5: ; %Flow1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v8, 4
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v8, 5
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_4
+; GFX9-G-O0-NEXT: .LBB0_6: ; %udiv-do-while
+; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s6, v16, 6
+; GFX9-G-O0-NEXT: v_readlane_b32 s7, v16, 7
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[21:22], v2, v[0:1]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v22
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[23:24], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[12:13]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v30
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v24
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v22
+; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v11, s[8:9], v11, v4
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v10, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v8, v7, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v6, v5, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v8, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v6, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_and_b32_e64 v12, v8, s9
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, s8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12_vgpr13 killed $vgpr12_vgpr13 def $vgpr12_vgpr13_vgpr14_vgpr15 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v25
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v28
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v24
+; GFX9-G-O0-NEXT: v_and_b32_e64 v11, v8, v11
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v22
+; GFX9-G-O0-NEXT: v_and_b32_e64 v8, v6, v8
+; GFX9-G-O0-NEXT: v_and_b32_e64 v6, v6, v21
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v7, v8, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s8, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s12, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s11, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s8
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v17, s[8:9], v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s12
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v18, s[8:9], v10, v11, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s11
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v20, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v19, s[8:9], v8, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-G-O0-NEXT: v_or_b32_e64 v17, v17, v20
+; GFX9-G-O0-NEXT: v_or_b32_e64 v19, v18, v19
+; GFX9-G-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[19:20]
+; GFX9-G-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 2
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 3
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execnz .LBB0_6
+; GFX9-G-O0-NEXT: s_branch .LBB0_1
+; GFX9-G-O0-NEXT: .LBB0_7: ; %udiv-preheader
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v4, v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v5, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v13, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v13, v6
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v13, v[21:22]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[26:27], v13, v[15:16]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[24:25], v5, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v25
+; GFX9-G-O0-NEXT: v_or_b32_e64 v14, v14, v23
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[21:22], v4, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v22
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v16
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v5, v13, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4_vgpr5 killed $vgpr4_vgpr5 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s7, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s6, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v16, s[4:5], v16, v17
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v15, s[4:5], v15, v16, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v14, s[4:5], v14, v15, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v13, s[4:5], v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s8, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s9, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB0_6
+; GFX9-G-O0-NEXT: .LBB0_8: ; %udiv-bb1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3)
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v2, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v6, s[6:7], v4, v6, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v8, s[6:7], v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v1, v3, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[6:7], v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v3, v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v9, v1, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[8:9], v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, v1
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[1:2], v4, v[13:14]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[18:19], v9, v[13:14]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[16:17], v4, v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_or_b32_e64 v10, v10, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[13:14], v3, v[13:14]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX9-G-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4
+; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s6, 4
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s7, 5
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_5
+; GFX9-G-O0-NEXT: s_branch .LBB0_7
+; GFX9-G-O0-NEXT: .LBB0_9: ; %udiv-end
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21]
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v0, v0, v8
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v1, v1, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v2, v2, v6
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v3, v5
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v0, s[4:5], v0, v8
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v1, s[4:5], v1, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v2, s[4:5], v2, v6, s[4:5]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v3, s[4:5], v3, v5, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = sdiv i128 %lhs, %rhs
ret i128 %div
}
@@ -2306,6 +3457,1043 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_udiv_i128_vv:
+; GFX9-G: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_or_b32_e32 v8, v4, v6
+; GFX9-G-NEXT: v_or_b32_e32 v9, v5, v7
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v8, v0, v2
+; GFX9-G-NEXT: v_or_b32_e32 v9, v1, v3
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[8:9]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v4
+; GFX9-G-NEXT: v_ffbh_u32_e32 v8, v5
+; GFX9-G-NEXT: v_add_u32_e32 v9, 32, v9
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v6
+; GFX9-G-NEXT: v_min_u32_e32 v8, v8, v9
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v7
+; GFX9-G-NEXT: v_add_u32_e32 v10, 32, v10
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[6:7]
+; GFX9-G-NEXT: v_add_u32_e32 v8, 64, v8
+; GFX9-G-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[6:7]
+; GFX9-G-NEXT: v_ffbh_u32_e32 v9, v1
+; GFX9-G-NEXT: v_add_u32_e32 v10, 32, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v11, v2
+; GFX9-G-NEXT: v_min_u32_e32 v9, v9, v10
+; GFX9-G-NEXT: v_ffbh_u32_e32 v10, v3
+; GFX9-G-NEXT: v_add_u32_e32 v11, 32, v11
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[2:3]
+; GFX9-G-NEXT: v_add_u32_e32 v9, 64, v9
+; GFX9-G-NEXT: v_min_u32_e32 v10, v10, v11
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v10, v9, s[6:7]
+; GFX9-G-NEXT: v_sub_co_u32_e64 v12, s[6:7], v8, v9
+; GFX9-G-NEXT: v_subb_co_u32_e64 v13, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v8, 0x7f
+; GFX9-G-NEXT: v_subb_co_u32_e64 v14, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v9, 0
+; GFX9-G-NEXT: v_subb_co_u32_e64 v15, s[6:7], 0, 0, s[6:7]
+; GFX9-G-NEXT: v_cmp_gt_u64_e64 s[6:7], v[12:13], v[8:9]
+; GFX9-G-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_lt_u64_e64 s[6:7], 0, v[14:15]
+; GFX9-G-NEXT: v_or_b32_e32 v17, v13, v15
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[6:7]
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[6:7]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[4:5]
+; GFX9-G-NEXT: v_or_b32_e32 v18, v9, v8
+; GFX9-G-NEXT: v_xor_b32_e32 v8, 0x7f, v12
+; GFX9-G-NEXT: v_or_b32_e32 v16, v8, v14
+; GFX9-G-NEXT: v_and_b32_e32 v8, 1, v18
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-G-NEXT: v_cndmask_b32_e64 v10, v0, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v11, v1, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v8, v2, 0, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e64 v9, v3, 0, vcc
+; GFX9-G-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[16:17]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v16, v18, v16
+; GFX9-G-NEXT: v_and_b32_e32 v16, 1, v16
+; GFX9-G-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
+; GFX9-G-NEXT: s_cbranch_execz .LBB1_6
+; GFX9-G-NEXT: ; %bb.1: ; %udiv-bb1
+; GFX9-G-NEXT: v_add_co_u32_e32 v18, vcc, 1, v12
+; GFX9-G-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v13, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v14, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v15, vcc
+; GFX9-G-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GFX9-G-NEXT: v_sub_co_u32_e32 v16, vcc, 0x7f, v12
+; GFX9-G-NEXT: v_sub_u32_e32 v8, 64, v16
+; GFX9-G-NEXT: v_lshrrev_b64 v[8:9], v8, v[0:1]
+; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], v16, v[2:3]
+; GFX9-G-NEXT: v_subrev_u32_e32 v14, 64, v16
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], v16, v[0:1]
+; GFX9-G-NEXT: v_or_b32_e32 v10, v8, v10
+; GFX9-G-NEXT: v_or_b32_e32 v11, v9, v11
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], v14, v[0:1]
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v16
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_cndmask_b32_e32 v14, 0, v12, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v15, 0, v13, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v8, v10, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
+; GFX9-G-NEXT: v_cmp_eq_u32_e32 vcc, 0, v16
+; GFX9-G-NEXT: v_mov_b32_e32 v13, s11
+; GFX9-G-NEXT: v_cndmask_b32_e32 v8, v8, v2, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v9, v9, v3, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v10, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-G-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GFX9-G-NEXT: s_xor_b64 s[12:13], exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-G-NEXT: ; %bb.2: ; %udiv-preheader
+; GFX9-G-NEXT: v_sub_u32_e32 v12, 64, v18
+; GFX9-G-NEXT: v_subrev_u32_e32 v22, 64, v18
+; GFX9-G-NEXT: v_lshrrev_b64 v[10:11], v18, v[0:1]
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], v12, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b64 v[16:17], v18, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b64 v[2:3], v22, v[2:3]
+; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v13
+; GFX9-G-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
+; GFX9-G-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
+; GFX9-G-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
+; GFX9-G-NEXT: v_add_co_u32_e32 v22, vcc, -1, v4
+; GFX9-G-NEXT: v_addc_co_u32_e32 v23, vcc, -1, v5, vcc
+; GFX9-G-NEXT: s_mov_b64 s[10:11], s[8:9]
+; GFX9-G-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
+; GFX9-G-NEXT: v_addc_co_u32_e32 v24, vcc, -1, v6, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v13, s11
+; GFX9-G-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; GFX9-G-NEXT: v_cndmask_b32_e64 v3, v3, v1, s[4:5]
+; GFX9-G-NEXT: v_addc_co_u32_e32 v25, vcc, -1, v7, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-NEXT: v_mov_b32_e32 v11, s9
+; GFX9-G-NEXT: v_mov_b32_e32 v10, s8
+; GFX9-G-NEXT: v_mov_b32_e32 v12, s10
+; GFX9-G-NEXT: .LBB1_3: ; %udiv-do-while
+; GFX9-G-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[14:15]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v0, 31, v15
+; GFX9-G-NEXT: v_or_b32_e32 v14, v10, v12
+; GFX9-G-NEXT: v_or_b32_e32 v15, v11, v13
+; GFX9-G-NEXT: v_lshlrev_b64 v[12:13], 1, v[16:17]
+; GFX9-G-NEXT: v_lshlrev_b64 v[10:11], 1, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v3
+; GFX9-G-NEXT: v_or_b32_e32 v12, v12, v2
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v9
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_or_b32_e32 v2, v10, v2
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v0
+; GFX9-G-NEXT: v_sub_co_u32_e32 v0, vcc, v22, v2
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v23, v11, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v24, v12, vcc
+; GFX9-G-NEXT: v_subb_co_u32_e32 v0, vcc, v25, v13, vcc
+; GFX9-G-NEXT: v_add_co_u32_e64 v18, s[4:5], -1, v18
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v0
+; GFX9-G-NEXT: v_addc_co_u32_e64 v19, s[4:5], -1, v19, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v10, v3, v4
+; GFX9-G-NEXT: v_addc_co_u32_e64 v20, s[4:5], -1, v20, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v16, v3, v5
+; GFX9-G-NEXT: v_sub_co_u32_e32 v2, vcc, v2, v10
+; GFX9-G-NEXT: v_addc_co_u32_e64 v21, s[4:5], -1, v21, s[4:5]
+; GFX9-G-NEXT: v_and_b32_e32 v0, 1, v3
+; GFX9-G-NEXT: v_and_b32_e32 v17, v3, v6
+; GFX9-G-NEXT: v_and_b32_e32 v26, v3, v7
+; GFX9-G-NEXT: v_subb_co_u32_e32 v3, vcc, v11, v16, vcc
+; GFX9-G-NEXT: v_or_b32_e32 v10, v18, v20
+; GFX9-G-NEXT: v_or_b32_e32 v11, v19, v21
+; GFX9-G-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GFX9-G-NEXT: v_subb_co_u32_e32 v16, vcc, v12, v17, vcc
+; GFX9-G-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-NEXT: v_subb_co_u32_e32 v17, vcc, v13, v26, vcc
+; GFX9-G-NEXT: s_or_b64 s[8:9], s[4:5], s[8:9]
+; GFX9-G-NEXT: v_mov_b32_e32 v10, v0
+; GFX9-G-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: s_cbranch_execnz .LBB1_3
+; GFX9-G-NEXT: ; %bb.4: ; %Flow
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX9-G-NEXT: .LBB1_5: ; %Flow2
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[12:13]
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 1, v[14:15]
+; GFX9-G-NEXT: v_lshlrev_b64 v[8:9], 1, v[8:9]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 31, v15
+; GFX9-G-NEXT: v_or_b32_e32 v8, v8, v2
+; GFX9-G-NEXT: v_or_b32_e32 v10, v10, v0
+; GFX9-G-NEXT: v_or_b32_e32 v11, v11, v1
+; GFX9-G-NEXT: .LBB1_6: ; %Flow3
+; GFX9-G-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-G-NEXT: v_mov_b32_e32 v0, v10
+; GFX9-G-NEXT: v_mov_b32_e32 v1, v11
+; GFX9-G-NEXT: v_mov_b32_e32 v2, v8
+; GFX9-G-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_udiv_i128_vv:
+; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: ; implicit-def: $vgpr8 : SGPR spill to VGPR lane
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v0
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9_vgpr10_vgpr11 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v3
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v4
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v5
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[6:7], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v14
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v9, v12
+; GFX9-G-O0-NEXT: v_or_b32_e64 v11, v10, v11
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[11:12]
+; GFX9-G-O0-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[8:9], v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v6, v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v5, v5
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v5, v5, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s14, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[9:10], v[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v12
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v7, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v7, v8
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s10
+; GFX9-G-O0-NEXT: v_add_u32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v10
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v6, v6
+; GFX9-G-O0-NEXT: v_ffbh_u32_e64 v8, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, 32
+; GFX9-G-O0-NEXT: v_add_u32_e64 v8, v8, v9
+; GFX9-G-O0-NEXT: v_min_u32_e64 v6, v6, v8
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s13, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s11, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s12, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v6, s[8:9], v5, v6
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s14
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v7, s[8:9], v5, v7, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s12
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s10
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v8, s[8:9]
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[12:13], 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[10:11], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[8:9], v[12:13], v[14:15]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s13
+; GFX9-G-O0-NEXT: v_cmp_gt_u64_e64 s[12:13], v[10:11], v[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[12:13]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[10:11]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v10, v5, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v11, s[6:7]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7
+; GFX9-G-O0-NEXT: v_xor_b32_e64 v7, v7, s6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v9
+; GFX9-G-O0-NEXT: v_or_b32_e64 v8, v7, v8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[6:7], v[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v1, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_and_b32_e32 v3, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v6
+; GFX9-G-O0-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-G-O0-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v5
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], -1
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s4, 0
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s5, 1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_3
+; GFX9-G-O0-NEXT: s_branch .LBB1_8
+; GFX9-G-O0-NEXT: .LBB1_1: ; %Flow
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v0, 2
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v0, 3
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(4)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_5
+; GFX9-G-O0-NEXT: .LBB1_3: ; %Flow2
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v4, 0
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v4, 1
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_9
+; GFX9-G-O0-NEXT: .LBB1_4: ; %udiv-loop-exit
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9
+; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_3
+; GFX9-G-O0-NEXT: .LBB1_5: ; %Flow1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s4, v8, 4
+; GFX9-G-O0-NEXT: v_readlane_b32 s5, v8, 5
+; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_4
+; GFX9-G-O0-NEXT: .LBB1_6: ; %udiv-do-while
+; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_readlane_b32 s6, v16, 6
+; GFX9-G-O0-NEXT: v_readlane_b32 s7, v16, 7
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[21:22], v2, v[0:1]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15
+; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v22
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[23:24], v0, v[2:3]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[12:13]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr2 killed $exec
+; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v14, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v30, v32
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v34
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v29
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v30
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v24
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v22
+; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15
+; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v11, s[8:9], v11, v4
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v10, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v8, v7, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v6, v5, s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v8, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s8
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v6, v6, v10
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_and_b32_e64 v12, v8, s9
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, s8
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s4
+; GFX9-G-O0-NEXT: ; kill: def $vgpr12_vgpr13 killed $vgpr12_vgpr13 def $vgpr12_vgpr13_vgpr14_vgpr15 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v25
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v28
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v23
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v24
+; GFX9-G-O0-NEXT: v_and_b32_e64 v11, v8, v11
+; GFX9-G-O0-NEXT: v_and_b32_e64 v10, v8, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v22
+; GFX9-G-O0-NEXT: v_and_b32_e64 v8, v6, v8
+; GFX9-G-O0-NEXT: v_and_b32_e64 v6, v6, v21
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[8:9], v4, v11
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v10, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v9, s[8:9], v7, v8, s[8:9]
+; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v8, s[8:9], v5, v6, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s8, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s12, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s11, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s8
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v17, s[8:9], v11, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, s12
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v18, s[8:9], v10, v11, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, s11
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v20, s[8:9], v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v19, s[8:9], v8, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v20
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v11, v19
+; GFX9-G-O0-NEXT: v_or_b32_e64 v17, v17, v20
+; GFX9-G-O0-NEXT: v_or_b32_e64 v19, v18, v19
+; GFX9-G-O0-NEXT: ; kill: def $vgpr17 killed $vgpr17 def $vgpr17_vgpr18 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, s4
+; GFX9-G-O0-NEXT: v_cmp_eq_u64_e64 s[4:5], v[17:18], v[19:20]
+; GFX9-G-O0-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v0
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v19, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v18, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, v12
+; GFX9-G-O0-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 2
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 3
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s6, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v16, s7, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execnz .LBB1_6
+; GFX9-G-O0-NEXT: s_branch .LBB1_1
+; GFX9-G-O0-NEXT: .LBB1_7: ; %udiv-preheader
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 64
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v4, v13, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s4
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v5, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s4
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[4:5], v13, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v13, v6
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v13, v[21:22]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[26:27], v13, v[15:16]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[24:25], v5, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v26
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v27
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v25
+; GFX9-G-O0-NEXT: v_or_b32_e64 v14, v14, v23
+; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v5, v13
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], 0
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[21:22], v4, v[21:22]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v21
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v22
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v15
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v16
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v14, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v5, v13, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr7 killed $vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[4:5]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr13 killed $vgpr13 def $vgpr13_vgpr14 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4_vgpr5 killed $vgpr4_vgpr5 def $vgpr4_vgpr5_vgpr6_vgpr7 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v14
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v17
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v20
+; GFX9-G-O0-NEXT: s_mov_b32 s4, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s7, -1
+; GFX9-G-O0-NEXT: s_mov_b32 s6, -1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v17, s4
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v16, s[4:5], v16, v17
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, s10
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v15, s[4:5], v15, v16, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v14, s[4:5], v14, v15, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v13, s[4:5], v13, v14, s[4:5]
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9]
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9]
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s8, 6
+; GFX9-G-O0-NEXT: v_writelane_b32 v12, s9, 7
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_branch .LBB1_6
+; GFX9-G-O0-NEXT: .LBB1_8: ; %udiv-bb1
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 1
+; GFX9-G-O0-NEXT: s_mov_b32 s10, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s9, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(3)
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v5, s[6:7], v2, v5
+; GFX9-G-O0-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, s10
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(1)
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v6, s[6:7], v4, v6, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v8, s[6:7], v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v7, s[6:7], v1, v3, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v16, v7
+; GFX9-G-O0-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_sub_co_u32_e64 v4, s[6:7], v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s7, 64
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v3, v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_sub_u32_e64 v9, v1, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s6, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-G-O0-NEXT: v_cmp_lt_u32_e64 s[8:9], v4, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-G-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v4, v1
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[1:2], v4, v[13:14]
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[18:19], v9, v[13:14]
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[16:17], v4, v[11:12]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v18
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v19
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v16
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17
+; GFX9-G-O0-NEXT: v_or_b32_e64 v10, v10, v15
+; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[13:14], v3, v[13:14]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v2, v3, s[8:9]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v13
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v14
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[8:9]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[8:9]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v11
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v12
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v9, v9, v10, s[6:7]
+; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v3
+; GFX9-G-O0-NEXT: ; kill: def $vgpr1_vgpr2 killed $vgpr1_vgpr2 def $vgpr1_vgpr2_vgpr3_vgpr4 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v10
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX9-G-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8
+; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, s4
+; GFX9-G-O0-NEXT: v_cmp_ne_u64_e64 s[4:5], v[5:6], v[7:8]
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s10
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s11
+; GFX9-G-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec
+; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7]
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s6, 4
+; GFX9-G-O0-NEXT: v_writelane_b32 v0, s7, 5
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_5
+; GFX9-G-O0-NEXT: s_branch .LBB1_7
+; GFX9-G-O0-NEXT: .LBB1_9: ; %udiv-end
+; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v8
+; GFX9-G-O0-NEXT: ; kill: killed $vgpr4
+; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_nop 0
+; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5]
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = udiv i128 %lhs, %rhs
ret i128 %div
}
@@ -2388,6 +4576,66 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: v_lshrrev_b64 v[3:4], s4, v[3:4]
; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr3_vgpr4 killed $exec
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_sdiv_i128_v_pow2k:
+; GFX9-G: ; %bb.0:
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX9-G-NEXT: v_mov_b32_e32 v5, v4
+; GFX9-G-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
+; GFX9-G-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-G-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v2, vcc
+; GFX9-G-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v3, 1, v4
+; GFX9-G-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v2
+; GFX9-G-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_sdiv_i128_v_pow2k:
+; GFX9-G-O0: ; %bb.0:
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v0, v0, v3
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v0
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b64 v[6:7], v0, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v6
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-G-O0-NEXT: s_mov_b32 s8, 0
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-G-O0-NEXT: v_add_co_u32_e64 v4, s[6:7], v4, v5
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v1, s[6:7], v1, v0, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v5, s[6:7], v2, v0, s[6:7]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-G-O0-NEXT: v_addc_co_u32_e64 v4, s[6:7], v3, v0, s[6:7]
+; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s5, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v3
+; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v3, v2, v4
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v2, v2, v4
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = sdiv i128 %lhs, 8589934592
ret i128 %div
}
@@ -2434,10 +4682,42 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 killed $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, 0
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-LABEL: v_udiv_i128_v_pow2k:
+; GFX9-G: ; %bb.0:
+; GFX9-G-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[2:3]
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v4
+; GFX9-G-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v3
+; GFX9-G-NEXT: v_mov_b32_e32 v3, 0
+; GFX9-G-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-G-O0-LABEL: v_udiv_i128_v_pow2k:
+; GFX9-G-O0: ; %bb.0:
+; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v4
+; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v2, v2, v3
+; GFX9-G-O0-NEXT: s_mov_b32 s4, 0
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, s4
+; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31]
%div = udiv i128 %lhs, 8589934592
ret i128 %div
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX9-SDAG: {{.*}}
-; GFX9-SDAG-O0: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
index 0069370cc972..05558c555c58 100644
--- a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
+++ b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
@@ -42,6 +42,6 @@ attributes #0 = { "amdgpu-no-dispatch-id" }
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-no-dispatch-id" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir b/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
index 3616d617f84a..5ef8a94eeaa7 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-restore-undef-use.mir
@@ -8,6 +8,8 @@
---
name: restore_undef_copy_use
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
machineFunctionInfo:
maxKernArgAlign: 1
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
index 767d347bcfaa..a948fab8f1c1 100644
--- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
@@ -1181,18 +1181,28 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB42_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB42_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1200,20 +1210,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat(ptr addrspace(1) %pt
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB42_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB42_2
+; GFX90A-NEXT: .LBB42_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB42_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB42_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 seq_cst
@@ -1223,26 +1243,45 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB43_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB43_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB43_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB43_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1252,18 +1291,28 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace(1) %ptr) #1 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB44_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB44_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
@@ -1271,20 +1320,30 @@ define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_system(ptr addrspace
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB44_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB44_2
+; GFX90A-NEXT: .LBB44_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_system:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB44_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc0 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1] sc1
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc0 sc1
+; GFX940-NEXT: .LBB44_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") seq_cst
@@ -1294,26 +1353,45 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_flush(ptr addrspace(1) %ptr) #0 {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB45_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
+; GFX90A-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX90A-NEXT: v_mov_b32_e32 v2, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
+; GFX90A-NEXT: .LBB45_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB45_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB45_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -1485,37 +1563,57 @@ main_body:
define amdgpu_kernel void @global_atomic_fadd_f64_noret_pat_agent_safe(ptr addrspace(1) %ptr) {
; GFX90A-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB52_3
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s6
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
-; GFX90A-NEXT: v_mov_b32_e32 v4, 0
+; GFX90A-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX90A-NEXT: v_mov_b32_e32 v6, 0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], s[4:5], s[4:5] op_sel:[0,1]
-; GFX90A-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX90A-NEXT: .LBB52_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], 4.0
-; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX90A-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_execnz .LBB52_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB52_2
+; GFX90A-NEXT: .LBB52_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: global_atomic_fadd_f64_noret_pat_agent_safe:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB52_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX940-NEXT: v_mov_b32_e32 v2, 0
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: buffer_wbl2 sc1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: global_atomic_add_f64 v2, v[0:1], s[0:1]
; GFX940-NEXT: s_waitcnt vmcnt(0)
; GFX940-NEXT: buffer_inv sc1
+; GFX940-NEXT: .LBB52_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") seq_cst
@@ -2020,23 +2118,42 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr) #1 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB70_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB70_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB70_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB70_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2046,23 +2163,42 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3) %ptr) #0 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX90A: ; %bb.0: ; %main_body
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB71_2
+; GFX90A-NEXT: ; %bb.1:
; GFX90A-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX90A-NEXT: v_mov_b32_e32 v0, 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, 0x40100000
+; GFX90A-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v2, s0
; GFX90A-NEXT: ds_add_f64 v2, v[0:1]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: .LBB71_2:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX940: ; %bb.0: ; %main_body
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB71_2
+; GFX940-NEXT: ; %bb.1:
; GFX940-NEXT: s_load_dword s0, s[0:1], 0x24
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], 4.0
+; GFX940-NEXT: s_bcnt1_i32_b64 s1, s[2:3]
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v2, s0
; GFX940-NEXT: ds_add_f64 v2, v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: .LBB71_2:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2072,46 +2208,66 @@ main_body:
define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrspace(3) %ptr) #4 {
; GFX90A-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX90A: ; %bb.0: ; %main_body
-; GFX90A-NEXT: s_load_dword s2, s[0:1], 0x24
-; GFX90A-NEXT: s_mov_b64 s[0:1], 0
+; GFX90A-NEXT: s_mov_b64 s[2:3], exec
+; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX90A-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX90A-NEXT: s_cbranch_execz .LBB72_3
+; GFX90A-NEXT: ; %bb.1:
+; GFX90A-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX90A-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v0, s2
-; GFX90A-NEXT: ds_read_b64 v[0:1], v0
-; GFX90A-NEXT: v_mov_b32_e32 v2, s2
-; GFX90A-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX90A-NEXT: v_mov_b32_e32 v0, s4
+; GFX90A-NEXT: ds_read_b64 v[2:3], v0
+; GFX90A-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX90A-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; GFX90A-NEXT: s_mov_b64 s[0:1], 0
+; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: .LBB72_2: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX90A-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX90A-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[6:7], v[6:7] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX90A-NEXT: s_cbranch_execnz .LBB72_1
-; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX90A-NEXT: s_cbranch_execnz .LBB72_2
+; GFX90A-NEXT: .LBB72_3:
; GFX90A-NEXT: s_endpgm
;
; GFX940-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX940: ; %bb.0: ; %main_body
-; GFX940-NEXT: s_load_dword s2, s[0:1], 0x24
-; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: s_mov_b64 s[2:3], exec
+; GFX940-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX940-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX940-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX940-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX940-NEXT: s_cbranch_execz .LBB72_3
+; GFX940-NEXT: ; %bb.1:
+; GFX940-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX940-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_mov_b32_e32 v0, s2
-; GFX940-NEXT: ds_read_b64 v[0:1], v0
-; GFX940-NEXT: v_mov_b32_e32 v2, s2
-; GFX940-NEXT: .LBB72_1: ; %atomicrmw.start
+; GFX940-NEXT: v_mov_b32_e32 v0, s4
+; GFX940-NEXT: ds_read_b64 v[2:3], v0
+; GFX940-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX940-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; GFX940-NEXT: s_mov_b64 s[0:1], 0
+; GFX940-NEXT: v_mov_b32_e32 v4, s4
+; GFX940-NEXT: .LBB72_2: ; %atomicrmw.start
; GFX940-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_add_f64 v[4:5], v[0:1], 4.0
-; GFX940-NEXT: ds_cmpst_rtn_b64 v[4:5], v2, v[0:1], v[4:5]
+; GFX940-NEXT: v_add_f64 v[6:7], v[2:3], v[0:1]
+; GFX940-NEXT: ds_cmpst_rtn_b64 v[6:7], v4, v[2:3], v[6:7]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
-; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[0:1]
+; GFX940-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; GFX940-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX940-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
+; GFX940-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
; GFX940-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX940-NEXT: s_cbranch_execnz .LBB72_1
-; GFX940-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX940-NEXT: s_cbranch_execnz .LBB72_2
+; GFX940-NEXT: .LBB72_3:
; GFX940-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index b2311a87059c..66bf0d5abb73 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -22,35 +22,31 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0xffffff7f
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SDAG-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; SDAG-NEXT: v_and_b32_e32 v0, 1, v0
-; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB0_7
; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; SDAG-NEXT: v_add_co_u32_e32 v9, vcc, -1, v0
-; SDAG-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, -1, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0x432
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x432
; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v5
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
-; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, s[4:5]
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, vcc
; SDAG-NEXT: v_or_b32_e32 v5, 0x100000, v0
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB0_4
; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
@@ -59,37 +55,37 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[4:5]
; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[4:5]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7
-; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v7
; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v1, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v12, 0, v0, vcc
-; SDAG-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v12, v11, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v11, 0
; SDAG-NEXT: v_mov_b32_e32 v3, 0
; SDAG-NEXT: v_mul_lo_u32 v13, v8, v2
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v11, v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v11, v2, 0
; SDAG-NEXT: v_mov_b32_e32 v2, v1
; SDAG-NEXT: v_mul_lo_u32 v6, v11, v6
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v7, v11, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v7, v11, v[2:3]
; SDAG-NEXT: v_mul_lo_u32 v10, v10, v12
; SDAG-NEXT: v_add3_u32 v5, v5, v6, v13
; SDAG-NEXT: v_mov_b32_e32 v6, v2
; SDAG-NEXT: v_mov_b32_e32 v2, v3
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v12, v8, v[1:2]
-; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v9, v12, v[4:5]
-; SDAG-NEXT: v_add_co_u32_e32 v5, vcc, v6, v2
-; SDAG-NEXT: v_addc_co_u32_e64 v6, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
; SDAG-NEXT: v_mul_lo_u32 v9, v9, v7
-; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[6:7], v7, v8, v[5:6]
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v8, v[5:6]
; SDAG-NEXT: ; implicit-def: $vgpr11
; SDAG-NEXT: ; implicit-def: $vgpr8
; SDAG-NEXT: v_add3_u32 v4, v10, v4, v9
-; SDAG-NEXT: v_add_co_u32_e32 v2, vcc, v5, v3
-; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v3
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v4, s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
; SDAG-NEXT: ; implicit-def: $vgpr9
@@ -100,37 +96,37 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; SDAG-NEXT: v_sub_u32_e32 v2, 0x433, v6
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[4:5]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v2
-; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v6, v0, v4, s[6:7]
; SDAG-NEXT: v_cndmask_b32_e64 v5, v1, v5, s[6:7]
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v6, v11, 0
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v11, 0
; SDAG-NEXT: v_mov_b32_e32 v2, 0
-; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v5, v11, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v11, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v7, v4
; SDAG-NEXT: v_mov_b32_e32 v4, v2
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v6, v8, v[3:4]
-; SDAG-NEXT: v_add_co_u32_e32 v2, vcc, v7, v2
-; SDAG-NEXT: v_addc_co_u32_e64 v3, s[6:7], 0, 0, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v8, v[2:3]
-; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v9, v6, v[2:3]
-; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v10, v6, v[3:4]
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v6, v8, v[3:4]
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v7, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v8, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v9, v6, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v6, v[3:4]
; SDAG-NEXT: v_mad_i32_i24 v3, v9, v5, v3
; SDAG-NEXT: .LBB0_6: ; %Flow1
; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
; SDAG-NEXT: .LBB0_7: ; %Flow2
-; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[10:11]
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
; SDAG-NEXT: ; %bb.9: ; %Flow3
-; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB0_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
; SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -158,14 +154,16 @@ define i128 @fptosi_f64_to_i128(double %x) {
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
; GISEL-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
-; GISEL-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
@@ -238,7 +236,7 @@ define i128 @fptosi_f64_to_i128(double %x) {
; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
; GISEL-NEXT: s_cbranch_execz .LBB0_4
; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT: v_add_co_u32_e32 v6, vcc, 0xfffffbcd, v6
+; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffbcd, v6
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
@@ -396,35 +394,31 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0xffffff7f
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SDAG-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; SDAG-NEXT: v_and_b32_e32 v0, 1, v0
-; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB1_7
; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; SDAG-NEXT: v_add_co_u32_e32 v9, vcc, -1, v0
-; SDAG-NEXT: v_addc_co_u32_e64 v10, s[6:7], 0, -1, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0x432
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x432
; SDAG-NEXT: v_and_b32_e32 v0, 0xfffff, v5
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
-; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, s[4:5]
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[6:7]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v11, -1, 1, vcc
; SDAG-NEXT: v_or_b32_e32 v5, 0x100000, v0
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB1_4
; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
@@ -433,37 +427,37 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[4:5]
; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[4:5]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7
-; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v7
; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, v1, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v12, 0, v0, vcc
-; SDAG-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v12, v11, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v7, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v12, v11, 0
; SDAG-NEXT: v_mov_b32_e32 v3, 0
; SDAG-NEXT: v_mul_lo_u32 v13, v8, v2
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v11, v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v11, v2, 0
; SDAG-NEXT: v_mov_b32_e32 v2, v1
; SDAG-NEXT: v_mul_lo_u32 v6, v11, v6
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v7, v11, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v7, v11, v[2:3]
; SDAG-NEXT: v_mul_lo_u32 v10, v10, v12
; SDAG-NEXT: v_add3_u32 v5, v5, v6, v13
; SDAG-NEXT: v_mov_b32_e32 v6, v2
; SDAG-NEXT: v_mov_b32_e32 v2, v3
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v12, v8, v[1:2]
-; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v9, v12, v[4:5]
-; SDAG-NEXT: v_add_co_u32_e32 v5, vcc, v6, v2
-; SDAG-NEXT: v_addc_co_u32_e64 v6, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
; SDAG-NEXT: v_mul_lo_u32 v9, v9, v7
-; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[6:7], v7, v8, v[5:6]
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v7, v8, v[5:6]
; SDAG-NEXT: ; implicit-def: $vgpr11
; SDAG-NEXT: ; implicit-def: $vgpr8
; SDAG-NEXT: v_add3_u32 v4, v10, v4, v9
-; SDAG-NEXT: v_add_co_u32_e32 v2, vcc, v5, v3
-; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v3
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v4, s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
; SDAG-NEXT: ; implicit-def: $vgpr4_vgpr5
; SDAG-NEXT: ; implicit-def: $vgpr9
@@ -474,37 +468,37 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; SDAG-NEXT: v_sub_u32_e32 v2, 0x433, v6
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[4:5]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
; SDAG-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v2
-; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, v1, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v6, v0, v4, s[6:7]
; SDAG-NEXT: v_cndmask_b32_e64 v5, v1, v5, s[6:7]
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v6, v11, 0
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v11, 0
; SDAG-NEXT: v_mov_b32_e32 v2, 0
-; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v5, v11, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v5, v11, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v7, v4
; SDAG-NEXT: v_mov_b32_e32 v4, v2
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v6, v8, v[3:4]
-; SDAG-NEXT: v_add_co_u32_e32 v2, vcc, v7, v2
-; SDAG-NEXT: v_addc_co_u32_e64 v3, s[6:7], 0, 0, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v8, v[2:3]
-; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v9, v6, v[2:3]
-; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v10, v6, v[3:4]
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v6, v8, v[3:4]
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v7, v2
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, v8, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v9, v6, v[2:3]
+; SDAG-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v6, v[3:4]
; SDAG-NEXT: v_mad_i32_i24 v3, v9, v5, v3
; SDAG-NEXT: .LBB1_6: ; %Flow1
; SDAG-NEXT: s_or_b64 exec, exec, s[12:13]
; SDAG-NEXT: .LBB1_7: ; %Flow2
-; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[10:11]
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
; SDAG-NEXT: ; %bb.9: ; %Flow3
-; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB1_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
; SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -532,14 +526,16 @@ define i128 @fptoui_f64_to_i128(double %x) {
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
; GISEL-NEXT: v_cmp_lt_i64_e64 s[4:5], -1, v[4:5]
-; GISEL-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
@@ -612,7 +608,7 @@ define i128 @fptoui_f64_to_i128(double %x) {
; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
; GISEL-NEXT: s_cbranch_execz .LBB1_4
; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT: v_add_co_u32_e32 v6, vcc, 0xfffffbcd, v6
+; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffbcd, v6
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
@@ -769,36 +765,32 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0xffffff7f
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SDAG-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; SDAG-NEXT: v_and_b32_e32 v0, 1, v0
-; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, -1, v4
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB2_7
; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; SDAG-NEXT: v_add_co_u32_e32 v9, vcc, -1, v0
-; SDAG-NEXT: v_addc_co_u32_e64 v11, s[6:7], 0, -1, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0x95
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x95
; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v4
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[5:6]
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
; SDAG-NEXT: v_mov_b32_e32 v7, 0
-; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, vcc
; SDAG-NEXT: v_or_b32_e32 v6, 0x800000, v0
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB2_4
; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
@@ -807,37 +799,37 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff6a, v5
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[6:7]
; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
-; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v4
; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v1, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[6:7]
; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v13, 0, v0, vcc
-; SDAG-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v13, v10, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v10, 0
; SDAG-NEXT: v_mul_lo_u32 v14, v8, v2
; SDAG-NEXT: v_mul_lo_u32 v15, v10, v3
; SDAG-NEXT: v_mov_b32_e32 v6, v1
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v12, v10, v[6:7]
-; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v10, v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v12, v10, v[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
; SDAG-NEXT: v_mov_b32_e32 v6, v5
; SDAG-NEXT: v_mov_b32_e32 v5, v7
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v13, v8, v[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, v[4:5]
; SDAG-NEXT: v_add3_u32 v3, v3, v15, v14
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v9, v13, v[2:3]
-; SDAG-NEXT: v_add_co_u32_e32 v5, vcc, v6, v5
-; SDAG-NEXT: v_addc_co_u32_e64 v6, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v13, v[2:3]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v5
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
; SDAG-NEXT: v_mul_lo_u32 v3, v9, v12
; SDAG-NEXT: v_mul_lo_u32 v7, v11, v13
-; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[6:7], v12, v8, v[5:6]
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v12, v8, v[5:6]
; SDAG-NEXT: ; implicit-def: $vgpr10
; SDAG-NEXT: ; implicit-def: $vgpr8
; SDAG-NEXT: ; implicit-def: $vgpr9
; SDAG-NEXT: v_add3_u32 v3, v7, v2, v3
-; SDAG-NEXT: v_add_co_u32_e32 v2, vcc, v5, v1
-; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, v6, v3, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v1
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v3, s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr5_vgpr6
; SDAG-NEXT: v_mov_b32_e32 v1, v4
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
@@ -847,29 +839,29 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
-; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
-; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v6, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[12:13], v3, v10, 0
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
; SDAG-NEXT: v_mov_b32_e32 v2, 0
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[12:13], v3, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v1, v5
-; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[12:13], v9, v3, v[1:2]
+; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v1, v4
; SDAG-NEXT: .LBB2_6: ; %Flow1
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB2_7: ; %Flow2
-; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[10:11]
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
; SDAG-NEXT: ; %bb.9: ; %Flow3
-; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB2_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
; SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -897,14 +889,16 @@ define i128 @fptosi_f32_to_i128(float %x) {
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v6
; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
; GISEL-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
-; GISEL-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
@@ -978,7 +972,7 @@ define i128 @fptosi_f32_to_i128(float %x) {
; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
; GISEL-NEXT: s_cbranch_execz .LBB2_4
; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT: v_add_co_u32_e32 v6, vcc, 0xffffff6a, v6
+; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff6a, v6
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
@@ -1129,36 +1123,32 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
+; SDAG-NEXT: s_movk_i32 s4, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0xffffff7f
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SDAG-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SDAG-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
-; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; SDAG-NEXT: v_and_b32_e32 v0, 1, v0
-; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; SDAG-NEXT: s_mov_b32 s5, -1
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[0:1]
+; SDAG-NEXT: v_cmp_eq_u64_e64 s[6:7], -1, v[2:3]
+; SDAG-NEXT: v_cmp_lt_i32_e32 vcc, -1, v4
+; SDAG-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[10:11], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB3_7
; SDAG-NEXT: ; %bb.2: ; %fp-to-i-if-end9
-; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; SDAG-NEXT: v_add_co_u32_e32 v9, vcc, -1, v0
-; SDAG-NEXT: v_addc_co_u32_e64 v11, s[6:7], 0, -1, vcc
-; SDAG-NEXT: s_mov_b64 s[6:7], 0x95
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v9, s[4:5], -1, v0
+; SDAG-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, -1, s[4:5]
+; SDAG-NEXT: s_mov_b64 s[4:5], 0x95
; SDAG-NEXT: v_and_b32_e32 v0, 0x7fffff, v4
-; SDAG-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[5:6]
+; SDAG-NEXT: v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
; SDAG-NEXT: v_mov_b32_e32 v7, 0
-; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v10, -1, 1, vcc
; SDAG-NEXT: v_or_b32_e32 v6, 0x800000, v0
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
-; SDAG-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SDAG-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; SDAG-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
; SDAG-NEXT: s_cbranch_execz .LBB3_4
; SDAG-NEXT: ; %bb.3: ; %fp-to-i-if-else
@@ -1167,37 +1157,37 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: v_add_u32_e32 v4, 0xffffff6a, v5
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v0, v[6:7]
; SDAG-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v4
-; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v4
+; SDAG-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[4:5]
; SDAG-NEXT: v_cmp_ne_u32_e64 s[6:7], 0, v4
; SDAG-NEXT: v_cndmask_b32_e64 v3, 0, v1, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
+; SDAG-NEXT: v_cndmask_b32_e64 v2, v2, v0, s[4:5]
; SDAG-NEXT: v_lshlrev_b64 v[0:1], v4, v[6:7]
; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT: v_cndmask_b32_e32 v13, 0, v0, vcc
-; SDAG-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v13, v10, 0
+; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v0, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v12, 0, v1, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v13, v10, 0
; SDAG-NEXT: v_mul_lo_u32 v14, v8, v2
; SDAG-NEXT: v_mul_lo_u32 v15, v10, v3
; SDAG-NEXT: v_mov_b32_e32 v6, v1
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v12, v10, v[6:7]
-; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v10, v2, 0
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v12, v10, v[6:7]
+; SDAG-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
; SDAG-NEXT: v_mov_b32_e32 v6, v5
; SDAG-NEXT: v_mov_b32_e32 v5, v7
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v13, v8, v[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, v[4:5]
; SDAG-NEXT: v_add3_u32 v3, v3, v15, v14
-; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v9, v13, v[2:3]
-; SDAG-NEXT: v_add_co_u32_e32 v5, vcc, v6, v5
-; SDAG-NEXT: v_addc_co_u32_e64 v6, s[6:7], 0, 0, vcc
+; SDAG-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v9, v13, v[2:3]
+; SDAG-NEXT: v_add_co_u32_e64 v5, s[4:5], v6, v5
+; SDAG-NEXT: v_addc_co_u32_e64 v6, s[4:5], 0, 0, s[4:5]
; SDAG-NEXT: v_mul_lo_u32 v3, v9, v12
; SDAG-NEXT: v_mul_lo_u32 v7, v11, v13
-; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[6:7], v12, v8, v[5:6]
+; SDAG-NEXT: v_mad_u64_u32 v[5:6], s[4:5], v12, v8, v[5:6]
; SDAG-NEXT: ; implicit-def: $vgpr10
; SDAG-NEXT: ; implicit-def: $vgpr8
; SDAG-NEXT: ; implicit-def: $vgpr9
; SDAG-NEXT: v_add3_u32 v3, v7, v2, v3
-; SDAG-NEXT: v_add_co_u32_e32 v2, vcc, v5, v1
-; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, v6, v3, vcc
+; SDAG-NEXT: v_add_co_u32_e64 v2, s[4:5], v5, v1
+; SDAG-NEXT: v_addc_co_u32_e64 v3, s[4:5], v6, v3, s[4:5]
; SDAG-NEXT: ; implicit-def: $vgpr5_vgpr6
; SDAG-NEXT: v_mov_b32_e32 v1, v4
; SDAG-NEXT: ; implicit-def: $vgpr6_vgpr7
@@ -1207,29 +1197,29 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: ; %bb.5: ; %fp-to-i-if-then12
; SDAG-NEXT: v_sub_u32_e32 v2, 0x96, v5
; SDAG-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7]
-; SDAG-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2
-; SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SDAG-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
-; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v6, vcc
-; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[12:13], v3, v10, 0
+; SDAG-NEXT: v_cmp_gt_u32_e64 s[4:5], 64, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, v0, s[4:5]
+; SDAG-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v2
+; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
; SDAG-NEXT: v_mov_b32_e32 v2, 0
-; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[12:13], v3, v8, v[1:2]
+; SDAG-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v1, v5
-; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[12:13], v9, v3, v[1:2]
+; SDAG-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
; SDAG-NEXT: v_mov_b32_e32 v1, v4
; SDAG-NEXT: .LBB3_6: ; %Flow1
; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
; SDAG-NEXT: .LBB3_7: ; %Flow2
-; SDAG-NEXT: s_andn2_saveexec_b64 s[6:7], s[10:11]
+; SDAG-NEXT: s_andn2_saveexec_b64 s[4:5], s[10:11]
; SDAG-NEXT: ; %bb.8: ; %fp-to-i-if-then5
; SDAG-NEXT: v_bfrev_b32_e32 v0, 1
; SDAG-NEXT: v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[4:5]
-; SDAG-NEXT: v_cndmask_b32_e64 v3, v0, v1, s[4:5]
+; SDAG-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc
; SDAG-NEXT: v_mov_b32_e32 v0, v2
; SDAG-NEXT: v_mov_b32_e32 v1, v2
; SDAG-NEXT: ; %bb.9: ; %Flow3
-; SDAG-NEXT: s_or_b64 exec, exec, s[6:7]
+; SDAG-NEXT: s_or_b64 exec, exec, s[4:5]
; SDAG-NEXT: .LBB3_10: ; %fp-to-i-cleanup
; SDAG-NEXT: s_or_b64 exec, exec, s[8:9]
; SDAG-NEXT: s_setpc_b64 s[30:31]
@@ -1257,14 +1247,16 @@ define i128 @fptoui_f32_to_i128(float %x) {
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v6
; GISEL-NEXT: v_mov_b32_e32 v2, 0xffffff80
; GISEL-NEXT: v_addc_co_u32_e64 v1, s[6:7], 0, -1, vcc
-; GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GISEL-NEXT: v_mov_b32_e32 v3, -1
; GISEL-NEXT: v_addc_co_u32_e64 v8, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_addc_co_u32_e64 v9, s[6:7], 0, -1, s[6:7]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GISEL-NEXT: v_cmp_le_u64_e32 vcc, -1, v[8:9]
; GISEL-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v4
-; GISEL-NEXT: v_cndmask_b32_e32 v0, 1, v0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, -1, v[8:9]
+; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GISEL-NEXT: v_and_b32_e32 v0, 1, v0
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
@@ -1338,7 +1330,7 @@ define i128 @fptoui_f32_to_i128(float %x) {
; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7]
; GISEL-NEXT: s_cbranch_execz .LBB3_4
; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT: v_add_co_u32_e32 v6, vcc, 0xffffff6a, v6
+; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff6a, v6
; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[4:5]
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6
; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index 538ef42121b8..b71728096093 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
; Tests various combinations of uniform/divergent address and uniform/divergent value inputs of various types for atomic operations.
; Optimization remains same for Iterative and DPP strategies when value in uniform. These different scan/reduction
@@ -1058,6 +1058,566 @@ define amdgpu_ps float @global_atomic_fadd_div_address_div_value_system_scope_st
ret float %result
}
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
+; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]])
+; IR-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]])
+; IR-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-NEXT: [[TMP27:%.*]] = uitofp i32 [[TMP8]] to double
+; IR-NEXT: [[TMP28:%.*]] = fmul double [[VAL]], [[TMP27]]
+; IR-NEXT: [[TMP29:%.*]] = fadd double [[TMP26]], [[TMP28]]
+; IR-NEXT: br label [[TMP30]]
+; IR: 30:
+; IR-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]])
+; IR-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]])
+; IR-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-NEXT: [[TMP23:%.*]] = uitofp i32 [[TMP8]] to double
+; IR-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0x7FF0000000000000, double [[VAL]]
+; IR-NEXT: [[TMP25:%.*]] = call double @llvm.minnum.f64(double [[TMP22]], double [[TMP24]])
+; IR-NEXT: br label [[TMP26]]
+; IR: 26:
+; IR-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-NEXT: ret double [[TMP27]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP12]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0xFFF0000000000000, double [[VAL]]
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP22]], double [[TMP24]], metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP26]]
+; IR-ITERATIVE: 26:
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP27]]
+;
+; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-DPP: 10:
+; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP12]]
+; IR-DPP: 12:
+; IR-DPP-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
+; IR-DPP-NEXT: [[TMP14:%.*]] = bitcast double [[TMP13]] to i64
+; IR-DPP-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
+; IR-DPP-NEXT: [[TMP16:%.*]] = lshr i64 [[TMP14]], 32
+; IR-DPP-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP16]] to i32
+; IR-DPP-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP15]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP19:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = insertelement <2 x i32> poison, i32 [[TMP18]], i32 0
+; IR-DPP-NEXT: [[TMP21:%.*]] = insertelement <2 x i32> [[TMP20]], i32 [[TMP19]], i32 1
+; IR-DPP-NEXT: [[TMP22:%.*]] = bitcast <2 x i32> [[TMP21]] to double
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = select i1 [[TMP9]], double 0xFFF0000000000000, double [[VAL]]
+; IR-DPP-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP22]], double [[TMP24]], metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP26]]
+; IR-DPP: 26:
+; IR-DPP-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
+; IR-DPP-NEXT: ret double [[TMP27]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-ITERATIVE-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-ITERATIVE-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br label [[TMP30]]
+; IR-ITERATIVE: 30:
+; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-ITERATIVE-NEXT: ret double [[TMP31]]
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
+; IR-DPP-NEXT: [[TMP18:%.*]] = bitcast double [[TMP17]] to i64
+; IR-DPP-NEXT: [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
+; IR-DPP-NEXT: [[TMP20:%.*]] = lshr i64 [[TMP18]], 32
+; IR-DPP-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-DPP-NEXT: [[TMP22:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP19]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP23:%.*]] = call i32 @llvm.amdgcn.readfirstlane(i32 [[TMP21]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP24:%.*]] = insertelement <2 x i32> poison, i32 [[TMP22]], i32 0
+; IR-DPP-NEXT: [[TMP25:%.*]] = insertelement <2 x i32> [[TMP24]], i32 [[TMP23]], i32 1
+; IR-DPP-NEXT: [[TMP26:%.*]] = bitcast <2 x i32> [[TMP25]] to double
+; IR-DPP-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP26]], double [[TMP28]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: br label [[TMP30]]
+; IR-DPP: 30:
+; IR-DPP-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
+; IR-DPP-NEXT: ret double [[TMP31]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fmin_double_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret double [[RESULT]]
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret double %result
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp }
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
index 76ec1cc84f55..99d02ffaa523 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
@@ -358,65 +358,6 @@ define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_offset_scalar(ptr addrspace(1)
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB0_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB0_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB0_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -450,69 +391,6 @@ define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
}
define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_f32_e32 v0, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB1_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB1_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB1_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -549,71 +427,6 @@ define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %i
}
define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB2_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v0, v4
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB2_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v0, v4
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB2_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -648,73 +461,6 @@ define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
}
define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_f32_e32 v4, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB3_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB3_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB3_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -752,80 +498,6 @@ define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB4_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB4_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB4_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -876,84 +548,6 @@ define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s34
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB5_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s34
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB5_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB5_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1007,83 +601,6 @@ define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB6_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB6_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB6_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1134,87 +651,6 @@ define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inre
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB7_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB7_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB7_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index d137f471910d..380ce7f3b939 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -372,65 +372,6 @@ define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_offset_scalar(ptr addrspace(1)
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB0_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB0_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB0_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -464,69 +405,6 @@ define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
}
define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_f64_e32 v0, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v3, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB1_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v3, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB1_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB1_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -563,71 +441,6 @@ define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %
}
define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v4
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB2_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v0, v4
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v4
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB2_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v0, v4
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB2_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -663,73 +476,6 @@ define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
}
define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_f64_e32 v4, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: global_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v3, v0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB3_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: global_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v3, v0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[4:5], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB3_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_offset:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v3, v4
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v4, v[0:1], v[2:3] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB3_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v4
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -768,80 +514,6 @@ define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s4
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB4_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s4
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB4_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB4_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -896,84 +568,6 @@ define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v1, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v2, s34
-; GCN1-NEXT: v_mov_b32_e32 v0, s6
-; GCN1-NEXT: v_mov_b32_e32 v3, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB5_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v1, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v2, s34
-; GCN2-NEXT: v_mov_b32_e32 v0, s6
-; GCN2-NEXT: v_mov_b32_e32 v3, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB5_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v1, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v2, s4
-; GCN3-NEXT: v_mov_b32_e32 v0, s6
-; GCN3-NEXT: v_mov_b32_e32 v3, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: v_mov_b32_e32 v1, v0
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB5_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1029,83 +623,6 @@ define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v0, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s5
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[34:35], 0
-; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s4
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s5
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_cbranch_execnz .LBB6_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v0, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s5
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[34:35], 0
-; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s4
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s5
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_cbranch_execnz .LBB6_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1]
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB6_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -1160,87 +677,6 @@ define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inr
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
-; GCN1-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN1: ; %bb.0:
-; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: s_add_u32 s34, s4, 16
-; GCN1-NEXT: s_addc_u32 s35, s5, 0
-; GCN1-NEXT: v_mov_b32_e32 v0, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s35
-; GCN1-NEXT: global_load_dword v0, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[36:37], 0
-; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: v_mov_b32_e32 v3, s34
-; GCN1-NEXT: v_mov_b32_e32 v1, s6
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v2, v0
-; GCN1-NEXT: v_mov_b32_e32 v4, s35
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN1-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_cbranch_execnz .LBB7_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN1-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN2-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN2: ; %bb.0:
-; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: s_add_u32 s34, s4, 16
-; GCN2-NEXT: s_addc_u32 s35, s5, 0
-; GCN2-NEXT: v_mov_b32_e32 v0, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s35
-; GCN2-NEXT: global_load_dword v0, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[36:37], 0
-; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: v_mov_b32_e32 v3, s34
-; GCN2-NEXT: v_mov_b32_e32 v1, s6
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v2, v0
-; GCN2-NEXT: v_mov_b32_e32 v4, s35
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN2-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_cbranch_execnz .LBB7_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[36:37]
-; GCN2-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN3-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
-; GCN3: ; %bb.0:
-; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v0, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s5
-; GCN3-NEXT: global_load_dword v0, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[34:35], 0
-; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: v_mov_b32_e32 v3, s4
-; GCN3-NEXT: v_mov_b32_e32 v1, s6
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v2, v0
-; GCN3-NEXT: v_mov_b32_e32 v4, s5
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: global_atomic_cmpswap v0, v[3:4], v[1:2] offset:16 glc
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
-; GCN3-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_cbranch_execnz .LBB7_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[34:35]
-; GCN3-NEXT: s_setpc_b64 s[30:31]
; SI-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
index fab24e10f810..86e3d9338e07 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_iterative_scan_fp.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck -check-prefix=IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck -check-prefix=IR-DPP %s
declare i32 @llvm.amdgcn.workitem.id.x()
define amdgpu_kernel void @global_atomic_fadd_uni_value(ptr addrspace(1) %ptr) #0 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_value(
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index cc7a45cbb6e3..b9234f47df19 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=Iterative -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
-; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -amdgpu-atomic-optimizer-strategy=DPP -passes='amdgpu-atomic-optimizer,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=iterative>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-ITERATIVE %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx906 -passes='amdgpu-atomic-optimizer<strategy=dpp>,verify<domtree>' %s | FileCheck --check-prefixes=IR,IR-DPP %s
; Tests various combinations of uniform/divergent address and uniform/divergent value inputs of various types for atomic operations.
; Optimization remains same for Iterative and DPP strategies when value in uniform. These different scan/reduction
@@ -864,6 +864,426 @@ define amdgpu_ps void @global_atomic_fadd_div_address_div_value_system_scope_str
ret void
}
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
+; IR-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-NEXT: [[TMP11:%.*]] = uitofp i32 [[TMP10]] to double
+; IR-NEXT: [[TMP12:%.*]] = fmul double [[VAL:%.*]], [[TMP11]]
+; IR-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR: 14:
+; IR-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP16]]
+; IR: 16:
+; IR-NEXT: br label [[TMP17]]
+; IR: 17:
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_scope_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
+; IR-ITERATIVE-NEXT: ret void
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("one-as") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
+; IR-DPP-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
+; IR-ITERATIVE-NEXT: ret void
+;
+; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
+; IR-DPP-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) inreg %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-ITERATIVE: 10:
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-ITERATIVE-NEXT: br label [[TMP12]]
+; IR-ITERATIVE: 12:
+; IR-ITERATIVE-NEXT: br label [[TMP13]]
+; IR-ITERATIVE: 13:
+; IR-ITERATIVE-NEXT: ret void
+;
+; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR-DPP: 10:
+; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-DPP-NEXT: br label [[TMP12]]
+; IR-DPP: 12:
+; IR-DPP-NEXT: br label [[TMP13]]
+; IR-DPP: 13:
+; IR-DPP-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-ITERATIVE: 2:
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-ITERATIVE: 14:
+; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-ITERATIVE-NEXT: br label [[TMP16]]
+; IR-ITERATIVE: 16:
+; IR-ITERATIVE-NEXT: br label [[TMP17]]
+; IR-ITERATIVE: 17:
+; IR-ITERATIVE-NEXT: ret void
+;
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
+; IR-DPP: 2:
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
+; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
+; IR-DPP: 14:
+; IR-DPP-NEXT: [[TMP15:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP12]] monotonic, align 4
+; IR-DPP-NEXT: br label [[TMP16]]
+; IR-DPP: 16:
+; IR-DPP-NEXT: br label [[TMP17]]
+; IR-DPP: 17:
+; IR-DPP-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_agent_scope_unsafe(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_uni_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fsub_double_div_address_div_value_agent_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_div_address_uni_value_agent_scope(ptr addrspace(1) %ptr, double inreg %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_uni_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmin_double_div_address_div_value_agent_scope(ptr addrspace(1) %ptr, double %val) #0 {
+; IR-LABEL: @global_atomic_fmin_double_div_address_div_value_agent_scope(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmin ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(ptr addrspace(1) %ptr, double inreg %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(ptr addrspace(1) %ptr, double %val) #2 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_system_scope_strictfp(
+; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] monotonic, align 4
+; IR-NEXT: ret void
+;
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp }
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index 96c615b974ce..4f00d48551be 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_agent_scope_unsafe:
@@ -5408,6 +5409,5583 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
+; GFX7LESS-NEXT: .LBB9_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-NEXT: .LBB9_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-NEXT: .LBB9_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-NEXT: .LBB9_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-NEXT: .LBB9_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-NEXT: .LBB9_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-DPP-NEXT: .LBB9_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-DPP-NEXT: .LBB9_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-DPP-NEXT: .LBB9_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-DPP-NEXT: .LBB9_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-DPP-NEXT: .LBB9_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value()
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB11_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_2
+; GFX7LESS-NEXT: .LBB11_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-NEXT: .LBB11_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-NEXT: .LBB11_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-NEXT: .LBB11_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-NEXT: .LBB11_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-NEXT: .LBB11_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-DPP-NEXT: .LBB11_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-DPP-NEXT: .LBB11_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-DPP-NEXT: .LBB11_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-DPP-NEXT: .LBB11_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-DPP-NEXT: .LBB11_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr) #2{
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB13_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB13_2
+; GFX7LESS-NEXT: .LBB13_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-NEXT: .LBB13_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-NEXT: .LBB13_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-NEXT: .LBB13_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-NEXT: .LBB13_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-NEXT: .LBB13_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-DPP-NEXT: .LBB13_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-DPP-NEXT: .LBB13_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-DPP-NEXT: .LBB13_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-DPP-NEXT: .LBB13_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-DPP-NEXT: .LBB13_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
+; GFX7LESS-NEXT: .LBB16_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-NEXT: .LBB16_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-NEXT: .LBB16_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-NEXT: .LBB16_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-NEXT: .LBB16_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-NEXT: .LBB16_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-DPP-NEXT: .LBB16_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-DPP-NEXT: .LBB16_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-DPP-NEXT: .LBB16_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-DPP-NEXT: .LBB16_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-DPP-NEXT: .LBB16_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index 3cc5a4cd1d0a..622be43e7442 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare float @div.double.value()
define amdgpu_kernel void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe:
@@ -3550,6 +3551,3965 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
+; GFX7LESS-NEXT: .LBB6_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-NEXT: .LBB6_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-NEXT: .LBB6_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-NEXT: .LBB6_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-NEXT: .LBB6_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-NEXT: .LBB6_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-DPP-NEXT: .LBB6_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-DPP-NEXT: .LBB6_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-DPP-NEXT: .LBB6_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-DPP-NEXT: .LBB6_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-DPP-NEXT: .LBB6_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB8_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB8_2
+; GFX7LESS-NEXT: .LBB8_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-NEXT: .LBB8_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-NEXT: .LBB8_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-NEXT: .LBB8_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-NEXT: .LBB8_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-NEXT: .LBB8_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX9-DPP-NEXT: .LBB8_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1064-DPP-NEXT: .LBB8_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1032-DPP-NEXT: .LBB8_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1164-DPP-NEXT: .LBB8_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB8_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB8_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB8_2
+; GFX1132-DPP-NEXT: .LBB8_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
+; GFX7LESS-NEXT: .LBB10_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-NEXT: .LBB10_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-NEXT: .LBB10_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-NEXT: .LBB10_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-NEXT: .LBB10_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-NEXT: .LBB10_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-DPP-NEXT: .LBB10_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-DPP-NEXT: .LBB10_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-DPP-NEXT: .LBB10_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-DPP-NEXT: .LBB10_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-DPP-NEXT: .LBB10_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index 314c52a71d93..49d415c9eed7 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare float @div.double.value()
define amdgpu_kernel void @global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_uni_address_uni_value_agent_scope_unsafe:
@@ -3550,6 +3551,3965 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
+; GFX7LESS-NEXT: .LBB6_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-NEXT: .LBB6_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-NEXT: .LBB6_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-NEXT: .LBB6_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-NEXT: .LBB6_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-NEXT: .LBB6_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX9-DPP-NEXT: .LBB6_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1064-DPP-NEXT: .LBB6_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1032-DPP-NEXT: .LBB6_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1164-DPP-NEXT: .LBB6_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
+; GFX1132-DPP-NEXT: .LBB6_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+; Kernel under test: atomicrmw fmin of a uniform constant (4.0) into a
+; uniform global (addrspace(1)) double address, syncscope("one-as"),
+; monotonic ordering (see the IR at the bottom of this function).
+; The per-target assembly below is autogenerated FileCheck output for
+; GFX7LESS/GFX9/GFX10/GFX11 wave64+wave32 variants, each also in a -DPP
+; configuration.  These targets have no native f64 fmin atomic, so codegen
+; expands to a compare-and-swap loop: canonicalize the loaded value with
+; v_max_f64 v,v, combine with v_min_f64, then buffer/global_atomic_cmpswap
+; until the exchange succeeds (exec-mask loop via s_or/s_andn2 + cbranch).
+; A leading mbcnt/exec compare runs the atomic on a single lane only,
+; since the address and operand are uniform.
+; NOTE(review): do not hand-edit the CHECK lines; regenerate with
+; llvm/utils/update_llc_test_checks.py.
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX7LESS:       ; %bb.0:
+; GFX7LESS-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT:    s_cbranch_execz .LBB8_3
+; GFX7LESS-NEXT:  ; %bb.1:
+; GFX7LESS-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT:    v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT:    v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT:    s_mov_b32 s2, -1
+; GFX7LESS-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX7LESS-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX7LESS-NEXT:    s_waitcnt expcnt(0)
+; GFX7LESS-NEXT:    v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT:    v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT:    v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT:    v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT:    buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT:    s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT:    v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT:    v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX7LESS-NEXT:  .LBB8_3:
+; GFX7LESS-NEXT:    s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT:    v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT:    s_cbranch_execz .LBB8_3
+; GFX9-NEXT:  ; %bb.1:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT:    s_mov_b64 s[2:3], 0
+; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX9-NEXT:  .LBB8_3:
+; GFX9-NEXT:    s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064:       ; %bb.0:
+; GFX1064-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT:    v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1064-NEXT:  ; %bb.1:
+; GFX1064-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1064-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1064-NEXT:    s_mov_b64 s[2:3], 0
+; GFX1064-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1064-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-NEXT:    s_waitcnt vmcnt(0)
+; GFX1064-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1064-NEXT:    v_mov_b32_e32 v2, v0
+; GFX1064-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1064-NEXT:  .LBB8_3:
+; GFX1064-NEXT:    s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032:       ; %bb.0:
+; GFX1032-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT:    s_mov_b32 s2, 0
+; GFX1032-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT:    s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1032-NEXT:  ; %bb.1:
+; GFX1032-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1032-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-NEXT:    s_waitcnt vmcnt(0)
+; GFX1032-NEXT:    v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1032-NEXT:    v_mov_b32_e32 v2, v0
+; GFX1032-NEXT:    s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT:    s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1032-NEXT:  .LBB8_3:
+; GFX1032-NEXT:    s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164:       ; %bb.0:
+; GFX1164-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT:    v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1164-NEXT:  ; %bb.1:
+; GFX1164-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1164-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1164-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1164-NEXT:    s_mov_b64 s[2:3], 0
+; GFX1164-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1164-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT:    global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-NEXT:    s_waitcnt vmcnt(0)
+; GFX1164-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1164-NEXT:    v_mov_b32_e32 v2, v0
+; GFX1164-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT:    s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1164-NEXT:  .LBB8_3:
+; GFX1164-NEXT:    s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132:       ; %bb.0:
+; GFX1132-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT:    s_mov_b32 s2, 0
+; GFX1132-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1132-NEXT:  ; %bb.1:
+; GFX1132-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1132-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT:    v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1132-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT:    global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-NEXT:    s_waitcnt vmcnt(0)
+; GFX1132-NEXT:    v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT:    v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT:    s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1132-NEXT:  .LBB8_3:
+; GFX1132-NEXT:    s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX9-DPP:       ; %bb.0:
+; GFX9-DPP-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT:    v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT:    s_cbranch_execz .LBB8_3
+; GFX9-DPP-NEXT:  ; %bb.1:
+; GFX9-DPP-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT:    s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX9-DPP-NEXT:  .LBB8_3:
+; GFX9-DPP-NEXT:    s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1064-DPP:       ; %bb.0:
+; GFX1064-DPP-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT:    v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT:    s_and_saveexec_b64 s[2:3], vcc
+; GFX1064-DPP-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1064-DPP-NEXT:  ; %bb.1:
+; GFX1064-DPP-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT:    s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT:    s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT:    v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT:    s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1064-DPP-NEXT:  .LBB8_3:
+; GFX1064-DPP-NEXT:    s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1032-DPP:       ; %bb.0:
+; GFX1032-DPP-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT:    s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT:    s_and_saveexec_b32 s3, vcc_lo
+; GFX1032-DPP-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1032-DPP-NEXT:  ; %bb.1:
+; GFX1032-DPP-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT:    v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT:    v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT:    s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT:    v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT:    v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT:    s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT:    s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1032-DPP-NEXT:  .LBB8_3:
+; GFX1032-DPP-NEXT:    s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1164-DPP:       ; %bb.0:
+; GFX1164-DPP-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT:    s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT:    v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1164-DPP-NEXT:  ; %bb.1:
+; GFX1164-DPP-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT:    v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT:    v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT:    s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT:    global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT:    s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT:    v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT:    v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT:    s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1164-DPP-NEXT:  .LBB8_3:
+; GFX1164-DPP-NEXT:    s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_one_as_scope_unsafe:
+; GFX1132-DPP:       ; %bb.0:
+; GFX1132-DPP-NEXT:    v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT:    s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT:    s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT:    v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT:    s_cbranch_execz .LBB8_3
+; GFX1132-DPP-NEXT:  ; %bb.1:
+; GFX1132-DPP-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT:    v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT:    v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT:  .LBB8_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT:    v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT:    v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT:    global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT:    s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT:    v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT:    v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT:    s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT:    s_cbranch_execnz .LBB8_2
+; GFX1132-DPP-NEXT:  .LBB8_3:
+; GFX1132-DPP-NEXT:    s_endpgm
+  %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+  ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_one_as_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
+; GFX7LESS-NEXT: .LBB10_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-NEXT: .LBB10_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-NEXT: .LBB10_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-NEXT: .LBB10_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-NEXT: .LBB10_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-NEXT: .LBB10_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX9-DPP-NEXT: .LBB10_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1064-DPP-NEXT: .LBB10_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1032-DPP-NEXT: .LBB10_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1164-DPP-NEXT: .LBB10_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
+; GFX1132-DPP-NEXT: .LBB10_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
+; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_defalut_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
+; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index bc9125e326c4..7a7ddbe618b0 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -13,6 +13,7 @@
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-atomic-optimizer-strategy=DPP -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX1132-DPP %s
declare float @div.float.value()
+declare double @div.double.value()
define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_agent_scope_unsafe:
@@ -5616,6 +5617,5581 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_defalut_scop
ret void
}
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
+; GFX7LESS-NEXT: .LBB9_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-NEXT: .LBB9_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-NEXT: .LBB9_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-NEXT: .LBB9_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-NEXT: .LBB9_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-NEXT: .LBB9_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX9-DPP-NEXT: .LBB9_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1064-DPP-NEXT: .LBB9_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1032-DPP-NEXT: .LBB9_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1164-DPP-NEXT: .LBB9_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
+; GFX1132-DPP-NEXT: .LBB9_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value()
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB11_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_2
+; GFX7LESS-NEXT: .LBB11_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-NEXT: .LBB11_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-NEXT: .LBB11_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-NEXT: .LBB11_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-NEXT: .LBB11_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-NEXT: .LBB11_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX9-DPP-NEXT: .LBB11_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1064-DPP-NEXT: .LBB11_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1032-DPP-NEXT: .LBB11_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1164-DPP-NEXT: .LBB11_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB11_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB11_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_2
+; GFX1132-DPP-NEXT: .LBB11_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
+ ret void
+}
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB12_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("one-as") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) %ptr) #2{
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB13_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
+; GFX7LESS-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB13_2
+; GFX7LESS-NEXT: .LBB13_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-NEXT: .LBB13_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-NEXT: .LBB13_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-NEXT: .LBB13_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-NEXT: .LBB13_3:
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-NEXT: .LBB13_3:
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX9-DPP-NEXT: .LBB13_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1064-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1064-DPP-NEXT: .LBB13_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
+; GFX1032-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1032-DPP-NEXT: .LBB13_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX1164-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1164-DPP-NEXT: .LBB13_3:
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB13_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1132-DPP-NEXT: .LBB13_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB13_2
+; GFX1132-DPP-NEXT: .LBB13_3:
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB14_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.double.value()
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
+; GFX1132-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1064-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1032-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
+; GFX1164-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
+; GFX1132-DPP-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB15_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic
+ ret void
+}
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s2
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
+; GFX7LESS-NEXT: ; %bb.1:
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b32 s12, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
+; GFX7LESS-NEXT: .LBB16_3:
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s42, -1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-NEXT: s_add_u32 s40, s40, s3
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-NEXT: ; %bb.1:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-NEXT: s_add_u32 s8, s34, 44
+; GFX9-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-NEXT: s_mov_b32 s12, s33
+; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-NEXT: .LBB16_3:
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s42, -1
+; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-NEXT: ; %bb.1:
+; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-NEXT: s_mov_b32 s12, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-NEXT: .LBB16_3:
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s42, -1
+; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-NEXT: s_mov_b32 s38, 0
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-NEXT: ; %bb.1:
+; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-NEXT: s_mov_b32 s12, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-NEXT: .LBB16_3:
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-NEXT: ; %bb.1:
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-NEXT: s_mov_b32 s33, s2
+; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b32 s12, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-NEXT: .LBB16_3:
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-NEXT: s_mov_b32 s38, 0
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-NEXT: ; %bb.1:
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b32 s12, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-NEXT: .LBB16_3:
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s42, -1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
+; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
+; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX9-DPP-NEXT: ; %bb.1:
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
+; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: s_mov_b32 s12, s33
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX9-DPP-NEXT: .LBB16_3:
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1064-DPP-NEXT: ; %bb.1:
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1064-DPP-NEXT: .LBB16_3:
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
+; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1032-DPP-NEXT: ; %bb.1:
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
+; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1032-DPP-NEXT: .LBB16_3:
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1164-DPP-NEXT: ; %bb.1:
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1164-DPP-NEXT: .LBB16_3:
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
+; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
+; GFX1132-DPP-NEXT: ; %bb.1:
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
+; GFX1132-DPP-NEXT: .LBB16_3:
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ ret void
+}
+
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp(ptr addrspace(1) %ptr) #2 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX7LESS: ; %bb.0:
+; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
+; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s50, -1
+; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
+; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
+; GFX7LESS-NEXT: s_mov_b32 s33, s8
+; GFX7LESS-NEXT: s_mov_b32 s40, s7
+; GFX7LESS-NEXT: s_mov_b32 s41, s6
+; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s46, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
+; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
+; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
+; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
+; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
+; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
+; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_waitcnt expcnt(2)
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX7LESS-NEXT: s_mov_b32 s12, s41
+; GFX7LESS-NEXT: s_mov_b32 s13, s40
+; GFX7LESS-NEXT: s_mov_b32 s14, s33
+; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: s_waitcnt expcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
+; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
+; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
+; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
+; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7LESS-NEXT: s_endpgm
+;
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s50, -1
+; GFX9-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-NEXT: s_add_u32 s48, s48, s9
+; GFX9-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-NEXT: s_mov_b32 s33, s8
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_mov_b32 s40, s7
+; GFX9-NEXT: s_mov_b32 s41, s6
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: s_movk_i32 s32, 0x800
+; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-NEXT: s_add_u32 s8, s36, 44
+; GFX9-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-NEXT: s_getpc_b64 s[0:1]
+; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s41
+; GFX9-NEXT: s_mov_b32 s13, s40
+; GFX9-NEXT: s_mov_b32 s14, s33
+; GFX9-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+;
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064: ; %bb.0:
+; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s50, -1
+; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-NEXT: s_mov_b32 s33, s8
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_mov_b32 s40, s7
+; GFX1064-NEXT: s_mov_b32 s41, s6
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s41
+; GFX1064-NEXT: s_mov_b32 s13, s40
+; GFX1064-NEXT: s_mov_b32 s14, s33
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-NEXT: s_clause 0x1
+; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-NEXT: s_endpgm
+;
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032: ; %bb.0:
+; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s50, -1
+; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-NEXT: s_mov_b32 s33, s8
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_mov_b32 s40, s7
+; GFX1032-NEXT: s_mov_b32 s41, s6
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s41
+; GFX1032-NEXT: s_mov_b32 s13, s40
+; GFX1032-NEXT: s_mov_b32 s14, s33
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-NEXT: s_clause 0x1
+; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-NEXT: s_endpgm
+;
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164: ; %bb.0:
+; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-NEXT: s_mov_b32 s33, s8
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s6
+; GFX1164-NEXT: s_mov_b32 s13, s7
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_mov_b32 s32, 32
+; GFX1164-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-NEXT: s_mov_b32 s40, s7
+; GFX1164-NEXT: s_mov_b32 s41, s6
+; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b32 s12, s41
+; GFX1164-NEXT: s_mov_b32 s13, s40
+; GFX1164-NEXT: s_mov_b32 s14, s33
+; GFX1164-NEXT: s_clause 0x1
+; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-NEXT: s_endpgm
+;
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132: ; %bb.0:
+; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-NEXT: s_mov_b32 s40, s14
+; GFX1132-NEXT: s_mov_b32 s41, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b32 s13, s14
+; GFX1132-NEXT: s_mov_b32 s14, s15
+; GFX1132-NEXT: s_mov_b32 s32, 32
+; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-NEXT: s_mov_b32 s44, 0
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_mov_b32 s12, s41
+; GFX1132-NEXT: s_mov_b32 s13, s40
+; GFX1132-NEXT: s_mov_b32 s14, s33
+; GFX1132-NEXT: s_clause 0x1
+; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-NEXT: s_endpgm
+;
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX9-DPP: ; %bb.0:
+; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s50, -1
+; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
+; GFX9-DPP-NEXT: s_mov_b32 s33, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_mov_b32 s40, s7
+; GFX9-DPP-NEXT: s_mov_b32 s41, s6
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
+; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
+; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-DPP-NEXT: s_mov_b32 s12, s41
+; GFX9-DPP-NEXT: s_mov_b32 s13, s40
+; GFX9-DPP-NEXT: s_mov_b32 s14, s33
+; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-DPP-NEXT: s_endpgm
+;
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1064-DPP: ; %bb.0:
+; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1064-DPP-NEXT: s_clause 0x1
+; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1064-DPP-NEXT: s_endpgm
+;
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1032-DPP: ; %bb.0:
+; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
+; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
+; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
+; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1032-DPP-NEXT: s_clause 0x1
+; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
+; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
+; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1032-DPP-NEXT: s_endpgm
+;
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1164-DPP: ; %bb.0:
+; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
+; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
+; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
+; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1164-DPP-NEXT: s_clause 0x1
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
+; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1164-DPP-NEXT: s_endpgm
+;
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_defalut_scope_strictfp:
+; GFX1132-DPP: ; %bb.0:
+; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
+; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
+; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
+; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
+; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
+; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
+; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
+; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
+; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
+; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
+; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
+; GFX1132-DPP-NEXT: s_clause 0x1
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
+; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
+; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
+; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
+; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
+; GFX1132-DPP-NEXT: s_endpgm
+ %divValue = call double @div.float.value() strictfp
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ ret void
+}
+
attributes #0 = { "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #1 = { strictfp "denormal-fp-math-f32"="preserve-sign,preserve-sign" "amdgpu-unsafe-fp-atomics"="true" }
attributes #2 = { strictfp}
diff --git a/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir b/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
index bdd89a907790..dde84af57ed2 100644
--- a/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/greedy-alloc-fail-sgpr1024-spill.mir
@@ -13,6 +13,7 @@
name: greedy_fail_alloc_sgpr1024_spill
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
explicitKernArgSize: 16
diff --git a/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll b/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
index a5792bf29ddc..4c21f8729745 100644
--- a/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
+++ b/llvm/test/CodeGen/AMDGPU/implicitarg-offset-attributes.ll
@@ -258,25 +258,25 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
;.
; V4: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V4: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V4: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V4: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V5: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V5: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V5: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V5: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V6: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; V6: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR4]] = { "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
-; V6: attributes #[[ATTR5]] = { "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
+; V6: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V4: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
index e015095a4884..ab160ffc10ed 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-cc.ll
@@ -92,7 +92,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
; DAGISEL-GFX11-NEXT: $vgpr5 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr6 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr7 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -122,7 +121,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc(<4 x i32> inreg %a, <4 x i32> %b
; DAGISEL-GFX10-NEXT: $vgpr5 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr6 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr7 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -234,7 +232,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
; DAGISEL-GFX11-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -272,7 +269,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_ptr(ptr inreg %a, ptr %b, ptr ad
; DAGISEL-GFX10-NEXT: $vgpr9 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr10 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -404,7 +400,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
; DAGISEL-GFX11-NEXT: $vgpr11 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr12 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr13 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -454,7 +449,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_struct( {ptr, i32, <4 x i32>} in
; DAGISEL-GFX10-NEXT: $vgpr11 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr12 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr13 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -506,7 +500,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -524,7 +517,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_float(float inreg %a, float %b)
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -576,7 +568,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -594,7 +585,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_half(half inreg %a, half %b) {
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -646,7 +636,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -664,7 +653,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a, bfloat %
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -716,7 +704,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
; DAGISEL-GFX11-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
; DAGISEL-GFX11-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -734,7 +721,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_i16(i16 inreg %a, i16 %b) {
; DAGISEL-GFX10-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr0 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr1 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -870,7 +856,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
; DAGISEL-GFX11-NEXT: $vgpr13 = COPY [[COPY2]]
; DAGISEL-GFX11-NEXT: $vgpr14 = COPY [[COPY1]]
; DAGISEL-GFX11-NEXT: $vgpr15 = COPY [[COPY]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -916,7 +901,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_cc_v16i16(<16 x i16> inreg %a, <16
; DAGISEL-GFX10-NEXT: $vgpr13 = COPY [[COPY2]]
; DAGISEL-GFX10-NEXT: $vgpr14 = COPY [[COPY1]]
; DAGISEL-GFX10-NEXT: $vgpr15 = COPY [[COPY]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
@@ -2480,7 +2464,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
; DAGISEL-GFX11-NEXT: $vgpr29 = COPY [[COPY134]]
; DAGISEL-GFX11-NEXT: $vgpr30 = COPY [[COPY133]]
; DAGISEL-GFX11-NEXT: $vgpr31 = COPY [[COPY132]]
- ; DAGISEL-GFX11-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
; DAGISEL-GFX11-NEXT: ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX11-NEXT: S_ENDPGM 0
@@ -2827,7 +2810,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_many_regs(<36 x i32> inreg %a, <128
; DAGISEL-GFX10-NEXT: $vgpr29 = COPY [[COPY134]]
; DAGISEL-GFX10-NEXT: $vgpr30 = COPY [[COPY133]]
; DAGISEL-GFX10-NEXT: $vgpr31 = COPY [[COPY132]]
- ; DAGISEL-GFX10-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[S_LOAD_DWORDX2_IMM]], @use, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
; DAGISEL-GFX10-NEXT: ADJCALLSTACKDOWN 0, 528, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
; DAGISEL-GFX10-NEXT: S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
index 1acbb0911828..fbf2ee1145ae 100644
--- a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
+++ b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
@@ -60,7 +60,6 @@ define amdgpu_kernel void @test_kernel(i32 %val) #0 {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_or_saveexec_b64 s[34:35], -1
diff --git a/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll b/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll
new file mode 100644
index 000000000000..d101d8da5e0f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lds-mixed-absolute-addresses-unused.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds < %s 2>&1 | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s 2>&1 | FileCheck %s
+
+; This looks like a partially lowered module, but the non-lowered GV isn't used by any kernels.
+; In such cases, LowerModuleLDS is free to leave it in and ignore it, and we want to make sure
+; LowerModuleLDS doesn't crash if it re-runs on such modules.
+@notLowered = addrspace(3) global i32 poison
+@lowered = addrspace(3) global i32 poison, !absolute_symbol !0
+
+@llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @notLowered to ptr)], section "llvm.metadata"
+
+define amdgpu_kernel void @kern(i32 %val0) {
+; CHECK-LABEL: define amdgpu_kernel void @kern(
+; CHECK-SAME: i32 [[VAL0:%.*]]) {
+; CHECK-NEXT: [[VAL1:%.*]] = add i32 [[VAL0]], 4
+; CHECK-NEXT: store i32 [[VAL1]], ptr addrspace(3) @lowered, align 4
+; CHECK-NEXT: ret void
+;
+ %val1 = add i32 %val0, 4
+ store i32 %val1, ptr addrspace(3) @lowered
+ ret void
+}
+
+
+!0 = !{i32 0, i32 1}
diff --git a/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll b/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
index b512a43aa102..b1f4f2ef1ef5 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-reject-mixed-absolute-addresses.ll
@@ -8,7 +8,7 @@
define amdgpu_kernel void @kern() {
%val0 = load i32, ptr addrspace(3) @var1
%val1 = add i32 %val0, 4
- store i32 %val1, ptr addrspace(3) @var1
+ store i32 %val1, ptr addrspace(3) @var2
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
index b4415c12926a..f6197e077021 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w32.ll
@@ -1,132 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-SDAG-W32 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-GISEL-W32 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
-declare <2 x i32> @llvm.amdgcn.global.load.tr.v2i32.p1(ptr addrspace(1))
-declare <8 x i16> @llvm.amdgcn.global.load.tr.v8i16.p1(ptr addrspace(1))
-declare <8 x half> @llvm.amdgcn.global.load.tr.v8f16.p1(ptr addrspace(1))
-declare <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16.p1(ptr addrspace(1))
+declare <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32.p1(ptr addrspace(1))
+declare <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16.p1(ptr addrspace(1))
define amdgpu_kernel void @global_load_tr_b64(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b64:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b64:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
+; GFX12-LABEL: global_load_tr_b64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b64 v[0:1], v2, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <2 x i32> @llvm.amdgcn.global.load.tr.v2i32.p1(ptr addrspace(1) %gep)
+ %val = call <2 x i32> @llvm.amdgcn.global.load.tr.b64.v2i32.p1(ptr addrspace(1) %gep)
store <2 x i32> %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @global_load_tr_b128_i16(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_i16:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_i16:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
+define amdgpu_kernel void @global_load_tr_b128(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX12-LABEL: global_load_tr_b128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b128 v4, v[0:3], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x i16> @llvm.amdgcn.global.load.tr.v8i16.p1(ptr addrspace(1) %gep)
+ %val = call <8 x i16> @llvm.amdgcn.global.load.tr.b128.v8i16.p1(ptr addrspace(1) %gep)
store <8 x i16> %val, ptr addrspace(1) %use
ret void
}
-
-define amdgpu_kernel void @global_load_tr_b128_half(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_half:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_half:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x half> @llvm.amdgcn.global.load.tr.v8f16.p1(ptr addrspace(1) %gep)
- store <8 x half> %val, ptr addrspace(1) %use
- ret void
-}
-
-define amdgpu_kernel void @global_load_tr_b128_bfloat(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W32-LABEL: global_load_tr_b128_bfloat:
-; GFX12-SDAG-W32: ; %bb.0: ; %entry
-; GFX12-SDAG-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-SDAG-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-SDAG-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-SDAG-W32-NEXT: s_nop 0
-; GFX12-SDAG-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W32-NEXT: s_endpgm
-;
-; GFX12-GISEL-W32-LABEL: global_load_tr_b128_bfloat:
-; GFX12-GISEL-W32: ; %bb.0: ; %entry
-; GFX12-GISEL-W32-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W32-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-GISEL-W32-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_load_tr_b128 v[0:3], v4, s[0:1] offset:32
-; GFX12-GISEL-W32-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W32-NEXT: global_store_b128 v4, v[0:3], s[2:3]
-; GFX12-GISEL-W32-NEXT: s_nop 0
-; GFX12-GISEL-W32-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W32-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <8 x bfloat> @llvm.amdgcn.global.load.tr.v8bf16.p1(ptr addrspace(1) %gep)
- store <8 x bfloat> %val, ptr addrspace(1) %use
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
index 7ad1416789de..a2dc3662fcc4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.global.load.tr-w64.ll
@@ -1,132 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-SDAG-W64 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX12-GISEL-W64 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -mattr=-wavefrontsize32,+wavefrontsize64 < %s | FileCheck -check-prefix=GFX12 %s
-declare i32 @llvm.amdgcn.global.load.tr.i32.p1(ptr addrspace(1))
-declare <4 x i16> @llvm.amdgcn.global.load.tr.v4i16.p1(ptr addrspace(1))
-declare <4 x half> @llvm.amdgcn.global.load.tr.v4f16.p1(ptr addrspace(1))
-declare <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16.p1(ptr addrspace(1))
+declare i32 @llvm.amdgcn.global.load.tr.b64.i32.p1(ptr addrspace(1))
+declare <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16.p1(ptr addrspace(1))
define amdgpu_kernel void @global_load_tr_b64(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b64:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b32 v0, v1, s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b64:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b32 v0, v1, s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
+; GFX12-LABEL: global_load_tr_b64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b64 v1, v0, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b32 v0, v1, s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call i32 @llvm.amdgcn.global.load.tr.i32.p1(ptr addrspace(1) %gep)
+ %val = call i32 @llvm.amdgcn.global.load.tr.b64.i32.p1(ptr addrspace(1) %gep)
store i32 %val, ptr addrspace(1) %use
ret void
}
-define amdgpu_kernel void @global_load_tr_b128_i16(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_i16:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_i16:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
+define amdgpu_kernel void @global_load_tr_b128(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX12-LABEL: global_load_tr_b128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x i16> @llvm.amdgcn.global.load.tr.v4i16.p1(ptr addrspace(1) %gep)
+ %val = call <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16.p1(ptr addrspace(1) %gep)
store <4 x i16> %val, ptr addrspace(1) %use
ret void
}
-
-define amdgpu_kernel void @global_load_tr_b128_half(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_half:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_half:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x half> @llvm.amdgcn.global.load.tr.v4f16.p1(ptr addrspace(1) %gep)
- store <4 x half> %val, ptr addrspace(1) %use
- ret void
-}
-
-define amdgpu_kernel void @global_load_tr_b128_bfloat(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX12-SDAG-W64-LABEL: global_load_tr_b128_bfloat:
-; GFX12-SDAG-W64: ; %bb.0: ; %entry
-; GFX12-SDAG-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-SDAG-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-SDAG-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-SDAG-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-SDAG-W64-NEXT: s_nop 0
-; GFX12-SDAG-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-SDAG-W64-NEXT: s_endpgm
-;
-; GFX12-GISEL-W64-LABEL: global_load_tr_b128_bfloat:
-; GFX12-GISEL-W64: ; %bb.0: ; %entry
-; GFX12-GISEL-W64-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX12-GISEL-W64-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-GISEL-W64-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_load_tr_b128 v[0:1], v2, s[0:1] offset:32
-; GFX12-GISEL-W64-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-W64-NEXT: global_store_b64 v2, v[0:1], s[2:3]
-; GFX12-GISEL-W64-NEXT: s_nop 0
-; GFX12-GISEL-W64-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
-; GFX12-GISEL-W64-NEXT: s_endpgm
-entry:
- %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
- %val = call <4 x bfloat> @llvm.amdgcn.global.load.tr.v4bf16.p1(ptr addrspace(1) %gep)
- store <4 x bfloat> %val, ptr addrspace(1) %use
- ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
index 091b29c23d60..e93595b9ef27 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.single.2b.mir
@@ -4,6 +4,8 @@
--- |
define amdgpu_kernel void @single-wave-phase-2b(ptr addrspace(3) noalias %in0, ptr addrspace(3) noalias %in1, ptr addrspace(3) noalias %in2, ptr addrspace(3) noalias %in3, ptr addrspace(3) noalias %in4, ptr addrspace(3) noalias %in5, ptr addrspace(3) noalias %in6, ptr addrspace(3) noalias %in7, ptr addrspace(3) noalias %in8, ptr addrspace(3) noalias %in9, ptr addrspace(3) noalias %in10, ptr addrspace(3) noalias %in11, ptr addrspace(7) noalias %in12, ptr addrspace(7) noalias %in13, ptr addrspace(7) noalias %in14, ptr addrspace(7) noalias %in15, ptr addrspace(7) noalias %in16, ptr addrspace(7) noalias %in17, ptr addrspace(7) noalias %in18, ptr addrspace(7) noalias %in19, ptr addrspace(7) noalias %in20, ptr addrspace(7) noalias %in21, ptr addrspace(7) noalias %in22, ptr addrspace(7) noalias %in23, ptr addrspace(7) noalias %in24, ptr addrspace(7) noalias %in25, ptr addrspace(7) noalias %in26, ptr addrspace(7) noalias %in27, ptr addrspace(7) noalias %in28, ptr addrspace(7) noalias %in29) #0 { ret void }
+ attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,1" "amdgpu-flat-work-group-size"="1,256" }
+
!0 = distinct !{!0}
!1 = !{!1, !0}
...
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
index 1348315e72e7..7b1f55e7eeba 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.msaa.load.ll
@@ -22,18 +22,36 @@ main_body:
define amdgpu_ps <4 x float> @load_2dmsaa_both(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %fragid) {
; GFX11-LABEL: load_2dmsaa_both:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:4], v[0:2], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x98,0x02,0x60,0xf0,0x00,0x00,0x60,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x08,0x05]
+; GFX11-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x07]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; encoding: [0x42,0x02,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v9, v8 ; encoding: [0x08,0x03,0x12,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v10, v8 ; encoding: [0x08,0x03,0x14,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v11, v8 ; encoding: [0x08,0x03,0x16,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v12, v8 ; encoding: [0x08,0x03,0x18,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v8 :: v_dual_mov_b32 v1, v9 ; encoding: [0x08,0x01,0x10,0xca,0x09,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x93,0x01,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v2, v10 :: v_dual_mov_b32 v3, v11 ; encoding: [0x0a,0x01,0x10,0xca,0x0b,0x01,0x02,0x02]
+; GFX11-NEXT: v_mov_b32_e32 v4, v12 ; encoding: [0x0c,0x03,0x08,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:4], v[5:7], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x98,0x02,0x60,0xf0,0x05,0x00,0x60,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x05,0x04,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v8, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x08,0x04,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2dmsaa_both:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:4], [v0, v1, v2], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x0e,0x20,0x86,0xe4,0x00,0x01,0x00,0x00,0x00,0x01,0x02,0x00]
-; GFX12-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v7, v0 :: v_dual_mov_b32 v8, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x08,0x07]
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x05]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v9, v8 :: v_dual_mov_b32 v10, v8 ; encoding: [0x08,0x01,0x10,0xca,0x08,0x01,0x0a,0x09]
+; GFX12-NEXT: v_dual_mov_b32 v11, v8 :: v_dual_mov_b32 v12, v8 ; encoding: [0x08,0x01,0x10,0xca,0x08,0x01,0x0c,0x0b]
+; GFX12-NEXT: v_dual_mov_b32 v0, v8 :: v_dual_mov_b32 v1, v9 ; encoding: [0x08,0x01,0x10,0xca,0x09,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x92,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v2, v10 :: v_dual_mov_b32 v3, v11 ; encoding: [0x0a,0x01,0x10,0xca,0x0b,0x01,0x02,0x02]
+; GFX12-NEXT: v_mov_b32_e32 v4, v12 ; encoding: [0x0c,0x03,0x08,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:4], [v7, v6, v5], s[0:7] dmask:0x2 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ; encoding: [0x0e,0x20,0x86,0xe4,0x00,0x01,0x00,0x00,0x07,0x06,0x05,0x00]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x05,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v8, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x08,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32i32.i32(i32 2, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
@@ -63,18 +81,37 @@ main_body:
define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX11-LABEL: load_2darraymsaa_tfe:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:4], v[0:3], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x9c,0x08,0x60,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v9, 0 :: v_dual_mov_b32 v8, v3 ; encoding: [0x80,0x00,0x10,0xca,0x03,0x01,0x08,0x09]
+; GFX11-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x06,0x07]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4) ; encoding: [0x42,0x02,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, v9 ; encoding: [0x00,0x01,0x10,0xca,0x09,0x01,0x0a,0x05]
+; GFX11-NEXT: v_mov_b32_e32 v11, v9 ; encoding: [0x09,0x03,0x16,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v12, v9 ; encoding: [0x09,0x03,0x18,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v13, v9 ; encoding: [0x09,0x03,0x1a,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v9 :: v_dual_mov_b32 v1, v10 ; encoding: [0x09,0x01,0x10,0xca,0x0a,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x93,0x01,0x87,0xbf]
+; GFX11-NEXT: v_dual_mov_b32 v2, v11 :: v_dual_mov_b32 v3, v12 ; encoding: [0x0b,0x01,0x10,0xca,0x0c,0x01,0x02,0x02]
+; GFX11-NEXT: v_mov_b32_e32 v4, v13 ; encoding: [0x0d,0x03,0x08,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:4], v[5:8], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x9c,0x08,0x60,0xf0,0x05,0x00,0x20,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x05,0x04,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v9, v4, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x09,0x04,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2darraymsaa_tfe:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:4], [v0, v1, v2, v3], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x0f,0x20,0x06,0xe6,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x03]
-; GFX12-NEXT: v_mov_b32_e32 v5, 0 ; encoding: [0x80,0x02,0x0a,0x7e]
+; GFX12-NEXT: v_mov_b32_e32 v9, 0 ; encoding: [0x80,0x02,0x12,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v6, v2 ; encoding: [0x03,0x01,0x10,0xca,0x02,0x01,0x06,0x05]
+; GFX12-NEXT: v_dual_mov_b32 v7, v1 :: v_dual_mov_b32 v8, v0 ; encoding: [0x01,0x01,0x10,0xca,0x00,0x01,0x08,0x07]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x23,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v10, v9 :: v_dual_mov_b32 v11, v9 ; encoding: [0x09,0x01,0x10,0xca,0x09,0x01,0x0a,0x0a]
+; GFX12-NEXT: v_dual_mov_b32 v12, v9 :: v_dual_mov_b32 v13, v9 ; encoding: [0x09,0x01,0x10,0xca,0x09,0x01,0x0c,0x0c]
+; GFX12-NEXT: v_dual_mov_b32 v0, v9 :: v_dual_mov_b32 v1, v10 ; encoding: [0x09,0x01,0x10,0xca,0x0a,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; encoding: [0x92,0x01,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v2, v11 :: v_dual_mov_b32 v3, v12 ; encoding: [0x0b,0x01,0x10,0xca,0x0c,0x01,0x02,0x02]
+; GFX12-NEXT: v_mov_b32_e32 v4, v13 ; encoding: [0x0d,0x03,0x08,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:4], [v8, v7, v6, v5], s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ; encoding: [0x0f,0x20,0x06,0xe6,0x00,0x00,0x00,0x00,0x08,0x07,0x06,0x05]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v5, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x05,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v9, v4, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x02,0x09,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32i32.i32(i32 8, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
@@ -155,18 +192,31 @@ main_body:
define amdgpu_ps <4 x half> @load_2dmsaa_tfe_d16(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %fragid) {
; GFX11-LABEL: load_2dmsaa_tfe_d16:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:2], v[0:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x98,0x01,0x62,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v6, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x03]
+; GFX11-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v4, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x05]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v7, v6 ; encoding: [0x06,0x03,0x0e,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v8, v6 ; encoding: [0x06,0x03,0x10,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 ; encoding: [0x06,0x01,0x10,0xca,0x07,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v2, v8 ; encoding: [0x08,0x03,0x04,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:2], v[3:5], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x98,0x01,0x62,0xf0,0x03,0x00,0x20,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x03,0x02,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v6, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x06,0x02,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2dmsaa_tfe_d16:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:2], [v0, v1, v2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x2e,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x00]
-; GFX12-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v6, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x05]
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x03]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; encoding: [0x92,0x00,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v8, v6 ; encoding: [0x06,0x01,0x10,0xca,0x06,0x01,0x08,0x07]
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 ; encoding: [0x06,0x01,0x10,0xca,0x07,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX12-NEXT: v_mov_b32_e32 v2, v8 ; encoding: [0x08,0x03,0x04,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:2], [v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm tfe d16 ; encoding: [0x2e,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x05,0x04,0x03,0x00]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v6, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x06,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x half>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f16i32.i32(i32 1, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
@@ -196,18 +246,31 @@ main_body:
define amdgpu_ps <4 x half> @load_2darraymsaa_tfe_d16(<8 x i32> inreg %rsrc, ptr addrspace(1) inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
; GFX11-LABEL: load_2darraymsaa_tfe_d16:
; GFX11: ; %bb.0: ; %main_body
-; GFX11-NEXT: image_msaa_load v[0:2], v[0:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x9c,0x01,0x62,0xf0,0x00,0x00,0x20,0x00]
-; GFX11-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x06]
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x04]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; encoding: [0x22,0x01,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v8, v7 ; encoding: [0x07,0x03,0x10,0x7e]
+; GFX11-NEXT: v_mov_b32_e32 v9, v7 ; encoding: [0x07,0x03,0x12,0x7e]
+; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 ; encoding: [0x07,0x01,0x10,0xca,0x08,0x01,0x00,0x00]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX11-NEXT: v_mov_b32_e32 v2, v9 ; encoding: [0x09,0x03,0x04,0x7e]
+; GFX11-NEXT: image_msaa_load v[0:2], [v6, v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x9d,0x01,0x62,0xf0,0x06,0x00,0x20,0x00,0x05,0x04,0x03,0x00]
; GFX11-NEXT: s_waitcnt vmcnt(0) ; encoding: [0xf7,0x03,0x89,0xbf]
-; GFX11-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x03,0x02,0x08,0x00]
+; GFX11-NEXT: global_store_b32 v7, v2, s[8:9] ; encoding: [0x00,0x00,0x6a,0xdc,0x07,0x02,0x08,0x00]
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-LABEL: load_2darraymsaa_tfe_d16:
; GFX12: ; %bb.0: ; %main_body
-; GFX12-NEXT: image_msaa_load v[0:2], [v0, v1, v2, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x2f,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x00,0x01,0x02,0x03]
-; GFX12-NEXT: v_mov_b32_e32 v3, 0 ; encoding: [0x80,0x02,0x06,0x7e]
+; GFX12-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, 0 ; encoding: [0x00,0x01,0x10,0xca,0x80,0x00,0x06,0x06]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v5, v1 ; encoding: [0x02,0x01,0x10,0xca,0x01,0x01,0x04,0x04]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; encoding: [0x92,0x00,0x87,0xbf]
+; GFX12-NEXT: v_dual_mov_b32 v8, v7 :: v_dual_mov_b32 v9, v7 ; encoding: [0x07,0x01,0x10,0xca,0x07,0x01,0x08,0x08]
+; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 ; encoding: [0x07,0x01,0x10,0xca,0x08,0x01,0x00,0x00]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; encoding: [0x02,0x00,0x87,0xbf]
+; GFX12-NEXT: v_mov_b32_e32 v2, v9 ; encoding: [0x09,0x03,0x04,0x7e]
+; GFX12-NEXT: image_msaa_load v[0:2], [v6, v5, v4, v3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe d16 ; encoding: [0x2f,0x20,0x46,0xe4,0x00,0x00,0x00,0x00,0x06,0x05,0x04,0x03]
; GFX12-NEXT: s_wait_loadcnt 0x0 ; encoding: [0x00,0x00,0xc0,0xbf]
-; GFX12-NEXT: global_store_b32 v3, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x03,0x00,0x00,0x00]
+; GFX12-NEXT: global_store_b32 v7, v2, s[8:9] ; encoding: [0x08,0x80,0x06,0xee,0x00,0x00,0x00,0x01,0x07,0x00,0x00,0x00]
; GFX12-NEXT: ; return to shader part epilog
main_body:
%v = call {<4 x half>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f16i32.i32(i32 1, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
index 429528e9091d..e3dd036ecc30 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.inreg.ll
@@ -147,6 +147,34 @@ main_body:
ret half %res
}
+define amdgpu_ps half @v_interp_rtz_f16(float inreg %i, float inreg %j, i32 inreg %m0) #0 {
+; GCN-LABEL: v_interp_rtz_f16:
+; GCN: ; %bb.0: ; %main_body
+; GCN-NEXT: s_mov_b32 s3, exec_lo
+; GCN-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GCN-NEXT: s_mov_b32 m0, s2
+; GCN-NEXT: lds_param_load v1, attr0.x wait_vdst:15
+; GCN-NEXT: s_mov_b32 exec_lo, s3
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v2, s1
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v3, v1, v0, v1 wait_exp:0
+; GCN-NEXT: v_interp_p10_rtz_f16_f32 v0, v1, v0, v1 op_sel:[1,0,1,0] wait_exp:7
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v3, v1, v2, v3 wait_exp:7
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GCN-NEXT: v_interp_p2_rtz_f16_f32 v0, v1, v2, v0 op_sel:[1,0,0,0] wait_exp:7
+; GCN-NEXT: v_add_f16_e32 v0, v3, v0
+; GCN-NEXT: ; return to shader part epilog
+main_body:
+ %p0 = call float @llvm.amdgcn.lds.param.load(i32 0, i32 0, i32 %m0)
+ %l_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 0)
+ %l_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %l_p0, i1 0)
+ %h_p0 = call float @llvm.amdgcn.interp.p10.rtz.f16(float %p0, float %i, float %p0, i1 1)
+ %h_p1 = call half @llvm.amdgcn.interp.p2.rtz.f16(float %p0, float %j, float %h_p0, i1 1)
+ %res = fadd half %l_p1, %h_p1
+ ret half %res
+}
+
define amdgpu_ps half @v_interp_f16_imm_params(float inreg %i, float inreg %j) #0 {
; GCN-LABEL: v_interp_f16_imm_params:
; GCN: ; %bb.0: ; %main_body
@@ -172,6 +200,8 @@ declare float @llvm.amdgcn.interp.inreg.p10(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p2(float, float, float) #0
declare float @llvm.amdgcn.interp.inreg.p10.f16(float, float, float, i1) #0
declare half @llvm.amdgcn.interp.inreg.p2.f16(float, float, float, i1) #0
+declare float @llvm.amdgcn.interp.p10.rtz.f16(float, float, float, i1) #0
+declare half @llvm.amdgcn.interp.p2.rtz.f16(float, float, float, i1) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.f16(i32, i32, float, float, float, float, i1, i1) #0
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
index 00be32b06de0..ba3d306cc0cf 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.format.ll
@@ -2,6 +2,7 @@
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefixes=GFX6 %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GFX8PLUS %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=GFX11 %s
+;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-enable-prt-strict-null -verify-machineinstrs | FileCheck --check-prefixes=NOPRT %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck --check-prefixes=GFX12,GFX12-SDAG %s
;RUN: llc < %s -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs | FileCheck --check-prefixes=GFX12,GFX12-GISEL %s
@@ -34,6 +35,16 @@ define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(<4 x i32>
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_clause 0x2
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 0 idxen
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], 0 idxen glc
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v8, 0
@@ -75,6 +86,13 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_immoffs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_immoffs:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -146,6 +164,25 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs_large(<4 x i32> inreg) {
; GFX11-NEXT: v_add_f32_e32 v2, v10, v2
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_immoffs_large:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_movk_i32 s4, 0x7ffc
+; NOPRT-NEXT: s_clause 0x1
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 60 idxen offset:4092
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], s4 idxen offset:4092
+; NOPRT-NEXT: s_mov_b32 s4, 0x8ffc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_add_f32_e32 v1, v1, v5
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], s4 idxen offset:4
+; NOPRT-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v3, v3, v7
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v1, v9, v1
+; NOPRT-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; NOPRT-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
+; NOPRT-NEXT: v_add_f32_e32 v2, v10, v2
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_immoffs_large:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v8, 0
@@ -196,6 +233,13 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_12bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_12bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_12bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -235,6 +279,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_13bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_13bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_13bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -274,6 +327,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_16bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_16bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xf000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_16bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -313,6 +375,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_23bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_23bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x7ff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_voffset_large_23bit:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -352,6 +423,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_24bit(<4 x i32> inreg) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_voffset_large_24bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xfff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: buffer_load_voffset_large_24bit:
; GFX12-SDAG: ; %bb.0: ; %main_body
; GFX12-SDAG-NEXT: v_dual_mov_b32 v1, 0x800000 :: v_dual_mov_b32 v0, 0
@@ -389,6 +469,12 @@ define amdgpu_ps <4 x float> @buffer_load_idx(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_idx:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_idx:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], null idxen
@@ -427,6 +513,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_ofs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_ofs:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, 0
@@ -466,6 +561,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs_imm(<4 x i32> inreg, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_ofs_imm:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_ofs_imm:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, 0
@@ -497,6 +601,12 @@ define amdgpu_ps <4 x float> @buffer_load_both(<4 x i32> inreg, i32, i32) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_both:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_both:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], null idxen offen
@@ -529,6 +639,13 @@ define amdgpu_ps <4 x float> @buffer_load_both_reversed(<4 x i32> inreg, i32, i3
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_both_reversed:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v2, v0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_both_reversed:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v2, v0
@@ -562,6 +679,13 @@ define amdgpu_ps float @buffer_load_x(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_x:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_x:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -595,6 +719,13 @@ define amdgpu_ps float @buffer_load_x_i32(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_x_i32:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_x_i32:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -629,6 +760,13 @@ define amdgpu_ps <2 x float> @buffer_load_xy(<4 x i32> inreg %rsrc) {
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_xy:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_xy:
; GFX12: ; %bb.0: ; %main_body
; GFX12-NEXT: v_mov_b32_e32 v0, 0
@@ -644,7 +782,12 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v4i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
-; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX6-NEXT: v_mov_b32_e32 v7, 2
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
+; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s0, s2
@@ -658,7 +801,12 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v4i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
-; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX8PLUS-NEXT: v_mov_b32_e32 v7, 2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
+; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX8PLUS-NEXT: v_mov_b32_e32 v0, v6
@@ -667,22 +815,40 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
;
; GFX11-LABEL: buffer_load_v4i32_tfe:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v7, 2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
+; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v4i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v2, 2
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v4i32_tfe:
; GFX12: ; %bb.0:
-; GFX12-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], null idxen tfe
+; GFX12-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v7, 2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v2
+; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v7, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX12-NEXT: v_mov_b32_e32 v0, v6
; GFX12-NEXT: ; return to shader part epilog
- %load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
+ %load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.format.sl_v4i32i32s(<4 x i32> %rsrc, i32 2, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %load, 0
store <4 x i32> %data, ptr addrspace(1) %out
%status = extractvalue { <4 x i32>, i32 } %load, 1
@@ -694,6 +860,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v4f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -708,6 +878,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v4f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -718,15 +892,32 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v4f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v4f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v4f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v6, v2
; GFX12-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b128 v[0:1], v[2:5], off
@@ -744,6 +935,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v3i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -759,6 +953,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v3i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -769,15 +966,31 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v3i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v3i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v3i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_mov_b32_e32 v5, v2
; GFX12-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b96 v[0:1], v[2:4], off
@@ -795,6 +1008,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v3f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -810,6 +1026,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v3f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -820,15 +1039,31 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v3f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v3f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v3f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
+; GFX12-NEXT: v_mov_b32_e32 v5, v2
; GFX12-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b96 v[0:1], v[2:4], off
@@ -846,6 +1081,9 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v2i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -860,6 +1098,8 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v2i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -870,15 +1110,29 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v2i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v2i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v2i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
; GFX12-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -896,6 +1150,9 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX6-LABEL: buffer_load_v2f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -910,6 +1167,8 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX8PLUS-LABEL: buffer_load_v2f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -920,15 +1179,29 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-LABEL: buffer_load_v2f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_v2f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_v2f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v4, v2
; GFX12-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
@@ -946,6 +1219,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX6-LABEL: buffer_load_i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -960,6 +1234,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX8PLUS-LABEL: buffer_load_i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -970,15 +1245,28 @@ define amdgpu_cs float @buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX11-LABEL: buffer_load_i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_i32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
; GFX12-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
@@ -996,6 +1284,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX6-LABEL: buffer_load_f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -1010,6 +1299,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX8PLUS-LABEL: buffer_load_f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -1020,15 +1310,28 @@ define amdgpu_cs float @buffer_load_f32_tfe(<4 x i32> inreg %rsrc, ptr addrspace
; GFX11-LABEL: buffer_load_f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
;
+; NOPRT-LABEL: buffer_load_f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
+;
; GFX12-LABEL: buffer_load_f32_tfe:
; GFX12: ; %bb.0:
; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
; GFX12-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], null idxen tfe
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
index b0bd4e428ef2..c5202b84fa1e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.load.format.ll
@@ -2,6 +2,7 @@
;RUN: llc < %s -mtriple=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefixes=GFX6 %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefixes=GFX8PLUS %s
;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=GFX11 %s
+;RUN: llc < %s -mtriple=amdgcn -mattr=-enable-prt-strict-null -mcpu=gfx1100 -verify-machineinstrs | FileCheck --check-prefixes=NOPRT %s
define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(ptr addrspace(8) inreg) {
; GFX6-LABEL: buffer_load:
@@ -31,6 +32,16 @@ define amdgpu_ps {<4 x float>, <4 x float>, <4 x float>} @buffer_load(ptr addrsp
; GFX11-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_clause 0x2
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 0 idxen
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], 0 idxen glc
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], 0 idxen slc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 0, i32 0, i32 0)
%data_glc = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 0, i32 0, i32 1)
@@ -62,6 +73,13 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs(ptr addrspace(8) inreg) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_immoffs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:42
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 42, i32 0, i32 0)
ret <4 x float> %data
@@ -126,6 +144,25 @@ define amdgpu_ps <4 x float> @buffer_load_immoffs_large(ptr addrspace(8) inreg)
; GFX11-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
; GFX11-NEXT: v_add_f32_e32 v2, v10, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_immoffs_large:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v8, 0
+; NOPRT-NEXT: s_movk_i32 s4, 0x7ffc
+; NOPRT-NEXT: s_clause 0x1
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v8, s[0:3], 60 idxen offset:4092
+; NOPRT-NEXT: buffer_load_format_xyzw v[4:7], v8, s[0:3], s4 idxen offset:4092
+; NOPRT-NEXT: s_mov_b32 s4, 0x8ffc
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_add_f32_e32 v1, v1, v5
+; NOPRT-NEXT: buffer_load_format_xyzw v[8:11], v8, s[0:3], s4 idxen offset:4
+; NOPRT-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v3, v3, v7
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: v_dual_add_f32 v2, v2, v6 :: v_dual_add_f32 v1, v9, v1
+; NOPRT-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; NOPRT-NEXT: v_dual_add_f32 v0, v8, v0 :: v_dual_add_f32 v3, v11, v3
+; NOPRT-NEXT: v_add_f32_e32 v2, v10, v2
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%d.0 = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 60, i32 0)
%d.1 = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 32764, i32 0)
@@ -156,6 +193,13 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_12bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_12bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 4092, i32 0, i32 0)
ret <4 x float> %data
@@ -188,6 +232,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_13bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_13bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x1000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 8188, i32 0, i32 0)
ret <4 x float> %data
@@ -220,6 +273,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_16bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_16bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xf000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 65532, i32 0, i32 0)
ret <4 x float> %data
@@ -252,6 +314,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_23bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_23bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0x7ff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 8388604, i32 0, i32 0)
ret <4 x float> %data
@@ -284,6 +355,15 @@ define amdgpu_ps <4 x float> @buffer_load_voffset_large_24bit(ptr addrspace(8) i
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_voffset_large_24bit:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, 0xfff000 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:4092
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 16777212, i32 0, i32 0)
ret <4 x float> %data
@@ -307,6 +387,12 @@ define amdgpu_ps <4 x float> @buffer_load_idx(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_idx:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %1, i32 0, i32 0, i32 0)
ret <4 x float> %data
@@ -339,6 +425,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_ofs:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 %1, i32 0, i32 0)
ret <4 x float> %data
@@ -371,6 +466,15 @@ define amdgpu_ps <4 x float> @buffer_load_ofs_imm(ptr addrspace(8) inreg, i32) {
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_ofs_imm:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: s_mov_b32 s4, 0
+; NOPRT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; NOPRT-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s4
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen offset:60
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%ofs = add i32 %1, 60
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 0, i32 %ofs, i32 0, i32 0)
@@ -395,6 +499,12 @@ define amdgpu_ps <4 x float> @buffer_load_both(ptr addrspace(8) inreg, i32, i32)
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_both:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %1, i32 %2, i32 0, i32 0)
ret <4 x float> %data
@@ -421,6 +531,13 @@ define amdgpu_ps <4 x float> @buffer_load_both_reversed(ptr addrspace(8) inreg,
; GFX11-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_both_reversed:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v2, v0
+; NOPRT-NEXT: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v4f32(ptr addrspace(8) %0, i32 %2, i32 %1, i32 0, i32 0)
ret <4 x float> %data
@@ -447,6 +564,13 @@ define amdgpu_ps float @buffer_load_x(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_x:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call float @llvm.amdgcn.struct.ptr.buffer.load.format.f32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
ret float %data
@@ -473,6 +597,13 @@ define amdgpu_ps float @buffer_load_x_i32(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_x_i32:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_x v0, v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call i32 @llvm.amdgcn.struct.ptr.buffer.load.format.i32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%fdata = bitcast i32 %data to float
@@ -500,6 +631,13 @@ define amdgpu_ps <2 x float> @buffer_load_xy(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_xy:
+; NOPRT: ; %bb.0: ; %main_body
+; NOPRT-NEXT: v_mov_b32_e32 v0, 0
+; NOPRT-NEXT: buffer_load_format_xy v[0:1], v0, s[0:3], 0 idxen
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: ; return to shader part epilog
main_body:
%data = call <2 x float> @llvm.amdgcn.struct.ptr.buffer.load.format.v2f32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
ret <2 x float> %data
@@ -509,6 +647,10 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v4i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -523,6 +665,10 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v4i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -533,11 +679,25 @@ define amdgpu_cs float @buffer_load_v4i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v4i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v4i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <4 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v4i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %load, 0
store <4 x i32> %data, ptr addrspace(1) %out
@@ -550,6 +710,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v4f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
+; GFX6-NEXT: v_mov_b32_e32 v6, v2
; GFX6-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -564,6 +728,10 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v4f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v6, v2
; GFX8PLUS-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -574,11 +742,25 @@ define amdgpu_cs float @buffer_load_v4f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v4f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
+; GFX11-NEXT: v_mov_b32_e32 v6, v2
; GFX11-NEXT: buffer_load_format_xyzw v[2:6], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b128 v[0:1], v[2:5], off
; GFX11-NEXT: v_mov_b32_e32 v0, v6
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v4f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v6, 0
+; NOPRT-NEXT: buffer_load_format_xyzw v[2:6], v6, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b128 v[0:1], v[2:5], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v6
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <4 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v4f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %load, 0
store <4 x float> %data, ptr addrspace(1) %out
@@ -591,6 +773,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v3i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -606,6 +791,9 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v3i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -616,11 +804,24 @@ define amdgpu_cs float @buffer_load_v3i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v3i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v3i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <3 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v3i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %load, 0
store <3 x i32> %data, ptr addrspace(1) %out
@@ -633,6 +834,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v3f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -648,6 +852,9 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v3f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v5, v2
; GFX8PLUS-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx3 v[0:1], v[2:4]
@@ -658,11 +865,24 @@ define amdgpu_cs float @buffer_load_v3f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v3f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
+; GFX11-NEXT: v_mov_b32_e32 v5, v2
; GFX11-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b96 v[0:1], v[2:4], off
; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v3f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v5, 0
+; NOPRT-NEXT: buffer_load_format_xyz v[2:5], v5, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b96 v[0:1], v[2:4], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v5
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <3 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v3f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %load, 0
store <3 x float> %data, ptr addrspace(1) %out
@@ -675,6 +895,9 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v2i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -689,6 +912,8 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v2i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -699,11 +924,23 @@ define amdgpu_cs float @buffer_load_v2i32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v2i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v2i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <2 x i32>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v2i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %load, 0
store <2 x i32> %data, ptr addrspace(1) %out
@@ -716,6 +953,9 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX6-LABEL: buffer_load_v2f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v5, v2
; GFX6-NEXT: buffer_load_format_xyz v[2:5], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -730,6 +970,8 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX8PLUS-LABEL: buffer_load_v2f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
+; GFX8PLUS-NEXT: v_mov_b32_e32 v4, v2
; GFX8PLUS-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
@@ -740,11 +982,23 @@ define amdgpu_cs float @buffer_load_v2f32_tfe(ptr addrspace(8) inreg %rsrc, ptr
; GFX11-LABEL: buffer_load_v2f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: v_mov_b32_e32 v4, v2
; GFX11-NEXT: buffer_load_format_xy v[2:4], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: v_mov_b32_e32 v0, v4
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_v2f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v4, 0
+; NOPRT-NEXT: buffer_load_format_xy v[2:4], v4, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b64 v[0:1], v[2:3], off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v4
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { <2 x float>, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_v2f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %load, 0
store <2 x float> %data, ptr addrspace(1) %out
@@ -757,6 +1011,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX6-LABEL: buffer_load_i32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -771,6 +1026,7 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX8PLUS-LABEL: buffer_load_i32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -781,11 +1037,22 @@ define amdgpu_cs float @buffer_load_i32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX11-LABEL: buffer_load_i32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_i32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { i32, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_i32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %load, 0
store i32 %data, ptr addrspace(1) %out
@@ -798,6 +1065,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX6-LABEL: buffer_load_f32_tfe:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v2, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, v2
; GFX6-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX6-NEXT: s_mov_b32 s2, 0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
@@ -812,6 +1080,7 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX8PLUS-LABEL: buffer_load_f32_tfe:
; GFX8PLUS: ; %bb.0:
; GFX8PLUS-NEXT: v_mov_b32_e32 v2, 0
+; GFX8PLUS-NEXT: v_mov_b32_e32 v3, v2
; GFX8PLUS-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX8PLUS-NEXT: s_waitcnt vmcnt(0)
; GFX8PLUS-NEXT: flat_store_dword v[0:1], v2
@@ -822,11 +1091,22 @@ define amdgpu_cs float @buffer_load_f32_tfe(ptr addrspace(8) inreg %rsrc, ptr ad
; GFX11-LABEL: buffer_load_f32_tfe:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
; GFX11-NEXT: buffer_load_format_x v[2:3], v2, s[0:3], 0 idxen tfe
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
+;
+; NOPRT-LABEL: buffer_load_f32_tfe:
+; NOPRT: ; %bb.0:
+; NOPRT-NEXT: v_mov_b32_e32 v3, 0
+; NOPRT-NEXT: buffer_load_format_x v[2:3], v3, s[0:3], 0 idxen tfe
+; NOPRT-NEXT: s_waitcnt vmcnt(0)
+; NOPRT-NEXT: global_store_b32 v[0:1], v2, off
+; NOPRT-NEXT: v_mov_b32_e32 v0, v3
+; NOPRT-NEXT: ; return to shader part epilog
%load = call { float, i32 } @llvm.amdgcn.struct.ptr.buffer.load.format.sl_f32i32s(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { float, i32 } %load, 0
store float %data, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll b/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll
new file mode 100644
index 000000000000..f1d946376afe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lto-lower-module-lds.ll
@@ -0,0 +1,47 @@
+
+; Default O0
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O0 -cg-opt-level 0 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O0
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O0 -cg-opt-level 0 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O1
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O1 -cg-opt-level 1 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O1
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O1 -cg-opt-level 1 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O2
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O2 -cg-opt-level 2 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O2
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O2 -cg-opt-level 2 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Default O3
+; RUN: opt -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -O3 -cg-opt-level 3 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; Unified O3
+; RUN: opt -unified-lto -thinlto-split-lto-unit -thinlto-bc -mtriple=amdgcn-- -mcpu=gfx1030 %s -o %t.bc
+; RUN: llvm-lto2 run -unified-lto=full -O3 -cg-opt-level 3 %t.bc -o %t.s -r %t.bc,test,px -debug-pass-manager -debug-pass=Structure 2>&1 | FileCheck %s
+
+; First print will be from the New PM during the full LTO pipeline.
+; Second print will be from the legacy PM during the CG pipeline.
+
+; CHECK: Running pass: AMDGPULowerModuleLDSPass on [module]
+; CHECK: ModulePass Manager
+; CHECK: Lower uses of LDS variables from non-kernel functions
+
+@lds = internal unnamed_addr addrspace(3) global i32 poison, align 4
+
+define amdgpu_kernel void @test() {
+entry:
+ store i32 1, ptr addrspace(3) @lds
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir b/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir
new file mode 100644
index 000000000000..d7f5d1a23789
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-buffer-gfx12.mir
@@ -0,0 +1,1154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GFX12 %s
+
+---
+name: buffer_load_dword_dwordx3
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx3_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx3_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+
+name: buffer_load_dword_dword
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_32
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_32
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub0_sub1_sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFSET]].sub3
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[COPY6]].sub0_sub1
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[COPY6]].sub2
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY8]].sub0
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY killed [[COPY8]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub0_sub1
+ ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFSET]].sub2
+ ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[COPY12]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %10:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 20, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %11:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 24, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %12:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 28, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %13:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %14:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 40, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %15:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 44, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+#
+# buffer_store_dword
+#
+
+name: buffer_store_dword_xyz
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_xyz
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2_sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact %14:vreg_96, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx3_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx3_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[COPY]], %subreg.sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact %14:vreg_96, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx2_dwordx2
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[REG_SEQUENCE2]], %subreg.sub2_sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ %15:vreg_64 = REG_SEQUENCE %6:vgpr_32, %subreg.sub0, %7:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %15:vreg_64, %13:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_dwordx2
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, %10:vreg_64, %subreg.sub1_sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %15:vreg_64, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dwordx2_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact %14:vreg_64, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-LABEL: name: buffer_store_dword_dword
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %6:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_32
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GFX12-LABEL: name: buffer_store_dword_32
+ ; GFX12: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr7
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY11]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY9]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE2]], %subreg.sub0_sub1, [[COPY4]], %subreg.sub2
+ ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE killed [[REG_SEQUENCE3]], %subreg.sub0_sub1_sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]], $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX12-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE5]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFSET_exact killed [[REG_SEQUENCE6]], [[REG_SEQUENCE]], $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %12:vgpr_32 = COPY $vgpr8
+ %11:vgpr_32 = COPY $vgpr7
+ %10:vgpr_32 = COPY $vgpr6
+ %9:vgpr_32 = COPY $vgpr5
+ %8:vgpr_32 = COPY $vgpr4
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %13:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %5:vgpr_32, %13:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %6:vgpr_32, %13:sgpr_128, $sgpr_null, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %7:vgpr_32, %13:sgpr_128, $sgpr_null, 20, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %8:vgpr_32, %13:sgpr_128, $sgpr_null, 24, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %9:vgpr_32, %13:sgpr_128, $sgpr_null, 28, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %10:vgpr_32, %13:sgpr_128, $sgpr_null, 36, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %11:vgpr_32, %13:sgpr_128, $sgpr_null, 40, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %12:vgpr_32, %13:sgpr_128, $sgpr_null, 44, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merged_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_1
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merged_swizzled_1
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzle
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_merge_across_swizzle
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %5:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %4:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_not_merge_across_swizzled_store
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %5:sgpr_128, $sgpr_null, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_merge_across_swizzled_store
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFSET]].sub1
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact %4:vgpr_32, %5:sgpr_128, $sgpr_null, 12, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_idxen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_bothen
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_IDXEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_idxen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_IDXEN_exact]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx2_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen_exact
+body: |
+ bb.0.entry:
+
+ ; GFX12-LABEL: name: buffer_load_dword_dwordx3_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_bothen_exact
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub0_sub1
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN_exact]].sub2
+ ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub0
+ ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN_exact %4, %5:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:vreg_64 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %4, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %6:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE1]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vreg_64 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN_exact %5, %7:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:vgpr_32 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %4, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %6:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GFX12-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+ ; GFX12: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact [[COPY5]], [[REG_SEQUENCE1]], $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vgpr_32 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %6:sgpr_128, $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_IDXEN_exact %5, %7:sgpr_128, $sgpr_null, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-buffer.mir b/llvm/test/CodeGen/AMDGPU/merge-buffer.mir
new file mode 100644
index 000000000000..1c6d429d20ea
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merge-buffer.mir
@@ -0,0 +1,1130 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck -check-prefixes=GCN %s
+
+---
+name: buffer_load_dword_dwordx3
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx3_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx3_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1_sub2
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+
+name: buffer_load_dword_dword
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_32
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_32
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET [[REG_SEQUENCE]], 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0_sub1_sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX4_OFFSET]].sub3
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[COPY6]].sub0_sub1
+ ; GCN-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY killed [[COPY6]].sub2
+ ; GCN-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY8]].sub0
+ ; GCN-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY killed [[COPY8]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFSET [[REG_SEQUENCE]], 0, 36, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_OFFSET]].sub0_sub1
+ ; GCN-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_OFFSET]].sub2
+ ; GCN-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GCN-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[COPY12]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %10:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 20, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %11:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 24, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %12:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 28, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %13:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 36, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %14:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 40, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %15:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 44, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+#
+# buffer_store_dword
+#
+
+name: buffer_store_dword_xyz
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_xyz
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2_sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX3_OFFSET_exact %14:vreg_96, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx3_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx3_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[COPY]], %subreg.sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_96 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1, %6:vgpr_32, %subreg.sub2
+ BUFFER_STORE_DWORDX3_OFFSET_exact %14:vreg_96, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dwordx2_dwordx2
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[REG_SEQUENCE2]], %subreg.sub2_sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE3]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ %15:vreg_64 = REG_SEQUENCE %6:vgpr_32, %subreg.sub0, %7:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORDX2_OFFSET_exact %15:vreg_64, %13:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dwordx2
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_dwordx2
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+    ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+    ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[REG_SEQUENCE1]], %subreg.sub1_sub2
+    ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+    %7:vgpr_32 = COPY $vgpr3
+    %6:vgpr_32 = COPY $vgpr2
+    %5:vgpr_32 = COPY $vgpr1
+    %4:vgpr_32 = COPY $vgpr0
+    %3:sgpr_32 = COPY $sgpr3
+    %2:sgpr_32 = COPY $sgpr2
+    %1:sgpr_32 = COPY $sgpr1
+    %0:sgpr_32 = COPY $sgpr0
+    %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+    %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+    BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+    BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dwordx2_dword
+body: |
+ bb.0.entry:
+    liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+
+    ; GCN-LABEL: name: buffer_store_dwordx2_dword
+    ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[REG_SEQUENCE1]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE2]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %14:vreg_64 = REG_SEQUENCE %4:vgpr_32, %subreg.sub0, %5:vgpr_32, %subreg.sub1
+ BUFFER_STORE_DWORDX2_OFFSET_exact %14:vreg_64, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_dword
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: buffer_store_dword_dword
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY6]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+ ; GCN-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_OFFSET_exact %6:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_store_dword_32
+body: |
+ bb.0.entry:
+ liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GCN-LABEL: name: buffer_store_dword_32
+ ; GCN: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr8
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr7
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY9:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY10:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY11:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY12:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY11]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY9]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GCN-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact killed [[REG_SEQUENCE1]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE2]], %subreg.sub0_sub1, [[COPY4]], %subreg.sub2
+ ; GCN-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE killed [[REG_SEQUENCE3]], %subreg.sub0_sub1_sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: BUFFER_STORE_DWORDX4_OFFSET_exact killed [[REG_SEQUENCE4]], [[REG_SEQUENCE]], 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s128), align 1, addrspace 4)
+ ; GCN-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_96 = REG_SEQUENCE killed [[REG_SEQUENCE5]], %subreg.sub0_sub1, [[COPY]], %subreg.sub2
+ ; GCN-NEXT: BUFFER_STORE_DWORDX3_OFFSET_exact killed [[REG_SEQUENCE6]], [[REG_SEQUENCE]], 0, 36, 0, 0, implicit $exec :: (dereferenceable store (s96), align 1, addrspace 4)
+ %12:vgpr_32 = COPY $vgpr8
+ %11:vgpr_32 = COPY $vgpr7
+ %10:vgpr_32 = COPY $vgpr6
+ %9:vgpr_32 = COPY $vgpr5
+ %8:vgpr_32 = COPY $vgpr4
+ %7:vgpr_32 = COPY $vgpr3
+ %6:vgpr_32 = COPY $vgpr2
+ %5:vgpr_32 = COPY $vgpr1
+ %4:vgpr_32 = COPY $vgpr0
+ %3:sgpr_32 = COPY $sgpr3
+ %2:sgpr_32 = COPY $sgpr2
+ %1:sgpr_32 = COPY $sgpr1
+ %0:sgpr_32 = COPY $sgpr0
+ %13:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ BUFFER_STORE_DWORD_OFFSET_exact %4:vgpr_32, %13:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %5:vgpr_32, %13:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %6:vgpr_32, %13:sgpr_128, 0, 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %7:vgpr_32, %13:sgpr_128, 0, 20, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %8:vgpr_32, %13:sgpr_128, 0, 24, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %9:vgpr_32, %13:sgpr_128, 0, 28, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %10:vgpr_32, %13:sgpr_128, 0, 36, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %11:vgpr_32, %13:sgpr_128, 0, 40, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %12:vgpr_32, %13:sgpr_128, 0, 44, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_not_merged_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_not_merged_swizzled_1
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_not_merged_swizzled_1
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzle
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_merge_across_swizzle
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %5:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 12, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %4:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_merge_across_swizzled_store
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_merge_across_swizzled_store
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[COPY4]], [[REG_SEQUENCE]], 0, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ BUFFER_STORE_DWORD_OFFSET_exact %4:vgpr_32, %5:sgpr_128, 0, 6, 0, 1, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 4)
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_idxen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_bothen
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_IDXEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_IDXEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_idxen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_IDXEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_IDXEN_exact]].sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_idxen_exact_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_IDXEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_IDXEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dwordx2_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub1_sub2
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact
+body: |
+ bb.0.entry:
+
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dwordx3_bothen_exact
+body: |
+ bb.0.entry:
+
+ ; GCN-LABEL: name: buffer_load_dword_dwordx3_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX4_BOTHEN_exact:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_96 = COPY killed [[BUFFER_LOAD_DWORDX4_BOTHEN_exact]].sub1_sub2_sub3
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_bothen_exact
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX3_BOTHEN_exact:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub0_sub1
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX3_BOTHEN_exact]].sub2
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY killed [[COPY5]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dword_dword_dword_bothen_exact_swizzled_0
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORD_BOTHEN_exact:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 4)
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub0
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY killed [[BUFFER_LOAD_DWORDX2_BOTHEN_exact]].sub1
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 4, 0, 1, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %8:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 8, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN_exact %4, %5:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_vaddr
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vreg_64 = COPY $vgpr0
+ %5:vreg_64 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %4, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %6:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_bothen_exact_diff_srsrc
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_BOTHEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact [[COPY5]], [[REG_SEQUENCE1]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vreg_64 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN_exact %5, %7:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_vaddr
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY4]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:vgpr_32 = COPY $vgpr0
+ %5:vgpr_32 = COPY $vgpr1
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %4, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %6:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
+---
+
+name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+body: |
+ bb.0.entry:
+ ; GCN-LABEL: name: buffer_load_dwordx2_dwordx2_idxen_exact_diff_srsrc
+ ; GCN: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr4
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE]], 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GCN-NEXT: [[BUFFER_LOAD_DWORDX2_IDXEN_exact1:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact [[COPY5]], [[REG_SEQUENCE1]], 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %0:sgpr_32 = COPY $sgpr0
+ %1:sgpr_32 = COPY $sgpr1
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_32 = COPY $sgpr4
+ %5:vgpr_32 = COPY $vgpr0
+ %6:sgpr_128 = REG_SEQUENCE %0:sgpr_32, %subreg.sub0, %1:sgpr_32, %subreg.sub1, %2:sgpr_32, %subreg.sub2, %3:sgpr_32, %subreg.sub3
+ %7:sgpr_128 = REG_SEQUENCE %1:sgpr_32, %subreg.sub0, %2:sgpr_32, %subreg.sub1, %3:sgpr_32, %subreg.sub2, %4:sgpr_32, %subreg.sub3
+ %8:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %6:sgpr_128, 0, 4, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ %9:vreg_64 = BUFFER_LOAD_DWORDX2_IDXEN_exact %5, %7:sgpr_128, 0, 12, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir b/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
index c86b5adec372..9766b427b432 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-tbuffer.mir
@@ -7,9 +7,37 @@
# GFX9 tests
#
+---
name: gfx9_tbuffer_load_x_xyz
body: |
bb.0.entry:
+ ; GFX9-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX9: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX9-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX9-NEXT: [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET:%[0-9]+]]:vreg_128 = TBUFFER_LOAD_FORMAT_XYZW_OFFSET [[REG_SEQUENCE]], 0, 4, 126, 0, 0, implicit $exec :: (dereferenceable load (s128), align 1, addrspace 4)
+ ; GFX9-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET]].sub0
+ ; GFX9-NEXT: [[COPY5:%[0-9]+]]:vreg_96 = COPY killed [[TBUFFER_LOAD_FORMAT_XYZW_OFFSET]].sub1_sub2_sub3
+ ;
+ ; GFX10-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX10: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX10-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX10-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX10-NEXT: [[TBUFFER_LOAD_FORMAT_X_OFFSET:%[0-9]+]]:vgpr_32 = TBUFFER_LOAD_FORMAT_X_OFFSET [[REG_SEQUENCE]], 0, 4, 116, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX10-NEXT: [[TBUFFER_LOAD_FORMAT_XYZ_OFFSET:%[0-9]+]]:vreg_96 = TBUFFER_LOAD_FORMAT_XYZ_OFFSET [[REG_SEQUENCE]], 0, 8, 125, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
+ ;
+ ; GFX11-LABEL: name: gfx9_tbuffer_load_x_xyz
+ ; GFX11: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; GFX11-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; GFX11-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; GFX11-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX11-NEXT: [[TBUFFER_LOAD_FORMAT_X_OFFSET:%[0-9]+]]:vgpr_32 = TBUFFER_LOAD_FORMAT_X_OFFSET [[REG_SEQUENCE]], 0, 4, 116, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
+ ; GFX11-NEXT: [[TBUFFER_LOAD_FORMAT_XYZ_OFFSET:%[0-9]+]]:vreg_96 = TBUFFER_LOAD_FORMAT_XYZ_OFFSET [[REG_SEQUENCE]], 0, 8, 125, 0, 0, implicit $exec :: (dereferenceable load (s96), align 1, addrspace 4)
%0:sgpr_32 = COPY $sgpr0
%1:sgpr_32 = COPY $sgpr1
%2:sgpr_32 = COPY $sgpr2
diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
index cbdc7bb45634..69971bca2738 100644
--- a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
@@ -27,7 +27,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-LABEL: csr_vgpr_spill_fp_callee:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s24, s33
+; CHECK-NEXT: s_mov_b32 s18, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
@@ -43,7 +43,6 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; clobber csr v40
@@ -55,7 +54,7 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s24
+; CHECK-NEXT: s_mov_b32 s33, s18
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
bb:
@@ -88,7 +87,6 @@ define amdgpu_kernel void @kernel_call() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
bb:
@@ -148,7 +146,6 @@ define amdgpu_kernel void @kernel_tailcall() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
bb:
@@ -173,7 +170,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp_tail_call:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s24, s33
+; CHECK-NEXT: s_mov_b32 s18, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -188,7 +185,6 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v1, 1
; CHECK-NEXT: v_readlane_b32 s30, v1, 0
@@ -196,7 +192,7 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s24
+; CHECK-NEXT: s_mov_b32 s33, s18
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -208,7 +204,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-LABEL: caller_save_vgpr_spill_fp:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s25, s33
+; CHECK-NEXT: s_mov_b32 s19, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v2, off, s[0:3], s33 ; 4-byte Folded Spill
@@ -223,7 +219,6 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v2, 1
; CHECK-NEXT: v_readlane_b32 s30, v2, 0
@@ -231,7 +226,7 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
; CHECK-NEXT: buffer_load_dword v2, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_add_i32 s32, s32, 0xfffffc00
-; CHECK-NEXT: s_mov_b32 s33, s25
+; CHECK-NEXT: s_mov_b32 s33, s19
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -263,7 +258,6 @@ define protected amdgpu_kernel void @kernel() {
; CHECK-NEXT: ; implicit-def: $sgpr15
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir b/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
index 3de258bb52a5..bf2cf6aeb990 100644
--- a/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
+++ b/llvm/test/CodeGen/AMDGPU/neighboring-mfma-padding.mir
@@ -5,6 +5,14 @@
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -amdgpu-mfma-padding-ratio=75 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx908-PAD75 %s
# RUN: llc -mtriple=amdgcn -mcpu=gfx908 -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx908-PAD100 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-DEFAULT %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-mfma-padding-ratio=50 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-PAD50 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx90a-PAD100 %s
+
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-DEFAULT %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-mfma-padding-ratio=50 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-PAD50 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-mfma-padding-ratio=100 -verify-machineinstrs -run-pass post-RA-hazard-rec %s -o - | FileCheck -check-prefix=gfx940-PAD100 %s
+
---
name: mfma_padding_2_pass
body: |
@@ -31,6 +39,35 @@ body: |
; gfx908-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 1
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 0
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 1
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_NOP 1
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 1
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -64,6 +101,40 @@ body: |
; gfx908-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 0
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 0
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_NOP 0
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 0
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass_1_intervening_valu
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 0
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
@@ -100,6 +171,41 @@ body: |
; gfx908-PAD100-NEXT: DBG_VALUE
; gfx908-PAD100-NEXT: S_NOP 1
; gfx908-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: DBG_VALUE
+ ; gfx90a-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: DBG_VALUE
+ ; gfx90a-PAD50-NEXT: S_NOP 0
+ ; gfx90a-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx90a-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: DBG_VALUE
+ ; gfx90a-PAD100-NEXT: S_NOP 1
+ ; gfx90a-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-DEFAULT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: DBG_VALUE
+ ; gfx940-DEFAULT-NEXT: S_NOP 1
+ ; gfx940-DEFAULT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-PAD50: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: DBG_VALUE
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_2_pass_dbg
+ ; gfx940-PAD100: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: DBG_VALUE
+ ; gfx940-PAD100-NEXT: S_NOP 1
+ ; gfx940-PAD100-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
DBG_VALUE
$agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
@@ -132,6 +238,34 @@ body: |
; gfx908-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 3
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_8_pass
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_8_pass
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_8_pass
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 3
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_8_pass
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -172,6 +306,46 @@ body: |
; gfx908-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: S_NOP 5
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 1
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 5
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 1
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_8_pass_2_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 5
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -207,6 +381,36 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 7
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 7
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -258,6 +462,60 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 3
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 3
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 3
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 3
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_4_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 3
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -369,6 +627,126 @@ body: |
; gfx908-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_16_intervening_valu
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr4 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr5 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr6 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr7 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr8 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr10 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr11 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr12 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr13 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr14 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr15 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr16 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$vgpr2 = V_MOV_B32_e32 1, implicit $exec
$vgpr3 = V_MOV_B32_e32 1, implicit $exec
@@ -414,6 +792,30 @@ body: |
; gfx908-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
; gfx908-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx90a-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-DEFAULT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-PAD50: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_occ_1
+ ; gfx940-PAD100: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
...
@@ -506,6 +908,108 @@ body: |
; gfx908-PAD100-NEXT: S_NOP 7
; gfx908-PAD100-NEXT: S_NOP 5
; gfx908-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-DEFAULT-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-DEFAULT: bb.0:
+ ; gfx90a-DEFAULT-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: bb.1:
+ ; gfx90a-DEFAULT-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: {{ $}}
+ ; gfx90a-DEFAULT-NEXT: bb.2:
+ ; gfx90a-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD50-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-PAD50: bb.0:
+ ; gfx90a-PAD50-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: bb.1:
+ ; gfx90a-PAD50-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: {{ $}}
+ ; gfx90a-PAD50-NEXT: bb.2:
+ ; gfx90a-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD50-NEXT: S_NOP 5
+ ; gfx90a-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx90a-PAD100-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx90a-PAD100: bb.0:
+ ; gfx90a-PAD100-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: bb.1:
+ ; gfx90a-PAD100-NEXT: successors: %bb.2(0x80000000)
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: {{ $}}
+ ; gfx90a-PAD100-NEXT: bb.2:
+ ; gfx90a-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx90a-PAD100-NEXT: S_NOP 7
+ ; gfx90a-PAD100-NEXT: S_NOP 5
+ ; gfx90a-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-DEFAULT-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-DEFAULT: bb.0:
+ ; gfx940-DEFAULT-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-DEFAULT-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: bb.1:
+ ; gfx940-DEFAULT-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: {{ $}}
+ ; gfx940-DEFAULT-NEXT: bb.2:
+ ; gfx940-DEFAULT-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-DEFAULT-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD50-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-PAD50: bb.0:
+ ; gfx940-PAD50-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD50-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: bb.1:
+ ; gfx940-PAD50-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: {{ $}}
+ ; gfx940-PAD50-NEXT: bb.2:
+ ; gfx940-PAD50-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD50-NEXT: S_NOP 5
+ ; gfx940-PAD50-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ;
+ ; gfx940-PAD100-LABEL: name: mfma_padding_16_pass_2_preds
+ ; gfx940-PAD100: bb.0:
+ ; gfx940-PAD100-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; gfx940-PAD100-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: bb.1:
+ ; gfx940-PAD100-NEXT: successors: %bb.2(0x80000000)
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: {{ $}}
+ ; gfx940-PAD100-NEXT: bb.2:
+ ; gfx940-PAD100-NEXT: $vgpr3 = V_MOV_B32_e32 1, implicit $exec
+ ; gfx940-PAD100-NEXT: S_NOP 7
+ ; gfx940-PAD100-NEXT: S_NOP 5
+ ; gfx940-PAD100-NEXT: early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
bb.0:
$agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
diff --git a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
index 34e67d0993fb..9999cb9173b5 100644
--- a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
@@ -32,7 +32,6 @@ define hidden void @_ZL3barv() #0 !dbg !1644 {
; CHECK-NEXT: s_mov_b64 s[20:21], s[0:1]
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: .Ltmp1:
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
index a70488a00db7..a030f86da1b6 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll
@@ -1,17 +1,20 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,HSA,ASM %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA,OBJ %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -filetype=obj < %s | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,NON-HSA,OBJ %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=1 -asm-verbose=0 < %s | llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx940 -filetype=obj | llvm-objdump --arch=amdgcn --mcpu=gfx940 --disassemble - | FileCheck -check-prefixes=GCN,HSA,OBJ %s
; GCN: preload_kernarg_header
; HSA: s_trap 2
; NON-HSA: s_endpgm
-; GCN-COUNT-63: s_nop 0
+; ASM: .fill 63, 4, 0xbf800000 ; s_nop 0
+; OBJ-COUNT-63: s_nop 0
define amdgpu_kernel void @preload_kernarg_header(ptr %arg) {
store ptr %arg, ptr %arg
ret void
}
; GCN: non_kernel_function
+; GCN-NOT: s_trap 2
; GCN-NOT: s_nop 0
; GCN: flat_store
define void @non_kernel_function(ptr %arg) {
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
index e7488e059ee9..20edbd6c0d0f 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs-inreg-hints.ll
@@ -157,27 +157,27 @@ define amdgpu_kernel void @test_preload_hint_kernel_1_call_func(ptr %0) #0 {
define amdgpu_kernel void @test_preload_hint_kernel_1_call_intrinsic(i16 %0) #0 {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR2]] {
+; NO-PRELOAD-SAME: (i16 [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; NO-PRELOAD-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-1-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-1-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-3-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-3-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-16-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-16-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-16-NEXT: ret void
;
; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_1_call_intrinsic
-; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR2]] {
+; PRELOAD-20-SAME: (i16 inreg [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] {
; PRELOAD-20-NEXT: call void @llvm.amdgcn.set.prio(i16 [[TMP0]])
; PRELOAD-20-NEXT: ret void
;
@@ -235,23 +235,23 @@ define amdgpu_kernel void @test_preload_hint_kernel_2_preexisting(i32 inreg %0,
define amdgpu_kernel void @test_preload_hint_kernel_incompatible_attributes(ptr addrspace(4) byref(i32) %0, ptr nest %1) {
; NO-PRELOAD-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; NO-PRELOAD-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; NO-PRELOAD-NEXT: ret void
;
; PRELOAD-1-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-1-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-1-NEXT: ret void
;
; PRELOAD-3-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-3-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-3-NEXT: ret void
;
; PRELOAD-16-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-16-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-16-NEXT: ret void
;
; PRELOAD-20-LABEL: define {{[^@]+}}@test_preload_hint_kernel_incompatible_attributes
-; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
+; PRELOAD-20-SAME: (ptr addrspace(4) byref(i32) [[TMP0:%.*]], ptr nest [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
; PRELOAD-20-NEXT: ret void
;
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
index d20c3a4007ff..f0e709b5a172 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll
@@ -24,70 +24,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i8:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -98,70 +36,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i8:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -170,70 +46,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i8:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -242,70 +56,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i8:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -325,70 +77,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -399,70 +89,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -471,70 +99,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -543,70 +109,8 @@ define amdgpu_kernel void @ptr1_i8(ptr addrspace(1) %out, i8 %arg0) {
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i8:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -631,70 +135,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -705,70 +147,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
@@ -778,70 +158,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
@@ -851,70 +169,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
@@ -935,70 +191,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1009,70 +203,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
@@ -1082,70 +214,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
@@ -1155,70 +225,8 @@ define amdgpu_kernel void @ptr1_i8_zext_arg(ptr addrspace(1) %out, i8 zeroext %a
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i8_zext_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_mov_b32 s0, 0xffff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
@@ -1244,70 +252,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1318,70 +264,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -1390,70 +274,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -1462,70 +284,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_and_b32 s0, s4, 0xffff
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -1545,70 +305,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1619,70 +317,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -1691,70 +327,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -1763,70 +337,8 @@ define amdgpu_kernel void @ptr1_i16_preload_arg(ptr addrspace(1) %out, i16 %arg0
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s0, s8, 0xffff
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -1850,70 +362,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -1923,70 +373,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s4
@@ -1994,70 +382,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s4
@@ -2065,70 +391,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s4
@@ -2146,70 +410,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -2219,70 +421,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s8
@@ -2290,70 +430,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s8
@@ -2361,70 +439,8 @@ define amdgpu_kernel void @ptr1_i32_preload_arg(ptr addrspace(1) %out, i32 %arg0
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s8
@@ -2449,70 +465,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s3, s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
@@ -2524,70 +478,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x10
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -2598,70 +490,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_add_i32 s0, s2, s6
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -2670,70 +500,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_add_i32 s0, s2, s6
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -2754,70 +522,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s2, s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
@@ -2829,70 +535,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x10
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -2903,70 +547,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_add_i32 s0, s6, s10
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -2975,70 +557,8 @@ define amdgpu_kernel void @i32_ptr1_i32_preload_arg(i32 %arg0, ptr addrspace(1)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: i32_ptr1_i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_add_i32 s0, s6, s10
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -3065,70 +585,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3141,70 +599,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-2-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3217,70 +613,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 16
; GFX940-PRELOAD-4-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3291,70 +625,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 16
; GFX940-PRELOAD-8-NEXT: s_and_b32 s1, s4, 0xffff
@@ -3378,70 +650,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3454,70 +664,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-2-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3530,70 +678,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 16
; GFX90a-PRELOAD-4-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3604,70 +690,8 @@ define amdgpu_kernel void @ptr1_i16_i16_preload_arg(ptr addrspace(1) %out, i16 %
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_i16_i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 16
; GFX90a-PRELOAD-8-NEXT: s_and_b32 s1, s8, 0xffff
@@ -3695,70 +719,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dword s0, s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -3768,70 +730,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3841,70 +741,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3914,70 +752,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -3997,70 +773,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4070,70 +784,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4143,70 +795,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4216,70 +806,8 @@ define amdgpu_kernel void @ptr1_v2i8_preload_arg(ptr addrspace(1) %out, <2 x i8>
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: ptr1_v2i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -4308,70 +836,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4385,70 +851,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -4462,70 +866,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -4539,70 +881,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: byref_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x100
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -4630,70 +910,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -4707,70 +925,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
@@ -4784,70 +940,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
@@ -4861,70 +955,8 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) %out, ptr addrspac
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: byref_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x100
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
@@ -4964,70 +996,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v8i32_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
@@ -5046,70 +1016,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v8i32_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
@@ -5128,70 +1036,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v8i32_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
@@ -5210,70 +1056,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v8i32_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x20
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
@@ -5311,70 +1095,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v4, 0
@@ -5393,70 +1115,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v4, 0
@@ -5475,70 +1135,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v4, 0
@@ -5557,70 +1155,8 @@ define amdgpu_kernel void @v8i32_arg(ptr addrspace(1) nocapture %out, <8 x i32>
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v8i32_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x20
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v4, 0
@@ -5654,70 +1190,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -5729,70 +1203,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s5
@@ -5802,70 +1214,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s5
@@ -5875,70 +1225,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3i16_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s5
@@ -5959,70 +1247,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -6034,70 +1260,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s9
@@ -6107,70 +1271,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s9
@@ -6180,70 +1282,8 @@ define amdgpu_kernel void @v3i16_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3i16_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s9
@@ -6269,70 +1309,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6344,70 +1322,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s7
@@ -6417,70 +1333,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s7
@@ -6490,70 +1344,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3i32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s7
@@ -6575,70 +1367,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6650,70 +1380,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v1, s11
@@ -6723,70 +1391,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v1, s11
@@ -6796,70 +1402,8 @@ define amdgpu_kernel void @v3i32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3i32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v1, s11
@@ -6885,70 +1429,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x10
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -6960,70 +1442,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s6
@@ -7033,70 +1453,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s6
@@ -7106,70 +1464,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v3f32_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s6
@@ -7191,70 +1487,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v3, 0
@@ -7266,70 +1500,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v0, s10
@@ -7339,70 +1511,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v0, s10
@@ -7412,70 +1522,8 @@ define amdgpu_kernel void @v3f32_preload_arg(ptr addrspace(1) nocapture %out, <3
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v3f32_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v3, 0
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v0, s10
@@ -7500,70 +1548,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -7575,70 +1561,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7655,70 +1579,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7735,70 +1597,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v5i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s4, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7826,70 +1626,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v0, 0
@@ -7901,70 +1639,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -7981,70 +1657,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -8061,70 +1675,8 @@ define amdgpu_kernel void @v5i8_preload_arg(ptr addrspace(1) nocapture %out, <5
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v5i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s8, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -8167,70 +1719,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v5f64_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8252,70 +1742,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v5f64_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8337,70 +1765,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v5f64_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-4-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8422,70 +1788,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v5f64_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x60
; GFX940-PRELOAD-8-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x40
@@ -8529,70 +1833,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8614,70 +1856,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-2-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8699,70 +1879,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-4-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8784,70 +1902,8 @@ define amdgpu_kernel void @v5f64_arg(ptr addrspace(1) nocapture %out, <5 x doubl
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v5f64_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x60
; GFX90a-PRELOAD-8-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x40
@@ -8882,70 +1938,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -8955,70 +1949,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9042,70 +1974,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9129,70 +1999,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: v8i8_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: s_lshr_b32 s0, s5, 8
; GFX940-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9225,70 +2033,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9298,70 +2044,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-2-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9384,70 +2068,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-4-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9470,70 +2092,8 @@ define amdgpu_kernel void @v8i8_preload_arg(ptr addrspace(1) %out, <8 x i8> %in)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: v8i8_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: s_lshr_b32 s0, s9, 8
; GFX90a-PRELOAD-8-NEXT: v_lshlrev_b16_e64 v0, 8, s0
@@ -9570,70 +2130,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9643,70 +2141,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9714,70 +2150,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9785,70 +2159,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -9866,70 +2178,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -9939,70 +2189,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10010,70 +2198,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10081,70 +2207,8 @@ define amdgpu_kernel void @i64_kernel_preload_arg(ptr addrspace(1) %out, i64 %a)
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: i64_kernel_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10166,70 +2230,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-NO-PRELOAD-NEXT: s_endpgm
;
; GFX940-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
-; GFX940-PRELOAD-1-NEXT: s_nop 0
+; GFX940-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-1-NEXT: ; %bb.0:
; GFX940-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; GFX940-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -10239,70 +2241,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-1-NEXT: s_endpgm
;
; GFX940-PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
-; GFX940-PRELOAD-2-NEXT: s_nop 0
+; GFX940-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-2-NEXT: ; %bb.0:
; GFX940-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-2-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10310,70 +2250,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-2-NEXT: s_endpgm
;
; GFX940-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
-; GFX940-PRELOAD-4-NEXT: s_nop 0
+; GFX940-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-4-NEXT: ; %bb.0:
; GFX940-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-4-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10381,70 +2259,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX940-PRELOAD-4-NEXT: s_endpgm
;
; GFX940-PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; GFX940-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
-; GFX940-PRELOAD-8-NEXT: s_nop 0
+; GFX940-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX940-PRELOAD-8-NEXT: ; %bb.0:
; GFX940-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX940-PRELOAD-8-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
@@ -10462,70 +2278,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-NO-PRELOAD-NEXT: s_endpgm
;
; GFX90a-PRELOAD-1-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-1: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
-; GFX90a-PRELOAD-1-NEXT: s_nop 0
+; GFX90a-PRELOAD-1: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-1-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-1-NEXT: ; %bb.0:
; GFX90a-PRELOAD-1-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; GFX90a-PRELOAD-1-NEXT: v_mov_b32_e32 v2, 0
@@ -10535,70 +2289,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-1-NEXT: s_endpgm
;
; GFX90a-PRELOAD-2-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-2: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
-; GFX90a-PRELOAD-2-NEXT: s_nop 0
+; GFX90a-PRELOAD-2: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-2-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-2-NEXT: ; %bb.0:
; GFX90a-PRELOAD-2-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-2-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10606,70 +2298,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-2-NEXT: s_endpgm
;
; GFX90a-PRELOAD-4-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-4: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
-; GFX90a-PRELOAD-4-NEXT: s_nop 0
+; GFX90a-PRELOAD-4: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-4-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-4-NEXT: ; %bb.0:
; GFX90a-PRELOAD-4-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-4-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
@@ -10677,70 +2307,8 @@ define amdgpu_kernel void @f64_kernel_preload_arg(ptr addrspace(1) %out, double
; GFX90a-PRELOAD-4-NEXT: s_endpgm
;
; GFX90a-PRELOAD-8-LABEL: f64_kernel_preload_arg:
-; GFX90a-PRELOAD-8: s_trap 2 ; Trap with incompatible firmware that doesn't support preloading kernel arguments.
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
-; GFX90a-PRELOAD-8-NEXT: s_nop 0
+; GFX90a-PRELOAD-8: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-8-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
; GFX90a-PRELOAD-8-NEXT: ; %bb.0:
; GFX90a-PRELOAD-8-NEXT: v_mov_b32_e32 v2, 0
; GFX90a-PRELOAD-8-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
index d92ba7774bd3..d070dc3b770f 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
@@ -203,13 +203,13 @@ attributes #5 = { "amdgpu-flat-work-group-size"="128,512" }
attributes #6 = { "amdgpu-flat-work-group-size"="512,512" }
attributes #7 = { "amdgpu-flat-work-group-size"="64,256" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll b/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
index 2df219bd0401..f62f1d57aec8 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-waves-per-eu.ll
@@ -399,26 +399,26 @@ attributes #17 = { "amdgpu-waves-per-eu"="5,8" }
attributes #18 = { "amdgpu-waves-per-eu"="9,10" }
attributes #19 = { "amdgpu-waves-per-eu"="8,9" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,2" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,4" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,2" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR10]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR11]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="0,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR12]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,123" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR13]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR14]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,6" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR15]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR16]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR17]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="5,5" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR18]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,8" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR19]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR20]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR21]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,2" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,4" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,1" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,2" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR10]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR11]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="0,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR12]] = { "amdgpu-flat-work-group-size"="1,64" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="1,123" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR13]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="2,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR14]] = { "amdgpu-flat-work-group-size"="1,512" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="3,6" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR15]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR16]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="6,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR17]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="5,5" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR18]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,8" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR19]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR20]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="9,9" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR21]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="8,9" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
index 2ccc24152a9f..fdfc9b043cc9 100644
--- a/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
+++ b/llvm/test/CodeGen/AMDGPU/ran-out-of-sgprs-allocation-failure.mir
@@ -24,6 +24,7 @@ registers:
- { id: 10, class: sreg_64_xexec, preferred-register: '$vcc' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
maxKernArgAlign: 1
diff --git a/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll b/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
index eaef63bbfc3c..c1d647c5d3b9 100644
--- a/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
+++ b/llvm/test/CodeGen/AMDGPU/recursive_global_initializer.ll
@@ -19,5 +19,5 @@ define void @hoge() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll b/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
index 297a056526ca..384a9c4043a1 100644
--- a/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
+++ b/llvm/test/CodeGen/AMDGPU/remove-no-kernel-id-attribute.ll
@@ -191,11 +191,11 @@ define amdgpu_kernel void @kernel_lds_recursion() {
!1 = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-lds-size"="2" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-lds-size"="2" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR3]] = { "amdgpu-lds-size"="4" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-lds-size"="2" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-lds-size"="2" "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR5:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
; CHECK: attributes #[[ATTR6:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
index c0d199920bd9..09037709d51d 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
@@ -181,6 +181,8 @@ legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
liveins:
- { reg: '$vgpr0', virtual-reg: '%0' }
- { reg: '$vgpr1', virtual-reg: '%1' }
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
index efbdbca9da6b..c6ccbd99bf89 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
@@ -78,6 +78,7 @@
name: sgpr_spill_wrong_stack_id
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
index 764f4942cbd0..f523b4a2495f 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
@@ -16,7 +16,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-LABEL: spill_sgpr_with_no_lower_vgpr_available:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_or_saveexec_b64 s[16:17], -1
; GCN-NEXT: buffer_store_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Spill
@@ -150,7 +150,6 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: v_readlane_b32 s31, v255, 1
@@ -270,7 +269,7 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
; GCN-NEXT: buffer_load_dword v255, off, s[0:3], s33 offset:448 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, align 4, addrspace(5)
@@ -311,7 +310,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-LABEL: spill_to_lowest_available_vgpr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_or_saveexec_b64 s[16:17], -1
; GCN-NEXT: buffer_store_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -444,7 +443,6 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: v_readlane_b32 s31, v254, 1
@@ -563,7 +561,7 @@ define void @spill_to_lowest_available_vgpr() #0 {
; GCN-NEXT: buffer_load_dword v254, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, align 4, addrspace(5)
@@ -1530,7 +1528,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-LABEL: spill_sgpr_no_free_vgpr_ipra:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s24, s33
+; GCN-NEXT: s_mov_b32 s18, s33
; GCN-NEXT: s_mov_b32 s33, s32
; GCN-NEXT: s_add_i32 s32, s32, 0x7400
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Spill
@@ -1668,7 +1666,6 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-NEXT: s_mov_b64 s[4:5], exec
; GCN-NEXT: s_mov_b64 exec, 1
@@ -1801,7 +1798,7 @@ define void @spill_sgpr_no_free_vgpr_ipra() #0 {
; GCN-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:440 ; 4-byte Folded Reload
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:444 ; 4-byte Folded Reload
; GCN-NEXT: s_add_i32 s32, s32, 0xffff8c00
-; GCN-NEXT: s_mov_b32 s33, s24
+; GCN-NEXT: s_mov_b32 s33, s18
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
call void @child_function_ipra()
diff --git a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
index f229f33664e1..539cfc71a80f 100644
--- a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
@@ -73,7 +73,7 @@ define amdgpu_kernel void @test_simple_indirect_call() {
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir b/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
index 355829825146..f8ec6bb5d943 100644
--- a/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
+++ b/llvm/test/CodeGen/AMDGPU/snippet-copy-bundle-regression.mir
@@ -21,6 +21,7 @@
name: kernel
tracksRegLiveness: true
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
index b8bc01e0b879..c6a599094fe4 100644
--- a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
+++ b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
@@ -916,13 +916,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_mov_b32 s32, 0x1200
-; WAVE32-O0-NEXT: s_getpc_b64 s[24:25]
-; WAVE32-O0-NEXT: s_mov_b32 s24, s0
-; WAVE32-O0-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE32-O0-NEXT: s_getpc_b64 s[20:21]
+; WAVE32-O0-NEXT: s_mov_b32 s20, s0
+; WAVE32-O0-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-O0-NEXT: s_waitcnt lgkmcnt(0)
-; WAVE32-O0-NEXT: s_bitset0_b32 s27, 21
-; WAVE32-O0-NEXT: s_add_u32 s24, s24, s9
-; WAVE32-O0-NEXT: s_addc_u32 s25, s25, 0
+; WAVE32-O0-NEXT: s_bitset0_b32 s23, 21
+; WAVE32-O0-NEXT: s_add_u32 s20, s20, s9
+; WAVE32-O0-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; WAVE32-O0-NEXT: s_mov_b32 s14, s8
; WAVE32-O0-NEXT: s_mov_b32 s13, s7
@@ -934,17 +934,17 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 0
; WAVE32-O0-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 1
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s20, -1
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:128 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s20
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:128 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 42
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
-; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
-; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
+; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
+; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-O0-NEXT: s_mov_b32 s6, s32
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 17
-; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[24:27], s6 offset:4
+; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], s6 offset:4
; WAVE32-O0-NEXT: s_mov_b32 s6, stack_passed_argument@abs32@hi
; WAVE32-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -1018,11 +1018,10 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s20, -1
-; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:128 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s20
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[20:23], 0 offset:128 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s1, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s0, v0, 0
@@ -1137,7 +1136,6 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE64-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:128 ; 4-byte Folded Reload
@@ -1155,13 +1153,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-WWM-PREALLOC: ; %bb.0:
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s32, 0x1200
-; WAVE32-WWM-PREALLOC-NEXT: s_getpc_b64 s[24:25]
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s24, s0
-; WAVE32-WWM-PREALLOC-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE32-WWM-PREALLOC-NEXT: s_getpc_b64 s[20:21]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s20, s0
+; WAVE32-WWM-PREALLOC-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt lgkmcnt(0)
-; WAVE32-WWM-PREALLOC-NEXT: s_bitset0_b32 s27, 21
-; WAVE32-WWM-PREALLOC-NEXT: s_add_u32 s24, s24, s9
-; WAVE32-WWM-PREALLOC-NEXT: s_addc_u32 s25, s25, 0
+; WAVE32-WWM-PREALLOC-NEXT: s_bitset0_b32 s23, 21
+; WAVE32-WWM-PREALLOC-NEXT: s_add_u32 s20, s20, s9
+; WAVE32-WWM-PREALLOC-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $vgpr32 : SGPR spill to VGPR lane
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s14, s8
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s13, s7
@@ -1174,13 +1172,13 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-WWM-PREALLOC-NEXT: v_writelane_b32 v32, s0, 1
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v3, 42
-; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[24:27], 0
+; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[20:23], 0
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt_vscnt null, 0x0
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[0:1], s[24:25]
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[2:3], s[26:27]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[0:1], s[20:21]
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s6, s32
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v3, 17
-; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[24:27], s6 offset:4
+; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v3, off, s[20:23], s6 offset:4
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s6, stack_passed_argument@abs32@hi
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-WWM-PREALLOC-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
@@ -1254,7 +1252,6 @@ define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-WWM-PREALLOC-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s1, v32, 1
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s0, v32, 0
@@ -1347,7 +1344,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-O0-NEXT: s_mov_b32 s26, s33
+; WAVE32-O0-NEXT: s_mov_b32 s25, s33
; WAVE32-O0-NEXT: s_mov_b32 s33, s32
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1361,9 +1358,9 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 0
; WAVE32-O0-NEXT: s_lshr_b32 s16, s16, 5
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 1
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s25, -1
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s25
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, 42
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
@@ -1440,11 +1437,10 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
-; WAVE32-O0-NEXT: s_or_saveexec_b32 s25, -1
+; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
-; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s25
+; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s5, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s4, v0, 0
@@ -1460,14 +1456,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: s_add_i32 s32, s32, 0xffffee00
-; WAVE32-O0-NEXT: s_mov_b32 s33, s26
+; WAVE32-O0-NEXT: s_mov_b32 s33, s25
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE64-O0-NEXT: s_mov_b32 s28, s33
+; WAVE64-O0-NEXT: s_mov_b32 s19, s33
; WAVE64-O0-NEXT: s_mov_b32 s33, s32
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[16:17], -1
; WAVE64-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1560,7 +1556,6 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
-; WAVE64-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[26:27], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
@@ -1580,14 +1575,14 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: s_add_i32 s32, s32, 0xffffdc00
-; WAVE64-O0-NEXT: s_mov_b32 s33, s28
+; WAVE64-O0-NEXT: s_mov_b32 s33, s19
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-WWM-PREALLOC-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-WWM-PREALLOC: ; %bb.0:
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s25, s33
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s24, s33
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s32
; WAVE32-WWM-PREALLOC-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-WWM-PREALLOC-NEXT: buffer_store_dword v33, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
@@ -1677,7 +1672,6 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18
; WAVE32-WWM-PREALLOC-NEXT: v_mov_b32_e32 v30, s18
-; WAVE32-WWM-PREALLOC-NEXT: ; implicit-def: $sgpr18_sgpr19
; WAVE32-WWM-PREALLOC-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s5, v32, 1
; WAVE32-WWM-PREALLOC-NEXT: v_readlane_b32 s4, v32, 0
@@ -1693,7 +1687,7 @@ define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-WWM-PREALLOC-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-WWM-PREALLOC-NEXT: s_add_i32 s32, s32, 0xffffee00
-; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s25
+; WAVE32-WWM-PREALLOC-NEXT: s_mov_b32 s33, s24
; WAVE32-WWM-PREALLOC-NEXT: s_waitcnt vmcnt(0)
; WAVE32-WWM-PREALLOC-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca [32 x i32], addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
index 8d5dc7943164..049db01badac 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
@@ -31,6 +31,6 @@ define amdgpu_kernel void @kernel1() #1 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
index 7a6f82d589e6..c9387f196dff 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
@@ -98,7 +98,7 @@ define amdgpu_kernel void @kernel2() #0 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
; CHECK: attributes #[[ATTR0]] = { "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR2]] = { "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
index c04154c7c23f..7183da2c5efc 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
@@ -41,6 +41,6 @@ define amdgpu_kernel void @kernel3() #2 {
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
index 2d5ff045d12e..6ed04cf63d20 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
@@ -41,7 +41,7 @@ define amdgpu_kernel void @kernel2() #2 {
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
index e8bf6fc8321b..d5ba2fd617c6 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-propagate-attribute.ll
@@ -52,8 +52,8 @@ attributes #0 = { nounwind }
attributes #1 = { "uniform-work-group-size"="false" }
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { nounwind "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR3]] = { "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
index 473eea4eedce..7f0dfeaf75c8 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
@@ -101,7 +101,7 @@ define amdgpu_kernel void @kernel(ptr addrspace(1) %m) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind memory(none) "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { nounwind memory(none) "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { nounwind memory(none) "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { nounwind memory(none) "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
index 221f1a11676f..8616c73ad51c 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
@@ -61,6 +61,6 @@ define amdgpu_kernel void @kernel3() #0 {
attributes #0 = { "uniform-work-group-size"="false" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
index 717d3d975aaf..040799435db4 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-agpr-limit-gfx90a.ll
@@ -540,6 +540,7 @@ define internal void @use512vgprs() {
}
define void @foo() #0 {
+ call void asm sideeffect "; use $0", "a"(i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
index d2364a61ed68..bfc249e9081d 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll
@@ -233,10 +233,10 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.1.Flow:
; SI-NEXT: successors: %bb.2(0x40000000), %bb.10(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %49:vgpr_32, %bb.0, %4, %bb.9
- ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %51:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %53:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %55:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %47:vgpr_32, %bb.0, %4, %bb.9
+ ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.0, undef %49:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %51:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %53:vgpr_32, %bb.9
; SI-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
@@ -249,8 +249,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.3:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %57:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
- ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %59:vgpr_32, %bb.4, [[PHI1]], %bb.2
+ ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %55:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+ ; SI-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI undef %57:vgpr_32, %bb.4, [[PHI1]], %bb.2
; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -286,8 +286,8 @@ define amdgpu_ps float @loop(i32 %z, float %v, i32 inreg %bound, ptr %extern_fun
; SI-NEXT: bb.7:
; SI-NEXT: successors: %bb.8(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %61:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
- ; SI-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %63:vgpr_32, %bb.8, [[COPY4]], %bb.6
+ ; SI-NEXT: [[PHI6:%[0-9]+]]:vreg_64 = PHI undef %59:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+ ; SI-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI undef %61:vgpr_32, %bb.8, [[COPY4]], %bb.6
; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI6]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
@@ -356,9 +356,9 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.1.Flow:
; SI-NEXT: successors: %bb.2(0x40000000), %bb.10(0x40000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %50:vgpr_32, %bb.0, %4, %bb.9
- ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %52:vgpr_32, %bb.9
- ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %54:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI undef %48:vgpr_32, %bb.0, %4, %bb.9
+ ; SI-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.0, undef %50:vgpr_32, %bb.9
+ ; SI-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI [[COPY2]], %bb.0, undef %52:vgpr_32, %bb.9
; SI-NEXT: [[SI_ELSE:%[0-9]+]]:sreg_32 = SI_ELSE killed [[SI_IF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
; SI-NEXT: S_BRANCH %bb.2
; SI-NEXT: {{ $}}
@@ -371,7 +371,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.3:
; SI-NEXT: successors: %bb.4(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
+ ; SI-NEXT: [[PHI3:%[0-9]+]]:vreg_64 = PHI undef %54:vreg_64, %bb.4, [[REG_SEQUENCE]], %bb.2
; SI-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI3]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_1]], %subreg.sub1
@@ -407,7 +407,7 @@ define amdgpu_ps float @loop_with_use(i32 %z, float %v, i32 inreg %bound, ptr %e
; SI-NEXT: bb.7:
; SI-NEXT: successors: %bb.8(0x80000000)
; SI-NEXT: {{ $}}
- ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %58:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
+ ; SI-NEXT: [[PHI4:%[0-9]+]]:vreg_64 = PHI undef %56:vreg_64, %bb.8, [[REG_SEQUENCE2]], %bb.6
; SI-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub0, implicit $exec
; SI-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[PHI4]].sub1, implicit $exec
; SI-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_64 = REG_SEQUENCE killed [[V_READFIRSTLANE_B32_2]], %subreg.sub0, killed [[V_READFIRSTLANE_B32_3]], %subreg.sub1
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
index 37f207fc7a54..4939d52651d9 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr_constant_to_sgpr.ll
@@ -47,7 +47,6 @@ define protected amdgpu_kernel void @kern(ptr %addr) !llvm.amdgcn.lds.kernel.id
; CHECK-NEXT: s_mov_b32 s15, 42
; CHECK-NEXT: s_mov_b64 s[0:1], s[20:21]
; CHECK-NEXT: s_mov_b64 s[2:3], s[22:23]
-; CHECK-NEXT: ; implicit-def: $sgpr18_sgpr19
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir b/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
index 3d9db687ffa1..6659e9532376 100644
--- a/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/virtregrewrite-undef-identity-copy.mir
@@ -20,6 +20,7 @@ name: undef_identity_copy
tracksRegLiveness: true
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
machineFunctionInfo:
isEntryFunction: true
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 82816b4564e8..901e88a4c6ac 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -2479,8 +2479,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1032-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1
; GFX1032-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX1032-NEXT: v_mul_lo_u32 v2, s1, v1
-; GFX1032-NEXT: s_ff1_i32_b32 s1, 0x80000000
-; GFX1032-NEXT: s_add_i32 s1, s1, 32
+; GFX1032-NEXT: s_brev_b32 s1, 1
; GFX1032-NEXT: v_mul_hi_u32 v2, v1, v2
; GFX1032-NEXT: v_add_nc_u32_e32 v1, v1, v2
; GFX1032-NEXT: v_mul_hi_u32 v1, v0, v1
@@ -2494,8 +2493,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1032-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_lshr_b32 s0, vcc_lo, 1
-; GFX1032-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1032-NEXT: s_min_u32 s0, s0, s1
+; GFX1032-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1032-NEXT: s_cmp_gt_u32 s0, 9
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
; GFX1032-NEXT: s_and_b32 s0, vcc_lo, s0
@@ -2529,10 +2527,7 @@ define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1064-NEXT: s_lshr_b64 s[0:1], vcc, 1
; GFX1064-NEXT: s_bitset1_b32 s1, 31
-; GFX1064-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1064-NEXT: s_ff1_i32_b32 s1, s1
-; GFX1064-NEXT: s_add_i32 s1, s1, 32
-; GFX1064-NEXT: s_min_u32 s0, s0, s1
+; GFX1064-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1064-NEXT: s_cmp_gt_u32 s0, 9
; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX1064-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
@@ -2576,9 +2571,8 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: v_div_scale_f32 v1, s1, s0, s0, v0
; GFX1032-NEXT: v_div_scale_f32 v4, vcc_lo, v0, s0, v0
-; GFX1032-NEXT: s_ff1_i32_b32 s1, 0x80000000
+; GFX1032-NEXT: s_brev_b32 s1, 1
; GFX1032-NEXT: v_rcp_f32_e32 v2, v1
-; GFX1032-NEXT: s_add_i32 s1, s1, 32
; GFX1032-NEXT: v_fma_f32 v3, -v1, v2, 1.0
; GFX1032-NEXT: v_fmac_f32_e32 v2, v3, v2
; GFX1032-NEXT: v_mul_f32_e32 v3, v4, v2
@@ -2592,8 +2586,7 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1032-NEXT: v_cmp_eq_f32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: s_lshr_b32 s0, vcc_lo, 1
; GFX1032-NEXT: v_cmp_nlg_f32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1032-NEXT: s_min_u32 s0, s0, s1
+; GFX1032-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1032-NEXT: s_cmp_gt_u32 s0, 9
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
; GFX1032-NEXT: s_and_b32 s0, vcc_lo, s0
@@ -2609,15 +2602,15 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
; GFX1064-NEXT: v_div_scale_f32 v1, s[0:1], s2, s2, v0
-; GFX1064-NEXT: v_div_scale_f32 v4, vcc, v0, s2, v0
; GFX1064-NEXT: v_rcp_f32_e32 v2, v1
; GFX1064-NEXT: v_fma_f32 v3, -v1, v2, 1.0
; GFX1064-NEXT: v_fmac_f32_e32 v2, v3, v2
-; GFX1064-NEXT: v_mul_f32_e32 v3, v4, v2
-; GFX1064-NEXT: v_fma_f32 v5, -v1, v3, v4
-; GFX1064-NEXT: v_fmac_f32_e32 v3, v5, v2
-; GFX1064-NEXT: v_fma_f32 v1, -v1, v3, v4
-; GFX1064-NEXT: v_div_fmas_f32 v1, v1, v2, v3
+; GFX1064-NEXT: v_div_scale_f32 v3, vcc, v0, s2, v0
+; GFX1064-NEXT: v_mul_f32_e32 v4, v3, v2
+; GFX1064-NEXT: v_fma_f32 v5, -v1, v4, v3
+; GFX1064-NEXT: v_fmac_f32_e32 v4, v5, v2
+; GFX1064-NEXT: v_fma_f32 v1, -v1, v4, v3
+; GFX1064-NEXT: v_div_fmas_f32 v1, v1, v2, v4
; GFX1064-NEXT: v_div_fixup_f32 v1, v1, s2, v0
; GFX1064-NEXT: v_trunc_f32_e32 v1, v1
; GFX1064-NEXT: v_fma_f32 v0, -v1, s2, v0
@@ -2625,10 +2618,7 @@ define amdgpu_kernel void @fcmp64(float %n, float %s) {
; GFX1064-NEXT: s_lshr_b64 s[0:1], vcc, 1
; GFX1064-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
; GFX1064-NEXT: s_bitset1_b32 s1, 31
-; GFX1064-NEXT: s_ff1_i32_b32 s0, s0
-; GFX1064-NEXT: s_ff1_i32_b32 s1, s1
-; GFX1064-NEXT: s_add_i32 s1, s1, 32
-; GFX1064-NEXT: s_min_u32 s0, s0, s1
+; GFX1064-NEXT: s_ff1_i32_b64 s0, s[0:1]
; GFX1064-NEXT: s_cmp_gt_u32 s0, 9
; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX1064-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
index 3a33194f17c8..7eabe982ff2b 100644
--- a/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll
@@ -101,7 +101,6 @@ define void @test() #0 {
; GCN-O0-NEXT: s_mov_b64 s[20:21], s[0:1]
; GCN-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; GCN-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
-; GCN-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GCN-O0-NEXT: s_waitcnt lgkmcnt(0)
; GCN-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GCN-O0-NEXT: s_or_saveexec_b64 s[28:29], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index 11f6a2960776..e79cb66dcd77 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -406,7 +406,6 @@ define amdgpu_gfx void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 inreg
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[44:45]
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[46:47]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
-; GFX9-O0-NEXT: ; implicit-def: $sgpr44_sgpr45
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[42:43]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
@@ -633,7 +632,6 @@ define amdgpu_gfx void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 i
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[38:39]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
-; GFX9-O0-NEXT: ; implicit-def: $sgpr36_sgpr37
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[34:35]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[46:47], -1
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
index e5cebc1c3183..def51f2b16d3 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved.ll
@@ -413,7 +413,6 @@ define amdgpu_kernel void @call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
@@ -657,7 +656,6 @@ define amdgpu_kernel void @call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %ar
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
@@ -1285,7 +1283,6 @@ define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 in
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
@@ -1529,7 +1526,6 @@ define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i6
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-O0-NEXT: ; implicit-def: $sgpr18_sgpr19
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
diff --git a/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll b/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 365727c9dd27..0795525fba1b 100644
--- a/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -8,10 +8,8 @@
%struct.Foo = type { ptr }
-; ARM-LABEL: foo:
-; THUMB-LABEL: foo:
-; T2-LABEL: foo:
define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
+; ARM-LABEL: foo:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: add r2, r0, #4
; ARM-NEXT: mov r12, #1
@@ -44,6 +42,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; ARM-NEXT: add r0, r0, r1, lsl #2
; ARM-NEXT: mov pc, lr
;
+; THUMB-LABEL: foo:
; THUMB: @ %bb.0: @ %entry
; THUMB-NEXT: .save {r4, r5, r7, lr}
; THUMB-NEXT: push {r4, r5, r7, lr}
@@ -91,6 +90,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; THUMB-NEXT: pop {r0}
; THUMB-NEXT: bx r0
;
+; T2-LABEL: foo:
; T2: @ %bb.0: @ %entry
; T2-NEXT: adds r2, r0, #4
; T2-NEXT: mov.w r12, #1
@@ -125,6 +125,7 @@ define ptr @foo(ptr %this, i32 %acc) nounwind readonly align 2 {
; T2-NEXT: add.w r0, r0, r1, lsl #2
; T2-NEXT: bx lr
;
+; V8-LABEL: foo:
; V8: @ %bb.0: @ %entry
; V8-NEXT: adds r2, r0, #4
; V8-NEXT: mov.w r12, #1
@@ -210,11 +211,8 @@ sw.epilog: ; preds = %tailrecurse.switch
%struct.S = type { ptr, [1 x i8] }
-; ARM-LABEL: bar:
-; THUMB-LABEL: bar:
-; T2-LABEL: bar:
-; V8-LABEL: bar:
define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
+; ARM-LABEL: bar:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r2, [r0, #4]
; ARM-NEXT: ands r2, r2, #112
@@ -230,6 +228,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; ARM-NEXT: mov r0, #1
; ARM-NEXT: mov pc, lr
;
+; THUMB-LABEL: bar:
; THUMB: @ %bb.0: @ %entry
; THUMB-NEXT: ldrb r2, [r0, #4]
; THUMB-NEXT: movs r3, #112
@@ -253,6 +252,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; THUMB-NEXT: ands r0, r1
; THUMB-NEXT: bx lr
;
+; T2-LABEL: bar:
; T2: @ %bb.0: @ %entry
; T2-NEXT: ldrb r2, [r0, #4]
; T2-NEXT: ands r2, r2, #112
@@ -270,6 +270,7 @@ define internal zeroext i8 @bar(ptr %x, ptr nocapture %y) nounwind readonly {
; T2-NEXT: movs r0, #1
; T2-NEXT: bx lr
;
+; V8-LABEL: bar:
; V8: @ %bb.0: @ %entry
; V8-NEXT: ldrb r2, [r0, #4]
; V8-NEXT: ands r2, r2, #112
diff --git a/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir b/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
index 5c59566247d8..b4bbb9be8ae4 100644
--- a/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
+++ b/llvm/test/CodeGen/ARM/no-register-coalescing-in-returnsTwice.mir
@@ -86,6 +86,8 @@
---
name: main
exposesReturnsTwice: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: P0, size: 80, alignment: 8, local-offset: -80 }
- { id: 1, name: jb1, size: 160, alignment: 8, local-offset: -240 }
diff --git a/llvm/test/CodeGen/ARM/select.ll b/llvm/test/CodeGen/ARM/select.ll
index 4bb79651f040..24ca9aeac7f2 100644
--- a/llvm/test/CodeGen/ARM/select.ll
+++ b/llvm/test/CodeGen/ARM/select.ll
@@ -1,14 +1,25 @@
-; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s
-
-; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - \
-; RUN: | FileCheck %s --check-prefix=CHECK-VFP
-
-; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - \
-; RUN: | FileCheck %s --check-prefix=CHECK-NEON
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=armv7-eabi -mattr=-fpregs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ARM
+; RUN: llc -mtriple=armv7-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-VFP
+; RUN: llc -mtriple=thumbv7-apple-darwin -mattr=+neon,+thumb2 %s -o - | FileCheck %s --check-prefix=CHECK-NEON
define i32 @f1(i32 %a.s) {
-;CHECK-LABEL: f1:
-;CHECK: moveq
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r1, #3
+; CHECK-NEXT: cmp r0, #4
+; CHECK-NEXT: movweq r1, #2
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f1:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r1, #3
+; CHECK-NEON-NEXT: cmp r0, #4
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: moveq r1, #2
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp eq i32 %a.s, 4
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -16,8 +27,22 @@ entry:
}
define i32 @f2(i32 %a.s) {
-;CHECK-LABEL: f2:
-;CHECK: movgt
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r1, #3
+; CHECK-NEXT: cmp r0, #4
+; CHECK-NEXT: movwgt r1, #2
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f2:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r1, #3
+; CHECK-NEON-NEXT: cmp r0, #4
+; CHECK-NEON-NEXT: it gt
+; CHECK-NEON-NEXT: movgt r1, #2
+; CHECK-NEON-NEXT: mov r0, r1
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp sgt i32 %a.s, 4
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -25,8 +50,22 @@ entry:
}
define i32 @f3(i32 %a.s, i32 %b.s) {
-;CHECK-LABEL: f3:
-;CHECK: movlt
+; CHECK-LABEL: f3:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwlt r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f3:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it lt
+; CHECK-NEON-NEXT: movlt r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp slt i32 %a.s, %b.s
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -34,8 +73,22 @@ entry:
}
define i32 @f4(i32 %a.s, i32 %b.s) {
-;CHECK-LABEL: f4:
-;CHECK: movle
+; CHECK-LABEL: f4:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwle r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f4:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it le
+; CHECK-NEON-NEXT: movle r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp sle i32 %a.s, %b.s
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -43,8 +96,22 @@ entry:
}
define i32 @f5(i32 %a.u, i32 %b.u) {
-;CHECK-LABEL: f5:
-;CHECK: movls
+; CHECK-LABEL: f5:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwls r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f5:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it ls
+; CHECK-NEON-NEXT: movls r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp ule i32 %a.u, %b.u
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -52,8 +119,22 @@ entry:
}
define i32 @f6(i32 %a.u, i32 %b.u) {
-;CHECK-LABEL: f6:
-;CHECK: movhi
+; CHECK-LABEL: f6:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, #3
+; CHECK-NEXT: cmp r0, r1
+; CHECK-NEXT: movwhi r2, #2
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: f6:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: movs r2, #3
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it hi
+; CHECK-NEON-NEXT: movhi r2, #2
+; CHECK-NEON-NEXT: mov r0, r2
+; CHECK-NEON-NEXT: bx lr
entry:
%tmp = icmp ugt i32 %a.u, %b.u
%tmp1.s = select i1 %tmp, i32 2, i32 3
@@ -61,11 +142,61 @@ entry:
}
define double @f7(double %a, double %b) {
-;CHECK-LABEL: f7:
-;CHECK: movmi
-;CHECK: movpl
-;CHECK-VFP-LABEL: f7:
-;CHECK-VFP: vmovmi
+; CHECK-ARM-LABEL: f7:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: .save {r4, r5, r11, lr}
+; CHECK-ARM-NEXT: push {r4, r5, r11, lr}
+; CHECK-ARM-NEXT: mov r4, r3
+; CHECK-ARM-NEXT: movw r3, #48758
+; CHECK-ARM-NEXT: mov r5, r2
+; CHECK-ARM-NEXT: movw r2, #14680
+; CHECK-ARM-NEXT: movt r2, #51380
+; CHECK-ARM-NEXT: movt r3, #16371
+; CHECK-ARM-NEXT: bl __aeabi_dcmplt
+; CHECK-ARM-NEXT: cmp r0, #0
+; CHECK-ARM-NEXT: movwne r4, #0
+; CHECK-ARM-NEXT: movwne r5, #0
+; CHECK-ARM-NEXT: movtne r4, #49136
+; CHECK-ARM-NEXT: mov r0, r5
+; CHECK-ARM-NEXT: mov r1, r4
+; CHECK-ARM-NEXT: pop {r4, r5, r11, pc}
+;
+; CHECK-VFP-LABEL: f7:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vldr d17, .LCPI6_0
+; CHECK-VFP-NEXT: vmov d19, r0, r1
+; CHECK-VFP-NEXT: vmov.f64 d16, #-1.000000e+00
+; CHECK-VFP-NEXT: vcmp.f64 d19, d17
+; CHECK-VFP-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-VFP-NEXT: vmov d18, r2, r3
+; CHECK-VFP-NEXT: vmovmi.f64 d18, d16
+; CHECK-VFP-NEXT: vmov r0, r1, d18
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 3
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI6_0:
+; CHECK-VFP-NEXT: .long 3367254360 @ double 1.234
+; CHECK-VFP-NEXT: .long 1072938614
+;
+; CHECK-NEON-LABEL: f7:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr d17, LCPI6_0
+; CHECK-NEON-NEXT: vmov d19, r0, r1
+; CHECK-NEON-NEXT: vmov d18, r2, r3
+; CHECK-NEON-NEXT: vcmp.f64 d19, d17
+; CHECK-NEON-NEXT: vmov.f64 d16, #-1.000000e+00
+; CHECK-NEON-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEON-NEXT: it mi
+; CHECK-NEON-NEXT: vmovmi.f64 d18, d16
+; CHECK-NEON-NEXT: vmov r0, r1, d18
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 3
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI6_0:
+; CHECK-NEON-NEXT: .long 3367254360 @ double 1.234
+; CHECK-NEON-NEXT: .long 1072938614
+; CHECK-NEON-NEXT: .end_data_region
%tmp = fcmp olt double %a, 1.234e+00
%tmp1 = select i1 %tmp, double -1.000e+00, double %b
ret double %tmp1
@@ -77,18 +208,49 @@ define double @f7(double %a, double %b) {
; a lack of a custom lowering routine for an ISD::SELECT. This would result in
; two "it" blocks in the code: one for the "icmp" and another to move the index
; into the constant pool based on the value of the "icmp". If we have one "it"
-; block generated, odds are good that we have close to the ideal code for this:
+; block generated, odds are good that we have close to the ideal code for this.
+define arm_apcscc float @f8(i32 %a) nounwind {
+; CHECK-ARM-LABEL: f8:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: movw r1, #29905
+; CHECK-ARM-NEXT: movw r2, #1123
+; CHECK-ARM-NEXT: movt r1, #16408
+; CHECK-ARM-NEXT: cmp r0, r2
+; CHECK-ARM-NEXT: movweq r1, #62390
+; CHECK-ARM-NEXT: movteq r1, #16285
+; CHECK-ARM-NEXT: mov r0, r1
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f8:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: movw r2, #1123
+; CHECK-VFP-NEXT: adr r1, .LCPI7_0
+; CHECK-VFP-NEXT: cmp r0, r2
+; CHECK-VFP-NEXT: addeq r1, r1, #4
+; CHECK-VFP-NEXT: ldr r0, [r1]
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI7_0:
+; CHECK-VFP-NEXT: .long 0x401874d1 @ float 2.38212991
+; CHECK-VFP-NEXT: .long 0x3f9df3b6 @ float 1.23399997
;
; CHECK-NEON-LABEL: f8:
-; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
-; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
-; CHECK-NEON-NEXT: cmp r0, [[R3]]
-; CHECK-NEON-NEXT: it eq
-; CHECK-NEON-NEXT: addeq{{.*}} [[R2]], #4
-; CHECK-NEON-NEXT: ldr
-; CHECK-NEON: bx
-
-define arm_apcscc float @f8(i32 %a) nounwind {
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: adr r1, LCPI7_0
+; CHECK-NEON-NEXT: movw r2, #1123
+; CHECK-NEON-NEXT: cmp r0, r2
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: addeq r1, #4
+; CHECK-NEON-NEXT: ldr r0, [r1]
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI7_0:
+; CHECK-NEON-NEXT: .long 0x401874d1 @ float 2.38212991
+; CHECK-NEON-NEXT: .long 0x3f9df3b6 @ float 1.23399997
+; CHECK-NEON-NEXT: .end_data_region
%tmp = icmp eq i32 %a, 1123
%tmp1 = select i1 %tmp, float 0x3FF3BE76C0000000, float 0x40030E9A20000000
ret float %tmp1
@@ -98,10 +260,40 @@ define arm_apcscc float @f8(i32 %a) nounwind {
; Glue values can only have a single use, but the following test exposed a
; case where a SELECT was lowered with 2 uses of a comparison, causing the
; scheduler to assert.
-; CHECK-VFP-LABEL: f9:
-
declare ptr @objc_msgSend(ptr, ptr, ...)
define void @f9() optsize {
+; CHECK-LABEL: f9:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, sp, #8
+; CHECK-NEXT: movw r2, #0
+; CHECK-NEXT: movw r3, #0
+; CHECK-NEXT: mov r1, #1065353216
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: movt r2, #16672
+; CHECK-NEXT: movt r3, #32704
+; CHECK-NEXT: strd r0, r1, [sp]
+; CHECK-NEXT: bl objc_msgSend
+; CHECK-NEXT: add sp, sp, #8
+; CHECK-NEXT: pop {r11, pc}
+;
+; CHECK-NEON-LABEL: f9:
+; CHECK-NEON: @ %bb.0: @ %entry
+; CHECK-NEON-NEXT: str lr, [sp, #-4]!
+; CHECK-NEON-NEXT: sub sp, #8
+; CHECK-NEON-NEXT: movs r2, #0
+; CHECK-NEON-NEXT: movs r3, #0
+; CHECK-NEON-NEXT: mov.w r0, #1065353216
+; CHECK-NEON-NEXT: movs r1, #0
+; CHECK-NEON-NEXT: movt r2, #16672
+; CHECK-NEON-NEXT: movt r3, #32704
+; CHECK-NEON-NEXT: strd r1, r0, [sp]
+; CHECK-NEON-NEXT: bl _objc_msgSend
+; CHECK-NEON-NEXT: add sp, #8
+; CHECK-NEON-NEXT: ldr lr, [sp], #4
+; CHECK-NEON-NEXT: bx lr
entry:
%cmp = icmp eq ptr undef, inttoptr (i32 4 to ptr)
%conv191 = select i1 %cmp, float -3.000000e+00, float 0.000000e+00
@@ -117,36 +309,151 @@ entry:
ret void
}
-; CHECK-LABEL: f10:
define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatsisf
+; CHECK-ARM-LABEL: f10:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: moveq r2, #1065353216
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f10:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI9_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI9_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f10:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI9_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI9_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = zext i1 %1 to i32
%3 = sitofp i32 %2 to float
ret float %3
}
-; CHECK-LABEL: f11:
define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatsisf
+; CHECK-ARM-LABEL: f11:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: movweq r2, #0
+; CHECK-ARM-NEXT: movteq r2, #49024
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f11:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #-1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI10_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI10_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f11:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI10_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #-1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI10_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = sitofp i1 %1 to float
ret float %2
}
-; CHECK-LABEL: f12:
define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
-; CHECK-NOT: floatunsisf
+; CHECK-ARM-LABEL: f12:
+; CHECK-ARM: @ %bb.0:
+; CHECK-ARM-NEXT: mov r2, #0
+; CHECK-ARM-NEXT: cmp r0, r1
+; CHECK-ARM-NEXT: moveq r2, #1065353216
+; CHECK-ARM-NEXT: mov r0, r2
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-VFP-LABEL: f12:
+; CHECK-VFP: @ %bb.0:
+; CHECK-VFP-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-VFP-NEXT: vldr s0, .LCPI11_0
+; CHECK-VFP-NEXT: cmp r0, r1
+; CHECK-VFP-NEXT: vmoveq.f32 s0, s2
+; CHECK-VFP-NEXT: vmov r0, s0
+; CHECK-VFP-NEXT: bx lr
+; CHECK-VFP-NEXT: .p2align 2
+; CHECK-VFP-NEXT: @ %bb.1:
+; CHECK-VFP-NEXT: .LCPI11_0:
+; CHECK-VFP-NEXT: .long 0x00000000 @ float 0
+;
+; CHECK-NEON-LABEL: f12:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: vldr s0, LCPI11_0
+; CHECK-NEON-NEXT: vmov.f32 s2, #1.000000e+00
+; CHECK-NEON-NEXT: cmp r0, r1
+; CHECK-NEON-NEXT: it eq
+; CHECK-NEON-NEXT: vmoveq.f32 s0, s2
+; CHECK-NEON-NEXT: vmov r0, s0
+; CHECK-NEON-NEXT: bx lr
+; CHECK-NEON-NEXT: .p2align 2
+; CHECK-NEON-NEXT: @ %bb.1:
+; CHECK-NEON-NEXT: .data_region
+; CHECK-NEON-NEXT: LCPI11_0:
+; CHECK-NEON-NEXT: .long 0x00000000 @ float 0
+; CHECK-NEON-NEXT: .end_data_region
%1 = icmp eq i32 %a, %b
%2 = uitofp i1 %1 to float
ret float %2
}
-; CHECK-LABEL: test_overflow_recombine:
define i1 @test_overflow_recombine(i32 %in1, i32 %in2) {
-; CHECK: smull [[LO:r[0-9]+]], [[HI:r[0-9]+]]
-; CHECK: subs [[ZERO:r[0-9]+]], [[HI]], [[LO]], asr #31
-; CHECK: movne [[ZERO]], #1
+; CHECK-LABEL: test_overflow_recombine:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mul r2, r0, r1
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: subs r0, r0, r2, asr #31
+; CHECK-NEXT: movwne r0, #1
+; CHECK-NEXT: bx lr
+;
+; CHECK-NEON-LABEL: test_overflow_recombine:
+; CHECK-NEON: @ %bb.0:
+; CHECK-NEON-NEXT: mul r2, r0, r1
+; CHECK-NEON-NEXT: smmul r0, r0, r1
+; CHECK-NEON-NEXT: subs.w r0, r0, r2, asr #31
+; CHECK-NEON-NEXT: it ne
+; CHECK-NEON-NEXT: movne r0, #1
+; CHECK-NEON-NEXT: bx lr
%prod = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %in1, i32 %in2)
%overflow = extractvalue { i32, i1 } %prod, 1
ret i1 %overflow
diff --git a/llvm/test/CodeGen/BPF/cttz-ctlz.ll b/llvm/test/CodeGen/BPF/cttz-ctlz.ll
new file mode 100644
index 000000000000..f42b2e2d1087
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/cttz-ctlz.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -march=bpf | FileCheck %s
+
+; test that we can expand CTTZ & CTLZ
+
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i32 @cttz_i32_zdef(i32 %a) {
+; CHECK-LABEL: cttz_i32_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 *= 125613361
+; CHECK-NEXT: r2 = 4160749568 ll
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 >>= 27
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.cttz.i32(i32 %a, i1 1)
+ ret i32 %ret
+}
+
+
+define i32 @cttz_i32(i32 %a) {
+; CHECK-LABEL: cttz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 32
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 <<= 32
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: if r2 == 0 goto LBB1_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 *= 125613361
+; CHECK-NEXT: r2 = 4160749568 ll
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r1 >>= 27
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: LBB1_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.cttz.i32(i32 %a, i1 0)
+ ret i32 %ret
+}
+
+declare i64 @llvm.cttz.i64(i64, i1)
+
+define i64 @cttz_i64_zdef(i64 %a) {
+; CHECK-LABEL: cttz_i64_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r2 = 151050438420815295 ll
+; CHECK-NEXT: r1 *= r2
+; CHECK-NEXT: r1 >>= 58
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.cttz.i64(i64 %a, i1 1)
+ ret i64 %ret
+}
+
+
+define i64 @cttz_i64(i64 %a) {
+; CHECK-LABEL: cttz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 64
+; CHECK-NEXT: if r1 == 0 goto LBB3_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 = -r2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r2 = 151050438420815295 ll
+; CHECK-NEXT: r1 *= r2
+; CHECK-NEXT: r1 >>= 58
+; CHECK-NEXT: r2 = {{\.?LCPI[0-9]+_[0-9]+}} ll
+; CHECK-NEXT: r2 += r1
+; CHECK-NEXT: r0 = *(u8 *)(r2 + 0)
+; CHECK-NEXT: LBB3_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.cttz.i64(i64 %a, i1 0)
+ ret i64 %ret
+}
+
+
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i32 @ctlz_i32_zdef(i32 %a) {
+; CHECK-LABEL: ctlz_i32_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = 4294967294 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967292 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 2
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967280 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 4
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967040 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 8
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294901760 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 16
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r2 &= 1431655765
+; CHECK-NEXT: r1 -= r2
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= 858993459
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= 858993459
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r0 &= 252645135
+; CHECK-NEXT: r0 *= 16843009
+; CHECK-NEXT: r1 = 4278190080 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r0 >>= 24
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.ctlz.i32(i32 %a, i1 1)
+ ret i32 %ret
+}
+
+
+define i32 @ctlz_i32(i32 %a) {
+; CHECK-LABEL: ctlz_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 32
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 <<= 32
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: if r2 == 0 goto LBB5_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = 4294967294 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967292 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 2
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967280 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 4
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294967040 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 8
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r2 = 4294901760 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r3 >>= 16
+; CHECK-NEXT: r1 |= r3
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r2 &= 1431655765
+; CHECK-NEXT: r1 -= r2
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= 858993459
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= 858993459
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r0 &= 252645135
+; CHECK-NEXT: r0 *= 16843009
+; CHECK-NEXT: r1 = 4278190080 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r0 >>= 24
+; CHECK-NEXT: LBB5_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i32 @llvm.ctlz.i32(i32 %a, i1 0)
+ ret i32 %ret
+}
+
+declare i64 @llvm.ctlz.i64(i64, i1)
+
+define i64 @ctlz_i64_zdef(i64 %a) {
+; CHECK-LABEL: ctlz_i64_zdef:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 2
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 4
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 8
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 16
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = 6148914691236517205 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r1 -= r3
+; CHECK-NEXT: r2 = 3689348814741910323 ll
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= r2
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = 1085102592571150095 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r1 = 72340172838076673 ll
+; CHECK-NEXT: r0 *= r1
+; CHECK-NEXT: r0 >>= 56
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.ctlz.i64(i64 %a, i1 1)
+ ret i64 %ret
+}
+
+
+define i64 @ctlz_i64(i64 %a) {
+; CHECK-LABEL: ctlz_i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: r0 = 64
+; CHECK-NEXT: if r1 == 0 goto LBB7_2
+; CHECK-NEXT: # %bb.1: # %cond.false
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 1
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 2
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 4
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 8
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 16
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r2 = r1
+; CHECK-NEXT: r2 >>= 32
+; CHECK-NEXT: r1 |= r2
+; CHECK-NEXT: r1 ^= -1
+; CHECK-NEXT: r2 = 6148914691236517205 ll
+; CHECK-NEXT: r3 = r1
+; CHECK-NEXT: r3 >>= 1
+; CHECK-NEXT: r3 &= r2
+; CHECK-NEXT: r1 -= r3
+; CHECK-NEXT: r2 = 3689348814741910323 ll
+; CHECK-NEXT: r0 = r1
+; CHECK-NEXT: r0 &= r2
+; CHECK-NEXT: r1 >>= 2
+; CHECK-NEXT: r1 &= r2
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = r0
+; CHECK-NEXT: r1 >>= 4
+; CHECK-NEXT: r0 += r1
+; CHECK-NEXT: r1 = 1085102592571150095 ll
+; CHECK-NEXT: r0 &= r1
+; CHECK-NEXT: r1 = 72340172838076673 ll
+; CHECK-NEXT: r0 *= r1
+; CHECK-NEXT: r0 >>= 56
+; CHECK-NEXT: LBB7_2: # %cond.end
+; CHECK-NEXT: exit
+ %ret = call i64 @llvm.ctlz.i64(i64 %a, i1 0)
+ ret i64 %ret
+}
+
diff --git a/llvm/test/CodeGen/DirectX/abs-vec.ll b/llvm/test/CodeGen/DirectX/abs-vec.ll
new file mode 100644
index 000000000000..1c40555eb390
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/abs-vec.ll
@@ -0,0 +1,34 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure dxil operation function calls for abs are generated for int vectors.
+
+; CHECK-LABEL: abs_i16Vec2
+define noundef <2 x i16> @abs_i16Vec2(<2 x i16> noundef %a) #0 {
+entry:
+; CHECK: sub <2 x i16> zeroinitializer, %a
+; CHECK: call <2 x i16> @llvm.smax.v2i16(<2 x i16> %a, <2 x i16> %{{.*}})
+ %elt.abs = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %a, i1 false)
+ ret <2 x i16> %elt.abs
+}
+
+; CHECK-LABEL: abs_i32Vec3
+define noundef <3 x i32> @abs_i32Vec3(<3 x i32> noundef %a) #0 {
+entry:
+; CHECK: sub <3 x i32> zeroinitializer, %a
+; CHECK: call <3 x i32> @llvm.smax.v3i32(<3 x i32> %a, <3 x i32> %{{.*}})
+ %elt.abs = call <3 x i32> @llvm.abs.v3i32(<3 x i32> %a, i1 false)
+ ret <3 x i32> %elt.abs
+}
+
+; CHECK-LABEL: abs_i64Vec4
+define noundef <4 x i64> @abs_i64Vec4(<4 x i64> noundef %a) #0 {
+entry:
+; CHECK: sub <4 x i64> zeroinitializer, %a
+; CHECK: call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %{{.*}})
+ %elt.abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
+ ret <4 x i64> %elt.abs
+}
+
+declare <2 x i16> @llvm.abs.v2i16(<2 x i16>, i1 immarg)
+declare <3 x i32> @llvm.abs.v3i32(<3 x i32>, i1 immarg)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/abs.ll b/llvm/test/CodeGen/DirectX/abs.ll
new file mode 100644
index 000000000000..822580e8c089
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/abs.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for abs are generated for int16_t/int/int64_t.
+
+; CHECK-LABEL: abs_i16
+define noundef i16 @abs_i16(i16 noundef %a) {
+entry:
+; CHECK: sub i16 0, %a
+; EXPCHECK: call i16 @llvm.smax.i16(i16 %a, i16 %{{.*}})
+; DOPCHECK: call i16 @dx.op.binary.i16(i32 37, i16 %a, i16 %{{.*}})
+ %elt.abs = call i16 @llvm.abs.i16(i16 %a, i1 false)
+ ret i16 %elt.abs
+}
+
+; CHECK-LABEL: abs_i32
+define noundef i32 @abs_i32(i32 noundef %a) {
+entry:
+; CHECK: sub i32 0, %a
+; EXPCHECK: call i32 @llvm.smax.i32(i32 %a, i32 %{{.*}})
+; DOPCHECK: call i32 @dx.op.binary.i32(i32 37, i32 %a, i32 %{{.*}})
+ %elt.abs = call i32 @llvm.abs.i32(i32 %a, i1 false)
+ ret i32 %elt.abs
+}
+
+; CHECK-LABEL: abs_i64
+define noundef i64 @abs_i64(i64 noundef %a) {
+entry:
+; CHECK: sub i64 0, %a
+; EXPCHECK: call i64 @llvm.smax.i64(i64 %a, i64 %{{.*}})
+; DOPCHECK: call i64 @dx.op.binary.i64(i32 37, i64 %a, i64 %{{.*}})
+ %elt.abs = call i64 @llvm.abs.i64(i64 %a, i1 false)
+ ret i64 %elt.abs
+}
+
+declare i16 @llvm.abs.i16(i16, i1 immarg)
+declare i32 @llvm.abs.i32(i32, i1 immarg)
+declare i64 @llvm.abs.i64(i64, i1 immarg)
diff --git a/llvm/test/CodeGen/DirectX/ceil.ll b/llvm/test/CodeGen/DirectX/ceil.ll
new file mode 100644
index 000000000000..158547146780
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ceil.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for ceil are generated for float and half.
+
+define noundef float @ceil_float(float noundef %a) {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 28, float %{{.*}})
+ %elt.ceil = call float @llvm.ceil.f32(float %a)
+ ret float %elt.ceil
+}
+
+define noundef half @ceil_half(half noundef %a) {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 28, half %{{.*}})
+ %elt.ceil = call half @llvm.ceil.f16(half %a)
+ ret half %elt.ceil
+}
+
+declare half @llvm.ceil.f16(half)
+declare float @llvm.ceil.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/ceil_error.ll b/llvm/test/CodeGen/DirectX/ceil_error.ll
new file mode 100644
index 000000000000..1b554d871556
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ceil_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation ceil does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @ceil_double(double noundef %a) {
+entry:
+ %elt.ceil = call double @llvm.ceil.f64(double %a)
+ ret double %elt.ceil
+}
diff --git a/llvm/test/CodeGen/DirectX/cos.ll b/llvm/test/CodeGen/DirectX/cos.ll
new file mode 100644
index 000000000000..00f2e2c3f6e5
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/cos.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for cos are generated for float and half.
+
+define noundef float @cos_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 12, float %{{.*}})
+ %elt.cos = call float @llvm.cos.f32(float %a)
+ ret float %elt.cos
+}
+
+define noundef half @cos_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 12, half %{{.*}})
+ %elt.cos = call half @llvm.cos.f16(half %a)
+ ret half %elt.cos
+}
+
+declare half @llvm.cos.f16(half)
+declare float @llvm.cos.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/cos_error.ll b/llvm/test/CodeGen/DirectX/cos_error.ll
new file mode 100644
index 000000000000..a074f5b493df
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/cos_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation cos does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @cos_double(double noundef %a) {
+entry:
+ %elt.cos = call double @llvm.cos.f64(double %a)
+ ret double %elt.cos
+}
diff --git a/llvm/test/CodeGen/DirectX/dot2_error.ll b/llvm/test/CodeGen/DirectX/dot2_error.ll
new file mode 100644
index 000000000000..a27bfaedacd5
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot2_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot2 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double2(<2 x double> noundef %a, <2 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot2.v2f64(<2 x double> %a, <2 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/dot3_error.ll b/llvm/test/CodeGen/DirectX/dot3_error.ll
new file mode 100644
index 000000000000..eb69fb145038
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot3_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot3 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double3(<3 x double> noundef %a, <3 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot3.v3f64(<3 x double> %a, <3 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/dot4_error.ll b/llvm/test/CodeGen/DirectX/dot4_error.ll
new file mode 100644
index 000000000000..5cd632684c0c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/dot4_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation dot4 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload
+
+define noundef double @dot_double4(<4 x double> noundef %a, <4 x double> noundef %b) {
+entry:
+ %dx.dot = call double @llvm.dx.dot4.v4f64(<4 x double> %a, <4 x double> %b)
+ ret double %dx.dot
+}
diff --git a/llvm/test/CodeGen/DirectX/fabs.ll b/llvm/test/CodeGen/DirectX/fabs.ll
new file mode 100644
index 000000000000..3b3f8aa9a4a9
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fabs.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for abs are generated for float, half, and double.
+
+
+; CHECK-LABEL: fabs_half
+define noundef half @fabs_half(half noundef %a) {
+entry:
+ ; CHECK: call half @dx.op.unary.f16(i32 6, half %{{.*}})
+ %elt.abs = call half @llvm.fabs.f16(half %a)
+ ret half %elt.abs
+}
+
+; CHECK-LABEL: fabs_float
+define noundef float @fabs_float(float noundef %a) {
+entry:
+; CHECK: call float @dx.op.unary.f32(i32 6, float %{{.*}})
+ %elt.abs = call float @llvm.fabs.f32(float %a)
+ ret float %elt.abs
+}
+
+; CHECK-LABEL: fabs_double
+define noundef double @fabs_double(double noundef %a) {
+entry:
+; CHECK: call double @dx.op.unary.f64(i32 6, double %{{.*}})
+ %elt.abs = call double @llvm.fabs.f64(double %a)
+ ret double %elt.abs
+}
+
+declare half @llvm.fabs.f16(half)
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
diff --git a/llvm/test/CodeGen/DirectX/fdot.ll b/llvm/test/CodeGen/DirectX/fdot.ll
new file mode 100644
index 000000000000..3e13b2ad2650
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fdot.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for dot are generated for float and half vectors.
+
+; CHECK-LABEL: dot_half2
+define noundef half @dot_half2(<2 x half> noundef %a, <2 x half> noundef %b) {
+entry:
+; CHECK: extractelement <2 x half> %a, i32 0
+; CHECK: extractelement <2 x half> %a, i32 1
+; CHECK: extractelement <2 x half> %b, i32 0
+; CHECK: extractelement <2 x half> %b, i32 1
+; CHECK: call half @dx.op.dot2.f16(i32 54, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot2.v2f16(<2 x half> %a, <2 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_half3
+define noundef half @dot_half3(<3 x half> noundef %a, <3 x half> noundef %b) {
+entry:
+; CHECK: extractelement <3 x half> %a, i32 0
+; CHECK: extractelement <3 x half> %a, i32 1
+; CHECK: extractelement <3 x half> %a, i32 2
+; CHECK: extractelement <3 x half> %b, i32 0
+; CHECK: extractelement <3 x half> %b, i32 1
+; CHECK: extractelement <3 x half> %b, i32 2
+; CHECK: call half @dx.op.dot3.f16(i32 55, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot3.v3f16(<3 x half> %a, <3 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_half4
+define noundef half @dot_half4(<4 x half> noundef %a, <4 x half> noundef %b) {
+entry:
+; CHECK: extractelement <4 x half> %a, i32 0
+; CHECK: extractelement <4 x half> %a, i32 1
+; CHECK: extractelement <4 x half> %a, i32 2
+; CHECK: extractelement <4 x half> %a, i32 3
+; CHECK: extractelement <4 x half> %b, i32 0
+; CHECK: extractelement <4 x half> %b, i32 1
+; CHECK: extractelement <4 x half> %b, i32 2
+; CHECK: extractelement <4 x half> %b, i32 3
+; CHECK: call half @dx.op.dot4.f16(i32 56, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}}, half %{{.*}})
+ %dx.dot = call half @llvm.dx.dot4.v4f16(<4 x half> %a, <4 x half> %b)
+ ret half %dx.dot
+}
+
+; CHECK-LABEL: dot_float2
+define noundef float @dot_float2(<2 x float> noundef %a, <2 x float> noundef %b) {
+entry:
+; CHECK: extractelement <2 x float> %a, i32 0
+; CHECK: extractelement <2 x float> %a, i32 1
+; CHECK: extractelement <2 x float> %b, i32 0
+; CHECK: extractelement <2 x float> %b, i32 1
+; CHECK: call float @dx.op.dot2.f32(i32 54, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %a, <2 x float> %b)
+ ret float %dx.dot
+}
+
+; CHECK-LABEL: dot_float3
+define noundef float @dot_float3(<3 x float> noundef %a, <3 x float> noundef %b) {
+entry:
+; CHECK: extractelement <3 x float> %a, i32 0
+; CHECK: extractelement <3 x float> %a, i32 1
+; CHECK: extractelement <3 x float> %a, i32 2
+; CHECK: extractelement <3 x float> %b, i32 0
+; CHECK: extractelement <3 x float> %b, i32 1
+; CHECK: extractelement <3 x float> %b, i32 2
+; CHECK: call float @dx.op.dot3.f32(i32 55, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot3.v3f32(<3 x float> %a, <3 x float> %b)
+ ret float %dx.dot
+}
+
+; CHECK-LABEL: dot_float4
+define noundef float @dot_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+; CHECK: extractelement <4 x float> %a, i32 0
+; CHECK: extractelement <4 x float> %a, i32 1
+; CHECK: extractelement <4 x float> %a, i32 2
+; CHECK: extractelement <4 x float> %a, i32 3
+; CHECK: extractelement <4 x float> %b, i32 0
+; CHECK: extractelement <4 x float> %b, i32 1
+; CHECK: extractelement <4 x float> %b, i32 2
+; CHECK: extractelement <4 x float> %b, i32 3
+; CHECK: call float @dx.op.dot4.f32(i32 56, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}}, float %{{.*}})
+ %dx.dot = call float @llvm.dx.dot4.v4f32(<4 x float> %a, <4 x float> %b)
+ ret float %dx.dot
+}
+
+declare half @llvm.dx.dot2.v2f16(<2 x half> , <2 x half> )
+declare half @llvm.dx.dot3.v3f16(<3 x half> , <3 x half> )
+declare half @llvm.dx.dot4.v4f16(<4 x half> , <4 x half> )
+declare float @llvm.dx.dot2.v2f32(<2 x float>, <2 x float>)
+declare float @llvm.dx.dot3.v3f32(<3 x float>, <3 x float>)
+declare float @llvm.dx.dot4.v4f32(<4 x float>, <4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/floor.ll b/llvm/test/CodeGen/DirectX/floor.ll
new file mode 100644
index 000000000000..b033e2eaa491
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/floor.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for floor are generated for float and half.
+
+define noundef float @floor_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 27, float %{{.*}})
+ %elt.floor = call float @llvm.floor.f32(float %a)
+ ret float %elt.floor
+}
+
+define noundef half @floor_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 27, half %{{.*}})
+ %elt.floor = call half @llvm.floor.f16(half %a)
+ ret half %elt.floor
+}
+
+declare half @llvm.floor.f16(half)
+declare float @llvm.floor.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/floor_error.ll b/llvm/test/CodeGen/DirectX/floor_error.ll
new file mode 100644
index 000000000000..3b51a4b543b7
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/floor_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation floor does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @floor_double(double noundef %a) {
+entry:
+ %elt.floor = call double @llvm.floor.f64(double %a)
+ ret double %elt.floor
+}
diff --git a/llvm/test/CodeGen/DirectX/log-vec.ll b/llvm/test/CodeGen/DirectX/log-vec.ll
new file mode 100644
index 000000000000..4768fdd94b02
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log-vec.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure dxil operation function calls for log are generated for float and half.
+
+; CHECK-LABEL: log_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %{{.*}})
+; CHECK: fmul <4 x float> <float 0x3FE62E4300000000, float 0x3FE62E4300000000, float 0x3FE62E4300000000, float 0x3FE62E4300000000>, %{{.*}}
+define noundef <4 x float> @log_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.log = call <4 x float> @llvm.log.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.log
+}
+
+; CHECK-LABEL: log10_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %{{.*}})
+; CHECK: fmul <4 x float> <float 0x3FD3441340000000, float 0x3FD3441340000000, float 0x3FD3441340000000, float 0x3FD3441340000000>, %{{.*}}
+define noundef <4 x float> @log10_float4(<4 x float> noundef %p0) {
+entry:
+ %p0.addr = alloca <4 x float>, align 16
+ store <4 x float> %p0, ptr %p0.addr, align 16
+ %0 = load <4 x float>, ptr %p0.addr, align 16
+ %elt.log10 = call <4 x float> @llvm.log10.v4f32(<4 x float> %0)
+ ret <4 x float> %elt.log10
+}
+
+declare <4 x float> @llvm.log.v4f32(<4 x float>)
+declare <4 x float> @llvm.log10.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/log.ll b/llvm/test/CodeGen/DirectX/log.ll
new file mode 100644
index 000000000000..172c3bfed3b7
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for log are generated.
+
+define noundef float @log_float(float noundef %a) #0 {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %{{.*}})
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float 0x3FE62E4300000000, %{{.*}}
+ %elt.log = call float @llvm.log.f32(float %a)
+ ret float %elt.log
+}
+
+define noundef half @log_half(half noundef %a) #0 {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %{{.*}})
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half 0xH398C, %{{.*}}
+ %elt.log = call half @llvm.log.f16(half %a)
+ ret half %elt.log
+}
+
+declare half @llvm.log.f16(half)
+declare float @llvm.log.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log10.ll b/llvm/test/CodeGen/DirectX/log10.ll
new file mode 100644
index 000000000000..d4f827a0d1af
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log10.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for log10 are generated.
+
+define noundef float @log10_float(float noundef %a) #0 {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %{{.*}})
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float 0x3FD3441340000000, %{{.*}}
+ %elt.log10 = call float @llvm.log10.f32(float %a)
+ ret float %elt.log10
+}
+
+define noundef half @log10_half(half noundef %a) #0 {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %{{.*}})
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half 0xH34D1, %{{.*}}
+ %elt.log10 = call half @llvm.log10.f16(half %a)
+ ret half %elt.log10
+}
+
+declare half @llvm.log10.f16(half)
+declare float @llvm.log10.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log2.ll b/llvm/test/CodeGen/DirectX/log2.ll
new file mode 100644
index 000000000000..2164d4db9396
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log2.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for log2 are generated for float and half.
+
+define noundef float @log2_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 23, float %{{.*}})
+ %elt.log2 = call float @llvm.log2.f32(float %a)
+ ret float %elt.log2
+}
+
+define noundef half @log2_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 23, half %{{.*}})
+ %elt.log2 = call half @llvm.log2.f16(half %a)
+ ret half %elt.log2
+}
+
+declare half @llvm.log2.f16(half)
+declare float @llvm.log2.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/log2_error.ll b/llvm/test/CodeGen/DirectX/log2_error.ll
new file mode 100644
index 000000000000..a26f6e8c3117
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/log2_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation log2 does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @log2_double(double noundef %a) {
+entry:
+ %elt.log2 = call double @llvm.log2.f64(double %a)
+ ret double %elt.log2
+}
diff --git a/llvm/test/CodeGen/DirectX/pow-vec.ll b/llvm/test/CodeGen/DirectX/pow-vec.ll
new file mode 100644
index 000000000000..781fa5b8cb24
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/pow-vec.ll
@@ -0,0 +1,15 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s
+
+; Make sure dxil operation function calls for pow are generated for float and half.
+
+; CHECK-LABEL: pow_float4
+; CHECK: call <4 x float> @llvm.log2.v4f32(<4 x float> %a)
+; CHECK: fmul <4 x float> %{{.*}}, %b
+; CHECK: call <4 x float> @llvm.exp2.v4f32(<4 x float> %{{.*}})
+define noundef <4 x float> @pow_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+ %elt.pow = call <4 x float> @llvm.pow.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %elt.pow
+}
+
+declare <4 x float> @llvm.pow.v4f32(<4 x float>,<4 x float>)
diff --git a/llvm/test/CodeGen/DirectX/pow.ll b/llvm/test/CodeGen/DirectX/pow.ll
new file mode 100644
index 000000000000..25ce0fe731d0
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/pow.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefixes=CHECK,EXPCHECK
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s --check-prefixes=CHECK,DOPCHECK
+
+; Make sure dxil operation function calls for pow are generated.
+
+define noundef float @pow_float(float noundef %a, float noundef %b) {
+entry:
+; DOPCHECK: call float @dx.op.unary.f32(i32 23, float %a)
+; EXPCHECK: call float @llvm.log2.f32(float %a)
+; CHECK: fmul float %{{.*}}, %b
+; DOPCHECK: call float @dx.op.unary.f32(i32 21, float %{{.*}})
+; EXPCHECK: call float @llvm.exp2.f32(float %{{.*}})
+ %elt.pow = call float @llvm.pow.f32(float %a, float %b)
+ ret float %elt.pow
+}
+
+define noundef half @pow_half(half noundef %a, half noundef %b) {
+entry:
+; DOPCHECK: call half @dx.op.unary.f16(i32 23, half %a)
+; EXPCHECK: call half @llvm.log2.f16(half %a)
+; CHECK: fmul half %{{.*}}, %b
+; DOPCHECK: call half @dx.op.unary.f16(i32 21, half %{{.*}})
+; EXPCHECK: call half @llvm.exp2.f16(half %{{.*}})
+ %elt.pow = call half @llvm.pow.f16(half %a, half %b)
+ ret half %elt.pow
+}
+
+declare half @llvm.pow.f16(half,half)
+declare float @llvm.pow.f32(float,float)
diff --git a/llvm/test/CodeGen/DirectX/reversebits.ll b/llvm/test/CodeGen/DirectX/reversebits.ll
new file mode 100644
index 000000000000..b6a7a1bc6152
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/reversebits.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for reversebits are generated for all integer types.
+
+; Function Attrs: nounwind
+define noundef i16 @test_bitreverse_short(i16 noundef %a) {
+entry:
+; CHECK:call i16 @dx.op.unary.i16(i32 30, i16 %{{.*}})
+ %elt.bitreverse = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %elt.bitreverse
+}
+
+; Function Attrs: nounwind
+define noundef i32 @test_bitreverse_int(i32 noundef %a) {
+entry:
+; CHECK:call i32 @dx.op.unary.i32(i32 30, i32 %{{.*}})
+ %elt.bitreverse = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %elt.bitreverse
+}
+
+; Function Attrs: nounwind
+define noundef i64 @test_bitreverse_long(i64 noundef %a) {
+entry:
+; CHECK:call i64 @dx.op.unary.i64(i32 30, i64 %{{.*}})
+ %elt.bitreverse = call i64 @llvm.bitreverse.i64(i64 %a)
+ ret i64 %elt.bitreverse
+}
+
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
diff --git a/llvm/test/CodeGen/DirectX/round.ll b/llvm/test/CodeGen/DirectX/round.ll
index 5d53a794b763..e0a3772ebca8 100644
--- a/llvm/test/CodeGen/DirectX/round.ll
+++ b/llvm/test/CodeGen/DirectX/round.ll
@@ -1,31 +1,22 @@
; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for round are generated for float and half.
-; CHECK:call float @dx.op.unary.f32(i32 26, float %{{.*}})
-; CHECK:call half @dx.op.unary.f16(i32 26, half %{{.*}})
-target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
-target triple = "dxil-pc-shadermodel6.7-library"
-
-; Function Attrs: noinline nounwind optnone
-define noundef float @round_float(float noundef %a) #0 {
+; CHECK-LABEL: round_half
+define noundef half @round_half(half noundef %a) {
entry:
- %a.addr = alloca float, align 4
- store float %a, ptr %a.addr, align 4
- %0 = load float, ptr %a.addr, align 4
- %elt.round = call float @llvm.round.f32(float %0)
- ret float %elt.round
+; CHECK: call half @dx.op.unary.f16(i32 26, half %{{.*}})
+ %elt.roundeven = call half @llvm.roundeven.f16(half %a)
+ ret half %elt.roundeven
}
-; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
-declare float @llvm.round.f32(float) #1
-
-; Function Attrs: noinline nounwind optnone
-define noundef half @round_half(half noundef %a) #0 {
+; CHECK-LABEL: round_float
+define noundef float @round_float(float noundef %a) {
entry:
- %a.addr = alloca half, align 2
- store half %a, ptr %a.addr, align 2
- %0 = load half, ptr %a.addr, align 2
- %elt.round = call half @llvm.round.f16(half %0)
- ret half %elt.round
+; CHECK: call float @dx.op.unary.f32(i32 26, float %{{.*}})
+ %elt.roundeven = call float @llvm.roundeven.f32(float %a)
+ ret float %elt.roundeven
}
+
+declare half @llvm.roundeven.f16(half)
+declare float @llvm.roundeven.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/round_error.ll b/llvm/test/CodeGen/DirectX/round_error.ll
index 3bd87b2bbf02..2d27fbb5ee20 100644
--- a/llvm/test/CodeGen/DirectX/round_error.ll
+++ b/llvm/test/CodeGen/DirectX/round_error.ll
@@ -8,6 +8,6 @@ entry:
%a.addr = alloca double, align 8
store double %a, ptr %a.addr, align 8
%0 = load double, ptr %a.addr, align 8
- %elt.round = call double @llvm.round.f64(double %0)
- ret double %elt.round
+ %elt.roundeven = call double @llvm.roundeven.f64(double %0)
+ ret double %elt.roundeven
}
diff --git a/llvm/test/CodeGen/DirectX/sqrt.ll b/llvm/test/CodeGen/DirectX/sqrt.ll
new file mode 100644
index 000000000000..76a572efd205
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/sqrt.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for sqrt are generated for float and half.
+
+define noundef float @sqrt_float(float noundef %a) #0 {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 24, float %{{.*}})
+ %elt.sqrt = call float @llvm.sqrt.f32(float %a)
+ ret float %elt.sqrt
+}
+
+define noundef half @sqrt_half(half noundef %a) #0 {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 24, half %{{.*}})
+ %elt.sqrt = call half @llvm.sqrt.f16(half %a)
+ ret half %elt.sqrt
+}
+
+declare half @llvm.sqrt.f16(half)
+declare float @llvm.sqrt.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/sqrt_error.ll b/llvm/test/CodeGen/DirectX/sqrt_error.ll
new file mode 100644
index 000000000000..fffa2e19b80f
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/sqrt_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation sqrt does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @sqrt_double(double noundef %a) {
+entry:
+ %elt.sqrt = call double @llvm.sqrt.f64(double %a)
+ ret double %elt.sqrt
+}
diff --git a/llvm/test/CodeGen/DirectX/trunc.ll b/llvm/test/CodeGen/DirectX/trunc.ll
new file mode 100644
index 000000000000..2072f28cef50
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/trunc.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+
+; Make sure dxil operation function calls for trunc are generated for float and half.
+
+define noundef float @trunc_float(float noundef %a) {
+entry:
+; CHECK:call float @dx.op.unary.f32(i32 29, float %{{.*}})
+ %elt.trunc = call float @llvm.trunc.f32(float %a)
+ ret float %elt.trunc
+}
+
+define noundef half @trunc_half(half noundef %a) {
+entry:
+; CHECK:call half @dx.op.unary.f16(i32 29, half %{{.*}})
+ %elt.trunc = call half @llvm.trunc.f16(half %a)
+ ret half %elt.trunc
+}
+
+declare half @llvm.trunc.f16(half)
+declare float @llvm.trunc.f32(float)
diff --git a/llvm/test/CodeGen/DirectX/trunc_error.ll b/llvm/test/CodeGen/DirectX/trunc_error.ll
new file mode 100644
index 000000000000..751b0b94c280
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/trunc_error.ll
@@ -0,0 +1,10 @@
+; RUN: not opt -S -dxil-op-lower %s 2>&1 | FileCheck %s
+
+; DXIL operation trunc does not support double overload type
+; CHECK: LLVM ERROR: Invalid Overload Type
+
+define noundef double @trunc_double(double noundef %a) {
+entry:
+ %elt.trunc = call double @llvm.trunc.f64(double %a)
+ ret double %elt.trunc
+}
diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll
index 88412f552d13..7c6cb16eacca 100644
--- a/llvm/test/CodeGen/Generic/allow-check.ll
+++ b/llvm/test/CodeGen/Generic/allow-check.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -O3
-; RUN: llc < %s -O3 -global-isel
-; RUN: llc < %s -O3 -fast-isel
+; RUN: llc < %s -O3 -global-isel=0 -fast-isel=0
+; RUN: llc < %s -O3 -global-isel=1 -fast-isel=0
+; RUN: llc < %s -O3 -global-isel=0 -fast-isel=1
-; RUN: llc < %s -O0
-; RUN: llc < %s -O0 -global-isel
-; RUN: llc < %s -O0 -fast-isel
+; RUN: llc < %s -O0 -global-isel=0 -fast-isel=0
+; RUN: llc < %s -O0 -global-isel=1 -fast-isel=0
+; RUN: llc < %s -O0 -global-isel=0 -fast-isel=1
define i1 @test_runtime() local_unnamed_addr {
entry:
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
index 0771fda02cfb..7ccee1689185 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK: %.hexagon.vlcr = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B
; ModuleID = 'hexagon_vector_loop_carried_reuse.c'
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
index 25afb9f1a137..532f7fd06793 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_commutative.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -march=hexagon -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK: %v32.hexagon.vlcr = tail call <32 x i32> @llvm.hexagon.V6.vmaxub.128B
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
index 53973423732c..ecfcf53d9133 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_constant.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -hexagon-vlcr | opt -passes=adce -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes='loop(hexagon-vlcr),adce' -S %s | FileCheck %s
; CHECK-NOT: %.hexagon.vlcr
; ModuleID = 'hexagon_vector_loop_carried_reuse.c'
diff --git a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
index b440dba66f67..9872faeb3da0 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon_vector_loop_carried_reuse_invalid.ll
@@ -1,4 +1,4 @@
-; RUN: opt -hexagon-vlcr < %s -S | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -passes=hexagon-vlcr -S %s | FileCheck %s
; Test that reuse doesn't occur due to two shufflevectors with different masks.
diff --git a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
index ab7bf1b4b0e8..c53e57800edd 100644
--- a/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -mtriple=hexagon-- -p hexagon-loop-idiom -disable-memcpy-idiom -S < %s | FileCheck %s
; Make sure we don't convert load/store loops into memcpy if the access type
; is a vector. Using vector instructions is generally better in such cases.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
index c7110263c658..5ace9e6ee486 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove1.ll
@@ -1,6 +1,8 @@
; Check for recognizing the "memmove" idiom.
; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
; CHECK: call void @llvm.memmove
; Function Attrs: norecurse nounwind
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
index 234e4f56b5d8..ed56a332f657 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/hexagon-memmove2.ll
@@ -1,5 +1,7 @@
; RUN: opt -hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S -mtriple hexagon-unknown-elf < %s \
+; RUN: | FileCheck %s
define void @PR14241(ptr %s, i64 %size) #0 {
; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
index 140c676175ea..e5bcc2b9aebf 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/lcssa.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -hexagon-loop-idiom < %s | opt -S -passes='loop(loop-deletion),gvn'
+; RUN: opt -mtriple hexagon-- -S -passes='loop(hexagon-loop-idiom,loop-deletion),gvn'
; REQUIRES: asserts
; This tests that the HexagonLoopIdiom pass does not mark LCSSA information
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
index 7a7d1d9b1a86..78f0c9e36b55 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/memmove-rt-check.ll
@@ -1,4 +1,5 @@
; RUN: opt -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom -S < %s | FileCheck %s
; Make sure that we generate correct runtime checks.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
index 37e1bb6eb7df..ce02b62911c0 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/nullptr-crash.ll
@@ -1,4 +1,5 @@
; RUN: opt -hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
+; RUN: opt -p hexagon-loop-idiom -mtriple hexagon-unknown-elf < %s
; REQUIRES: asserts
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
index 1934ced7e7ae..74c02d63d54d 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
; CHECK-LABEL: define void @fred
; Check that this test does not crash.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
index b25010f2a90f..94b0c96c3d51 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-long-loop.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
;
; The number of nested selects caused the simplification loop to take
; more than the maximum number of iterations. This caused the compiler
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
index e4b2b5a298ed..a00b1d5876ba 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll
@@ -1,4 +1,5 @@
; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+; RUN: opt -march=hexagon -p hexagon-loop-idiom -S < %s | FileCheck %s
; REQUIRES: asserts
;
; Check for sane output, this used to crash.
diff --git a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
index 781618e58901..2461e1cfde8d 100644
--- a/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
+++ b/llvm/test/CodeGen/Hexagon/loop-idiom/pmpy.ll
@@ -1,5 +1,7 @@
; RUN: opt -hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
; RUN: | FileCheck %s
+; RUN: opt -p hexagon-loop-idiom < %s -mtriple=hexagon-unknown-unknown -S \
+; RUN: | FileCheck %s
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
index 67f4dd72ea0b..9468b18bf8e4 100644
--- a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
+++ b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
@@ -135,7 +135,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
maxCallFrameSize: 0
hasOpaqueSPAdjustment: false
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
index 25106b456d2f..6629d3440549 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll
@@ -123,9 +123,10 @@ define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 0
+; CHECK-NEXT: st.b $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -149,9 +150,10 @@ define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 1
+; CHECK-NEXT: st.h $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -175,9 +177,10 @@ define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 2
+; CHECK-NEXT: st.w $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -201,9 +204,10 @@ define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 4, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 4, 3
+; CHECK-NEXT: st.d $a2, $a3, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -227,9 +231,10 @@ define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwin
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 4, 2
+; CHECK-NEXT: fst.s $fa0, $a2, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
@@ -253,9 +258,10 @@ define void @insert_4xdouble_idx(ptr %src, ptr %dst, double %in, i32 %idx) nounw
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 4, 3
+; CHECK-NEXT: fst.d $fa0, $a2, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
index 7f232073ae12..19171b7d8ed7 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll
@@ -87,9 +87,10 @@ define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 0
-; CHECK-NEXT: st.b $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 0
+; CHECK-NEXT: st.b $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -106,9 +107,10 @@ define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 1
-; CHECK-NEXT: st.h $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 1
+; CHECK-NEXT: st.h $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -125,9 +127,10 @@ define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 2
-; CHECK-NEXT: st.w $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 2
+; CHECK-NEXT: st.w $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -144,9 +147,10 @@ define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind {
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr0, $a0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a3, 3, 3
-; CHECK-NEXT: st.d $a2, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0
+; CHECK-NEXT: addi.d $a3, $sp, 0
+; CHECK-NEXT: bstrins.d $a3, $a0, 3, 3
+; CHECK-NEXT: st.d $a2, $a3, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -163,9 +167,10 @@ define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwi
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr1, $a0, 0
; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 2
-; CHECK-NEXT: fst.s $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 3, 2
+; CHECK-NEXT: fst.s $fa0, $a2, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
@@ -182,9 +187,10 @@ define void @insert_2xdouble_idx(ptr %src, ptr %dst, double %ins, i32 %idx) noun
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: vld $vr1, $a0, 0
; CHECK-NEXT: vst $vr1, $sp, 0
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bstrins.d $a0, $a2, 3, 3
-; CHECK-NEXT: fst.d $fa0, $a0, 0
+; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: bstrins.d $a2, $a0, 3, 3
+; CHECK-NEXT: fst.d $fa0, $a2, 0
; CHECK-NEXT: vld $vr0, $sp, 0
; CHECK-NEXT: vst $vr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $sp, 16
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir
index 64388933fda8..dc99ce8d7a09 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/trap.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/trap.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
--- |
declare void @llvm.trap()
@@ -9,12 +9,15 @@
---
name: f
alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
; MIPS32-LABEL: name: f
; MIPS32: TRAP
- ; MIPS32: RetRA
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ ; MIPS32-NEXT: RetRA
+ G_TRAP
RetRA
...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index 52352edbe339..e471e1047caa 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -220,10 +220,12 @@ body: |
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD2]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY4]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
@@ -268,6 +270,7 @@ body: |
; MIPS32-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load (s32) from %fixed-stack.3)
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[COPY]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[COPY1]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[LOAD1]]
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
@@ -275,6 +278,7 @@ body: |
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
; MIPS32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LOAD2]], [[COPY2]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[LOAD2]]
; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -283,13 +287,15 @@ body: |
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD4]](s32), [[C]]
; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP4]], [[OR]]
; MIPS32-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ICMP3]], [[AND2]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[LOAD3]], [[COPY3]]
; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C1]]
; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[AND3]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD2]](s32)
- ; MIPS32-NEXT: $a0 = COPY [[ADD4]](s32)
- ; MIPS32-NEXT: $a1 = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY4]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $a0 = COPY [[COPY6]](s32)
+ ; MIPS32-NEXT: $a1 = COPY [[COPY7]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1
@@ -331,10 +337,11 @@ body: |
; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $a3
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
; MIPS32-NEXT: G_STORE [[AND]](s32), [[COPY3]](p0) :: (store (s8) into %ir.pcarry_flag)
- ; MIPS32-NEXT: G_STORE [[ADD]](s32), [[COPY2]](p0) :: (store (s32) into %ir.padd)
+ ; MIPS32-NEXT: G_STORE [[COPY4]](s32), [[COPY2]](p0) :: (store (s32) into %ir.padd)
; MIPS32-NEXT: RetRA
%0:_(s32) = COPY $a0
%1:_(s32) = COPY $a1
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
index 136c03946f36..f518e9ec9e58 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ctpop.mir
@@ -10,29 +10,30 @@ body: |
; MIPS32-LABEL: name: ctpop_i32
; MIPS32: liveins: $a0
- ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
- ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
- ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
- ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
- ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
- ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
- ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
- ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
- ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
- ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
- ; MIPS32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
- ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
- ; MIPS32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
- ; MIPS32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; MIPS32: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
- ; MIPS32: $v0 = COPY [[LSHR3]](s32)
- ; MIPS32: RetRA implicit $v0
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+ ; MIPS32-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
+ ; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; MIPS32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[LSHR3]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0
%0:_(s32) = COPY $a0
%1:_(s32) = G_CTPOP %0(s32)
$v0 = COPY %1(s32)
@@ -49,45 +50,46 @@ body: |
; MIPS32-LABEL: name: ctpop_i64
; MIPS32: liveins: $a0, $a1
- ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
- ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
- ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
- ; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
- ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
- ; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
- ; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
- ; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
- ; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
- ; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
- ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
- ; MIPS32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
- ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
- ; MIPS32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
- ; MIPS32: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
- ; MIPS32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; MIPS32: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
- ; MIPS32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; MIPS32: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
- ; MIPS32: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
- ; MIPS32: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C1]]
- ; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[AND4]]
- ; MIPS32: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
- ; MIPS32: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
- ; MIPS32: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SUB1]], [[C3]]
- ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND5]], [[AND6]]
- ; MIPS32: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C4]](s32)
- ; MIPS32: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR6]], [[ADD2]]
- ; MIPS32: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C5]]
- ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
- ; MIPS32: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
- ; MIPS32: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
- ; MIPS32: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; MIPS32: $v0 = COPY [[ADD4]](s32)
- ; MIPS32: $v1 = COPY [[C8]](s32)
- ; MIPS32: RetRA implicit $v0, implicit $v1
+ ; MIPS32-NEXT: {{ $}}
+ ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+ ; MIPS32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+ ; MIPS32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; MIPS32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; MIPS32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+ ; MIPS32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C1]]
+ ; MIPS32-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[AND]]
+ ; MIPS32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; MIPS32-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; MIPS32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 858993459
+ ; MIPS32-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C3]]
+ ; MIPS32-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C3]]
+ ; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
+ ; MIPS32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; MIPS32-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[LSHR2]], [[ADD]]
+ ; MIPS32-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
+ ; MIPS32-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
+ ; MIPS32-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
+ ; MIPS32-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; MIPS32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
+ ; MIPS32-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; MIPS32-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR4]], [[C1]]
+ ; MIPS32-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[AND4]]
+ ; MIPS32-NEXT: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
+ ; MIPS32-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR5]], [[C3]]
+ ; MIPS32-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[SUB1]], [[C3]]
+ ; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND5]], [[AND6]]
+ ; MIPS32-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C4]](s32)
+ ; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR6]], [[ADD2]]
+ ; MIPS32-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C5]]
+ ; MIPS32-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
+ ; MIPS32-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
+ ; MIPS32-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; MIPS32-NEXT: $v0 = COPY [[ADD4]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[C8]](s32)
+ ; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%1:_(s32) = COPY $a0
%2:_(s32) = COPY $a1
%0:_(s64) = G_MERGE_VALUES %1(s32), %2(s32)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
index 3e7bcdc39d5d..a06bb6da45d2 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/cttz.mir
@@ -139,9 +139,11 @@ body: |
; MIPS32-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD1]], [[SUB1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[C]]
+ ; MIPS32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[C1]], [[C1]]
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ICMP1]]
- ; MIPS32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ADD3]](s32), [[ADD5]](s32)
+ ; MIPS32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; MIPS32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
; MIPS32-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C1]]
; MIPS32-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[C1]]
; MIPS32-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[XOR2]], [[XOR3]]
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
index 7ad286b952cb..674d7b68bfae 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
@@ -275,8 +275,10 @@ body: |
; MIPS32-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[LOAD]], [[COPY]]
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL2]]
- ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; MIPS32-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[LOAD2]], [[COPY]]
; MIPS32-NEXT: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[LOAD1]], [[COPY1]]
@@ -285,17 +287,22 @@ body: |
; MIPS32-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[LOAD]], [[COPY1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL3]], [[MUL4]]
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[MUL4]]
- ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[MUL5]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[MUL5]]
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ICMP2]], [[ICMP3]]
- ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
+ ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY7]], [[UMULH1]]
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD6]](s32), [[UMULH1]]
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
; MIPS32-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ICMP4]]
- ; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[UMULH2]]
+ ; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[COPY8]], [[UMULH2]]
; MIPS32-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD8]](s32), [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[ADD8]](s32)
; MIPS32-NEXT: [[ADD9:%[0-9]+]]:_(s32) = G_ADD [[ADD7]], [[ICMP5]]
- ; MIPS32-NEXT: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[ADD8]], [[ADD2]]
+ ; MIPS32-NEXT: [[ADD10:%[0-9]+]]:_(s32) = G_ADD [[COPY9]], [[ADD2]]
; MIPS32-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD10]](s32), [[ADD2]]
+ ; MIPS32-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[ADD10]](s32)
; MIPS32-NEXT: [[ADD11:%[0-9]+]]:_(s32) = G_ADD [[ADD9]], [[ICMP6]]
; MIPS32-NEXT: [[MUL6:%[0-9]+]]:_(s32) = G_MUL [[LOAD3]], [[COPY]]
; MIPS32-NEXT: [[MUL7:%[0-9]+]]:_(s32) = G_MUL [[LOAD2]], [[COPY1]]
@@ -312,8 +319,8 @@ body: |
; MIPS32-NEXT: [[ADD17:%[0-9]+]]:_(s32) = G_ADD [[ADD16]], [[UMULH5]]
; MIPS32-NEXT: [[ADD18:%[0-9]+]]:_(s32) = G_ADD [[ADD17]], [[ADD11]]
; MIPS32-NEXT: $v0 = COPY [[MUL]](s32)
- ; MIPS32-NEXT: $v1 = COPY [[ADD1]](s32)
- ; MIPS32-NEXT: $a0 = COPY [[ADD10]](s32)
+ ; MIPS32-NEXT: $v1 = COPY [[COPY5]](s32)
+ ; MIPS32-NEXT: $a0 = COPY [[COPY10]](s32)
; MIPS32-NEXT: $a1 = COPY [[ADD18]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1, implicit $a0, implicit $a1
%2:_(s32) = COPY $a0
@@ -359,23 +366,28 @@ body: |
; MIPS32-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY2]], [[COPY]]
; MIPS32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
; MIPS32-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL1]]
- ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; MIPS32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; MIPS32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH]]
; MIPS32-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; MIPS32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; MIPS32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; MIPS32-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[COPY3]], [[COPY]]
; MIPS32-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[COPY2]], [[COPY1]]
; MIPS32-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL2]], [[UMULH1]]
; MIPS32-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[UMULH1]]
- ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; MIPS32-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[UMULH2]]
; MIPS32-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD4]](s32), [[UMULH2]]
+ ; MIPS32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
; MIPS32-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ICMP2]], [[ICMP3]]
- ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ADD2]]
+ ; MIPS32-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY7]], [[ADD2]]
; MIPS32-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD6]](s32), [[ADD2]]
+ ; MIPS32-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
; MIPS32-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ICMP4]]
; MIPS32-NEXT: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[COPY3]], [[COPY1]]
; MIPS32-NEXT: [[ADD8:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD7]]
- ; MIPS32-NEXT: $v0 = COPY [[ADD6]](s32)
+ ; MIPS32-NEXT: $v0 = COPY [[COPY8]](s32)
; MIPS32-NEXT: $v1 = COPY [[ADD8]](s32)
; MIPS32-NEXT: RetRA implicit $v0, implicit $v1
%2:_(s32) = COPY $a0
diff --git a/llvm/test/CodeGen/Mips/atomic-min-max.ll b/llvm/test/CodeGen/Mips/atomic-min-max.ll
index bc3643f3947a..a96581bdb39a 100644
--- a/llvm/test/CodeGen/Mips/atomic-min-max.ll
+++ b/llvm/test/CodeGen/Mips/atomic-min-max.ll
@@ -2146,6 +2146,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS32-NEXT: $BB6_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movn $3, $7, $5
@@ -2186,6 +2188,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSEL-NEXT: $BB6_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movn $3, $7, $5
@@ -2225,6 +2229,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSELR6-NEXT: $BB6_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: seleqz $3, $2, $5
; MIPSELR6-NEXT: selnez $5, $7, $5
@@ -2263,6 +2269,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MMEL-NEXT: $BB6_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movn $3, $7, $5
@@ -2300,6 +2308,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MMELR6-NEXT: $BB6_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: seleqz $3, $2, $5
; MMELR6-NEXT: selnez $5, $7, $5
@@ -2417,6 +2427,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64EL-NEXT: .LBB6_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movn $3, $7, $5
@@ -2456,6 +2468,8 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64ELR6-NEXT: .LBB6_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: seleqz $3, $2, $5
; MIPS64ELR6-NEXT: selnez $5, $7, $5
@@ -2655,6 +2669,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS32-NEXT: $BB7_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movz $3, $7, $5
@@ -2696,6 +2712,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSEL-NEXT: $BB7_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movz $3, $7, $5
@@ -2735,6 +2753,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPSELR6-NEXT: $BB7_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: selnez $3, $2, $5
; MIPSELR6-NEXT: seleqz $5, $7, $5
@@ -2773,6 +2793,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MMEL-NEXT: $BB7_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movz $3, $7, $5
@@ -2810,6 +2832,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MMELR6-NEXT: $BB7_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: selnez $3, $2, $5
; MMELR6-NEXT: seleqz $5, $7, $5
@@ -2927,6 +2951,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64EL-NEXT: .LBB7_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movz $3, $7, $5
@@ -2966,6 +2992,8 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
; MIPS64ELR6-NEXT: .LBB7_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: selnez $3, $2, $5
; MIPS64ELR6-NEXT: seleqz $5, $7, $5
@@ -4244,6 +4272,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS32-NEXT: $BB10_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movn $3, $7, $5
@@ -4284,6 +4314,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSEL-NEXT: $BB10_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movn $3, $7, $5
@@ -4323,6 +4355,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSELR6-NEXT: $BB10_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: seleqz $3, $2, $5
; MIPSELR6-NEXT: selnez $5, $7, $5
@@ -4361,6 +4395,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MMEL-NEXT: $BB10_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movn $3, $7, $5
@@ -4398,6 +4434,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MMELR6-NEXT: $BB10_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: seleqz $3, $2, $5
; MMELR6-NEXT: selnez $5, $7, $5
@@ -4515,6 +4553,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64EL-NEXT: .LBB10_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movn $3, $7, $5
@@ -4554,6 +4594,8 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64ELR6-NEXT: .LBB10_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: seleqz $3, $2, $5
; MIPS64ELR6-NEXT: selnez $5, $7, $5
@@ -4753,6 +4795,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS32-NEXT: $BB11_1: # %entry
; MIPS32-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS32-NEXT: ll $2, 0($6)
+; MIPS32-NEXT: and $2, $2, $8
+; MIPS32-NEXT: and $7, $7, $8
; MIPS32-NEXT: sltu $5, $2, $7
; MIPS32-NEXT: move $3, $2
; MIPS32-NEXT: movz $3, $7, $5
@@ -4793,6 +4837,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSEL-NEXT: $BB11_1: # %entry
; MIPSEL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSEL-NEXT: ll $2, 0($6)
+; MIPSEL-NEXT: and $2, $2, $8
+; MIPSEL-NEXT: and $7, $7, $8
; MIPSEL-NEXT: sltu $5, $2, $7
; MIPSEL-NEXT: move $3, $2
; MIPSEL-NEXT: movz $3, $7, $5
@@ -4832,6 +4878,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPSELR6-NEXT: $BB11_1: # %entry
; MIPSELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPSELR6-NEXT: ll $2, 0($6)
+; MIPSELR6-NEXT: and $2, $2, $8
+; MIPSELR6-NEXT: and $7, $7, $8
; MIPSELR6-NEXT: sltu $5, $2, $7
; MIPSELR6-NEXT: selnez $3, $2, $5
; MIPSELR6-NEXT: seleqz $5, $7, $5
@@ -4870,6 +4918,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MMEL-NEXT: $BB11_1: # %entry
; MMEL-NEXT: # =>This Inner Loop Header: Depth=1
; MMEL-NEXT: ll $2, 0($6)
+; MMEL-NEXT: and $2, $2, $8
+; MMEL-NEXT: and $7, $7, $8
; MMEL-NEXT: sltu $5, $2, $7
; MMEL-NEXT: or $3, $2, $zero
; MMEL-NEXT: movz $3, $7, $5
@@ -4907,6 +4957,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MMELR6-NEXT: $BB11_1: # %entry
; MMELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MMELR6-NEXT: ll $2, 0($6)
+; MMELR6-NEXT: and $2, $2, $8
+; MMELR6-NEXT: and $7, $7, $8
; MMELR6-NEXT: sltu $5, $2, $7
; MMELR6-NEXT: selnez $3, $2, $5
; MMELR6-NEXT: seleqz $5, $7, $5
@@ -5024,6 +5076,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64EL-NEXT: .LBB11_1: # %entry
; MIPS64EL-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EL-NEXT: ll $2, 0($6)
+; MIPS64EL-NEXT: and $2, $2, $8
+; MIPS64EL-NEXT: and $7, $7, $8
; MIPS64EL-NEXT: sltu $5, $2, $7
; MIPS64EL-NEXT: move $3, $2
; MIPS64EL-NEXT: movz $3, $7, $5
@@ -5063,6 +5117,8 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
; MIPS64ELR6-NEXT: .LBB11_1: # %entry
; MIPS64ELR6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64ELR6-NEXT: ll $2, 0($6)
+; MIPS64ELR6-NEXT: and $2, $2, $8
+; MIPS64ELR6-NEXT: and $7, $7, $8
; MIPS64ELR6-NEXT: sltu $5, $2, $7
; MIPS64ELR6-NEXT: selnez $3, $2, $5
; MIPS64ELR6-NEXT: seleqz $5, $7, $5
diff --git a/llvm/test/CodeGen/NVPTX/common-linkage.ll b/llvm/test/CodeGen/NVPTX/common-linkage.ll
new file mode 100644
index 000000000000..976074e12ba6
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/common-linkage.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=+ptx43 | FileCheck %s --check-prefixes CHECK,PTX43
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -mattr=+ptx50 | FileCheck %s --check-prefixes CHECK,PTX50
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | %ptxas-verify %}
+
+; PTX43: .weak .global .align 4 .u32 g
+; PTX50: .common .global .align 4 .u32 g
+@g = common addrspace(1) global i32 0, align 4
+
+; CHECK: .weak .const .align 4 .u32 c
+@c = common addrspace(4) global i32 0, align 4
+
+; CHECK: .weak .shared .align 4 .u32 s
+@s = common addrspace(3) global i32 0, align 4
+
+define i32 @f1() {
+ %1 = load i32, ptr addrspace(1) @g
+ ret i32 %1
+}
+
+define i32 @f4() {
+ %1 = load i32, ptr addrspace(4) @c
+ ret i32 %1
+}
+
+define i32 @f3() {
+ %1 = load i32, ptr addrspace(3) @s
+ ret i32 %1
+}
diff --git a/llvm/test/CodeGen/NVPTX/weak-global.ll b/llvm/test/CodeGen/NVPTX/weak-global.ll
index dd0160d1c0a6..c5467aad08a3 100644
--- a/llvm/test/CodeGen/NVPTX/weak-global.ll
+++ b/llvm/test/CodeGen/NVPTX/weak-global.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | FileCheck %s --check-prefix PTX43
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | FileCheck %s --check-prefix PTX50
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx43 | %ptxas-verify %}
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -mattr=+ptx50 | %ptxas-verify %}
-; CHECK: .weak .global .align 4 .u32 g
+; PTX43: .weak .global .align 4 .u32 g
+; PTX50: .common .global .align 4 .u32 g
@g = common addrspace(1) global i32 zeroinitializer
define i32 @func0() {
diff --git a/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py b/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py
new file mode 100644
index 000000000000..276c6da3feae
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-overflow-toc-data.py
@@ -0,0 +1,59 @@
+# UNSUPPORTED: expensive_checks, debug
+
+# RUN: %python %s > %t.ll
+# RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 < %t.ll | \
+# RUN: FileCheck --check-prefix=ASM32 %s
+
+# RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 < %t.ll | \
+# RUN: FileCheck --check-prefix=ASM64 %s
+
+# RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 \
+# RUN: -filetype=obj -o %t.o < %t.ll
+# RUN: llvm-objdump --no-print-imm-hex -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS32 %s
+
+# RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small -mcpu=pwr7 -mattr=-altivec -O0 \
+# RUN: -filetype=obj -o %t.o < %t.ll
+# RUN: llvm-objdump --no-print-imm-hex -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS64 %s
+
+numentries = 8195
+for x in range(0, numentries):
+ print("@a%d = global i32 0, align 4 #0" % (x))
+
+print("define void @foo() {")
+print("entry:")
+for x in range(0, numentries):
+ print("store i32 1, i32* @a%d, align 4" % (x))
+print("ret void")
+print("}")
+
+print('attributes #0 = { "toc-data" }')
+
+# 32-bit assembly check
+# ASM32: la 4, a0[TD](2)
+# ASM32: la 4, a1[TD](2)
+
+# ASM32: la 4, a8191[TD](2)
+# ASM32: la 4, a8192[TD](2)
+# ASM32: la 4, a8193[TD](2)
+
+# 64-bit assembly check
+# ASM64: la 4, a0[TD](2)
+# ASM64: la 4, a1[TD](2)
+
+# ASM64: la 4, a8191[TD](2)
+# ASM64: la 4, a8192[TD](2)
+# ASM64: la 4, a8193[TD](2)
+
+# DIS32: fffc: 38 82 7f fc addi 4, 2, 32764
+# DIS32: 0000fffe: R_TOC (idx: [[#NFA+16391]]) a8191[TD]
+# DIS32: 10004: 38 82 80 00 addi 4, 2, -32768
+# DIS32: 00010006: R_TOC (idx: [[#NFA+16393]]) a8192[TD]
+# DIS32: 1000c: 38 82 80 04 addi 4, 2, -32764
+# DIS32: 0001000e: R_TOC (idx: [[#NFA+16395]]) a8193[TD]
+
+# DIS64: fffc: 38 82 7f fc addi 4, 2, 32764
+# DIS64: 0000fffe: R_TOC (idx: [[#NFA+16391]]) a8191[TD]
+# DIS64: 10004: 38 82 80 00 addi 4, 2, -32768
+# DIS64: 00010006: R_TOC (idx: [[#NFA+16393]]) a8192[TD]
+# DIS64: 1000c: 38 82 80 04 addi 4, 2, -32764
+# DIS64: 0001000e: R_TOC (idx: [[#NFA+16395]]) a8193[TD]
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll
new file mode 100644
index 000000000000..eb16bae67150
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-largeaccess.ll
@@ -0,0 +1,632 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64
+
+; Test disassembly of object.
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -xcoff-traceback-table=false \
+; RUN: --code-model=large -filetype=obj -o %t.o < %s
+; RUN: llvm-objdump -D -r --symbol-description %t.o | FileCheck -D#NFA=2 --check-prefix=DIS %s
+
+@ElementIntTLSv1 = thread_local(localdynamic) global [8187 x i32] zeroinitializer, align 4 ; Within 32K
+@ElementIntTLS2 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS3 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS4 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLS5 = thread_local(localdynamic) global [4000 x i32] zeroinitializer, align 4
+@ElementIntTLSv2 = thread_local(localdynamic) global [9000 x i32] zeroinitializer, align 4 ; Beyond 32K
+
+@ElementLongTLS6 = external thread_local(localdynamic) global [60 x i64], align 8
+@ElementLongTLS2 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8 ; Within 32K
+@MyTLSGDVar = thread_local global [800 x i64] zeroinitializer, align 8
+@ElementLongTLS3 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS4 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS5 = thread_local(localdynamic) global [3000 x i64] zeroinitializer, align 8
+@ElementLongTLS = thread_local(localdynamic) local_unnamed_addr global [7800 x i64] zeroinitializer, align 8 ; Beyond 32K
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1
+
+; All accesses use a "faster" local-dynamic sequence directly off the module handle.
+; Exercise PPCXCOFFObjectWriter::getRelocTypeAndSignSize/fixup_ppc_half16.
+define signext i32 @test1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r6, L..C1(r2) # target-flags(ppc-tlsld) @ElementIntTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r7, L..C2(r2) # target-flags(ppc-tlsld) @ElementIntTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r8, L..C3(r2) # target-flags(ppc-tlsld) @ElementIntTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r9, L..C4(r2) # target-flags(ppc-tlsld) @ElementIntTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r5, L..C5(r2) # target-flags(ppc-tlsld) @ElementIntTLSv1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r8, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r9, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r7, L..C2@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r8, L..C3@l(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r9, L..C4@l(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r5, L..C1@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r6, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLSv1)
+ store i32 1, ptr %tls1, align 4
+ %arrayidx1 = getelementptr inbounds [8187 x i32], ptr %tls1, i64 0, i64 6
+ store i32 4, ptr %arrayidx1, align 4
+ %tls2 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS2)
+ %arrayidx2 = getelementptr inbounds [4000 x i32], ptr %tls2, i64 0, i64 80
+ store i32 2, ptr %arrayidx2, align 4
+ %tls3 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS3)
+ %arrayidx3 = getelementptr inbounds [4000 x i32], ptr %tls3, i64 0, i64 81
+ store i32 3, ptr %arrayidx3, align 4
+ %tls4 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS4)
+ %arrayidx4 = getelementptr inbounds [4000 x i32], ptr %tls4, i64 0, i64 82
+ store i32 4, ptr %arrayidx4, align 4
+ %tls5 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS5)
+ %arrayidx5 = getelementptr inbounds [4000 x i32], ptr %tls5, i64 0, i64 83
+ store i32 88, ptr %arrayidx5, align 4
+ %load1 = load i32, ptr %tls1, align 4
+ %load2 = load i32, ptr %arrayidx1, align 4
+ %load3 = load i32, ptr %arrayidx2, align 4
+ %load4 = load i32, ptr %arrayidx3, align 4
+ %load5 = load i32, ptr %arrayidx4, align 4
+ %add = add i32 %load1, 88
+ %add6 = add i32 %add, %load2
+ %add8 = add i32 %add6, %load3
+ %add10 = add i32 %add8, %load4
+ %add12 = add i32 %add10, %load5
+ ret i32 %add12
+}
+
+; All accesses use a "faster" local-dynamic sequence directly off the module handle.
+; Exercise PPCXCOFFObjectWriter::getRelocTypeAndSignSize/fixup_ppc_half16ds.
+define i64 @test2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @ElementLongTLS6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 212
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r4, r6, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r3, 424(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C7(r2) # target-flags(ppc-tlsld) @ElementLongTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 203
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 1200(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C8(r2) # target-flags(ppc-tlsgdm) @MyTLSGDVar
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsgd) @MyTLSGDVar
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_addr[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 44
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 440(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C10(r2) # target-flags(ppc-tlsld) @ElementLongTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 2000(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @ElementLongTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 100
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r4, r6, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r3, 6800(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C12(r2) # target-flags(ppc-tlsld) @ElementLongTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 882
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r4, 8400(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 1191
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 212
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C6@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 424(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 203
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C7@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 1200(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r4, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C8@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C9@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_addr[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 44
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 440(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C10@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 2000(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 100
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C11@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 6800(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C12@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 882
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C12@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r4, 8400(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 1191
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS6)
+ %arrayidx = getelementptr inbounds [60 x i64], ptr %tls1, i64 0, i64 53
+ store i64 212, ptr %arrayidx, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS2)
+ %arrayidx1 = getelementptr inbounds [3000 x i64], ptr %tls2, i64 0, i64 150
+ store i64 203, ptr %arrayidx1, align 8
+ %tls3 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @MyTLSGDVar)
+ %arrayidx2 = getelementptr inbounds [800 x i64], ptr %tls3, i64 0, i64 55
+ store i64 44, ptr %arrayidx2, align 8
+ %tls4 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS3)
+ %arrayidx3 = getelementptr inbounds [3000 x i64], ptr %tls4, i64 0, i64 250
+ store i64 6, ptr %arrayidx3, align 8
+ %tls5 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS4)
+ %arrayidx4 = getelementptr inbounds [3000 x i64], ptr %tls5, i64 0, i64 850
+ store i64 100, ptr %arrayidx4, align 8
+ %tls6 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @ElementLongTLS5)
+ %arrayidx5 = getelementptr inbounds [3000 x i64], ptr %tls6, i64 0, i64 1050
+ store i64 882, ptr %arrayidx5, align 8
+ %load1 = load i64, ptr %arrayidx1, align 8
+ %load2 = load i64, ptr %arrayidx3, align 8
+ %load3 = load i64, ptr %arrayidx4, align 8
+ %add = add i64 %load1, 882
+ %add9 = add i64 %add, %load2
+ %add11 = add i64 %add9, %load3
+ ret i64 %add11
+}
+
+; Example of one access using the regular local-dynamic access from the TOC.
+define signext i32 @test3() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: test3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r6, L..C1(r2) # target-flags(ppc-tlsld) @ElementIntTLS2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r7, L..C2(r2) # target-flags(ppc-tlsld) @ElementIntTLS3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r8, L..C3(r2) # target-flags(ppc-tlsld) @ElementIntTLS4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r9, L..C4(r2) # target-flags(ppc-tlsld) @ElementIntTLS5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r5, L..C13(r2) # target-flags(ppc-tlsld) @ElementIntTLSv2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: test3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r8, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r9, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r7, L..C2@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r8, L..C3@l(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r9, L..C4@l(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r5, L..C13@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r6, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r7, r3, r7
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r8, r3, r8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r9, r3, r9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r6, r3, r6
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stwux r4, r3, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r4, 4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 24(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 2
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 320(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 324(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 88
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r4, 328(r8)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stw r3, 332(r9)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: li r3, 102
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+entry:
+ %tls1 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLSv2)
+ store i32 1, ptr %tls1, align 4
+ %arrayidx1 = getelementptr inbounds [9000 x i32], ptr %tls1, i64 0, i64 6
+ store i32 4, ptr %arrayidx1, align 4
+ %tls2 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS2)
+ %arrayidx2 = getelementptr inbounds [4000 x i32], ptr %tls2, i64 0, i64 80
+ store i32 2, ptr %arrayidx2, align 4
+ %tls3 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS3)
+ %arrayidx3 = getelementptr inbounds [4000 x i32], ptr %tls3, i64 0, i64 81
+ store i32 3, ptr %arrayidx3, align 4
+ %tls4 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS4)
+ %arrayidx4 = getelementptr inbounds [4000 x i32], ptr %tls4, i64 0, i64 82
+ store i32 4, ptr %arrayidx4, align 4
+ %tls5 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @ElementIntTLS5)
+ %arrayidx5 = getelementptr inbounds [4000 x i32], ptr %tls5, i64 0, i64 83
+ store i32 88, ptr %arrayidx5, align 4
+ %load1 = load i32, ptr %tls1, align 4
+ %load2 = load i32, ptr %arrayidx1, align 4
+ %load3 = load i32, ptr %arrayidx2, align 4
+ %load4 = load i32, ptr %arrayidx3, align 4
+ %load5 = load i32, ptr %arrayidx4, align 4
+ %add = add i32 %load1, 88
+ %add9 = add i32 %add, %load2
+ %add11 = add i32 %add9, %load3
+ %add13 = add i32 %add11, %load4
+ %add15 = add i32 %add13, %load5
+ ret i32 %add15
+}
+
+; DIS: file format aix5coff64-rs6000
+; DIS: Disassembly of section .text:
+; DIS: 0000000000000000 (idx: [[#NFA+9]]) .test1:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+25]]) ElementIntTLSv1[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 9, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 7, 16(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 8, 24(8)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 9, 32(9)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 5, 8(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+25]]) ElementIntTLSv1[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 1
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 6, 40(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 7, 3, 7
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 8, 3, 8
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 9, 3, 9
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 6, 3, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stwux 4, 3, 5
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 24(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 2
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 320(6)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 324(7)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 88
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 328(8)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 332(9)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 102
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: 0000000000000090 (idx: [[#NFA+11]]) .test2:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+35]]) ElementLongTLS6[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 212
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mr 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 48(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+35]]) ElementLongTLS6[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 424(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+37]]) ElementLongTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 203
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 56(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+37]]) ElementLongTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 1200(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+39]]) .MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 4, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+41]]) MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 64(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+39]]) .MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 4, 72(4)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+41]]) MyTLSGDVar[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+3]]) .__tls_get_addr[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 44
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 440(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+43]]) ElementLongTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 80(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+43]]) ElementLongTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 2000(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+45]]) ElementLongTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 100
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 88(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+45]]) ElementLongTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 6800(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+47]]) ElementLongTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 882
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 96(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+47]]) ElementLongTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 3, 6, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 4, 8400(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 1191
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: 0000000000000140 (idx: [[#NFA+13]]) .test3:
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mflr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stdu 1, -48(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 3, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+49]]) ElementIntTLSv2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} std 0, 64(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 7, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 3, 0(3)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 8, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 9, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 7, 16(7)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+27]]) ElementIntTLS3[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 8, 24(8)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+29]]) ElementIntTLS4[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 9, 32(9)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+31]]) ElementIntTLS5[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} bla 0x0
+; DIS-NEXT: {{0*}}[[#ADDR]]: R_RBA (idx: [[#NFA+1]]) .__tls_get_mod[PR]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 5, 104(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+49]]) ElementIntTLSv2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addis 6, 2, 0
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCU (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 1
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 6, 40(6)
+; DIS-NEXT: {{0*}}[[#ADDR + 2]]: R_TOCL (idx: [[#NFA+33]]) ElementIntTLS2[TE]
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 7, 3, 7
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 8, 3, 8
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 9, 3, 9
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} add 6, 3, 6
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stwux 4, 3, 5
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 4, 4
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 24(3)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 2
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 320(6)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 3
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 324(7)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 88
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 4, 328(8)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} stw 3, 332(9)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} li 3, 102
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} addi 1, 1, 48
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} ld 0, 16(1)
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} mtlr 0
+; DIS-NEXT: [[#%x, ADDR:]]: {{.*}} blr
+
+; DIS: Disassembly of section .data:
+
+; DIS: 00000000000001d0 (idx: 17) test1[DS]:
+; DIS-NEXT: 1d0: 00 00 00 00
+; DIS-NEXT: 00000000000001d0: R_POS (idx: [[#NFA+9]]) .test1
+; DIS-NEXT: 1d4: 00 00 00 00
+; DIS-NEXT: 1d8: 00 00 00 00
+; DIS-NEXT: 00000000000001d8: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 1dc: 00 00 02 18
+
+; DIS: 00000000000001e8 (idx: 19) test2[DS]:
+; DIS-NEXT: 1e8: 00 00 00 00
+; DIS-NEXT: 00000000000001e8: R_POS (idx: [[#NFA+11]]) .test2
+; DIS-NEXT: 1ec: 00 00 00 90
+; DIS-NEXT: 1f0: 00 00 00 00
+; DIS-NEXT: 00000000000001f0: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 1f4: 00 00 02 18
+
+; DIS: 0000000000000200 (idx: 21) test3[DS]:
+; DIS-NEXT: 200: 00 00 00 00
+; DIS-NEXT: 0000000000000200: R_POS (idx: [[#NFA+13]]) .test3
+; DIS-NEXT: 204: 00 00 01 40
+; DIS-NEXT: 208: 00 00 00 00
+; DIS-NEXT: 0000000000000208: R_POS (idx: [[#NFA+21]]) TOC[TC0]
+; DIS-NEXT: 20c: 00 00 02 18
+
+; DIS: 0000000000000218 (idx: 25) _$TLSML[TC]:
+; DIS-NEXT: 218: 00 00 00 00
+; DIS-NEXT: 0000000000000218: R_TLSML (idx: [[#NFA+23]]) _$TLSML[TC]
+; DIS-NEXT: 21c: 00 00 00 00
+
+; DIS: 0000000000000220 (idx: 27) ElementIntTLSv1[TE]:
+; DIS-NEXT: 220: 00 00 00 00
+; DIS-NEXT: 0000000000000220: R_TLS_LD (idx: [[#NFA+51]]) ElementIntTLSv1[TL]
+; DIS-NEXT: 224: 00 00 00 00
+
+; DIS: 0000000000000228 (idx: 29) ElementIntTLS3[TE]:
+; DIS-NEXT: 228: 00 00 00 00
+; DIS-NEXT: 0000000000000228: R_TLS_LD (idx: [[#NFA+55]]) ElementIntTLS3[TL]
+; DIS-NEXT: 22c: 00 00 be 6c
+
+; DIS: 0000000000000230 (idx: 31) ElementIntTLS4[TE]:
+; DIS-NEXT: 230: 00 00 00 00
+; DIS-NEXT: 0000000000000230: R_TLS_LD (idx: [[#NFA+57]]) ElementIntTLS4[TL]
+; DIS-NEXT: 234: 00 00 fc ec
+
+; DIS: 0000000000000238 (idx: 33) ElementIntTLS5[TE]:
+; DIS-NEXT: 238: 00 00 00 00
+; DIS-NEXT: 0000000000000238: R_TLS_LD (idx: [[#NFA+59]]) ElementIntTLS5[TL]
+; DIS-NEXT: 23c: 00 01 3b 6c
+
+; DIS: 0000000000000240 (idx: 35) ElementIntTLS2[TE]:
+; DIS-NEXT: 240: 00 00 00 00
+; DIS-NEXT: 0000000000000240: R_TLS_LD (idx: [[#NFA+53]]) ElementIntTLS2[TL]
+; DIS-NEXT: 244: 00 00 7f ec
+
+; DIS: 0000000000000248 (idx: 37) ElementLongTLS6[TE]:
+; DIS-NEXT: 248: 00 00 00 00
+; DIS-NEXT: 0000000000000248: R_TLS_LD (idx: [[#NFA+5]]) ElementLongTLS6[UL]
+; DIS-NEXT: 24c: 00 00 00 00
+
+; DIS: 0000000000000250 (idx: 39) ElementLongTLS2[TE]:
+; DIS-NEXT: 250: 00 00 00 00
+; DIS-NEXT: 0000000000000250: R_TLS_LD (idx: [[#NFA+63]]) ElementLongTLS2[TL]
+; DIS-NEXT: 254: 00 02 06 90
+
+; DIS: 0000000000000258 (idx: 41) .MyTLSGDVar[TE]:
+; DIS-NEXT: 258: 00 00 00 00
+; DIS-NEXT: 0000000000000258: R_TLSM (idx: [[#NFA+65]]) MyTLSGDVar[TL]
+; DIS-NEXT: 25c: 00 00 00 00
+
+; DIS: 0000000000000260 (idx: 43) MyTLSGDVar[TE]:
+; DIS-NEXT: 260: 00 00 00 00
+; DIS-NEXT: 0000000000000260: R_TLS (idx: [[#NFA+65]]) MyTLSGDVar[TL]
+; DIS-NEXT: 264: 00 02 64 50
+
+; DIS: 0000000000000268 (idx: 45) ElementLongTLS3[TE]:
+; DIS-NEXT: 268: 00 00 00 00
+; DIS-NEXT: 0000000000000268: R_TLS_LD (idx: [[#NFA+67]]) ElementLongTLS3[TL]
+; DIS-NEXT: 26c: 00 02 7d 50
+
+; DIS: 0000000000000270 (idx: 47) ElementLongTLS4[TE]:
+; DIS-NEXT: 270: 00 00 00 00
+; DIS-NEXT: 0000000000000270: R_TLS_LD (idx: [[#NFA+69]]) ElementLongTLS4[TL]
+; DIS-NEXT: 274: 00 02 db 10
+
+; DIS: 0000000000000278 (idx: 49) ElementLongTLS5[TE]:
+; DIS-NEXT: 278: 00 00 00 00
+; DIS-NEXT: 0000000000000278: R_TLS_LD (idx: [[#NFA+71]]) ElementLongTLS5[TL]
+; DIS-NEXT: 27c: 00 03 38 d0
+
+; DIS: 0000000000000280 (idx: 51) ElementIntTLSv2[TE]:
+; DIS-NEXT: 280: 00 00 00 00
+; DIS-NEXT: 0000000000000280: R_TLS_LD (idx: [[#NFA+61]]) ElementIntTLSv2[TL]
+; DIS-NEXT: 284: 00 01 79 ec
+
+; DIS: Disassembly of section .tdata:
+; DIS: 0000000000000000 (idx: [[#NFA+51]]) ElementIntTLSv1[TL]:
+; DIS: 0000000000007fec (idx: [[#NFA+53]]) ElementIntTLS2[TL]:
+; DIS: 000000000000be6c (idx: [[#NFA+55]]) ElementIntTLS3[TL]:
+; DIS: 000000000000fcec (idx: [[#NFA+57]]) ElementIntTLS4[TL]:
+; DIS: 0000000000013b6c (idx: [[#NFA+59]]) ElementIntTLS5[TL]:
+; DIS: 00000000000179ec (idx: [[#NFA+61]]) ElementIntTLSv2[TL]:
+; DIS: 0000000000020690 (idx: [[#NFA+63]]) ElementLongTLS2[TL]:
+; DIS: 0000000000026450 (idx: [[#NFA+65]]) MyTLSGDVar[TL]:
+; DIS: 0000000000027d50 (idx: [[#NFA+67]]) ElementLongTLS3[TL]:
+; DIS: 000000000002db10 (idx: [[#NFA+69]]) ElementLongTLS4[TL]:
+; DIS: 00000000000338d0 (idx: [[#NFA+71]]) ElementLongTLS5[TL]:
+; DIS: 0000000000039690 (idx: [[#NFA+73]]) ElementLongTLS[TL]:
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll
new file mode 100644
index 000000000000..d996d86a23d8
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-local-dynamic-tls-types.ll
@@ -0,0 +1,1066 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64
+; RUN: llc -O0 -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefix=SMALL-LOCAL-DYNAMIC-SMALLCM64-O0
+; RUN: llc -O0 -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s \
+; RUN: --check-prefix=SMALL-LOCAL-DYNAMIC-LARGECM64-O0
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull) #1
+@tlv_int_init = local_unnamed_addr global i32 87, align 4
+
+@tlv_char = thread_local(localdynamic) global i8 1, align 1
+@tlv_short = thread_local(localdynamic) global i8 1, align 2
+@tlv_int = thread_local(localdynamic) global i32 1, align 4
+@internal_tlv_int = internal thread_local(localdynamic) global i32 1, align 4
+@tlv_long = thread_local(localdynamic) global i64 1, align 8
+@internal_tlv_long = internal thread_local(localdynamic) global i64 1, align 8
+@tlv_float = thread_local(localdynamic) global float 1.000000e+00, align 4
+@internal_tlv_double = internal thread_local(localdynamic) global double 1.000000e+00, align 8
+
+%struct.anon = type { i32 }
+@ThreadLocalStruct = thread_local(localdynamic) global %struct.anon zeroinitializer, align 1
+@a = thread_local(localdynamic) global [87 x i32] zeroinitializer, align 4
+
+define nonnull ptr @AddrTest1() local_unnamed_addr {
+; Purpose - return the address of element a[3] of the local-dynamic TLS
+; array @a. The generated-check blocks below verify the AIX
+; small-local-dynamic TLS lowering under four configurations (small and
+; large code model, each at -O2 and -O0). In every case the module
+; handle is loaded from the TOC entry for "_$TLSML", the runtime helper
+; .__tls_get_mod is called, and the variable's TLS offset (a second TOC
+; load) is added to the returned handle before the +12 element offset.
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tlsld) @a
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C1@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C1(r2) # target-flags(ppc-tlsld) @a
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: AddrTest1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C0@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r3, 12
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+; IR under test - take the TLS address of @a and index to element 3
+; (byte offset 12, matching the addi of 12 in the checks above).
+entry:
+  %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @a)
+  %arrayidx = getelementptr inbounds [87 x i32], ptr %tlv_addr, i64 0, i64 3
+  ret ptr %arrayidx
+}
+
+define signext i32 @testUnaligned() {
+; Purpose - load an i32 field with alignment 1 from the local-dynamic
+; TLS struct @ThreadLocalStruct. Exercises the under-aligned access
+; path of the AIX small-local-dynamic TLS lowering; the optimized runs
+; fold the handle+offset addition into an indexed lwax, while -O0 emits
+; an explicit add followed by lwa.
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tlsld) @ThreadLocalStruct
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C2@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C2(r2) # target-flags(ppc-tlsld) @ThreadLocalStruct
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testUnaligned:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C2@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C2@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+; IR under test - the struct (and hence its i32 member) is only
+; 1-byte aligned, so the load below carries align 1.
+entry:
+  %tlv_addr = call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @ThreadLocalStruct)
+  %x = getelementptr inbounds %struct.anon, ptr %tlv_addr, i32 0, i32 0
+  %value = load i32, ptr %x, align 1
+  ret i32 %value
+}
+
+define void @testChar(i8 noundef signext %x) {
+; Purpose - store the incoming i8 into the local-dynamic TLS variable
+; @tlv_char. The incoming argument is moved out of r3 before the
+; .__tls_get_mod call (which clobbers r3) in the optimized runs, and
+; spilled/reloaded across the call in the -O0 runs; the store itself is
+; an indexed stbx at -O2 and an add + stb at -O0.
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C3(r2) # target-flags(ppc-tlsld) @tlv_char
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stbx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C3@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stbx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r5, L..C3(r2) # target-flags(ppc-tlsld) @tlv_char
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stb r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testChar:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C3@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, L..C3@l(r5)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stb r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+; IR under test - single byte store through the TLS address of @tlv_char.
+entry:
+  %tlv_addr = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @tlv_char)
+  store i8 %x, ptr %tlv_addr, align 1
+  ret void
+}
+
+define void @testShort(i16 noundef signext %x) {
+; Purpose - store the incoming i16 into the local-dynamic TLS variable
+; @tlv_short. Mirrors testChar for a 2-byte element and a different TOC
+; slot (L..C4); the optimized runs emit an indexed sthx and the -O0 runs
+; an add + sth, with the argument preserved across .__tls_get_mod.
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C4(r2) # target-flags(ppc-tlsld) @tlv_short
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: sthx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mr r6, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r7, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C4@l(r7)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: sthx r6, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r5, L..C4(r2) # target-flags(ppc-tlsld) @tlv_short
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: sth r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testShort:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stw r3, 60(r1) # 4-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C4@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mr r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 60(r1) # 4-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r5, L..C4@l(r5)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r4, r5
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: sth r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+; IR under test - halfword store through the TLS address of @tlv_short.
+entry:
+  %tlv_addr = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @tlv_short)
+  store i16 %x, ptr %tlv_addr, align 2
+  ret void
+}
+
+define signext i32 @testInt1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tlsld) @tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C5@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwax r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C5(r2) # target-flags(ppc-tlsld) @tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testInt1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C5@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C5@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_int)
+ %value = load i32, ptr %tlv_addr, align 4
+ ret i32 %value
+}
+
+define signext i32 @testInt2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @internal_tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwzx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C7(r2) # @tlv_int_init
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r4, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: add r3, r4, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C6@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwzx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r4, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C7@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r4, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: add r3, r4, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C6(r2) # target-flags(ppc-tlsld) @internal_tlv_int
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r4, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C7(r2) # @tlv_int_init
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testInt2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C6@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C6@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r4, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C7@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C7@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: extsw r3, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @internal_tlv_int)
+ %tlv_val = load i32, ptr %tlv_addr, align 4
+ %global_val = load i32, ptr @tlv_int_init, align 4
+ %sum = add nsw i32 %global_val, %tlv_val
+ ret i32 %sum
+}
+
+define signext i64 @testLong1() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C8@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C8@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @tlv_long)
+ %value = load i64, ptr %tlv_addr, align 4
+ ret i64 %value
+}
+
+define void @testLong2(i64 noundef signext %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsld) @internal_tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r5, r5, 9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C9@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r5, r5, 9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdx r5, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C9(r2) # target-flags(ppc-tlsld) @internal_tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r4, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r3, 9
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C9@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C9@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r4, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r3, 9
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_long)
+ %value = load i64, ptr %tlv_addr, align 8
+ %add = add nsw i64 %value, 9
+ store i64 %add, ptr %tlv_addr, align 8
+ ret void
+}
+
+define i32 @testLong3() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C8@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ldx r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C8(r2) # target-flags(ppc-tlsld) @tlv_long
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testLong3:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C8@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C8@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: # kill: def $r3 killed $r3 killed $x3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @tlv_long)
+ %value = load i64, ptr %tlv_addr, align 8
+ %conv = trunc i64 %value to i32
+ ret i32 %conv
+}
+
+define void @testFloat1(float noundef %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: vspltisw v2, 1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: vspltisw v3, 8
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xvcvsxwdp vs0, vs34
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfsx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: fadds f0, f1, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xvcvsxwdp vs1, vs35
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: vspltisw v2, 1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: vspltisw v3, 8
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xvcvsxwdp vs0, vs34
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C10@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfsx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: fadds f0, f1, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xvcvsxwdp vs1, vs35
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C11(r2) # %const.1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C12(r2) # %const.0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testFloat1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C10@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r4, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C11@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r4, L..C12@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C12@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f1, 0(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fadds f0, f0, f1
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_float)
+ %value = load float, ptr %tlv_addr, align 4
+ %inc = fadd float %value, 1.000000e+00
+ %add = fadd float %inc, 8.000000e+00
+ store float %add, ptr %tlv_addr, align 4
+ ret void
+}
+
+define i32 @testFloat2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C10@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfsx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C10(r2) # target-flags(ppc-tlsld) @tlv_float
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: fctiwz f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfd f0, 56(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwa r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testFloat2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C10@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 48(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 48(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C10@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfs f0, 0(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: fctiwz f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfd f0, 56(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwa r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @tlv_float)
+ %value = load float, ptr %tlv_addr, align 4
+ %conv = fptosi float %value to i32
+ ret i32 %conv
+}
+
+define void @testDouble1(double noundef %x) {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfdx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C11@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfdx f1, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -48(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C13(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stxsdx f1, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 48
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testDouble1:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C13@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: add r3, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stxsdx f1, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_double)
+ store double %x, ptr %tlv_addr, align 8
+ ret void
+}
+
+define i32 @testDouble2() {
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r4, L..C11(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addis r6, L..C11@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r4, L..C11@l(r6)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r3, r1, 60
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: lwz r3, 60(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stdu r1, -64(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: std r0, 80(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tlsldm) @"_$TLSML"
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r4, L..C13(r2) # target-flags(ppc-tlsld) @internal_tlv_double
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r3, r1, 52
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: lwz r3, 52(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: addi r1, r1, 64
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-SMALLCM64-O0-NEXT: blr
+;
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-LABEL: testDouble2:
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0: # %bb.0: # %entry
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mflr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stdu r1, -80(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r0, 96(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C13@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: std r3, 56(r1) # 8-byte Folded Spill
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addis r3, L..C1@u(r2)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r3, L..C1@l(r3)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: bla .__tls_get_mod[PR]
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, 56(r1) # 8-byte Folded Reload
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r4, L..C13@l(r4)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lfdx f0, r3, r4
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: xscvdpsxws f0, f0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r3, r1, 68
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: stfiwx f0, 0, r3
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: lwz r3, 68(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: clrldi r3, r3, 32
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: addi r1, r1, 80
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: ld r0, 16(r1)
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: mtlr r0
+; SMALL-LOCAL-DYNAMIC-LARGECM64-O0-NEXT: blr
+entry:
+ %tlv_addr = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @internal_tlv_double)
+ %value = load double, ptr %tlv_addr, align 8
+ %conv = fptosi double %value to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll
new file mode 100644
index 000000000000..38b35dc6c81c
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-funcattr.ll
@@ -0,0 +1,105 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s \
+; RUN: | FileCheck %s --check-prefixes=COMMONCM,CHECK-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: < %s | FileCheck %s --check-prefixes=COMMONCM,CHECK-LARGECM64
+
+@mySmallTLS = thread_local(localexec) global [7800 x i64] zeroinitializer, align 8 #0
+@mySmallTLS2 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8 #0
+@mySmallTLS3 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; All accesses use a "faster" local-exec sequence directly off the thread pointer,
+; except for mySmallTLS, as this variable is over the 32KB size limit.
+define i64 @StoreLargeAccess1() #1 {
+; COMMONCM-LABEL: StoreLargeAccess1:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; CHECK-SMALLCM64: ld r3, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; CHECK-SMALLCM64-NEXT: li r4, 0
+; CHECK-SMALLCM64-NEXT: li r5, 23
+; CHECK-LARGECM64: addis r3, L..C0@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 0
+; CHECK-LARGECM64-NEXT: li r5, 23
+; CHECK-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; COMMONCM: ori r4, r4, 53328
+; COMMONCM-NEXT: add r3, r13, r3
+; COMMONCM-NEXT: stdx r5, r3, r4
+; COMMONCM-NEXT: li r3, 55
+; COMMONCM-NEXT: li r4, 64
+; COMMONCM-NEXT: std r3, (mySmallTLS2[TL]@le+696)-65536(r13)
+; COMMONCM-NEXT: li r3, 142
+; COMMONCM-NEXT: std r4, (mySmallTLS3[TL]@le+20000)-131072(r13)
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+; Since this function does not have the 'aix-small-local-exec-tls' attribute,
+; only some local-exec variables should have the small-local-exec TLS access
+; sequence (as opposed to all of them).
+define i64 @StoreLargeAccess2() {
+; COMMONCM-LABEL: StoreLargeAccess2:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; CHECK-SMALLCM64: ld r5, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; CHECK-SMALLCM64-NEXT: li r3, 0
+; CHECK-SMALLCM64-NEXT: li r4, 23
+; CHECK-SMALLCM64-NEXT: ori r3, r3, 53328
+; CHECK-SMALLCM64-NEXT: add r5, r13, r5
+; CHECK-SMALLCM64-NEXT: stdx r4, r5, r3
+; CHECK-SMALLCM64-NEXT: ld r5, L..C1(r2) # target-flags(ppc-tprel) @mySmallTLS3
+; CHECK-SMALLCM64-NEXT: li r3, 55
+; CHECK-SMALLCM64-NEXT: li r4, 64
+; CHECK-SMALLCM64-NEXT: std r3, mySmallTLS2[TL]@le+696(r13)
+; CHECK-SMALLCM64-NEXT: li r3, 142
+; CHECK-SMALLCM64-NEXT: add r5, r13, r5
+; CHECK-SMALLCM64-NEXT: std r4, 20000(r5)
+; CHECK-LARGECM64: addis r3, L..C0@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 0
+; CHECK-LARGECM64-NEXT: li r5, 23
+; CHECK-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; CHECK-LARGECM64-NEXT: ori r4, r4, 53328
+; CHECK-LARGECM64-NEXT: add r3, r13, r3
+; CHECK-LARGECM64-NEXT: stdx r5, r3, r4
+; CHECK-LARGECM64-NEXT: addis r3, L..C1@u(r2)
+; CHECK-LARGECM64-NEXT: li r4, 55
+; CHECK-LARGECM64-NEXT: li r5, 64
+; CHECK-LARGECM64-NEXT: ld r3, L..C1@l(r3)
+; CHECK-LARGECM64-NEXT: std r4, mySmallTLS2[TL]@le+696(r13)
+; CHECK-LARGECM64-NEXT: add r3, r13, r3
+; CHECK-LARGECM64-NEXT: std r5, 20000(r3)
+; CHECK-LARGECM64-NEXT: li r3, 142
+; COMMONCM-NEXT: blr
+;
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+attributes #0 = { "aix-small-tls" }
+attributes #1 = { "target-features"="+aix-small-local-exec-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll
new file mode 100644
index 000000000000..c8537fba6a3c
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-loadaddr.ll
@@ -0,0 +1,222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -mattr=-aix-small-local-exec-tls \
+; RUN: < %s | FileCheck %s --check-prefixes=COMMONCM,SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: -mattr=-aix-small-local-exec-tls < %s | \
+; RUN: FileCheck %s --check-prefixes=COMMONCM,LARGECM64
+
+; Test that the 'aix-small-tls' global variable attribute generates the
+; optimized small-local-exec TLS sequence. Global variables without this
+; attribute should still generate a TOC-based local-exec access sequence.
+
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+@a = thread_local(localexec) global [87 x i8] zeroinitializer, align 1 #0
+@a_noattr = thread_local(localexec) global [87 x i8] zeroinitializer, align 1
+@b = thread_local(localexec) global [87 x i16] zeroinitializer, align 2 #0
+@b_noattr = thread_local(localexec) global [87 x i16] zeroinitializer, align 2
+@c = thread_local(localexec) global [87 x i32] zeroinitializer, align 4 #0
+@c_noattr = thread_local(localexec) global [87 x i32] zeroinitializer, align 4
+@d = thread_local(localexec) global [87 x i64] zeroinitializer, align 8 #0
+@d_noattr = thread_local(localexec) global [87 x i64] zeroinitializer, align 8 #0
+
+@e = thread_local(localexec) global [87 x double] zeroinitializer, align 8 #0
+@e_noattr = thread_local(localexec) global [87 x double] zeroinitializer, align 8
+@f = thread_local(localexec) global [87 x float] zeroinitializer, align 4 #0
+@f_noattr = thread_local(localexec) global [87 x float] zeroinitializer, align 4
+
+define nonnull ptr @AddrTest1() {
+; COMMONCM-LABEL: AddrTest1:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, a[TL]@le+1
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @a)
+ %arrayidx = getelementptr inbounds [87 x i8], ptr %tls0, i64 0, i64 1
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest1_NoAttr() {
+; SMALLCM64-LABEL: AddrTest1_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C0(r2) # target-flags(ppc-tprel) @a_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 1
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest1_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C0@u(r2)
+; LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 1
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @a_noattr)
+ %arrayidx = getelementptr inbounds [87 x i8], ptr %tls0, i64 0, i64 1
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest2() {
+; COMMONCM-LABEL: AddrTest2:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, b[TL]@le+4
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @b)
+ %arrayidx = getelementptr inbounds [87 x i16], ptr %tls0, i64 0, i64 2
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest2_NoAttr() {
+; SMALLCM64-LABEL: AddrTest2_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C1(r2) # target-flags(ppc-tprel) @b_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 4
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest2_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C1@u(r2)
+; LARGECM64-NEXT: ld r3, L..C1@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 4
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 2 ptr @llvm.threadlocal.address.p0(ptr align 2 @b_noattr)
+ %arrayidx = getelementptr inbounds [87 x i16], ptr %tls0, i64 0, i64 2
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest3() {
+; COMMONCM-LABEL: AddrTest3:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, c[TL]@le+12
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @c)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tls0, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest3_NoAttr() {
+; SMALLCM64-LABEL: AddrTest3_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @c_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 12
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest3_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C2@u(r2)
+; LARGECM64-NEXT: ld r3, L..C2@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 12
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @c_noattr)
+ %arrayidx = getelementptr inbounds [87 x i32], ptr %tls0, i64 0, i64 3
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest4() {
+; COMMONCM-LABEL: AddrTest4:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, c[TL]@le+56
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @c)
+ %arrayidx = getelementptr inbounds [87 x i64], ptr %tls0, i64 0, i64 7
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest4_NoAttr() {
+; SMALLCM64-LABEL: AddrTest4_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C2(r2) # target-flags(ppc-tprel) @c_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 56
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest4_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C2@u(r2)
+; LARGECM64-NEXT: ld r3, L..C2@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 56
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @c_noattr)
+ %arrayidx = getelementptr inbounds [87 x i64], ptr %tls0, i64 0, i64 7
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest5() {
+; COMMONCM-LABEL: AddrTest5:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, e[TL]@le+48
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @e)
+ %arrayidx = getelementptr inbounds [87 x double], ptr %tls0, i64 0, i64 6
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest5_NoAttr() {
+; SMALLCM64-LABEL: AddrTest5_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C3(r2) # target-flags(ppc-tprel) @e_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 48
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest5_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C3@u(r2)
+; LARGECM64-NEXT: ld r3, L..C3@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 48
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @e_noattr)
+ %arrayidx = getelementptr inbounds [87 x double], ptr %tls0, i64 0, i64 6
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest6() {
+; COMMONCM-LABEL: AddrTest6:
+; COMMONCM: # %bb.0: # %entry
+; COMMONCM-NEXT: addi r3, r13, f[TL]@le+16
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @f)
+ %arrayidx = getelementptr inbounds [87 x float], ptr %tls0, i64 0, i64 4
+ ret ptr %arrayidx
+}
+
+define nonnull ptr @AddrTest6_NoAttr() {
+; SMALLCM64-LABEL: AddrTest6_NoAttr:
+; SMALLCM64: # %bb.0: # %entry
+; SMALLCM64-NEXT: ld r3, L..C4(r2) # target-flags(ppc-tprel) @f_noattr
+; SMALLCM64-NEXT: add r3, r13, r3
+; SMALLCM64-NEXT: addi r3, r3, 16
+; SMALLCM64-NEXT: blr
+;
+; LARGECM64-LABEL: AddrTest6_NoAttr:
+; LARGECM64: # %bb.0: # %entry
+; LARGECM64-NEXT: addis r3, L..C4@u(r2)
+; LARGECM64-NEXT: ld r3, L..C4@l(r3)
+; LARGECM64-NEXT: add r3, r13, r3
+; LARGECM64-NEXT: addi r3, r3, 16
+; LARGECM64-NEXT: blr
+entry:
+ %tls0 = tail call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @f_noattr)
+ %arrayidx = getelementptr inbounds [87 x float], ptr %tls0, i64 0, i64 4
+ ret ptr %arrayidx
+}
+
+attributes #0 = { "aix-small-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll
new file mode 100644
index 000000000000..1e4a3b9bcc47
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-small-tls-globalvarattr-targetattr.ll
@@ -0,0 +1,53 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff -mattr=+aix-small-local-exec-tls < %s \
+; RUN: | FileCheck %s --check-prefixes=COMMONCM,SMALL-LOCAL-EXEC-SMALLCM64
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-asm-full-reg-names \
+; RUN: -mtriple powerpc64-ibm-aix-xcoff --code-model=large \
+; RUN: -mattr=+aix-small-local-exec-tls < %s | FileCheck %s \
+; RUN: --check-prefixes=COMMONCM,SMALL-LOCAL-EXEC-LARGECM64
+
+@mySmallTLS = thread_local(localexec) global [7800 x i64] zeroinitializer, align 8 #0
+@mySmallTLS2 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8 #0
+@mySmallTLS3 = thread_local(localexec) global [3000 x i64] zeroinitializer, align 8
+declare nonnull ptr @llvm.threadlocal.address.p0(ptr nonnull)
+
+; Although some global variables are annotated with 'aix-small-tls', because the
+; aix-small-local-exec-tls target attribute is turned on, all accesses will use
+; a "faster" local-exec sequence directly off the thread pointer.
+define i64 @StoreLargeAccess1() {
+; COMMONCM-LABEL: StoreLargeAccess1:
+; COMMONCM-NEXT: # %bb.0: # %entry
+; SMALL-LOCAL-EXEC-SMALLCM64: ld r3, L..C0(r2) # target-flags(ppc-tprel) @mySmallTLS
+; SMALL-LOCAL-EXEC-SMALLCM64-NEXT: li r4, 0
+; SMALL-LOCAL-EXEC-SMALLCM64-NEXT: li r5, 23
+; SMALL-LOCAL-EXEC-LARGECM64: addis r3, L..C0@u(r2)
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: li r4, 0
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: li r5, 23
+; SMALL-LOCAL-EXEC-LARGECM64-NEXT: ld r3, L..C0@l(r3)
+; COMMONCM: ori r4, r4, 53328
+; COMMONCM-NEXT: add r3, r13, r3
+; COMMONCM-NEXT: stdx r5, r3, r4
+; COMMONCM-NEXT: li r3, 55
+; COMMONCM-NEXT: li r4, 64
+; COMMONCM-NEXT: std r3, (mySmallTLS2[TL]@le+696)-65536(r13)
+; COMMONCM-NEXT: li r3, 142
+; COMMONCM-NEXT: std r4, (mySmallTLS3[TL]@le+20000)-131072(r13)
+; COMMONCM-NEXT: blr
+entry:
+ %tls0 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS)
+ %arrayidx = getelementptr inbounds i8, ptr %tls0, i32 53328
+ store i64 23, ptr %arrayidx, align 8
+ %tls1 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS2)
+ %arrayidx1 = getelementptr inbounds i8, ptr %tls1, i32 696
+ store i64 55, ptr %arrayidx1, align 8
+ %tls2 = tail call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @mySmallTLS3)
+ %arrayidx2 = getelementptr inbounds i8, ptr %tls2, i32 20000
+ store i64 64, ptr %arrayidx2, align 8
+ %load1 = load i64, ptr %arrayidx, align 8
+ %load2 = load i64, ptr %arrayidx1, align 8
+ %add1 = add i64 %load1, 64
+ %add2 = add i64 %add1, %load2
+ ret i64 %add2
+}
+
+attributes #0 = { "aix-small-tls" }
diff --git a/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll b/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll
new file mode 100644
index 000000000000..4e94228404d6
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/aix-xcoff-funcsect-explicitsect.ll
@@ -0,0 +1,142 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
+; RUN: -xcoff-traceback-table=false -filetype=obj -function-sections -o %t.o < %s
+; RUN: llvm-readobj -s %t.o | FileCheck %s
+
+define dso_local signext i32 @foo1() section "sect" {
+entry:
+ ret i32 1
+}
+
+define dso_local signext i32 @foo2() section "sect2" {
+entry:
+ ret i32 2
+}
+
+define dso_local signext i32 @foo3() section "sect2" {
+entry:
+ ret i32 3
+}
+
+define dso_local signext i32 @foo4() {
+entry:
+ ret i32 4
+}
+
+; CHECK: Symbol {{[{][[:space:]] *}}Index: [[#INDX:]]{{[[:space:]] *}}Name: sect
+; CHECK-NEXT: Value (RelocatableAddress): 0x0
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_HIDEXT (0x6B)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+1]]
+; CHECK-NEXT: SectionLen: 8
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+2]]
+; CHECK-NEXT: Name: .foo1
+; CHECK-NEXT: Value (RelocatableAddress): 0x0
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+3]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+4]]
+; CHECK-NEXT: Name: sect2
+; CHECK-NEXT: Value (RelocatableAddress): 0x20
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_HIDEXT (0x6B)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+5]]
+; CHECK-NEXT: SectionLen: 24
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+6]]
+; CHECK-NEXT: Name: .foo2
+; CHECK-NEXT: Value (RelocatableAddress): 0x20
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+7]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX+4]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+8]]
+; CHECK-NEXT: Name: .foo3
+; CHECK-NEXT: Value (RelocatableAddress): 0x30
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: [[#INDX+9]]
+; CHECK-NEXT: ContainingCsectSymbolIndex: [[#INDX+4]]
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 0
+; CHECK-NEXT: SymbolType: XTY_LD (0x2)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: Symbol {
+; CHECK-NEXT: Index: [[#INDX+10]]
+; CHECK-NEXT: Name: .foo4
+; CHECK-NEXT: Value (RelocatableAddress): 0x40
+; CHECK-NEXT: Section: .text
+; CHECK-NEXT: Type: 0x0
+; CHECK-NEXT: StorageClass: C_EXT (0x2)
+; CHECK-NEXT: NumberOfAuxEntries: 1
+; CHECK-NEXT: CSECT Auxiliary Entry {
+; CHECK-NEXT: Index: 16
+; CHECK-NEXT: SectionLen: 8
+; CHECK-NEXT: ParameterHashIndex: 0x0
+; CHECK-NEXT: TypeChkSectNum: 0x0
+; CHECK-NEXT: SymbolAlignmentLog2: 5
+; CHECK-NEXT: SymbolType: XTY_SD (0x1)
+; CHECK-NEXT: StorageMappingClass: XMC_PR (0x0)
+; CHECK-NEXT: StabInfoIndex: 0x0
+; CHECK-NEXT: StabSectNum: 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: }
diff --git a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
index c8278e58ad06..8748767501bd 100644
--- a/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
+++ b/llvm/test/CodeGen/PowerPC/stack-restore-with-setjmp.ll
@@ -29,9 +29,7 @@ define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %ar
; CHECK-NEXT: nop
; CHECK-NEXT: # kill: def $r3 killed $r3 killed $x3
; CHECK-NEXT: cmpwi 3, 0
-; CHECK-NEXT: crmove 20, 10
; CHECK-NEXT: crorc 20, 10, 2
-; CHECK-NEXT: crmove 21, 2
; CHECK-NEXT: bc 4, 20, .LBB0_4
; CHECK-NEXT: # %bb.2: # %if.end5
; CHECK-NEXT: addis 3, 2, .L.str@toc@ha
@@ -76,11 +74,9 @@ define dso_local signext i32 @main(i32 signext %argc, ptr nocapture readnone %ar
; BE-NEXT: addi 3, 31, 128
; BE-NEXT: bl _setjmp
; BE-NEXT: nop
-; BE-NEXT: crmove 20, 10
; BE-NEXT: # kill: def $r3 killed $r3 killed $x3
; BE-NEXT: cmpwi 3, 0
; BE-NEXT: crorc 20, 10, 2
-; BE-NEXT: crmove 21, 2
; BE-NEXT: bc 4, 20, .LBB0_4
; BE-NEXT: # %bb.2: # %if.end5
; BE-NEXT: addis 3, 2, .L.str@toc@ha
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
new file mode 100644
index 000000000000..42bf32122870
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/select.mir
@@ -0,0 +1,345 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: select_nxv2i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 2 x s8>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv8i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 8 x s8>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv32i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 32 x s8>) = G_SELECT %0(<vscale x 32 x s1>), %1, %1
+ $v8m4 = COPY %2(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: select_nxv1i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 1 x s16>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv4i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 4 x s16>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv16i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 16 x s16>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: select_nxv1i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 1 x s32>) = G_SELECT %0(<vscale x 1 x s1>), %1, %1
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: select_nxv4i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 4 x s32>) = G_SELECT %0(<vscale x 4 x s1>), %1, %1
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: select_nxv16i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 16 x s32>) = G_SELECT %0(<vscale x 16 x s1>), %1, %1
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: select_nxv2i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 2 x s64>) = G_SELECT %0(<vscale x 2 x s1>), %1, %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: select_nxv8i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV32I-NEXT: $v0 = COPY [[DEF]]
+ ; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
+ ; RV64I-NEXT: $v0 = COPY [[DEF]]
+ ; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], $v0, -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %2:vrb(<vscale x 8 x s64>) = G_SELECT %0(<vscale x 8 x s1>), %1, %1
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir
new file mode 100644
index 000000000000..27dfb3fdda9d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale32.mir
@@ -0,0 +1,300 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: test_1_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 3
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 2
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 2
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 3
+ %3:gprb(s32) = G_LSHR %1, %2(s32)
+ %4:gprb(s32) = G_CONSTANT i32 3
+ %0:gprb(s32) = G_MUL %3, %4
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 1
+ %0:gprb(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVLENB]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s32) = G_READ_VLENB
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SLLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 1
+ %0:gprb(s32) = G_SHL %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40_s32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s32
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PseudoReadVLENB]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:gprb(s32) = G_READ_VLENB
+ %2:gprb(s32) = G_CONSTANT i32 5
+ %0:gprb(s32) = G_MUL %1, %2
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_1_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 1
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 2
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 2
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 3
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 4
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 4
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 8
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 8
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 16
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 16
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40_s64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s64
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 40
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:gprb(s32) = G_READ_VLENB
+ %18:gprb(s32) = G_CONSTANT i32 3
+ %2:gprb(s32) = G_LSHR %17, %18(s32)
+ %15:gprb(s32) = G_CONSTANT i32 40
+ %9:gprb(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir
new file mode 100644
index 000000000000..4a96be2471f8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/vscale64.mir
@@ -0,0 +1,139 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: test_1
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 3
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 2
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 2
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_3
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[SRLI]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 3
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ %3:gprb(s64) = G_CONSTANT i64 3
+ %4:gprb(s64) = G_MUL %2, %3
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SRLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_LSHR %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: $x10 = COPY [[PseudoReadVLENB]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ $x10 = COPY %0(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoReadVLENB]], 1
+ ; CHECK-NEXT: $x10 = COPY [[SLLI]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 1
+ %2:gprb(s64) = G_SHL %0, %1(s64)
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_40
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40
+ ; CHECK: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PseudoReadVLENB]], [[ADDI]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprb(s64) = G_READ_VLENB
+ %1:gprb(s64) = G_CONSTANT i64 5
+ %2:gprb(s64) = G_MUL %0, %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
index 11789a030e6f..5f52030fc170 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/trap.mir
@@ -14,7 +14,7 @@ body: |
; CHECK-LABEL: name: test_trap
; CHECK: UNIMP
; CHECK-NEXT: PseudoRET
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
PseudoRET
...
@@ -28,7 +28,7 @@ body: |
; CHECK-LABEL: name: test_debugtrap
; CHECK: EBREAK
; CHECK-NEXT: PseudoRET
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.debugtrap)
+ G_DEBUGTRAP
PseudoRET
...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
index d169eb316dfc..b3c62df4ffdc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv32.mir
@@ -89,10 +89,12 @@ body: |
; CHECK-NEXT: %yhi:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %xlo, %ylo
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %ylo
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %xhi, %yhi
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%xlo:_(s32) = COPY $x10
%xhi:_(s32) = COPY $x11
@@ -121,10 +123,12 @@ body: |
; CHECK-NEXT: %hi2:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%lo1:_(s32) = COPY $x10
%hi1:_(s32) = COPY $x11
@@ -152,6 +156,7 @@ body: |
; CHECK-NEXT: %hi2:_(s32) = COPY $x15
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD %mid1, %mid2
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), %mid1
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
@@ -159,11 +164,13 @@ body: |
; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[C]]
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[ICMP]]
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[AND]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[OR]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index f394e4d5064e..6e76bb0e3eff 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -121,10 +121,12 @@ body: |
; CHECK-NEXT: %y01:_(s64) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %x00, %y00
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %y00
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %x01, %y01
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%x00:_(s64) = COPY $x10
%x01:_(s64) = COPY $x11
@@ -153,10 +155,12 @@ body: |
; CHECK-NEXT: %hi2:_(s64) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%lo1:_(s64) = COPY $x10
%hi1:_(s64) = COPY $x11
@@ -184,6 +188,7 @@ body: |
; CHECK-NEXT: %hi2:_(s64) = COPY $x15
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD %lo1, %lo2
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), %lo2
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD %mid1, %mid2
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), %mid1
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
@@ -194,14 +199,16 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC2]], [[AND]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD %hi1, %hi2
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[OR]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[AND1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[COPY2]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
index c348ec6f73ad..9227e6530221 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv32.mir
@@ -92,7 +92,8 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -119,21 +120,23 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[ADD2]](s32), [[COPY1]]
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[COPY1]]
- ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY]]
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY1]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY]]
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY3]](s32), [[C1]]
; CHECK-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY3]](s32), [[C1]]
; CHECK-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[ICMP6]], [[ICMP4]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SELECT1]], [[SELECT]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
; CHECK-NEXT: $x12 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%2:_(s32) = COPY $x10
@@ -241,7 +244,8 @@ body: |
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[SUB]](s32), [[COPY]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY1]](s32), [[C]]
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP1]], [[ICMP]]
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -377,7 +381,8 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
@@ -404,14 +409,16 @@ body: |
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
- ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD2]](s32), [[COPY3]]
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ADD2]](s32), [[COPY3]]
- ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY5]](s32), [[COPY3]]
+ ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[COPY2]]
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[ICMP3]], [[ICMP1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD2]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[COPY4]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY5]](s32)
; CHECK-NEXT: $x12 = COPY [[SELECT]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%2:_(s32) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
index 5506f5228e9d..8acaff5dbb25 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
@@ -125,8 +125,9 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
@@ -261,8 +262,9 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP1]](s64)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP]](s64)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[SUB]](s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
@@ -364,7 +366,8 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ZEXT]](s64), [[AND]]
- ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY2]](s32)
; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
@@ -393,7 +396,8 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
index a890a411544e..354fc109a463 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv32.mir
@@ -50,8 +50,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C15]](s32)
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C16]], [[LSHR6]]
@@ -129,8 +129,8 @@ body: |
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C15]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND10]], [[C17]](s32)
@@ -201,8 +201,8 @@ body: |
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C10]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C12]](s32)
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[LSHR8]]
@@ -267,8 +267,8 @@ body: |
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C13]](s32)
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[LSHR8]]
@@ -306,8 +306,8 @@ body: |
; RV32I-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD4]], [[C26]]
; RV32I-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C28]](s32)
; RV32I-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[LSHR17]]
@@ -389,8 +389,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND8]], [[C14]]
; RV32I-NEXT: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C15]](s32)
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C16]], [[LSHR6]]
@@ -468,8 +468,8 @@ body: |
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C15]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C16]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND10:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND10]], [[C17]](s32)
@@ -540,8 +540,8 @@ body: |
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C10]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C11]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C12]](s32)
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[LSHR8]]
@@ -606,8 +606,8 @@ body: |
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C11]]
; RV32I-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C12]]
; RV32I-NEXT: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C13]](s32)
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[LSHR8]]
@@ -645,8 +645,8 @@ body: |
; RV32I-NEXT: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD4]], [[C26]]
; RV32I-NEXT: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C27]]
; RV32I-NEXT: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C28]](s32)
; RV32I-NEXT: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[LSHR17]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
index add8a565202d..38a4b9c6dae3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
@@ -283,8 +283,8 @@ body: |
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C11]]
; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C13]](s64)
; RV64I-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C14]], [[LSHR9]]
@@ -583,8 +583,8 @@ body: |
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C11]]
; RV64I-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C12]]
; RV64I-NEXT: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C13]](s64)
; RV64I-NEXT: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C14]], [[LSHR9]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
index d4eb5ebc2e29..c64669cb7341 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv32.mir
@@ -35,8 +35,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -90,8 +90,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND5]], [[C8]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C10]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[C9]](s32)
@@ -143,8 +143,8 @@ body: |
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -190,8 +190,8 @@ body: |
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ADD1]], [[C5]]
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND3]], [[C6]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C7]](s32)
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C8]](s32)
@@ -210,8 +210,8 @@ body: |
; RV32I-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ADD3]], [[C13]]
; RV32I-NEXT: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C14]]
; RV32I-NEXT: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C14]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C15]](s32)
; RV32I-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[LSHR7]], [[LSHR3]]
; RV32I-NEXT: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
index e2434ba9301c..196b367e5927 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
@@ -205,8 +205,8 @@ body: |
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C5]]
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND3]], [[C6]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C7]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
index 19555a702b73..372becaf08d9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv32.mir
@@ -39,8 +39,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C10]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -98,8 +98,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C11]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[C10]](s32)
@@ -155,8 +155,8 @@ body: |
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C8]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -208,8 +208,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR3]], [[C10]]
@@ -234,8 +234,8 @@ body: |
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD6]], [[C17]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C19]](s32)
; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD3]], [[LSHR7]]
; RV32I-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -304,8 +304,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C10]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -363,8 +363,8 @@ body: |
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 3855
; RV32I-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 257
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND6]], [[C9]]
; RV32I-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; RV32I-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C11]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[C10]](s32)
@@ -420,8 +420,8 @@ body: |
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C6]]
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C7]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C8]](s32)
; RV32I-NEXT: $x10 = COPY [[LSHR3]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
@@ -473,8 +473,8 @@ body: |
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ADD2]], [[C7]]
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND4]], [[C8]]
; RV32I-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C9]](s32)
; RV32I-NEXT: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[LSHR3]], [[C10]]
@@ -499,8 +499,8 @@ body: |
; RV32I-NEXT: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 252645135
; RV32I-NEXT: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ADD6]], [[C17]]
; RV32I-NEXT: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 16843009
- ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; RV32I-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND9]], [[C18]]
; RV32I-NEXT: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C19]](s32)
; RV32I-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[ADD3]], [[LSHR7]]
; RV32I-NEXT: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
index e030e3ce2a80..e51a2143efd0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
@@ -221,8 +221,8 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
@@ -457,8 +457,8 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1085102592571150095
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
- ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+ ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND4]], [[C7]]
; RV64I-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[MUL]], [[C8]](s64)
; RV64I-NEXT: $x10 = COPY [[LSHR3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
index 433d6e6b821f..ec2dc568a5ec 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv32.mir
@@ -162,8 +162,10 @@ body: |
; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH %lo1, %lo2
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMULH]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[UMULH]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ICMP]], [[ICMP1]]
; CHECK-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL %hi1, %lo2
; CHECK-NEXT: [[MUL4:%[0-9]+]]:_(s32) = G_MUL %mid1, %mid2
@@ -171,13 +173,18 @@ body: |
; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH %mid1, %lo2
; CHECK-NEXT: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH %lo1, %mid2
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[MUL3]], [[MUL4]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[MUL5]]
- ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[UMULH1]]
- ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[UMULH2]]
- ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[ADD6]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[MUL5]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD4]](s32)
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[UMULH1]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+ ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[COPY4]], [[UMULH2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD6]](s32)
+ ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[COPY5]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD7]](s32)
; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
- ; CHECK-NEXT: $x11 = COPY [[ADD1]](s32)
- ; CHECK-NEXT: $x12 = COPY [[ADD7]](s32)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: $x12 = COPY [[COPY6]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s32) = COPY $x10
%mid1:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
index 09e002e8428d..39d9c5b7dfd1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-ext-rv64.mir
@@ -194,8 +194,10 @@ body: |
; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH %lo1, %lo2
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[MUL2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[UMULH]]
; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD1]](s64), [[UMULH]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[ADD1]](s64)
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ICMP]], [[ICMP1]]
; CHECK-NEXT: [[MUL3:%[0-9]+]]:_(s64) = G_MUL %hi1, %lo2
; CHECK-NEXT: [[MUL4:%[0-9]+]]:_(s64) = G_MUL %mid1, %mid2
@@ -203,13 +205,18 @@ body: |
; CHECK-NEXT: [[UMULH1:%[0-9]+]]:_(s64) = G_UMULH %mid1, %lo2
; CHECK-NEXT: [[UMULH2:%[0-9]+]]:_(s64) = G_UMULH %lo1, %mid2
; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD [[MUL3]], [[MUL4]]
- ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[ADD3]], [[MUL5]]
- ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[ADD4]], [[UMULH1]]
- ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s64) = G_ADD [[ADD5]], [[UMULH2]]
- ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s64) = G_ADD [[ADD6]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ADD3]](s64)
+ ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[MUL5]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ADD4]](s64)
+ ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[COPY3]], [[UMULH1]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD5]](s64)
+ ; CHECK-NEXT: [[ADD6:%[0-9]+]]:_(s64) = G_ADD [[COPY4]], [[UMULH2]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ADD6]](s64)
+ ; CHECK-NEXT: [[ADD7:%[0-9]+]]:_(s64) = G_ADD [[COPY5]], [[ADD2]]
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ADD7]](s64)
; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
- ; CHECK-NEXT: $x11 = COPY [[ADD1]](s64)
- ; CHECK-NEXT: $x12 = COPY [[ADD7]](s64)
+ ; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: $x12 = COPY [[COPY6]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
%lo1:_(s64) = COPY $x10
%mid1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
index f9eda1252937..16542f580012 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-vacopy.mir
@@ -14,7 +14,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY1]](p0) :: (load (p0))
- ; CHECK-NEXT: G_STORE [[COPY]](p0), [[LOAD]](p0) :: (store (p0))
+ ; CHECK-NEXT: G_STORE [[LOAD]](p0), [[COPY]](p0) :: (store (p0))
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(p0) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir
new file mode 100644
index 000000000000..8ee40861ce02
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-implicit-def.mir
@@ -0,0 +1,410 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: implicitdef_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv64i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv32i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: implicitdef_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir
new file mode 100644
index 000000000000..6e1d4aa4d7d6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-select.mir
@@ -0,0 +1,400 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: select_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv64i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv32i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: select_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; CHECK-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir
new file mode 100644
index 000000000000..899f7955a273
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv32.mir
@@ -0,0 +1,228 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_1_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_1_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 1
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_2_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_2_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 2
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_3_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_3_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 3
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_4_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_4_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 4
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_8_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_8_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: $x10 = COPY [[READ_VLENB]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 8
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_16_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_16_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[SHL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 16
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_40_s32
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_40_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[READ_VLENB]], [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s32) = G_VSCALE i32 40
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+
+---
+name: test_1_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_1_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 1
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_2_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_2_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 2
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_3_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_3_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 3
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_4_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_4_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 4
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_8_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_8_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 8
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_16_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_16_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 16
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
+---
+name: test_40_s64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_40_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 40
+ %1:_(s32) = G_TRUNC %0
+ $x10 = COPY %1
+ PseudoRET implicit $x10
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir
new file mode 100644
index 000000000000..c0453a04a181
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-vscale-rv64.mir
@@ -0,0 +1,110 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_1
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_1
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 1
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_2
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_2
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 2
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_3
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_3
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 3
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_4
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_4
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 4
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_8
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_8
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: $x10 = COPY [[READ_VLENB]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 8
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_16
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_16
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[SHL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 16
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+---
+name: test_40
+body: |
+ bb.0.entry:
+
+ ; CHECK-LABEL: name: test_40
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[READ_VLENB]], [[C]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:_(s64) = G_VSCALE i64 40
+ $x10 = COPY %0
+ PseudoRET implicit $x10
+...
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
new file mode 100644
index 000000000000..ef1e355252e1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/implicit-def.mir
@@ -0,0 +1,425 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+---
+name: implicitdef_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: implicitdef_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: implicitdef_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: implicitdef_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: implicitdef_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: implicitdef_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: implicitdef_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
new file mode 100644
index 000000000000..4dc077ae6bfe
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/select.mir
@@ -0,0 +1,558 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: select_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s8>), %2(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s8>), %2(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s8>), %2(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s8>), %2(<vscale x 8 x s8>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s8>), %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s8>), %2(<vscale x 32 x s8>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SELECT %1(<vscale x 64 x s1>), %2(<vscale x 64 x s8>), %2(<vscale x 64 x s8>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s16>), %2(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s16>), %2(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s16>), %2(<vscale x 4 x s16>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s16>), %2(<vscale x 8 x s16>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s16>), %2(<vscale x 16 x s16>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SELECT [[DEF]](<vscale x 32 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SELECT %1(<vscale x 32 x s1>), %2(<vscale x 32 x s16>), %2(<vscale x 32 x s16>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s32>), %2(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s32>), %2(<vscale x 2 x s32>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s32>), %2(<vscale x 4 x s32>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s32>), %2(<vscale x 8 x s32>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SELECT [[DEF]](<vscale x 16 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SELECT %1(<vscale x 16 x s1>), %2(<vscale x 16 x s32>), %2(<vscale x 16 x s32>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: select_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: select_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SELECT [[DEF]](<vscale x 1 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8 = COPY [[SELECT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SELECT %1(<vscale x 1 x s1>), %2(<vscale x 1 x s64>), %2(<vscale x 1 x s64>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: select_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: select_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SELECT [[DEF]](<vscale x 2 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m2 = COPY [[SELECT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SELECT %1(<vscale x 2 x s1>), %2(<vscale x 2 x s64>), %2(<vscale x 2 x s64>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: select_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: select_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m4 = COPY [[SELECT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SELECT %1(<vscale x 4 x s1>), %2(<vscale x 4 x s64>), %2(<vscale x 4 x s64>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: select_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: select_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV32I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: select_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SELECT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[DEF1]], [[DEF1]]
+ ; RV64I-NEXT: $v8m8 = COPY [[SELECT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %2:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SELECT %1(<vscale x 8 x s1>), %2(<vscale x 8 x s64>), %2(<vscale x 8 x s64>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir
new file mode 100644
index 000000000000..ae3bb0a18020
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv32.mir
@@ -0,0 +1,48 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+
+---
+name: test_s32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_s32
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = G_READ_VLENB
+ %2:_(s32) = G_CONSTANT i32 3
+ %0:_(s32) = G_LSHR %1, %2(s32)
+ $x10 = COPY %0(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: test_s64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_s64
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s32) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:gprb(s32) = G_MUL [[LSHR]], [[C1]]
+ ; CHECK-NEXT: $x10 = COPY [[MUL]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %17:_(s32) = G_READ_VLENB
+ %18:_(s32) = G_CONSTANT i32 3
+ %2:_(s32) = G_LSHR %17, %18(s32)
+ %15:_(s32) = G_CONSTANT i32 1
+ %9:_(s32) = G_MUL %2, %15
+ $x10 = COPY %9(s32)
+ PseudoRET implicit $x10
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir
new file mode 100644
index 000000000000..a7446d976f25
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vscale-rv64.mir
@@ -0,0 +1,25 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+
+---
+name: test
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test
+ ; CHECK: [[READ_VLENB:%[0-9]+]]:gprb(s64) = G_READ_VLENB
+ ; CHECK-NEXT: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 3
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gprb(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = G_READ_VLENB
+ %2:_(s64) = G_CONSTANT i64 3
+ %0:_(s64) = G_LSHR %1, %2(s64)
+ $x10 = COPY %0(s64)
+ PseudoRET implicit $x10
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index 7b110e562e05..d55adf371119 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -17,6 +17,12 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64d \
; RUN: -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes=RV64,LP64D %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv32 -global-isel \
+; RUN: -frame-pointer=all -target-abi ilp32 -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes=RV32-WITHFP %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \
+; RUN: -frame-pointer=all -target-abi lp64 -verify-machineinstrs \
+; RUN: | FileCheck -check-prefixes=RV64-WITHFP %s
; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
; lp64/lp64f/lp64d. Different CHECK lines are required due to slight
@@ -79,6 +85,67 @@ define i32 @va1(ptr %fmt, ...) {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, -20(s0)
+; RV64-WITHFP-NEXT: lwu a1, -24(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: or a0, a0, a1
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: srli a2, a1, 32
+; RV64-WITHFP-NEXT: sw a1, -24(s0)
+; RV64-WITHFP-NEXT: sw a2, -20(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va, align 4
@@ -131,6 +198,58 @@ define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -212,6 +331,78 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_va_arg_alloca:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw s1, 0(a0)
+; RV32-WITHFP-NEXT: addi a0, s1, 15
+; RV32-WITHFP-NEXT: andi a0, a0, -16
+; RV32-WITHFP-NEXT: sub a0, sp, a0
+; RV32-WITHFP-NEXT: mv sp, a0
+; RV32-WITHFP-NEXT: call notdead
+; RV32-WITHFP-NEXT: mv a0, s1
+; RV32-WITHFP-NEXT: addi sp, s0, -16
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_va_arg_alloca:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: lw s1, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, s1, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: andi a0, a0, -16
+; RV64-WITHFP-NEXT: sub a0, sp, a0
+; RV64-WITHFP-NEXT: mv sp, a0
+; RV64-WITHFP-NEXT: call notdead
+; RV64-WITHFP-NEXT: mv a0, s1
+; RV64-WITHFP-NEXT: addi sp, s0, -32
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -273,6 +464,36 @@ define void @va1_caller() nounwind {
; LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64D-NEXT: addi sp, sp, 16
; LP64D-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va1_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: lui a3, 261888
+; RV32-WITHFP-NEXT: li a4, 2
+; RV32-WITHFP-NEXT: li a2, 0
+; RV32-WITHFP-NEXT: call va1
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va1_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64-WITHFP-NEXT: ld a1, %lo(.LCPI3_0)(a0)
+; RV64-WITHFP-NEXT: li a2, 2
+; RV64-WITHFP-NEXT: call va1
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
ret void
}
@@ -395,6 +616,59 @@ define i64 @va2(ptr %fmt, ...) nounwind {
; RV64-NEXT: ld a0, 0(a1)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 7
+; RV32-WITHFP-NEXT: andi a1, a0, -8
+; RV32-WITHFP-NEXT: addi a0, a0, 8
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a1)
+; RV32-WITHFP-NEXT: lw a1, 4(a1)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a1, a0, 7
+; RV64-WITHFP-NEXT: andi a1, a1, -8
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, 0(a1)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va
@@ -459,6 +733,61 @@ define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
; RV64-NEXT: srli a0, a0, 32
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: li a1, 0
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -487,6 +816,32 @@ define void @va2_caller() nounwind {
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va2_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: li a1, 1
+; RV32-WITHFP-NEXT: call va2
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va2_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: li a1, 1
+; RV64-WITHFP-NEXT: call va2
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i64 (ptr, ...) @va2(ptr undef, i32 1)
ret void
}
@@ -617,6 +972,61 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 24
+; RV32-WITHFP-NEXT: sw a3, 4(s0)
+; RV32-WITHFP-NEXT: sw a4, 8(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 12(s0)
+; RV32-WITHFP-NEXT: sw a6, 16(s0)
+; RV32-WITHFP-NEXT: sw a7, 20(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 7
+; RV32-WITHFP-NEXT: andi a3, a0, -8
+; RV32-WITHFP-NEXT: addi a0, a0, 8
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a4, 0(a3)
+; RV32-WITHFP-NEXT: lw a3, 4(a3)
+; RV32-WITHFP-NEXT: add a0, a1, a4
+; RV32-WITHFP-NEXT: sltu a1, a0, a4
+; RV32-WITHFP-NEXT: add a2, a2, a3
+; RV32-WITHFP-NEXT: add a1, a2, a1
+; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -80
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a2, 0(s0)
+; RV64-WITHFP-NEXT: sd a3, 8(s0)
+; RV64-WITHFP-NEXT: sd a4, 16(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 24(s0)
+; RV64-WITHFP-NEXT: sd a6, 32(s0)
+; RV64-WITHFP-NEXT: sd a7, 40(s0)
+; RV64-WITHFP-NEXT: addi a2, a0, 7
+; RV64-WITHFP-NEXT: andi a2, a2, -8
+; RV64-WITHFP-NEXT: addi a0, a0, 15
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, 0(a2)
+; RV64-WITHFP-NEXT: add a0, a1, a0
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 80
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%argp.cur = load ptr, ptr %va
@@ -682,6 +1092,61 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3_va_arg:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 24
+; RV32-WITHFP-NEXT: sw a3, 4(s0)
+; RV32-WITHFP-NEXT: sw a4, 8(s0)
+; RV32-WITHFP-NEXT: sw a5, 12(s0)
+; RV32-WITHFP-NEXT: sw a6, 16(s0)
+; RV32-WITHFP-NEXT: sw a7, 20(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a3, a0, 4
+; RV32-WITHFP-NEXT: sw a3, -12(s0)
+; RV32-WITHFP-NEXT: lw a3, 0(a0)
+; RV32-WITHFP-NEXT: add a0, a1, a3
+; RV32-WITHFP-NEXT: sltu a1, a0, a3
+; RV32-WITHFP-NEXT: add a1, a2, a1
+; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3_va_arg:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -80
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a2, 0(s0)
+; RV64-WITHFP-NEXT: sd a3, 8(s0)
+; RV64-WITHFP-NEXT: sd a4, 16(s0)
+; RV64-WITHFP-NEXT: sd a5, 24(s0)
+; RV64-WITHFP-NEXT: sd a6, 32(s0)
+; RV64-WITHFP-NEXT: sd a7, 40(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a2, a0, 4
+; RV64-WITHFP-NEXT: sd a2, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: srli a0, a0, 32
+; RV64-WITHFP-NEXT: add a0, a1, a0
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 80
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -718,6 +1183,39 @@ define void @va3_caller() nounwind {
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va3_caller:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: lui a0, 5
+; RV32-WITHFP-NEXT: addi a3, a0, -480
+; RV32-WITHFP-NEXT: li a0, 2
+; RV32-WITHFP-NEXT: li a1, 1111
+; RV32-WITHFP-NEXT: li a2, 0
+; RV32-WITHFP-NEXT: call va3
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va3_caller:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -16
+; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 16
+; RV64-WITHFP-NEXT: lui a0, 5
+; RV64-WITHFP-NEXT: addiw a2, a0, -480
+; RV64-WITHFP-NEXT: li a0, 2
+; RV64-WITHFP-NEXT: li a1, 1111
+; RV64-WITHFP-NEXT: call va3
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: ret
%1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
ret void
}
@@ -745,9 +1243,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-NEXT: addi a1, a0, 4
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: lw a1, 4(sp)
-; RV32-NEXT: mv a2, sp
; RV32-NEXT: lw s0, 0(a0)
-; RV32-NEXT: sw a2, 0(a1)
+; RV32-NEXT: sw a1, 0(sp)
; RV32-NEXT: lw a0, 0(sp)
; RV32-NEXT: call notdead
; RV32-NEXT: lw a0, 4(sp)
@@ -796,9 +1293,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: addi a1, a0, 4
; RV64-NEXT: sd a1, 8(sp)
; RV64-NEXT: ld a1, 8(sp)
-; RV64-NEXT: mv a2, sp
; RV64-NEXT: lw s0, 0(a0)
-; RV64-NEXT: sd a2, 0(a1)
+; RV64-NEXT: sd a1, 0(sp)
; RV64-NEXT: lw a0, 4(sp)
; RV64-NEXT: lwu a1, 0(sp)
; RV64-NEXT: slli a0, a0, 32
@@ -829,6 +1325,115 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 96
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va4_va_copy:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -64
+; RV32-WITHFP-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a0, s0, 4
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw s1, 0(a0)
+; RV32-WITHFP-NEXT: sw a1, -20(s0)
+; RV32-WITHFP-NEXT: lw a0, -20(s0)
+; RV32-WITHFP-NEXT: call notdead
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: addi a1, a1, 3
+; RV32-WITHFP-NEXT: andi a1, a1, -4
+; RV32-WITHFP-NEXT: addi a2, a1, 4
+; RV32-WITHFP-NEXT: sw a2, -16(s0)
+; RV32-WITHFP-NEXT: lw a2, -16(s0)
+; RV32-WITHFP-NEXT: lw a1, 0(a1)
+; RV32-WITHFP-NEXT: addi a2, a2, 3
+; RV32-WITHFP-NEXT: andi a2, a2, -4
+; RV32-WITHFP-NEXT: addi a3, a2, 4
+; RV32-WITHFP-NEXT: sw a3, -16(s0)
+; RV32-WITHFP-NEXT: lw a2, 0(a2)
+; RV32-WITHFP-NEXT: add a0, a0, s1
+; RV32-WITHFP-NEXT: add a1, a1, a2
+; RV32-WITHFP-NEXT: add a0, a0, a1
+; RV32-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 64
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va4_va_copy:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -112
+; RV64-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 48
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: addi a0, s0, 8
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: ld a1, -32(s0)
+; RV64-WITHFP-NEXT: lw s1, 0(a0)
+; RV64-WITHFP-NEXT: sd a1, -40(s0)
+; RV64-WITHFP-NEXT: lw a0, -36(s0)
+; RV64-WITHFP-NEXT: lwu a1, -40(s0)
+; RV64-WITHFP-NEXT: slli a0, a0, 32
+; RV64-WITHFP-NEXT: or a0, a0, a1
+; RV64-WITHFP-NEXT: call notdead
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: ld a1, -32(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: addi a1, a1, 3
+; RV64-WITHFP-NEXT: andi a1, a1, -4
+; RV64-WITHFP-NEXT: addi a2, a1, 4
+; RV64-WITHFP-NEXT: sd a2, -32(s0)
+; RV64-WITHFP-NEXT: ld a2, -32(s0)
+; RV64-WITHFP-NEXT: lw a1, 0(a1)
+; RV64-WITHFP-NEXT: addi a2, a2, 3
+; RV64-WITHFP-NEXT: andi a2, a2, -4
+; RV64-WITHFP-NEXT: addi a3, a2, 4
+; RV64-WITHFP-NEXT: sd a3, -32(s0)
+; RV64-WITHFP-NEXT: lw a2, 0(a2)
+; RV64-WITHFP-NEXT: add a0, a0, s1
+; RV64-WITHFP-NEXT: add a1, a1, a2
+; RV64-WITHFP-NEXT: addw a0, a0, a1
+; RV64-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 112
+; RV64-WITHFP-NEXT: ret
%vargs = alloca ptr
%wargs = alloca ptr
call void @llvm.va_start(ptr %vargs)
@@ -899,6 +1504,60 @@ define i32 @va6_no_fixed_args(...) nounwind {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va6_no_fixed_args:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: sw a0, 0(s0)
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: mv a0, s0
+; RV32-WITHFP-NEXT: sw a0, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va6_no_fixed_args:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: sd a0, 0(s0)
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: mv a0, s0
+; RV64-WITHFP-NEXT: sd a0, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
%va = alloca ptr
call void @llvm.va_start(ptr %va)
%1 = va_arg ptr %va, i32
@@ -993,6 +1652,85 @@ define i32 @va_large_stack(ptr %fmt, ...) {
; RV64-NEXT: addiw a1, a1, 336
; RV64-NEXT: add sp, sp, a1
; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_large_stack:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -2032
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 2032
+; RV32-WITHFP-NEXT: sw ra, 1996(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 1992(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 2000
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: lui a0, 24414
+; RV32-WITHFP-NEXT: addi a0, a0, -1728
+; RV32-WITHFP-NEXT: sub sp, sp, a0
+; RV32-WITHFP-NEXT: lui a0, 24414
+; RV32-WITHFP-NEXT: addi a0, a0, 272
+; RV32-WITHFP-NEXT: sub a0, s0, a0
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a1, s0, 4
+; RV32-WITHFP-NEXT: sw a1, 0(a0)
+; RV32-WITHFP-NEXT: lw a1, 0(a0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: addi a2, a1, 4
+; RV32-WITHFP-NEXT: sw a2, 0(a0)
+; RV32-WITHFP-NEXT: lw a0, 0(a1)
+; RV32-WITHFP-NEXT: lui a1, 24414
+; RV32-WITHFP-NEXT: addi a1, a1, -1728
+; RV32-WITHFP-NEXT: add sp, sp, a1
+; RV32-WITHFP-NEXT: lw ra, 1996(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 1992(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 2032
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_large_stack:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -2032
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 2032
+; RV64-WITHFP-NEXT: sd ra, 1960(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 1952(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 1968
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: lui a0, 24414
+; RV64-WITHFP-NEXT: addiw a0, a0, -1680
+; RV64-WITHFP-NEXT: sub sp, sp, a0
+; RV64-WITHFP-NEXT: lui a0, 24414
+; RV64-WITHFP-NEXT: addiw a0, a0, 288
+; RV64-WITHFP-NEXT: sub a0, s0, a0
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: addi a1, s0, 8
+; RV64-WITHFP-NEXT: sd a1, 0(a0)
+; RV64-WITHFP-NEXT: lw a1, 4(a0)
+; RV64-WITHFP-NEXT: lwu a2, 0(a0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: slli a1, a1, 32
+; RV64-WITHFP-NEXT: or a1, a1, a2
+; RV64-WITHFP-NEXT: addi a2, a1, 4
+; RV64-WITHFP-NEXT: srli a3, a2, 32
+; RV64-WITHFP-NEXT: sw a2, 0(a0)
+; RV64-WITHFP-NEXT: sw a3, 4(a0)
+; RV64-WITHFP-NEXT: lw a0, 0(a1)
+; RV64-WITHFP-NEXT: lui a1, 24414
+; RV64-WITHFP-NEXT: addiw a1, a1, -1680
+; RV64-WITHFP-NEXT: add sp, sp, a1
+; RV64-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 2032
+; RV64-WITHFP-NEXT: ret
%large = alloca [ 100000000 x i8 ]
%va = alloca ptr
call void @llvm.va_start(ptr %va)
@@ -1004,5 +1742,193 @@ define i32 @va_large_stack(ptr %fmt, ...) {
ret i32 %1
}
+define i32 @va_vprintf(ptr %fmt, ptr %arg_start) {
+; RV32-LABEL: va_vprintf:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: lw a0, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: lw a0, 8(sp)
+; RV32-NEXT: addi a0, a0, 3
+; RV32-NEXT: andi a0, a0, -4
+; RV32-NEXT: addi a1, a0, 4
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lw a0, 0(a0)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: va_vprintf:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd a1, 8(sp)
+; RV64-NEXT: ld a0, 8(sp)
+; RV64-NEXT: sd a0, 0(sp)
+; RV64-NEXT: ld a0, 0(sp)
+; RV64-NEXT: addi a0, a0, 3
+; RV64-NEXT: andi a0, a0, -4
+; RV64-NEXT: addi a1, a0, 4
+; RV64-NEXT: sd a1, 0(sp)
+; RV64-NEXT: lw a0, 0(a0)
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_vprintf:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -16
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -4
+; RV32-WITHFP-NEXT: .cfi_offset s0, -8
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a0, -12(s0)
+; RV32-WITHFP-NEXT: sw a0, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, -16(s0)
+; RV32-WITHFP-NEXT: addi a0, a0, 3
+; RV32-WITHFP-NEXT: andi a0, a0, -4
+; RV32-WITHFP-NEXT: addi a1, a0, 4
+; RV32-WITHFP-NEXT: sw a1, -16(s0)
+; RV32-WITHFP-NEXT: lw a0, 0(a0)
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_vprintf:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -32
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 32
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -8
+; RV64-WITHFP-NEXT: .cfi_offset s0, -16
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: ld a0, -24(s0)
+; RV64-WITHFP-NEXT: sd a0, -32(s0)
+; RV64-WITHFP-NEXT: ld a0, -32(s0)
+; RV64-WITHFP-NEXT: addi a0, a0, 3
+; RV64-WITHFP-NEXT: andi a0, a0, -4
+; RV64-WITHFP-NEXT: addi a1, a0, 4
+; RV64-WITHFP-NEXT: sd a1, -32(s0)
+; RV64-WITHFP-NEXT: lw a0, 0(a0)
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 32
+; RV64-WITHFP-NEXT: ret
+ %args = alloca ptr
+ %args_cp = alloca ptr
+ store ptr %arg_start, ptr %args
+ call void @llvm.va_copy(ptr %args_cp, ptr %args)
+ %width = va_arg ptr %args_cp, i32
+ call void @llvm.va_end(ptr %args_cp)
+ ret i32 %width
+}
-
+define i32 @va_printf(ptr %fmt, ...) {
+; RV32-LABEL: va_printf:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -36
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a2, 24(sp)
+; RV32-NEXT: sw a3, 28(sp)
+; RV32-NEXT: sw a4, 32(sp)
+; RV32-NEXT: addi a1, sp, 20
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lw a1, 8(sp)
+; RV32-NEXT: sw a5, 36(sp)
+; RV32-NEXT: sw a6, 40(sp)
+; RV32-NEXT: sw a7, 44(sp)
+; RV32-NEXT: call va_vprintf
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: va_printf:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -80
+; RV64-NEXT: .cfi_def_cfa_offset 80
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -72
+; RV64-NEXT: sd a1, 24(sp)
+; RV64-NEXT: sd a2, 32(sp)
+; RV64-NEXT: sd a3, 40(sp)
+; RV64-NEXT: sd a4, 48(sp)
+; RV64-NEXT: addi a1, sp, 24
+; RV64-NEXT: sd a1, 0(sp)
+; RV64-NEXT: ld a1, 0(sp)
+; RV64-NEXT: sd a5, 56(sp)
+; RV64-NEXT: sd a6, 64(sp)
+; RV64-NEXT: sd a7, 72(sp)
+; RV64-NEXT: call va_vprintf
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 80
+; RV64-NEXT: ret
+;
+; RV32-WITHFP-LABEL: va_printf:
+; RV32-WITHFP: # %bb.0:
+; RV32-WITHFP-NEXT: addi sp, sp, -48
+; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
+; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-WITHFP-NEXT: .cfi_offset ra, -36
+; RV32-WITHFP-NEXT: .cfi_offset s0, -40
+; RV32-WITHFP-NEXT: addi s0, sp, 16
+; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
+; RV32-WITHFP-NEXT: sw a1, 4(s0)
+; RV32-WITHFP-NEXT: sw a2, 8(s0)
+; RV32-WITHFP-NEXT: sw a3, 12(s0)
+; RV32-WITHFP-NEXT: sw a4, 16(s0)
+; RV32-WITHFP-NEXT: addi a1, s0, 4
+; RV32-WITHFP-NEXT: sw a1, -12(s0)
+; RV32-WITHFP-NEXT: lw a1, -12(s0)
+; RV32-WITHFP-NEXT: sw a5, 20(s0)
+; RV32-WITHFP-NEXT: sw a6, 24(s0)
+; RV32-WITHFP-NEXT: sw a7, 28(s0)
+; RV32-WITHFP-NEXT: call va_vprintf
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 48
+; RV32-WITHFP-NEXT: ret
+;
+; RV64-WITHFP-LABEL: va_printf:
+; RV64-WITHFP: # %bb.0:
+; RV64-WITHFP-NEXT: addi sp, sp, -96
+; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
+; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-WITHFP-NEXT: .cfi_offset ra, -72
+; RV64-WITHFP-NEXT: .cfi_offset s0, -80
+; RV64-WITHFP-NEXT: addi s0, sp, 32
+; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
+; RV64-WITHFP-NEXT: sd a1, 8(s0)
+; RV64-WITHFP-NEXT: sd a2, 16(s0)
+; RV64-WITHFP-NEXT: sd a3, 24(s0)
+; RV64-WITHFP-NEXT: sd a4, 32(s0)
+; RV64-WITHFP-NEXT: addi a1, s0, 8
+; RV64-WITHFP-NEXT: sd a1, -24(s0)
+; RV64-WITHFP-NEXT: ld a1, -24(s0)
+; RV64-WITHFP-NEXT: sd a5, 40(s0)
+; RV64-WITHFP-NEXT: sd a6, 48(s0)
+; RV64-WITHFP-NEXT: sd a7, 56(s0)
+; RV64-WITHFP-NEXT: call va_vprintf
+; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 96
+; RV64-WITHFP-NEXT: ret
+ %args = alloca ptr
+ call void @llvm.va_start(ptr %args)
+ %arg_start = load ptr, ptr %args
+ %ret_val = call i32 @va_vprintf(ptr %fmt, ptr %arg_start)
+ call void @llvm.va_end(ptr %args)
+ ret i32 %ret_val
+}
diff --git a/llvm/test/CodeGen/RISCV/allow-check.ll b/llvm/test/CodeGen/RISCV/allow-check.ll
index 2ac6370adbd5..0ddb5266db8f 100644
--- a/llvm/test/CodeGen/RISCV/allow-check.ll
+++ b/llvm/test/CodeGen/RISCV/allow-check.ll
@@ -1,10 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv32 | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 | FileCheck %s
-; RUN: llc < %s -mtriple=riscv32 -global-isel | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 -global-isel | FileCheck %s
-; RUN: llc < %s -mtriple=riscv32 -fast-isel=true | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 -fast-isel=true | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -global-isel=0 -fast-isel=1 | FileCheck %s
+
+; RUN: llc < %s -mtriple=riscv64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -global-isel=0 -fast-isel=1 | FileCheck %s
define i1 @test_runtime() local_unnamed_addr {
; CHECK-LABEL: test_runtime:
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 455e6e54c9b3..549d531e829e 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -1160,8 +1160,6 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB10_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -1189,12 +1187,11 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: li a0, 32
@@ -1205,8 +1202,6 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: beqz a1, .LBB10_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -1232,14 +1227,13 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: li a0, 32
@@ -1354,19 +1348,16 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: test_ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB11_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -1377,28 +1368,26 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB11_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -1409,43 +1398,27 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB11_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB11_3
-; RV32I-NEXT: .LBB11_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB11_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB11_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -1481,14 +1454,13 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB11_2:
; RV64I-NEXT: li a0, 64
@@ -1831,8 +1803,6 @@ define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind {
define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-LABEL: test_ctlz_i32_zero_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -1860,18 +1830,15 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i32_zero_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -1897,14 +1864,13 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctlz_i32_zero_undef:
@@ -2005,19 +1971,16 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-LABEL: test_ctlz_i64_zero_undef:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB15_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -2028,28 +1991,26 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB15_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -2060,41 +2021,25 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB15_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB15_3
-; RV32I-NEXT: .LBB15_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB15_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i64_zero_undef:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -2130,14 +2075,13 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctlz_i64_zero_undef:
@@ -2464,8 +2408,6 @@ define i16 @test_ctpop_i16(i16 %a) nounwind {
define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: test_ctpop_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: lui a2, 349525
; RV32I-NEXT: addi a2, a2, 1365
@@ -2482,18 +2424,15 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctpop_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -2508,14 +2447,13 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctpop_i32:
@@ -2578,8 +2516,6 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_ctpop_i32:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: addi sp, sp, -16
-; RV32XTHEADBB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32XTHEADBB-NEXT: srli a1, a0, 1
; RV32XTHEADBB-NEXT: lui a2, 349525
; RV32XTHEADBB-NEXT: addi a2, a2, 1365
@@ -2596,18 +2532,15 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32XTHEADBB-NEXT: lui a1, 61681
; RV32XTHEADBB-NEXT: addi a1, a1, -241
; RV32XTHEADBB-NEXT: and a0, a0, a1
-; RV32XTHEADBB-NEXT: lui a1, 4112
-; RV32XTHEADBB-NEXT: addi a1, a1, 257
-; RV32XTHEADBB-NEXT: call __mulsi3
+; RV32XTHEADBB-NEXT: slli a1, a0, 8
+; RV32XTHEADBB-NEXT: add a0, a0, a1
+; RV32XTHEADBB-NEXT: slli a1, a0, 16
+; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: srli a0, a0, 24
-; RV32XTHEADBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: addi sp, sp, 16
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctpop_i32:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADBB-NEXT: srli a1, a0, 1
; RV64XTHEADBB-NEXT: lui a2, 349525
; RV64XTHEADBB-NEXT: addiw a2, a2, 1365
@@ -2622,14 +2555,13 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64XTHEADBB-NEXT: srli a1, a0, 4
; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: lui a1, 61681
-; RV64XTHEADBB-NEXT: addiw a1, a1, -241
+; RV64XTHEADBB-NEXT: addi a1, a1, -241
; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 4112
-; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: call __muldi3
+; RV64XTHEADBB-NEXT: slli a1, a0, 8
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 16
+; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: srliw a0, a0, 24
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
; RV64XTHEADBB-NEXT: ret
%1 = call i32 @llvm.ctpop.i32(i32 %a)
ret i32 %1
@@ -2638,65 +2570,48 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: test_ctpop_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s2, a2, 1365
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: lui a0, 209715
-; RV32I-NEXT: addi s3, a0, 819
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a1, a2
; RV32I-NEXT: srli a1, a1, 2
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s4, a1, -241
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a4, a1
+; RV32I-NEXT: srli a4, a1, 4
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: slli a5, a1, 16
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: srli a5, a0, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -2719,14 +2634,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32M-LABEL: test_ctpop_i64:
@@ -2814,65 +2728,48 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
;
; RV32XTHEADBB-LABEL: test_ctpop_i64:
; RV32XTHEADBB: # %bb.0:
-; RV32XTHEADBB-NEXT: addi sp, sp, -32
-; RV32XTHEADBB-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32XTHEADBB-NEXT: mv s0, a0
-; RV32XTHEADBB-NEXT: srli a0, a1, 1
-; RV32XTHEADBB-NEXT: lui a2, 349525
-; RV32XTHEADBB-NEXT: addi s2, a2, 1365
-; RV32XTHEADBB-NEXT: and a0, a0, s2
-; RV32XTHEADBB-NEXT: sub a1, a1, a0
-; RV32XTHEADBB-NEXT: lui a0, 209715
-; RV32XTHEADBB-NEXT: addi s3, a0, 819
-; RV32XTHEADBB-NEXT: and a0, a1, s3
+; RV32XTHEADBB-NEXT: srli a2, a1, 1
+; RV32XTHEADBB-NEXT: lui a3, 349525
+; RV32XTHEADBB-NEXT: addi a3, a3, 1365
+; RV32XTHEADBB-NEXT: and a2, a2, a3
+; RV32XTHEADBB-NEXT: sub a1, a1, a2
+; RV32XTHEADBB-NEXT: lui a2, 209715
+; RV32XTHEADBB-NEXT: addi a2, a2, 819
+; RV32XTHEADBB-NEXT: and a4, a1, a2
; RV32XTHEADBB-NEXT: srli a1, a1, 2
-; RV32XTHEADBB-NEXT: and a1, a1, s3
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: srli a1, a0, 4
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: lui a1, 61681
-; RV32XTHEADBB-NEXT: addi s4, a1, -241
-; RV32XTHEADBB-NEXT: and a0, a0, s4
-; RV32XTHEADBB-NEXT: lui a1, 4112
-; RV32XTHEADBB-NEXT: addi s1, a1, 257
-; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3
-; RV32XTHEADBB-NEXT: srli s5, a0, 24
-; RV32XTHEADBB-NEXT: srli a0, s0, 1
-; RV32XTHEADBB-NEXT: and a0, a0, s2
-; RV32XTHEADBB-NEXT: sub s0, s0, a0
-; RV32XTHEADBB-NEXT: and a0, s0, s3
-; RV32XTHEADBB-NEXT: srli s0, s0, 2
-; RV32XTHEADBB-NEXT: and a1, s0, s3
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: srli a1, a0, 4
-; RV32XTHEADBB-NEXT: add a0, a0, a1
-; RV32XTHEADBB-NEXT: and a0, a0, s4
-; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3
+; RV32XTHEADBB-NEXT: and a1, a1, a2
+; RV32XTHEADBB-NEXT: add a1, a4, a1
+; RV32XTHEADBB-NEXT: srli a4, a1, 4
+; RV32XTHEADBB-NEXT: add a1, a1, a4
+; RV32XTHEADBB-NEXT: lui a4, 61681
+; RV32XTHEADBB-NEXT: addi a4, a4, -241
+; RV32XTHEADBB-NEXT: and a1, a1, a4
+; RV32XTHEADBB-NEXT: slli a5, a1, 8
+; RV32XTHEADBB-NEXT: add a1, a1, a5
+; RV32XTHEADBB-NEXT: slli a5, a1, 16
+; RV32XTHEADBB-NEXT: add a1, a1, a5
+; RV32XTHEADBB-NEXT: srli a1, a1, 24
+; RV32XTHEADBB-NEXT: srli a5, a0, 1
+; RV32XTHEADBB-NEXT: and a3, a5, a3
+; RV32XTHEADBB-NEXT: sub a0, a0, a3
+; RV32XTHEADBB-NEXT: and a3, a0, a2
+; RV32XTHEADBB-NEXT: srli a0, a0, 2
+; RV32XTHEADBB-NEXT: and a0, a0, a2
+; RV32XTHEADBB-NEXT: add a0, a3, a0
+; RV32XTHEADBB-NEXT: srli a2, a0, 4
+; RV32XTHEADBB-NEXT: add a0, a0, a2
+; RV32XTHEADBB-NEXT: and a0, a0, a4
+; RV32XTHEADBB-NEXT: slli a2, a0, 8
+; RV32XTHEADBB-NEXT: add a0, a0, a2
+; RV32XTHEADBB-NEXT: slli a2, a0, 16
+; RV32XTHEADBB-NEXT: add a0, a0, a2
; RV32XTHEADBB-NEXT: srli a0, a0, 24
-; RV32XTHEADBB-NEXT: add a0, a0, s5
+; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: li a1, 0
-; RV32XTHEADBB-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32XTHEADBB-NEXT: addi sp, sp, 32
; RV32XTHEADBB-NEXT: ret
;
; RV64XTHEADBB-LABEL: test_ctpop_i64:
; RV64XTHEADBB: # %bb.0:
-; RV64XTHEADBB-NEXT: addi sp, sp, -16
-; RV64XTHEADBB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64XTHEADBB-NEXT: srli a1, a0, 1
; RV64XTHEADBB-NEXT: lui a2, 349525
; RV64XTHEADBB-NEXT: addiw a2, a2, 1365
@@ -2895,14 +2792,13 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64XTHEADBB-NEXT: slli a2, a1, 32
; RV64XTHEADBB-NEXT: add a1, a1, a2
; RV64XTHEADBB-NEXT: and a0, a0, a1
-; RV64XTHEADBB-NEXT: lui a1, 4112
-; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: slli a2, a1, 32
-; RV64XTHEADBB-NEXT: add a1, a1, a2
-; RV64XTHEADBB-NEXT: call __muldi3
+; RV64XTHEADBB-NEXT: slli a1, a0, 8
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 16
+; RV64XTHEADBB-NEXT: add a0, a0, a1
+; RV64XTHEADBB-NEXT: slli a1, a0, 32
+; RV64XTHEADBB-NEXT: add a0, a0, a1
; RV64XTHEADBB-NEXT: srli a0, a0, 56
-; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64XTHEADBB-NEXT: addi sp, sp, 16
; RV64XTHEADBB-NEXT: ret
%1 = call i64 @llvm.ctpop.i64(i64 %a)
ret i64 %1
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index adf614435b31..9ae30e646fdb 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -602,19 +602,16 @@ define signext i32 @ctlz(i64 %b) nounwind {
;
; RV32I-LABEL: ctlz:
; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB7_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -625,28 +622,26 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: andi a0, a0, 63
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB7_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -657,41 +652,25 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB7_2
-; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi s1, a0, 32
-; RV32I-NEXT: j .LBB7_3
-; RV32I-NEXT: .LBB7_2:
-; RV32I-NEXT: srli s1, s1, 24
-; RV32I-NEXT: .LBB7_3: # %entry
-; RV32I-NEXT: andi a0, s1, 63
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: andi a0, a0, 63
; RV32I-NEXT: ret
;
; RV64I-LABEL: ctlz:
; RV64I: # %bb.0: # %entry
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -727,15 +706,14 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: srli a0, a0, 58
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 7a9439e5b322..da882cafd997 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -683,47 +683,41 @@ define i64 @fcvt_l_d(double %a) nounwind {
define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-LABEL: fcvt_l_d_sat:
; RV32IFD: # %bb.0: # %start
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI12_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI12_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB12_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB12_2
; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB12_2: # %start
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB12_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI12_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB12_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB12_4: # %start
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_l_d_sat:
@@ -737,48 +731,44 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
;
; RV32IZFINXZDINX-LABEL: fcvt_l_d_sat:
; RV32IZFINXZDINX: # %bb.0: # %start
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: mv s1, a1
; RV32IZFINXZDINX-NEXT: mv s0, a0
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB12_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB12_2
; RV32IZFINXZDINX-NEXT: # %bb.1: # %start
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %start
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB12_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI12_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI12_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI12_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB12_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB12_4: # %start
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_l_d_sat:
@@ -1116,13 +1106,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmv_x_d:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmv_x_d:
@@ -1257,13 +1241,13 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV32IFD-LABEL: fmv_d_x:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw a3, 4(sp)
-; RV32IFD-NEXT: sw a2, 0(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: fld fa5, 0(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld fa5, 8(sp)
+; RV32IFD-NEXT: sw a2, 8(sp)
+; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld fa4, 8(sp)
-; RV32IFD-NEXT: fadd.d fa0, fa4, fa5
+; RV32IFD-NEXT: fadd.d fa0, fa5, fa4
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
@@ -1276,17 +1260,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
;
; RV32IZFINXZDINX-LABEL: fmv_d_x:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
-; RV32IZFINXZDINX-NEXT: sw a3, 4(sp)
-; RV32IZFINXZDINX-NEXT: sw a2, 0(sp)
-; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
-; RV32IZFINXZDINX-NEXT: lw a1, 4(sp)
-; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
-; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
-; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
+; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmv_d_x:
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index 29a9e5070950..f1c56b320b76 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -43,48 +43,42 @@ define signext i32 @test_floor_si32(double %x) {
define i64 @test_floor_si64(double %x) nounwind {
; RV32IFD-LABEL: test_floor_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call floor
; RV32IFD-NEXT: lui a0, %hi(.LCPI1_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI1_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI1_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB1_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB1_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB1_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB1_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB1_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB1_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_si64:
@@ -98,49 +92,45 @@ define i64 @test_floor_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_floor_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call floor
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: mv s1, a1
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB1_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB1_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB1_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB1_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI1_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI1_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB1_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB1_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si64:
@@ -299,48 +289,42 @@ define signext i32 @test_ceil_si32(double %x) {
define i64 @test_ceil_si64(double %x) nounwind {
; RV32IFD-LABEL: test_ceil_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call ceil
; RV32IFD-NEXT: lui a0, %hi(.LCPI5_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI5_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI5_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB5_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB5_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB5_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB5_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB5_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB5_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_si64:
@@ -354,49 +338,45 @@ define i64 @test_ceil_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_ceil_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call ceil
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: mv s1, a1
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB5_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB5_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB5_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB5_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI5_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI5_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB5_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB5_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si64:
@@ -555,48 +535,42 @@ define signext i32 @test_trunc_si32(double %x) {
define i64 @test_trunc_si64(double %x) nounwind {
; RV32IFD-LABEL: test_trunc_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call trunc
; RV32IFD-NEXT: lui a0, %hi(.LCPI9_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI9_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI9_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB9_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB9_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB9_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB9_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB9_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB9_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_si64:
@@ -610,49 +584,45 @@ define i64 @test_trunc_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_trunc_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call trunc
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: mv s1, a1
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB9_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB9_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB9_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB9_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI9_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI9_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB9_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB9_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si64:
@@ -811,48 +781,42 @@ define signext i32 @test_round_si32(double %x) {
define i64 @test_round_si64(double %x) nounwind {
; RV32IFD-LABEL: test_round_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call round
; RV32IFD-NEXT: lui a0, %hi(.LCPI13_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI13_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI13_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB13_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB13_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB13_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB13_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI13_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB13_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB13_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_si64:
@@ -866,49 +830,45 @@ define i64 @test_round_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_round_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call round
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: mv s1, a1
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB13_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB13_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB13_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB13_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI13_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI13_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI13_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB13_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB13_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_round_si64:
@@ -1067,48 +1027,42 @@ define signext i32 @test_roundeven_si32(double %x) {
define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call roundeven
; RV32IFD-NEXT: lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI17_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI17_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB17_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB17_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB17_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB17_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI17_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB17_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB17_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_si64:
@@ -1122,49 +1076,45 @@ define i64 @test_roundeven_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call roundeven
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: mv s1, a1
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB17_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB17_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB17_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB17_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI17_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI17_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB17_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB17_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
@@ -1323,48 +1273,42 @@ define signext i32 @test_rint_si32(double %x) {
define i64 @test_rint_si64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_si64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: addi sp, sp, -32
-; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: call rint
; RV32IFD-NEXT: lui a0, %hi(.LCPI21_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0)
-; RV32IFD-NEXT: lui a0, %hi(.LCPI21_1)
-; RV32IFD-NEXT: fld fa4, %lo(.LCPI21_1)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: flt.d s0, fa5, fa0
-; RV32IFD-NEXT: neg s1, s0
-; RV32IFD-NEXT: fle.d s2, fa4, fa0
-; RV32IFD-NEXT: neg s3, s2
+; RV32IFD-NEXT: fle.d s0, fa5, fa0
; RV32IFD-NEXT: call __fixdfdi
-; RV32IFD-NEXT: and a0, s3, a0
-; RV32IFD-NEXT: or a0, s1, a0
-; RV32IFD-NEXT: feq.d a2, fs0, fs0
-; RV32IFD-NEXT: neg a2, a2
-; RV32IFD-NEXT: lui a4, 524288
-; RV32IFD-NEXT: li a5, 1
; RV32IFD-NEXT: lui a3, 524288
-; RV32IFD-NEXT: bne s2, a5, .LBB21_2
+; RV32IFD-NEXT: li a4, 1
+; RV32IFD-NEXT: lui a2, 524288
+; RV32IFD-NEXT: bne s0, a4, .LBB21_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: mv a3, a1
+; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB21_2:
-; RV32IFD-NEXT: and a0, a2, a0
-; RV32IFD-NEXT: beqz s0, .LBB21_4
+; RV32IFD-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_1)(a1)
+; RV32IFD-NEXT: flt.d a4, fa5, fs0
+; RV32IFD-NEXT: beqz a4, .LBB21_4
; RV32IFD-NEXT: # %bb.3:
-; RV32IFD-NEXT: addi a3, a4, -1
+; RV32IFD-NEXT: addi a2, a3, -1
; RV32IFD-NEXT: .LBB21_4:
-; RV32IFD-NEXT: and a1, a2, a3
-; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: feq.d a1, fs0, fs0
+; RV32IFD-NEXT: neg a3, a1
+; RV32IFD-NEXT: and a1, a3, a2
+; RV32IFD-NEXT: neg a2, a4
+; RV32IFD-NEXT: neg a4, s0
+; RV32IFD-NEXT: and a0, a4, a0
+; RV32IFD-NEXT: or a0, a2, a0
+; RV32IFD-NEXT: and a0, a3, a0
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT: addi sp, sp, 32
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_rint_si64:
@@ -1378,49 +1322,45 @@ define i64 @test_rint_si64(double %x) nounwind {
;
; RV32IZFINXZDINX-LABEL: test_rint_si64:
; RV32IZFINXZDINX: # %bb.0:
-; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
-; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
+; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINXZDINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call rint
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_0)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_0+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINXZDINX-NEXT: mv s0, a0
; RV32IZFINXZDINX-NEXT: mv s1, a1
-; RV32IZFINXZDINX-NEXT: fle.d s2, a2, s0
-; RV32IZFINXZDINX-NEXT: neg s3, s2
; RV32IZFINXZDINX-NEXT: call __fixdfdi
-; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_1)
-; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_1+4)(a2)
-; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
-; RV32IZFINXZDINX-NEXT: and a0, s3, a0
-; RV32IZFINXZDINX-NEXT: flt.d a3, a2, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a3
-; RV32IZFINXZDINX-NEXT: or a0, a2, a0
-; RV32IZFINXZDINX-NEXT: feq.d a2, s0, s0
-; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_0)
+; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_0+4)(a2)
+; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
+; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: lui a5, 524288
-; RV32IZFINXZDINX-NEXT: li a6, 1
-; RV32IZFINXZDINX-NEXT: lui a4, 524288
-; RV32IZFINXZDINX-NEXT: bne s2, a6, .LBB21_2
+; RV32IZFINXZDINX-NEXT: li a4, 1
+; RV32IZFINXZDINX-NEXT: lui a3, 524288
+; RV32IZFINXZDINX-NEXT: bne a2, a4, .LBB21_2
; RV32IZFINXZDINX-NEXT: # %bb.1:
-; RV32IZFINXZDINX-NEXT: mv a4, a1
+; RV32IZFINXZDINX-NEXT: mv a3, a1
; RV32IZFINXZDINX-NEXT: .LBB21_2:
-; RV32IZFINXZDINX-NEXT: and a0, a2, a0
-; RV32IZFINXZDINX-NEXT: beqz a3, .LBB21_4
+; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI21_1)
+; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI21_1)(a1)
+; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI21_1+4)(a1)
+; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0
+; RV32IZFINXZDINX-NEXT: beqz a4, .LBB21_4
; RV32IZFINXZDINX-NEXT: # %bb.3:
-; RV32IZFINXZDINX-NEXT: addi a4, a5, -1
+; RV32IZFINXZDINX-NEXT: addi a3, a5, -1
; RV32IZFINXZDINX-NEXT: .LBB21_4:
-; RV32IZFINXZDINX-NEXT: and a1, a2, a4
-; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
+; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0
+; RV32IZFINXZDINX-NEXT: neg a5, a1
+; RV32IZFINXZDINX-NEXT: and a1, a5, a3
+; RV32IZFINXZDINX-NEXT: neg a2, a2
+; RV32IZFINXZDINX-NEXT: and a0, a2, a0
+; RV32IZFINXZDINX-NEXT: neg a2, a4
+; RV32IZFINXZDINX-NEXT: or a0, a2, a0
+; RV32IZFINXZDINX-NEXT: and a0, a5, a0
+; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si64:
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 9fb78d4c4d52..2c7315fbe59f 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -629,23 +629,23 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB12_2
; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB12_2: # %start
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB12_4
+; RV32IF-NEXT: beqz a3, .LBB12_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB12_4: # %start
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -668,37 +668,35 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
-; RV32IZFINX-NEXT: lui a2, %hi(.LCPI12_0)
-; RV32IZFINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
-; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
-; RV32IZFINX-NEXT: or a0, a2, a0
-; RV32IZFINX-NEXT: feq.s a2, s0, s0
-; RV32IZFINX-NEXT: neg a2, a2
-; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
+; RV32IZFINX-NEXT: lui a2, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB12_2
; RV32IZFINX-NEXT: # %bb.1: # %start
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a2, a1
; RV32IZFINX-NEXT: .LBB12_2: # %start
-; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB12_4
+; RV32IZFINX-NEXT: lui a1, %hi(.LCPI12_0)
+; RV32IZFINX-NEXT: lw a1, %lo(.LCPI12_0)(a1)
+; RV32IZFINX-NEXT: flt.s a3, a1, s0
+; RV32IZFINX-NEXT: beqz a3, .LBB12_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a2, a4, -1
; RV32IZFINX-NEXT: .LBB12_4: # %start
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: feq.s a1, s0, s0
+; RV32IZFINX-NEXT: neg a4, a1
+; RV32IZFINX-NEXT: and a1, a4, a2
+; RV32IZFINX-NEXT: neg a2, s1
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: neg a2, a3
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a0, a4, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index c72e69c92a13..4f747c278da0 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -59,23 +59,23 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI1_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB1_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB1_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB1_6
+; RV32IF-NEXT: beqz a3, .LBB1_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB1_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -117,23 +117,23 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB1_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB1_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB1_6
+; RV32IZFINX-NEXT: beqz a3, .LBB1_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB1_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -321,23 +321,23 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB5_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB5_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB5_6
+; RV32IF-NEXT: beqz a3, .LBB5_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB5_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -379,23 +379,23 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB5_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB5_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB5_6
+; RV32IZFINX-NEXT: beqz a3, .LBB5_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB5_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -583,23 +583,23 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI9_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB9_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB9_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB9_6
+; RV32IF-NEXT: beqz a3, .LBB9_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB9_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -641,23 +641,23 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB9_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB9_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB9_6
+; RV32IZFINX-NEXT: beqz a3, .LBB9_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB9_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -845,23 +845,23 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI13_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB13_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB13_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB13_6
+; RV32IF-NEXT: beqz a3, .LBB13_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB13_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -903,23 +903,23 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB13_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB13_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB13_6
+; RV32IZFINX-NEXT: beqz a3, .LBB13_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB13_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1107,23 +1107,23 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB17_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB17_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB17_6
+; RV32IF-NEXT: beqz a3, .LBB17_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB17_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1165,23 +1165,23 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB17_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB17_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB17_6
+; RV32IZFINX-NEXT: beqz a3, .LBB17_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB17_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1369,23 +1369,23 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IF-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
; RV32IF-NEXT: and a0, s1, a0
-; RV32IF-NEXT: flt.s a4, fa5, fs0
-; RV32IF-NEXT: neg a2, a4
+; RV32IF-NEXT: flt.s a3, fa5, fs0
+; RV32IF-NEXT: neg a2, a3
; RV32IF-NEXT: or a0, a2, a0
; RV32IF-NEXT: feq.s a2, fs0, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: lui a5, 524288
-; RV32IF-NEXT: lui a3, 524288
+; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: beqz s0, .LBB21_4
; RV32IF-NEXT: # %bb.3:
-; RV32IF-NEXT: mv a3, a1
+; RV32IF-NEXT: mv a4, a1
; RV32IF-NEXT: .LBB21_4:
; RV32IF-NEXT: and a0, a2, a0
-; RV32IF-NEXT: beqz a4, .LBB21_6
+; RV32IF-NEXT: beqz a3, .LBB21_6
; RV32IF-NEXT: # %bb.5:
-; RV32IF-NEXT: addi a3, a5, -1
+; RV32IF-NEXT: addi a4, a5, -1
; RV32IF-NEXT: .LBB21_6:
-; RV32IF-NEXT: and a1, a2, a3
+; RV32IF-NEXT: and a1, a2, a4
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1427,23 +1427,23 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
-; RV32IZFINX-NEXT: flt.s a4, a2, s0
-; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: flt.s a3, a2, s0
+; RV32IZFINX-NEXT: neg a2, a3
; RV32IZFINX-NEXT: or a0, a2, a0
; RV32IZFINX-NEXT: feq.s a2, s0, s0
; RV32IZFINX-NEXT: neg a2, a2
; RV32IZFINX-NEXT: lui a5, 524288
-; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB21_4
; RV32IZFINX-NEXT: # %bb.3:
-; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: mv a4, a1
; RV32IZFINX-NEXT: .LBB21_4:
; RV32IZFINX-NEXT: and a0, a2, a0
-; RV32IZFINX-NEXT: beqz a4, .LBB21_6
+; RV32IZFINX-NEXT: beqz a3, .LBB21_6
; RV32IZFINX-NEXT: # %bb.5:
-; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: addi a4, a5, -1
; RV32IZFINX-NEXT: .LBB21_6:
-; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: and a1, a2, a4
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index bc63b3961952..16c096290720 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2460,47 +2460,42 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
;
; RV32ID-ILP32-LABEL: fcvt_l_h_sat:
; RV32ID-ILP32: # %bb.0: # %start
-; RV32ID-ILP32-NEXT: addi sp, sp, -32
-; RV32ID-ILP32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: addi sp, sp, -16
+; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
-; RV32ID-ILP32-NEXT: fsw fa4, 8(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: flt.s s0, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s1, s0
; RV32ID-ILP32-NEXT: lui a1, 913408
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a1
-; RV32ID-ILP32-NEXT: fle.s s2, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s3, s2
+; RV32ID-ILP32-NEXT: fsw fa4, 4(sp) # 4-byte Folded Spill
+; RV32ID-ILP32-NEXT: fle.s s0, fa5, fa4
; RV32ID-ILP32-NEXT: call __fixsfdi
-; RV32ID-ILP32-NEXT: and a0, s3, a0
-; RV32ID-ILP32-NEXT: or a0, s1, a0
-; RV32ID-ILP32-NEXT: flw fa5, 8(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: feq.s a2, fa5, fa5
-; RV32ID-ILP32-NEXT: neg a2, a2
; RV32ID-ILP32-NEXT: lui a4, 524288
-; RV32ID-ILP32-NEXT: lui a3, 524288
-; RV32ID-ILP32-NEXT: beqz s2, .LBB10_2
+; RV32ID-ILP32-NEXT: lui a2, 524288
+; RV32ID-ILP32-NEXT: beqz s0, .LBB10_2
; RV32ID-ILP32-NEXT: # %bb.1: # %start
-; RV32ID-ILP32-NEXT: mv a3, a1
+; RV32ID-ILP32-NEXT: mv a2, a1
; RV32ID-ILP32-NEXT: .LBB10_2: # %start
-; RV32ID-ILP32-NEXT: and a0, a2, a0
-; RV32ID-ILP32-NEXT: beqz s0, .LBB10_4
+; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
+; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32ID-ILP32-NEXT: flw fa4, 4(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: flt.s a3, fa5, fa4
+; RV32ID-ILP32-NEXT: fmv.s fa5, fa4
+; RV32ID-ILP32-NEXT: beqz a3, .LBB10_4
; RV32ID-ILP32-NEXT: # %bb.3:
-; RV32ID-ILP32-NEXT: addi a3, a4, -1
+; RV32ID-ILP32-NEXT: addi a2, a4, -1
; RV32ID-ILP32-NEXT: .LBB10_4: # %start
-; RV32ID-ILP32-NEXT: and a1, a2, a3
-; RV32ID-ILP32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32ID-ILP32-NEXT: addi sp, sp, 32
+; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
+; RV32ID-ILP32-NEXT: neg a4, a1
+; RV32ID-ILP32-NEXT: and a1, a4, a2
+; RV32ID-ILP32-NEXT: neg a2, a3
+; RV32ID-ILP32-NEXT: neg a3, s0
+; RV32ID-ILP32-NEXT: and a0, a3, a0
+; RV32ID-ILP32-NEXT: or a0, a2, a0
+; RV32ID-ILP32-NEXT: and a0, a4, a0
+; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ID-ILP32-NEXT: addi sp, sp, 16
; RV32ID-ILP32-NEXT: ret
;
; RV64ID-LP64-LABEL: fcvt_l_h_sat:
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index dd1115b20225..9c95210bfa7c 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -120,16 +120,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI1_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB1_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB1_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -137,11 +137,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB1_6
+; RV32IZFH-NEXT: beqz a3, .LBB1_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB1_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_si64:
@@ -179,16 +179,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB1_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB1_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -196,11 +196,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB1_6
+; RV32IZHINX-NEXT: beqz a3, .LBB1_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB1_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_floor_si64:
@@ -251,16 +251,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI1_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB1_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB1_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -268,11 +268,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB1_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB1_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB1_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_floor_si64:
@@ -324,16 +324,16 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB1_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB1_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -341,11 +341,11 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB1_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB1_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB1_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_floor_si64:
@@ -836,16 +836,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI5_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB5_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB5_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -853,11 +853,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB5_6
+; RV32IZFH-NEXT: beqz a3, .LBB5_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB5_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_si64:
@@ -895,16 +895,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB5_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB5_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -912,11 +912,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB5_6
+; RV32IZHINX-NEXT: beqz a3, .LBB5_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB5_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_ceil_si64:
@@ -967,16 +967,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB5_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB5_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -984,11 +984,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB5_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB5_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB5_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_ceil_si64:
@@ -1040,16 +1040,16 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB5_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB5_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1057,11 +1057,11 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB5_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB5_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB5_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_ceil_si64:
@@ -1552,16 +1552,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI9_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB9_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB9_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1569,11 +1569,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB9_6
+; RV32IZFH-NEXT: beqz a3, .LBB9_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB9_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_si64:
@@ -1611,16 +1611,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB9_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB9_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1628,11 +1628,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB9_6
+; RV32IZHINX-NEXT: beqz a3, .LBB9_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB9_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_trunc_si64:
@@ -1683,16 +1683,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI9_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB9_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB9_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1700,11 +1700,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB9_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB9_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB9_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_trunc_si64:
@@ -1756,16 +1756,16 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB9_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB9_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1773,11 +1773,11 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB9_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB9_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB9_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_trunc_si64:
@@ -2268,16 +2268,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI13_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB13_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB13_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2285,11 +2285,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB13_6
+; RV32IZFH-NEXT: beqz a3, .LBB13_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB13_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_si64:
@@ -2327,16 +2327,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB13_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB13_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2344,11 +2344,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB13_6
+; RV32IZHINX-NEXT: beqz a3, .LBB13_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB13_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_round_si64:
@@ -2399,16 +2399,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI13_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB13_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB13_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2416,11 +2416,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB13_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB13_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB13_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_round_si64:
@@ -2472,16 +2472,16 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB13_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB13_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2489,11 +2489,11 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB13_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB13_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB13_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_round_si64:
@@ -2984,16 +2984,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI17_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB17_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB17_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3001,11 +3001,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB17_6
+; RV32IZFH-NEXT: beqz a3, .LBB17_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB17_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_si64:
@@ -3043,16 +3043,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB17_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB17_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3060,11 +3060,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB17_6
+; RV32IZHINX-NEXT: beqz a3, .LBB17_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB17_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_roundeven_si64:
@@ -3115,16 +3115,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI17_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB17_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB17_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3132,11 +3132,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB17_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB17_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB17_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_roundeven_si64:
@@ -3188,16 +3188,16 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB17_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB17_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3205,11 +3205,11 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB17_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB17_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB17_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_roundeven_si64:
@@ -3700,16 +3700,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI21_1)(a2)
; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: flt.s a4, fa5, fs0
-; RV32IZFH-NEXT: neg a2, a4
+; RV32IZFH-NEXT: flt.s a3, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a3
; RV32IZFH-NEXT: or a0, a2, a0
; RV32IZFH-NEXT: feq.s a2, fs0, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: lui a5, 524288
-; RV32IZFH-NEXT: lui a3, 524288
+; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: beqz s0, .LBB21_4
; RV32IZFH-NEXT: # %bb.3:
-; RV32IZFH-NEXT: mv a3, a1
+; RV32IZFH-NEXT: mv a4, a1
; RV32IZFH-NEXT: .LBB21_4:
; RV32IZFH-NEXT: and a0, a2, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3717,11 +3717,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
-; RV32IZFH-NEXT: beqz a4, .LBB21_6
+; RV32IZFH-NEXT: beqz a3, .LBB21_6
; RV32IZFH-NEXT: # %bb.5:
-; RV32IZFH-NEXT: addi a3, a5, -1
+; RV32IZFH-NEXT: addi a4, a5, -1
; RV32IZFH-NEXT: .LBB21_6:
-; RV32IZFH-NEXT: and a1, a2, a3
+; RV32IZFH-NEXT: and a1, a2, a4
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_rint_si64:
@@ -3759,16 +3759,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
-; RV32IZHINX-NEXT: flt.s a4, a2, s0
-; RV32IZHINX-NEXT: neg a2, a4
+; RV32IZHINX-NEXT: flt.s a3, a2, s0
+; RV32IZHINX-NEXT: neg a2, a3
; RV32IZHINX-NEXT: or a0, a2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
; RV32IZHINX-NEXT: neg a2, a2
; RV32IZHINX-NEXT: lui a5, 524288
-; RV32IZHINX-NEXT: lui a3, 524288
+; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: beqz s1, .LBB21_4
; RV32IZHINX-NEXT: # %bb.3:
-; RV32IZHINX-NEXT: mv a3, a1
+; RV32IZHINX-NEXT: mv a4, a1
; RV32IZHINX-NEXT: .LBB21_4:
; RV32IZHINX-NEXT: and a0, a2, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3776,11 +3776,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
-; RV32IZHINX-NEXT: beqz a4, .LBB21_6
+; RV32IZHINX-NEXT: beqz a3, .LBB21_6
; RV32IZHINX-NEXT: # %bb.5:
-; RV32IZHINX-NEXT: addi a3, a5, -1
+; RV32IZHINX-NEXT: addi a4, a5, -1
; RV32IZHINX-NEXT: .LBB21_6:
-; RV32IZHINX-NEXT: and a1, a2, a3
+; RV32IZHINX-NEXT: and a1, a2, a4
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: test_rint_si64:
@@ -3831,16 +3831,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI21_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s1, a0
-; RV32IZFHMIN-NEXT: flt.s a4, fa5, fs0
-; RV32IZFHMIN-NEXT: neg a2, a4
+; RV32IZFHMIN-NEXT: flt.s a3, fa5, fs0
+; RV32IZFHMIN-NEXT: neg a2, a3
; RV32IZFHMIN-NEXT: or a0, a2, a0
; RV32IZFHMIN-NEXT: feq.s a2, fs0, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: lui a5, 524288
-; RV32IZFHMIN-NEXT: lui a3, 524288
+; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB21_4
; RV32IZFHMIN-NEXT: # %bb.3:
-; RV32IZFHMIN-NEXT: mv a3, a1
+; RV32IZFHMIN-NEXT: mv a4, a1
; RV32IZFHMIN-NEXT: .LBB21_4:
; RV32IZFHMIN-NEXT: and a0, a2, a0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3848,11 +3848,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: flw fs0, 0(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
-; RV32IZFHMIN-NEXT: beqz a4, .LBB21_6
+; RV32IZFHMIN-NEXT: beqz a3, .LBB21_6
; RV32IZFHMIN-NEXT: # %bb.5:
-; RV32IZFHMIN-NEXT: addi a3, a5, -1
+; RV32IZFHMIN-NEXT: addi a4, a5, -1
; RV32IZFHMIN-NEXT: .LBB21_6:
-; RV32IZFHMIN-NEXT: and a1, a2, a3
+; RV32IZFHMIN-NEXT: and a1, a2, a4
; RV32IZFHMIN-NEXT: ret
;
; RV64IZFHMIN-LABEL: test_rint_si64:
@@ -3904,16 +3904,16 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
-; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0
-; RV32IZHINXMIN-NEXT: neg a2, a4
+; RV32IZHINXMIN-NEXT: flt.s a3, a2, s0
+; RV32IZHINXMIN-NEXT: neg a2, a3
; RV32IZHINXMIN-NEXT: or a0, a2, a0
; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0
; RV32IZHINXMIN-NEXT: neg a2, a2
; RV32IZHINXMIN-NEXT: lui a5, 524288
-; RV32IZHINXMIN-NEXT: lui a3, 524288
+; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: beqz s1, .LBB21_4
; RV32IZHINXMIN-NEXT: # %bb.3:
-; RV32IZHINXMIN-NEXT: mv a3, a1
+; RV32IZHINXMIN-NEXT: mv a4, a1
; RV32IZHINXMIN-NEXT: .LBB21_4:
; RV32IZHINXMIN-NEXT: and a0, a2, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3921,11 +3921,11 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
-; RV32IZHINXMIN-NEXT: beqz a4, .LBB21_6
+; RV32IZHINXMIN-NEXT: beqz a3, .LBB21_6
; RV32IZHINXMIN-NEXT: # %bb.5:
-; RV32IZHINXMIN-NEXT: addi a3, a5, -1
+; RV32IZHINXMIN-NEXT: addi a4, a5, -1
; RV32IZHINXMIN-NEXT: .LBB21_6:
-; RV32IZHINXMIN-NEXT: and a1, a2, a3
+; RV32IZHINXMIN-NEXT: and a1, a2, a4
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: test_rint_si64:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
index 71769a800c06..c480ba800c69 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -75,24 +75,10 @@ define double @constraint_f_double_abi_name(double %a) nounwind {
define double @constraint_gpr(double %x) {
; RV32F-LABEL: constraint_gpr:
; RV32F: # %bb.0:
-; RV32F-NEXT: addi sp, sp, -32
-; RV32F-NEXT: .cfi_def_cfa_offset 32
-; RV32F-NEXT: sw a0, 8(sp)
-; RV32F-NEXT: sw a1, 12(sp)
-; RV32F-NEXT: fld fa5, 8(sp)
-; RV32F-NEXT: fsd fa5, 24(sp)
-; RV32F-NEXT: lw a0, 24(sp)
-; RV32F-NEXT: lw a1, 28(sp)
+; RV32F-NEXT: .cfi_def_cfa_offset 0
; RV32F-NEXT: #APP
; RV32F-NEXT: mv a0, a0
; RV32F-NEXT: #NO_APP
-; RV32F-NEXT: sw a1, 20(sp)
-; RV32F-NEXT: sw a0, 16(sp)
-; RV32F-NEXT: fld fa5, 16(sp)
-; RV32F-NEXT: fsd fa5, 8(sp)
-; RV32F-NEXT: lw a0, 8(sp)
-; RV32F-NEXT: lw a1, 12(sp)
-; RV32F-NEXT: addi sp, sp, 32
; RV32F-NEXT: ret
;
; RV64F-LABEL: constraint_gpr:
diff --git a/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir b/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir
new file mode 100644
index 000000000000..89a6ca7af9be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/make-compressible-zbc.mir
@@ -0,0 +1,585 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+zcb -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+zcb -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+
+--- |
+ define void @store_common_value_i8(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store i8 0, ptr %a, align 1
+ store i8 0, ptr %b, align 1
+ store i8 0, ptr %c, align 1
+ ret void
+ }
+
+ define void @store_common_value_i16(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store i16 0, ptr %a, align 2
+ store i16 0, ptr %b, align 2
+ store i16 0, ptr %c, align 2
+ ret void
+ }
+
+ define void @store_common_ptr_i8(ptr %p) #0 {
+ entry:
+ store volatile i8 1, ptr %p, align 1
+ store volatile i8 3, ptr %p, align 1
+ store volatile i8 5, ptr %p, align 1
+ ret void
+ }
+
+ define void @store_common_ptr_i16(ptr %p) #0 {
+ entry:
+ store volatile i16 1, ptr %p, align 2
+ store volatile i16 3, ptr %p, align 2
+ store volatile i16 5, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_i8(ptr %p) #0 {
+ entry:
+ %0 = load volatile i8, ptr %p, align 1
+ %a = sext i8 %0 to i32
+ %1 = load volatile i8, ptr %p, align 1
+ %2 = load volatile i8, ptr %p, align 1
+ ret void
+ }
+
+ define void @load_common_ptr_s16(ptr %p) #0 {
+ entry:
+ %0 = load volatile i16, ptr %p, align 2
+ %1 = load volatile i16, ptr %p, align 2
+ %2 = load volatile i16, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_u16(ptr %p) #0 {
+ entry:
+ %0 = load volatile i16, ptr %p, align 2
+ %1 = load volatile i16, ptr %p, align 2
+ %2 = load volatile i16, ptr %p, align 2
+ ret void
+ }
+
+ define void @store_large_offset_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile i8 1, ptr %0, align 1
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile i8 3, ptr %1, align 1
+ %2 = getelementptr inbounds i8, ptr %p, i8 102
+ store volatile i8 5, ptr %2, align 1
+ %3 = getelementptr inbounds i8, ptr %p, i8 103
+ store volatile i8 7, ptr %3, align 1
+ ret void
+ }
+
+ define void @store_large_offset_i16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ store volatile i16 1, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ store volatile i16 3, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ store volatile i16 3, ptr %1, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ store volatile i16 7, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ %a = load volatile i8, ptr %0
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ %b = load volatile i8, ptr %1
+ %2 = getelementptr inbounds i8, ptr %p, i8 102
+ %c = load volatile i8, ptr %2
+ %3 = getelementptr inbounds i8, ptr %p, i8 103
+ %d = load volatile i8, ptr %3
+ ret void
+ }
+
+ define void @load_large_offset_s16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_u16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 101
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+ define void @store_large_offset_no_opt_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile i8 1, ptr %0, align 1
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile i8 3, ptr %1, align 1
+ %2 = getelementptr inbounds i8, ptr %p, i8 104
+ store volatile i8 5, ptr %2, align 1
+ ret void
+ }
+
+ define void @store_large_offset_no_opt_i16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 100
+ %b = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %2, align 2
+ %3 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_i8(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ %a = load volatile i8, ptr %0
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ %b = load volatile i8, ptr %1
+ %2 = getelementptr inbounds i8, ptr %p, i8 103
+ %c = load volatile i8, ptr %2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_s16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %2, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_u16(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i16, ptr %p, i16 100
+ %a = load volatile i16, ptr %0, align 2
+ %1 = getelementptr inbounds i16, ptr %p, i16 101
+ %c = load volatile i16, ptr %1, align 2
+ %2 = getelementptr inbounds i16, ptr %p, i16 102
+ %d = load volatile i16, ptr %2, align 2
+ ret void
+ }
+ attributes #0 = { minsize }
+
+...
+---
+name: store_common_value_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_i8
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x0, 0
+ ; CHECK-NEXT: SB $x13, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+ ; CHECK-NEXT: SB $x13, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+ ; CHECK-NEXT: SB $x13, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SB $x0, killed renamable $x10, 0 :: (store (s8) into %ir.a)
+ SB $x0, killed renamable $x11, 0 :: (store (s8) into %ir.b)
+ SB $x0, killed renamable $x12, 0 :: (store (s8) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_value_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_i16
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x0, 0
+ ; CHECK-NEXT: SH $x13, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ ; CHECK-NEXT: SH $x13, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ ; CHECK-NEXT: SH $x13, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SH $x0, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ SH $x0, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ SH $x0, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SB killed renamable $x10, $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x10, $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x10, killed $x11, 0 :: (volatile store (s8) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = ADDI $x0, 1
+ SB killed renamable $x10, renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ renamable $x10 = ADDI $x0, 3
+ SB killed renamable $x10, renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ renamable $x10 = ADDI $x0, 5
+ SB killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s8) into %ir.p)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_i16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SH killed renamable $x10, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x10, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x10, killed $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = ADDI $x0, 1
+ SH killed renamable $x10, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = ADDI $x0, 3
+ SH killed renamable $x10, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = ADDI $x0, 5
+ SH killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_i8
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LBU killed $x11, 0 :: (volatile load (s8) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ dead $x10 = LBU renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ dead $x10 = LBU killed renamable $x16, 0 :: (volatile load (s8) from %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_s16
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LH killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LH renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LH killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_u16
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10 = LHU killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LHU renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10 = LHU killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: store_large_offset_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: store_large_offset_i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: $x12 = ADDI $x10, 100
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 0 :: (volatile store (s8) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 1 :: (volatile store (s8) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x11, $x12, 2 :: (volatile store (s8) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
+ ; CHECK-NEXT: SB killed renamable $x11, killed $x12, 3 :: (volatile store (s8) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SB killed renamable $x11, renamable $x10, 100 :: (volatile store (s8) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SB killed renamable $x11, renamable $x10, 101 :: (volatile store (s8) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SB killed renamable $x11, renamable $x10, 102 :: (volatile store (s8) into %ir.2)
+ renamable $x11 = ADDI $x0, 7
+ SB killed renamable $x11, killed renamable $x10, 103 :: (volatile store (s8) into %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: store_large_offset_i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: $x12 = ADDI $x10, 200
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 0 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 0 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x11, $x12, 2 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
+ ; CHECK-NEXT: SH killed renamable $x11, killed $x12, 2 :: (volatile store (s16) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SH killed renamable $x11, renamable $x10, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SH killed renamable $x11, renamable $x10, 200 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SH killed renamable $x11, renamable $x10, 202 :: (volatile store (s16) into %ir.2)
+ renamable $x11 = ADDI $x0, 7
+ SH killed renamable $x11, killed renamable $x10, 202 :: (volatile store (s16) into %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 0 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 1 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LBU $x11, 2 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LBU killed $x11, 3 :: (volatile load (s8) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LBU renamable $x16, 102 :: (volatile load (s8) from %ir.2)
+ dead $x10 = LBU killed renamable $x16, 103 :: (volatile load (s8) from %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LH $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LH killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LH renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10 = LH killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LHU $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10 = LHU killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10 = LHU renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10 = LHU killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 100 :: (volatile store (s8) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 101 :: (volatile store (s8) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SB killed renamable $x11, renamable $x16, 104 :: (volatile store (s8) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SB killed renamable $x11, renamable $x16, 100 :: (volatile store (s8) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SB killed renamable $x11, renamable $x16, 101 :: (volatile store (s8) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SB killed renamable $x11, renamable $x16, 104 :: (volatile store (s8) into %ir.2)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_i16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_i16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
+ ; CHECK-NEXT: SH killed renamable $x11, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = ADDI $x0, 1
+ SH killed renamable $x11, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = ADDI $x0, 3
+ SH killed renamable $x11, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = ADDI $x0, 5
+ SH killed renamable $x11, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_i8
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_i8
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LBU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LBU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LBU renamable $x16, 101 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LBU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_s16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_s16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LH renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LH renamable $x16, 102 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LH killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LH renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LH renamable $x16, 102 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LH killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_u16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_u16
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10 = LHU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ ; CHECK-NEXT: dead $x10 = LHU renamable $x16, 102 :: (volatile load (s8) from %ir.1)
+ ; CHECK-NEXT: dead $x10 = LHU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10 = LHU renamable $x16, 100 :: (volatile load (s8) from %ir.0)
+ dead $x10 = LHU renamable $x16, 102 :: (volatile load (s8) from %ir.1)
+ dead $x10 = LHU killed renamable $x16, 104 :: (volatile load (s8) from %ir.2)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
index 841d0e6d65da..2cca042bebee 100644
--- a/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
+++ b/llvm/test/CodeGen/RISCV/misched-postra-direction.mir
@@ -1,5 +1,15 @@
-# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched -enable-post-misched -debug-only=machine-scheduler -misched-dump-schedule-trace -misched-postra-direction=topdown -o - %s 2>&1 | FileCheck --check-prefix=TOPDOWN %s
-# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched -enable-post-misched -debug-only=machine-scheduler -misched-dump-schedule-trace -misched-postra-direction=bottomup -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=topdown \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=TOPDOWN %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=bottomup \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BOTTOMUP %s
+# RUN: llc -mtriple=riscv64 -mcpu=sifive-x280 -run-pass=postmisched \
+# RUN: -enable-post-misched -debug-only=machine-scheduler \
+# RUN: -misched-dump-schedule-trace -misched-postra-direction=bidirectional \
+# RUN: -o - %s 2>&1 | FileCheck --check-prefix=BIDIRECTIONAL %s
# REQUIRES: asserts
@@ -51,3 +61,9 @@ body: |
# BOTTOMUP-NEXT: SU(1): renamable $x13 = ADD renamable $x11, renamable $x10
# BOTTOMUP-NEXT: SU(0): renamable $x12 = MUL renamable $x11, renamable $x10
# BOTTOMUP-NEXT: SU(2): renamable $x14 = DIVW renamable $x12, renamable $x13
+
+# BIDIRECTIONAL: *** Final schedule for %bb.0 ***
+# BIDIRECTIONAL-NEXT: * Schedule table (Bidirectional): not implemented
+# BIDIRECTIONAL-NEXT: SU(1): renamable $x13 = ADD renamable $x11, renamable $x10
+# BIDIRECTIONAL-NEXT: SU(0): renamable $x12 = MUL renamable $x11, renamable $x10
+# BIDIRECTIONAL-NEXT: SU(2): renamable $x14 = DIVW renamable $x12, renamable $x13
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
index 3731b9719445..b45ab135fa1c 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
@@ -11,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB0_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -40,12 +38,11 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
@@ -64,19 +61,16 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -87,28 +81,26 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -119,35 +111,21 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB1_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32XTHEADBB-LABEL: ctlz_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 36c107061795..7e6c3f9c87d2 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -11,8 +11,6 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a0, .LBB0_2
; RV32I-NEXT: # %bb.1: # %cond.false
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -40,12 +38,11 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
@@ -64,19 +61,16 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-LABEL: ctlz_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: lui a2, 349525
+; RV32I-NEXT: addi a4, a2, 1365
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a3, a2, 819
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi a2, a2, -241
+; RV32I-NEXT: bnez a1, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: srli a1, a0, 1
+; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -87,28 +81,26 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s4, a2, 1365
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s5, a1, 819
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s6, a1, -241
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s3, a1, 257
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: or a0, s2, a0
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a0, a1, 1
+; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: srli a1, a0, 2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 4
@@ -119,35 +111,21 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: and a1, a1, s4
+; RV32I-NEXT: and a1, a1, a4
; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: and a1, a0, s5
+; RV32I-NEXT: and a1, a0, a3
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s5
+; RV32I-NEXT: and a0, a0, a3
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: srli a1, a0, 4
; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s6
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: bnez s0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a0, s1, 24
-; RV32I-NEXT: .LBB1_3:
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctlz_i64:
@@ -275,8 +253,6 @@ declare i32 @llvm.ctpop.i32(i32)
define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-LABEL: ctpop_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: lui a2, 349525
; RV32I-NEXT: addi a2, a2, 1365
@@ -293,12 +269,11 @@ define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: lui a1, 61681
; RV32I-NEXT: addi a1, a1, -241
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_i32:
@@ -390,58 +365,42 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV32I-LABEL: ctpop_v2i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s3, a2, 1365
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s4, a1, 819
-; RV32I-NEXT: and a1, a0, s4
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a0, a2
; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s5, a1, -241
-; RV32I-NEXT: and a0, a0, s5
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s2, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s4
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s5
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a1, a0, 24
-; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a4, a0
+; RV32I-NEXT: srli a4, a0, 4
+; RV32I-NEXT: add a0, a0, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a5, a0, 8
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: slli a5, a0, 16
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: srli a5, a1, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: and a3, a1, a2
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 24
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_v2i32:
@@ -558,59 +517,44 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV32I-LABEL: ctpop_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -32
-; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: srli a0, a1, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s2, a2, 1365
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: lui a0, 209715
-; RV32I-NEXT: addi s3, a0, 819
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: srli a2, a1, 1
+; RV32I-NEXT: lui a3, 349525
+; RV32I-NEXT: addi a3, a3, 1365
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: addi a2, a2, 819
+; RV32I-NEXT: and a4, a1, a2
; RV32I-NEXT: srli a1, a1, 2
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s4, a1, -241
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s0, 1
-; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: sub s0, s0, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, s3
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: add a1, a4, a1
+; RV32I-NEXT: srli a4, a1, 4
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: lui a4, 61681
+; RV32I-NEXT: addi a4, a4, -241
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a5, a1, 8
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: slli a5, a1, 16
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: srli a5, a0, 1
+; RV32I-NEXT: and a3, a5, a3
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: and a3, a0, a2
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a3, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
+; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_i64:
@@ -738,99 +682,82 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-LABEL: ctpop_v2i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -48
-; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lw a0, 4(a1)
-; RV32I-NEXT: lw s2, 8(a1)
-; RV32I-NEXT: lw s5, 12(a1)
-; RV32I-NEXT: lw s6, 0(a1)
-; RV32I-NEXT: srli a1, a0, 1
-; RV32I-NEXT: lui a2, 349525
-; RV32I-NEXT: addi s3, a2, 1365
-; RV32I-NEXT: and a1, a1, s3
-; RV32I-NEXT: sub a0, a0, a1
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi s4, a1, 819
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: lui a1, 61681
-; RV32I-NEXT: addi s7, a1, -241
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: lui a1, 4112
-; RV32I-NEXT: addi s1, a1, 257
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s8, a0, 24
-; RV32I-NEXT: srli a0, s6, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s6, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add s8, a0, s8
-; RV32I-NEXT: srli a0, s5, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s5, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli s5, a0, 24
-; RV32I-NEXT: srli a0, s2, 1
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: sub a0, s2, a0
-; RV32I-NEXT: and a1, a0, s4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s4
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: srli a1, a0, 4
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: and a0, a0, s7
-; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: add a0, a0, s5
-; RV32I-NEXT: sw zero, 12(s0)
-; RV32I-NEXT: sw zero, 4(s0)
-; RV32I-NEXT: sw a0, 8(s0)
-; RV32I-NEXT: sw s8, 0(s0)
-; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: lw a3, 4(a1)
+; RV32I-NEXT: lw a2, 8(a1)
+; RV32I-NEXT: lw a4, 12(a1)
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: srli a5, a3, 1
+; RV32I-NEXT: lui a6, 349525
+; RV32I-NEXT: addi a6, a6, 1365
+; RV32I-NEXT: and a5, a5, a6
+; RV32I-NEXT: sub a3, a3, a5
+; RV32I-NEXT: lui a5, 209715
+; RV32I-NEXT: addi a5, a5, 819
+; RV32I-NEXT: and a7, a3, a5
+; RV32I-NEXT: srli a3, a3, 2
+; RV32I-NEXT: and a3, a3, a5
+; RV32I-NEXT: add a3, a7, a3
+; RV32I-NEXT: srli a7, a3, 4
+; RV32I-NEXT: add a3, a3, a7
+; RV32I-NEXT: lui a7, 61681
+; RV32I-NEXT: addi a7, a7, -241
+; RV32I-NEXT: and a3, a3, a7
+; RV32I-NEXT: slli t0, a3, 8
+; RV32I-NEXT: add a3, a3, t0
+; RV32I-NEXT: slli t0, a3, 16
+; RV32I-NEXT: add a3, a3, t0
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: srli t0, a1, 1
+; RV32I-NEXT: and t0, t0, a6
+; RV32I-NEXT: sub a1, a1, t0
+; RV32I-NEXT: and t0, a1, a5
+; RV32I-NEXT: srli a1, a1, 2
+; RV32I-NEXT: and a1, a1, a5
+; RV32I-NEXT: add a1, t0, a1
+; RV32I-NEXT: srli t0, a1, 4
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: and a1, a1, a7
+; RV32I-NEXT: slli t0, a1, 8
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: slli t0, a1, 16
+; RV32I-NEXT: add a1, a1, t0
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: add a1, a1, a3
+; RV32I-NEXT: srli a3, a4, 1
+; RV32I-NEXT: and a3, a3, a6
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: and a3, a4, a5
+; RV32I-NEXT: srli a4, a4, 2
+; RV32I-NEXT: and a4, a4, a5
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: srli a4, a3, 4
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: and a3, a3, a7
+; RV32I-NEXT: slli a4, a3, 8
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: slli a4, a3, 16
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: srli a3, a3, 24
+; RV32I-NEXT: srli a4, a2, 1
+; RV32I-NEXT: and a4, a4, a6
+; RV32I-NEXT: sub a2, a2, a4
+; RV32I-NEXT: and a4, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a2, a4, a2
+; RV32I-NEXT: srli a4, a2, 4
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: and a2, a2, a7
+; RV32I-NEXT: slli a4, a2, 8
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: slli a4, a2, 16
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: sw zero, 12(a0)
+; RV32I-NEXT: sw zero, 4(a0)
+; RV32I-NEXT: sw a2, 8(a0)
+; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctpop_v2i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
index 73bfc6480b4d..acd63f24bb8f 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
@@ -317,8 +317,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -354,14 +352,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
index 7feef4dad411..b0e447b71178 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
@@ -307,8 +307,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -344,14 +342,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
@@ -623,8 +620,6 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-LABEL: ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -647,14 +642,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll b/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll
new file mode 100644
index 000000000000..23eae33739c9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-typepromotion.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=riscv64 -passes=typepromotion -S %s | FileCheck %s
+
+; Test that this does not crash
+define i16 @test(i8 %a, i32 %b) {
+; CHECK-LABEL: define i16 @test(
+; CHECK-SAME: i8 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[TMP1]] to i64
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = and i32 [[TMP0]], 255
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i16
+; CHECK-NEXT: ret i16 [[TMP7]]
+;
+entry:
+ %0 = zext i8 %a to i32
+ %1 = trunc i32 %b to i16
+ %2 = icmp eq i16 %1, 0
+ %3 = trunc i32 %0 to i8
+ %4 = zext i8 %3 to i16
+ %5 = xor i16 %4, %1
+ ret i16 %5
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index 1f62ea9f5681..6cdab888ffcd 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -11,8 +11,6 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB0_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -38,14 +36,13 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
@@ -66,8 +63,6 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -93,14 +88,13 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
@@ -125,50 +119,45 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
+; RV64I-NEXT: addiw a1, a0, -1
+; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: li a2, 32
+; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a2, a1, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: sub a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64XTHEADBB-LABEL: log2_ceil_i32:
@@ -189,48 +178,42 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-LABEL: findLastSet_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a1, a0, 1
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
+; RV64I-NEXT: xori a1, a1, 31
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBB-LABEL: findLastSet_i32:
@@ -256,10 +239,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -285,14 +264,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
@@ -317,8 +295,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -354,14 +330,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index f810f51f6bc0..c81c6aeaab89 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -209,6 +209,24 @@ define i64 @sh1adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh1adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 31
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh2adduw:
; RV64I: # %bb.0:
@@ -247,6 +265,24 @@ define i64 @sh2adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh2adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 30
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh3adduw:
; RV64I: # %bb.0:
@@ -285,6 +321,24 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh3adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
@@ -335,6 +389,24 @@ define i64 @addmul6(i64 %a, i64 %b) {
ret i64 %d
}
+define i64 @disjointormul6(i64 %a, i64 %b) {
+; RV64I-LABEL: disjointormul6:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 6
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: disjointormul6:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a0, a0, a0
+; RV64ZBA-NEXT: sh1add a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %c = mul i64 %a, 6
+ %d = or disjoint i64 %c, %b
+ ret i64 %d
+}
+
define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I: # %bb.0:
@@ -1099,6 +1171,23 @@ define i64 @add4104(i64 %a) {
ret i64 %c
}
+define i64 @add4104_2(i64 %a) {
+; RV64I-LABEL: add4104_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, 8
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: add4104_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, 1026
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: ret
+ %c = or disjoint i64 %a, 4104
+ ret i64 %c
+}
+
define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I: # %bb.0:
@@ -1282,6 +1371,96 @@ define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
ret i32 %1
}
+define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add1:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add1:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %add, %x
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add2:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add2:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: add a0, a1, a0
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %x
+ %add2 = add nsw i64 %add, %shl
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add3:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add3:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: sh3add a1, a1, a2
+; RV64ZBA-NEXT: sh3add a0, a1, a0
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %shl1, %shl
+ %add2 = add nsw i64 %x, %add
+ ret i64 %add2
+}
+
+define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
+; RV64I-LABEL: sh6_sh3_add4:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a2, a2, 3
+; RV64I-NEXT: slli a1, a1, 6
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh6_sh3_add4:
+; RV64ZBA: # %bb.0: # %entry
+; RV64ZBA-NEXT: slli a1, a1, 6
+; RV64ZBA-NEXT: sh3add a0, a2, a0
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+entry:
+ %shl = shl i64 %z, 3
+ %shl1 = shl i64 %y, 6
+ %add = add nsw i64 %x, %shl
+ %add2 = add nsw i64 %add, %shl1
+ ret i64 %add2
+}
+
; Make sure we use sext.h+slli+srli for Zba+Zbb.
; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 2269d8d04c9c..4d5ef5db8605 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -11,8 +11,6 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB0_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -38,14 +36,13 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
@@ -64,8 +61,6 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -91,14 +86,13 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
@@ -121,50 +115,45 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_ceil_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a0, .LBB2_2
+; RV64I-NEXT: addiw a1, a0, -1
+; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: li a2, 32
+; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: srliw a1, a0, 1
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a2, a1, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
-; RV64I-NEXT: sub a0, s0, a1
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: log2_ceil_i32:
@@ -183,48 +172,42 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-LABEL: findLastSet_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: srliw a1, a0, 2
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 4
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 8
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: srliw a1, a0, 16
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: not a0, a0
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: srliw a1, a0, 1
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: srliw a2, a1, 2
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 4
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 8
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: srliw a2, a1, 16
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: not a1, a1
+; RV64I-NEXT: srli a2, a1, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: and a2, a0, a1
-; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: add a0, a2, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: xori a0, a0, 31
-; RV64I-NEXT: snez a1, s0
-; RV64I-NEXT: addi a1, a1, -1
-; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: lui a2, 61681
+; RV64I-NEXT: addi a2, a2, -241
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
+; RV64I-NEXT: xori a1, a1, 31
+; RV64I-NEXT: snez a0, a0
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: findLastSet_i32:
@@ -248,10 +231,6 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: .cfi_def_cfa_offset 16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -277,14 +256,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
@@ -307,8 +285,6 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -344,14 +320,13 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
@@ -544,8 +519,6 @@ declare i32 @llvm.ctpop.i32(i32)
define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-LABEL: ctpop_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -560,14 +533,13 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i32:
@@ -657,8 +629,6 @@ define i1 @ctpop_i32_ne_one(i32 signext %a) nounwind {
define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-LABEL: ctpop_i32_load:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
@@ -674,14 +644,13 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: srli a1, a0, 4
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: addi a1, a1, -241
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i32_load:
@@ -699,58 +668,42 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV64I-LABEL: ctpop_v2i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -64
-; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw s3, a2, 1365
-; RV64I-NEXT: and a1, a1, s3
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw s4, a1, 819
-; RV64I-NEXT: and a1, a0, s4
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: and a4, a0, a2
; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, s4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw s5, a1, -241
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw s1, a1, 257
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw s2, a0, 24
-; RV64I-NEXT: srli a0, s0, 1
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: sub s0, s0, a0
-; RV64I-NEXT: and a0, s0, s4
-; RV64I-NEXT: srli s0, s0, 2
-; RV64I-NEXT: and a1, s0, s4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srliw a1, a0, 24
-; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a4, a0
+; RV64I-NEXT: srli a4, a0, 4
+; RV64I-NEXT: add a0, a0, a4
+; RV64I-NEXT: lui a4, 61681
+; RV64I-NEXT: addi a4, a4, -241
+; RV64I-NEXT: and a0, a0, a4
+; RV64I-NEXT: slli a5, a0, 8
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 16
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: srliw a0, a0, 24
+; RV64I-NEXT: srli a5, a1, 1
+; RV64I-NEXT: and a3, a5, a3
+; RV64I-NEXT: sub a1, a1, a3
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srliw a1, a1, 24
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_v2i32:
@@ -875,8 +828,6 @@ declare i64 @llvm.ctpop.i64(i64)
define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-LABEL: ctpop_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: addiw a2, a2, 1365
@@ -899,14 +850,13 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: slli a1, a0, 8
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 16
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_i64:
@@ -998,66 +948,52 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV64I-LABEL: ctpop_v2i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -64
-; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64I-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: srli a1, a0, 1
-; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
-; RV64I-NEXT: slli a3, a2, 32
-; RV64I-NEXT: add s3, a2, a3
-; RV64I-NEXT: and a1, a1, s3
-; RV64I-NEXT: sub a0, a0, a1
-; RV64I-NEXT: lui a1, 209715
-; RV64I-NEXT: addiw a1, a1, 819
-; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add s4, a1, a2
-; RV64I-NEXT: and a1, a0, s4
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: lui a3, 349525
+; RV64I-NEXT: addiw a3, a3, 1365
+; RV64I-NEXT: slli a4, a3, 32
+; RV64I-NEXT: add a3, a3, a4
+; RV64I-NEXT: and a2, a2, a3
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: lui a2, 209715
+; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: slli a4, a2, 32
+; RV64I-NEXT: add a2, a2, a4
+; RV64I-NEXT: and a4, a0, a2
; RV64I-NEXT: srli a0, a0, 2
-; RV64I-NEXT: and a0, a0, s4
-; RV64I-NEXT: add a0, a1, a0
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: lui a1, 61681
-; RV64I-NEXT: addiw a1, a1, -241
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a4, a0
+; RV64I-NEXT: srli a4, a0, 4
+; RV64I-NEXT: add a0, a0, a4
+; RV64I-NEXT: lui a4, 61681
+; RV64I-NEXT: addiw a4, a4, -241
+; RV64I-NEXT: slli a5, a4, 32
+; RV64I-NEXT: add a4, a4, a5
+; RV64I-NEXT: and a0, a0, a4
+; RV64I-NEXT: slli a5, a0, 8
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 16
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: slli a5, a0, 32
+; RV64I-NEXT: add a0, a0, a5
+; RV64I-NEXT: srli a0, a0, 56
+; RV64I-NEXT: srli a5, a1, 1
+; RV64I-NEXT: and a3, a5, a3
+; RV64I-NEXT: sub a1, a1, a3
+; RV64I-NEXT: and a3, a1, a2
+; RV64I-NEXT: srli a1, a1, 2
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: add a1, a3, a1
+; RV64I-NEXT: srli a2, a1, 4
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: slli a2, a1, 8
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a2, a1, 16
+; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: slli a2, a1, 32
-; RV64I-NEXT: add s5, a1, a2
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: lui a1, 4112
-; RV64I-NEXT: addiw s1, a1, 257
-; RV64I-NEXT: slli a1, s1, 32
-; RV64I-NEXT: add s1, s1, a1
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli s2, a0, 56
-; RV64I-NEXT: srli a0, s0, 1
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: sub s0, s0, a0
-; RV64I-NEXT: and a0, s0, s4
-; RV64I-NEXT: srli s0, s0, 2
-; RV64I-NEXT: and a1, s0, s4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a0, a1
-; RV64I-NEXT: and a0, a0, s5
-; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: srli a1, a0, 56
-; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64I-NEXT: ld s5, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 64
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 56
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctpop_v2i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
new file mode 100644
index 000000000000..ddbfbd0b59fa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -0,0 +1,343 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+;
+; SABD
+;
+
+define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sabd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+ %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+ %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+ %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: sabd_b_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, -1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, -1, v0
+; CHECK-NEXT: vmin.vv v10, v12, v8
+; CHECK-NEXT: vmax.vv v8, v12, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+ %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
+ %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+ ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sabd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+ %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+ %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sabd_h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sabd_s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmin.vv v12, v8, v10
+; CHECK-NEXT: vmax.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: sabd_s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; FIXME: Crashes legalization if enabled
+;; define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+;; %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+;; %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+;; %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
+;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+;; ret <vscale x 2 x i64> %trunc
+;; }
+
+define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: sabd_d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
+ %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+ ret <vscale x 2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uabd_b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
+ %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
+ %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
+ %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
+ ret <vscale x 16 x i8> %trunc
+}
+
+define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
+; CHECK-LABEL: uabd_b_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vminu.vv v10, v12, v8
+; CHECK-NEXT: vmaxu.vv v8, v12, v8
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+ %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
+ %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
+ %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
+ ret <vscale x 16 x i8> %abs
+}
+
+define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uabd_h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
+ %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %trunc
+}
+
+define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: uabd_h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+ %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
+ %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
+ ret <vscale x 8 x i16> %abs
+}
+
+define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uabd_s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vminu.vv v12, v8, v10
+; CHECK-NEXT: vmaxu.vv v8, v8, v10
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uabd_s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+ %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; FIXME: Crashes legalization if enabled
+;; define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+;; %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
+;; %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
+;; %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
+;; %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
+;; %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
+;; ret <vscale x 2 x i64> %trunc
+;; }
+
+define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: uabd_d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v10, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+ %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+ %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
+ ret <vscale x 2 x i64> %abs
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and
+; %b have differing types.
+define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uabd_non_matching_extension:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v10
+; CHECK-NEXT: vminu.vv v10, v8, v12
+; CHECK-NEXT: vmaxu.vv v8, v8, v12
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
+ %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
+ %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
+ %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %trunc
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a.zext)) returns true but
+; %a and %b have differing types.
+define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: uabd_non_matching_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vminu.vv v8, v10, v9
+; CHECK-NEXT: vmaxu.vv v9, v10, v9
+; CHECK-NEXT: vsub.vv v10, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+; Test the situation where isLegal(ISD::ABD, typeof(%a)) returns true but %a and
+; %b are promoted differently.
+define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; CHECK-LABEL: uabd_non_matching_promotion:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v9
+; CHECK-NEXT: vwsub.wv v10, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vrsub.vi v8, v10, 0
+; CHECK-NEXT: vmax.vv v8, v10, v8
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+ %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
+ %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
+ %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
+ ret <vscale x 4 x i32> %abs
+}
+
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+
+declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
+declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
+
+declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
+declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
+
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
+
+declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
index 87d95d7596d4..139579b3d2a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -161,72 +161,71 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
; CHECK-LABEL: fv128:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v0, v16, a2
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 2
; CHECK-NEXT: lui a0, %hi(.LCPI10_1)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_1)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 6, e8, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 4
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v10, v16, a2
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v8, v16, a2
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: lui a0, %hi(.LCPI10_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_2)
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 6
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_3)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_3)
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 10, e8, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 8
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 6
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: lui a0, %hi(.LCPI10_4)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 12, e8, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 10
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vid.v v16
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v0, v16, a2
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v9, 2
; CHECK-NEXT: lui a0, %hi(.LCPI10_5)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_5)
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 14, e8, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 12
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_6)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_6)
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 14
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v0, v9, 6
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v0, v8, 8
; CHECK-NEXT: ret
%mask = call <128 x i1> @llvm.get.active.lane.mask.v128i1.i64(i64 %index, i64 %tc)
ret <128 x i1> %mask
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
index 1d025a2f776f..1fe91c721f4d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll
@@ -18,15 +18,15 @@ define void @test(ptr %addr) {
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: slli a2, a1, 1
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vl1re64.v v9, (a3)
+; CHECK-NEXT: vl1re64.v v9, (a0)
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: vl1re64.v v10, (a0)
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v9, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v9, (a2)
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vs1r.v v8, (a1)
-; CHECK-NEXT: vs1r.v v10, (a0)
+; CHECK-NEXT: vs1r.v v10, (a2)
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index 64031f8a9359..a9a680d54d58 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -16,13 +16,13 @@ define <vscale x 1 x double> @test(ptr %addr, i64 %vl) {
; CHECK-NEXT: sub sp, sp, a2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; CHECK-NEXT: csrrs a2, vlenb, zero
-; CHECK-NEXT: add a3, a0, a2
-; CHECK-NEXT: vl1re64.v v8, (a3)
+; CHECK-NEXT: vl1re64.v v8, (a0)
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: vs1r.v v8, (a2)
-; CHECK-NEXT: vs1r.v v9, (a0)
+; CHECK-NEXT: vs1r.v v9, (a2)
; CHECK-NEXT: vl1re64.v v8, (a2)
; CHECK-NEXT: vl1re64.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
new file mode 100644
index 000000000000..2d5258fbabfa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/binop-zext.ll
@@ -0,0 +1,154 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+; Check that we perform binary arithmetic in a narrower type where possible, via
+; combineBinOpOfZExt or otherwise.
+
+define <vscale x 8 x i32> @add(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %add = add <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %add
+}
+
+define <vscale x 8 x i32> @sub(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwsubu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %sub
+}
+
+define <vscale x 8 x i32> @mul(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: mul:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %mul = mul <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %mul
+}
+
+define <vscale x 8 x i32> @sdiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: sdiv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %sdiv = sdiv <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %sdiv
+}
+
+define <vscale x 8 x i32> @udiv(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: udiv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vdivu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %udiv = udiv <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %udiv
+}
+
+define <vscale x 8 x i32> @srem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: srem:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %srem = srem <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %srem
+}
+
+define <vscale x 8 x i32> @urem(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: urem:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vzext.vf2 v12, v8
+; CHECK-NEXT: vremu.vv v12, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %urem = urem <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %urem
+}
+
+define <vscale x 8 x i32> @and(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vand.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %shl = and <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %shl
+}
+
+define <vscale x 8 x i32> @or(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vor.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %or = or <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %or
+}
+
+define <vscale x 8 x i32> @xor(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: xor:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vxor.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
+; CHECK-NEXT: ret
+ %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i32>
+ %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i32>
+ %xor = xor <vscale x 8 x i32> %a.zext, %b.zext
+ ret <vscale x 8 x i32> %xor
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
new file mode 100644
index 000000000000..84936d88e187
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/callee-saved-regs.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m -mattr=+v -O2 < %s \
+; RUN: | FileCheck --check-prefix=SPILL-O2 %s
+
+define <vscale x 1 x i32> @test_vector_std(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_std:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 1
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
+
+define riscv_vector_cc <vscale x 1 x i32> @test_vector_callee(<vscale x 1 x i32> %va) nounwind {
+; SPILL-O2-LABEL: test_vector_callee:
+; SPILL-O2: # %bb.0: # %entry
+; SPILL-O2-NEXT: addi sp, sp, -16
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: sub sp, sp, a0
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs1r.v v1, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs2r.v v2, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs4r.v v4, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: addi a0, sp, 16
+; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; SPILL-O2-NEXT: #APP
+; SPILL-O2-NEXT: #NO_APP
+; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 4
+; SPILL-O2-NEXT: sub a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl1r.v v1, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: li a1, 13
+; SPILL-O2-NEXT: mul a0, a0, a1
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl2r.v v2, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a1, a0, 3
+; SPILL-O2-NEXT: add a0, a1, a0
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: add a0, sp, a0
+; SPILL-O2-NEXT: addi a0, a0, 16
+; SPILL-O2-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: csrr a0, vlenb
+; SPILL-O2-NEXT: slli a0, a0, 4
+; SPILL-O2-NEXT: add sp, sp, a0
+; SPILL-O2-NEXT: addi sp, sp, 16
+; SPILL-O2-NEXT: ret
+entry:
+ call void asm sideeffect "",
+ "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
+
+ ret <vscale x 1 x i32> %va
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 78385a80b47e..90edb994ce82 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -86,3 +86,166 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
%a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
ret <vscale x 32 x i32> %a
}
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+; RV32-LABEL: caller_tuple_return:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call callee_tuple_return
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple_return:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call callee_tuple_return
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
+ %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
+ %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
+ %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+
+define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+; RV32-LABEL: caller_tuple_argument:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v8, v10
+; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: call callee_tuple_argument
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple_argument:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: call callee_tuple_argument
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
+ %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
+ %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
+ call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
+ ret void
+}
+
+declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
+
+; %0 -> v8
+; %1 -> v9
+define <vscale x 1 x i64> @case1(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1) {
+; CHECK-LABEL: case1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %1
+ ret <vscale x 1 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> v10-v11
+; %2 -> v9
+define <vscale x 1 x i64> @case2_1(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %a
+}
+define <vscale x 2 x i64> @case2_2(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v10
+; CHECK-NEXT: ret
+ %a = add <vscale x 2 x i64> %1, %1
+ ret <vscale x 2 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> {v10-v11, v12-v13}
+; %2 -> v9
+define <vscale x 1 x i64> @case3_1(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
+define <vscale x 2 x i64> @case3_2(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
+ %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
+ %add = add <vscale x 2 x i64> %a, %b
+ ret <vscale x 2 x i64> %add
+}
+
+; %0 -> v8
+; %1 -> {by-ref, by-ref}
+; %2 -> v9
+define <vscale x 8 x i64> @case4_1(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vl8re64.v v8, (a1)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 0
+ %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 1
+ %add = add <vscale x 8 x i64> %a, %b
+ ret <vscale x 8 x i64> %add
+}
+define <vscale x 1 x i64> @case4_2(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
index c64216180c2a..ed434deea1a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
@@ -19,7 +19,7 @@ define void @test(ptr %ref_array, ptr %sad_array) {
; RV32-NEXT: th.swia a0, (a1), 4, 0
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vle8.v v10, (a3)
-; RV32-NEXT: vsetivli zero, 8, e8, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vslideup.vi v10, v9, 4
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v10
@@ -42,7 +42,7 @@ define void @test(ptr %ref_array, ptr %sad_array) {
; RV64-NEXT: th.swia a0, (a1), 4, 0
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vle8.v v10, (a3)
-; RV64-NEXT: vsetivli zero, 8, e8, m1, tu, ma
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vslideup.vi v10, v9, 4
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index 76aa2b913c65..e15e6452163b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -469,9 +469,8 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v13, v10, a0
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v13, v10, a0
; CHECK-NEXT: vslidedown.vx v12, v9, a0
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
new file mode 100644
index 000000000000..bd1209a17b53
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -0,0 +1,727 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+;
+; SABD
+;
+
+define <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8b_as_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i16>
+ %b.sext = sext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.sext, %b.sext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ %trunc = trunc <8 x i16> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8b_as_32b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i32>
+ %b.sext = sext <8 x i8> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: sabd_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <16 x i8> %a to <16 x i16>
+ %b.sext = sext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.sext, %b.sext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: sabd_4h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i16> %a to <4 x i32>
+ %b.sext = sext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.sext, %b.sext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ %trunc = trunc <4 x i32> %abs to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+;
+; CHECK-LABEL: sabd_4h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i8> %a to <4 x i16>
+ %b.sext = sext <4 x i8> %b to <4 x i16>
+ %sub = sub <4 x i16> %a.sext, %b.sext
+ %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+ ret <4 x i16> %abs
+}
+
+define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: sabd_8h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i16> %a to <8 x i32>
+ %b.sext = sext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.sext, %b.sext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: sabd_8h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <8 x i8> %a to <8 x i16>
+ %b.sext = sext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.sext, %b.sext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: sabd_2s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i32> %a to <2 x i64>
+ %b.sext = sext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.sext, %b.sext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ %trunc = trunc <2 x i64> %abs to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
+define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+;
+; CHECK-LABEL: sabd_2s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i16> %a to <2 x i32>
+ %b.sext = sext <2 x i16> %b to <2 x i32>
+ %sub = sub <2 x i32> %a.sext, %b.sext
+ %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+ ret <2 x i32> %abs
+}
+
+define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: sabd_4s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i32> %a to <4 x i64>
+ %b.sext = sext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.sext, %b.sext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: sabd_4s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <4 x i16> %a to <4 x i32>
+ %b.sext = sext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.sext, %b.sext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: sabd_2d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i64> %a to <2 x i128>
+ %b.sext = sext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.sext, %b.sext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: sabd_2d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.sext = sext <2 x i32> %a to <2 x i64>
+ %b.sext = sext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.sext, %b.sext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+;
+; UABD
+;
+
+define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: uabd_8b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i8> %a to <8 x i16>
+ %b.zext = zext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.zext, %b.zext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ %trunc = trunc <8 x i16> %abs to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: uabd_16b:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <16 x i8> %a to <16 x i16>
+ %b.zext = zext <16 x i8> %b to <16 x i16>
+ %sub = sub <16 x i16> %a.zext, %b.zext
+ %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
+ %trunc = trunc <16 x i16> %abs to <16 x i8>
+ ret <16 x i8> %trunc
+}
+
+define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: uabd_4h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i16> %a to <4 x i32>
+ %b.zext = zext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.zext, %b.zext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ %trunc = trunc <4 x i32> %abs to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
+;
+; CHECK-LABEL: uabd_4h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i8> %a to <4 x i16>
+ %b.zext = zext <4 x i8> %b to <4 x i16>
+ %sub = sub <4 x i16> %a.zext, %b.zext
+ %abs = call <4 x i16> @llvm.abs.v4i16(<4 x i16> %sub, i1 true)
+ ret <4 x i16> %abs
+}
+
+define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: uabd_8h:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i16> %a to <8 x i32>
+ %b.zext = zext <8 x i16> %b to <8 x i32>
+ %sub = sub <8 x i32> %a.zext, %b.zext
+ %abs = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %sub, i1 true)
+ %trunc = trunc <8 x i32> %abs to <8 x i16>
+ ret <8 x i16> %trunc
+}
+
+define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
+;
+; CHECK-LABEL: uabd_8h_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <8 x i8> %a to <8 x i16>
+ %b.zext = zext <8 x i8> %b to <8 x i16>
+ %sub = sub <8 x i16> %a.zext, %b.zext
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: uabd_2s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i32> %a to <2 x i64>
+ %b.zext = zext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.zext, %b.zext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ %trunc = trunc <2 x i64> %abs to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
+define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
+;
+; CHECK-LABEL: uabd_2s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i16> %a to <2 x i32>
+ %b.zext = zext <2 x i16> %b to <2 x i32>
+ %sub = sub <2 x i32> %a.zext, %b.zext
+ %abs = call <2 x i32> @llvm.abs.v2i32(<2 x i32> %sub, i1 true)
+ ret <2 x i32> %abs
+}
+
+define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: uabd_4s:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i32> %a to <4 x i64>
+ %b.zext = zext <4 x i32> %b to <4 x i64>
+ %sub = sub <4 x i64> %a.zext, %b.zext
+ %abs = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %sub, i1 true)
+ %trunc = trunc <4 x i64> %abs to <4 x i32>
+ ret <4 x i32> %trunc
+}
+
+define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
+;
+; CHECK-LABEL: uabd_4s_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <4 x i16> %a to <4 x i32>
+ %b.zext = zext <4 x i16> %b to <4 x i32>
+ %sub = sub <4 x i32> %a.zext, %b.zext
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: uabd_2d:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i64> %a to <2 x i128>
+ %b.zext = zext <2 x i64> %b to <2 x i128>
+ %sub = sub <2 x i128> %a.zext, %b.zext
+ %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
+ %trunc = trunc <2 x i128> %abs to <2 x i64>
+ ret <2 x i64> %trunc
+}
+
+define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
+;
+; CHECK-LABEL: uabd_2d_promoted_ops:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v9, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %a.zext = zext <2 x i32> %a to <2 x i64>
+ %b.zext = zext <2 x i32> %b to <2 x i64>
+ %sub = sub <2 x i64> %a.zext, %b.zext
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: uabd_v16i8_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: uabd_v8i16_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: uabd_v4i32_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
+;
+; CHECK-LABEL: uabd_v2i64_nuw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: vrsub.vi v9, v8, 0
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %sub = sub nuw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
+;
+; CHECK-LABEL: sabd_v16i8_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <16 x i8> %a, %b
+ %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
+ ret <16 x i8> %abs
+}
+
+define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
+;
+; CHECK-LABEL: sabd_v8i16_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <8 x i16> %a, %b
+ %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
+ ret <8 x i16> %abs
+}
+
+define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
+;
+; CHECK-LABEL: sabd_v4i32_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <4 x i32> %a, %b
+ %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
+ ret <4 x i32> %abs
+}
+
+define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
+;
+; CHECK-LABEL: sabd_v2i64_nsw:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %sub = sub nsw <2 x i64> %a, %b
+ %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
+ ret <2 x i64> %abs
+}
+
+define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: smaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+;
+; CHECK-LABEL: smaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+;
+; CHECK-LABEL: smaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+;
+; CHECK-LABEL: smaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmin.vv v10, v8, v9
+; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: umaxmin_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
+;
+; CHECK-LABEL: umaxmin_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
+ %sub = sub <8 x i16> %a, %b
+ ret <8 x i16> %sub
+}
+
+define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
+;
+; CHECK-LABEL: umaxmin_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
+ %sub = sub <4 x i32> %a, %b
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
+;
+; CHECK-LABEL: umaxmin_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
+ %sub = sub <2 x i64> %a, %b
+ ret <2 x i64> %sub
+}
+
+define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
+;
+; CHECK-LABEL: umaxmin_v16i8_com1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
+ %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
+ %sub = sub <16 x i8> %a, %b
+ ret <16 x i8> %sub
+}
+
+declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
+declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
+
+declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
+declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+
+declare <2 x i32> @llvm.abs.v2i32(<2 x i32>, i1)
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
+
+declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+
+declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1)
+
+declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
+declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
+declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index b7afee754f68..5252eb71c383 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -416,8 +416,8 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) {
; RV32ELEN32: # %bb.0:
; RV32ELEN32-NEXT: addi sp, sp, -16
; RV32ELEN32-NEXT: .cfi_def_cfa_offset 16
-; RV32ELEN32-NEXT: sw a1, 12(sp)
; RV32ELEN32-NEXT: sw a0, 8(sp)
+; RV32ELEN32-NEXT: sw a1, 12(sp)
; RV32ELEN32-NEXT: fld fa0, 8(sp)
; RV32ELEN32-NEXT: addi sp, sp, 16
; RV32ELEN32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
new file mode 100644
index 000000000000..a4ab67f41595
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sad_4x8_as_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwredsumu.vs v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <4 x i8> %a to <4 x i16>
+ %3 = zext <4 x i8> %b to <4 x i16>
+ %4 = sub nsw <4 x i16> %1, %3
+ %5 = tail call <4 x i16> @llvm.abs.v4i16(<4 x i16> %4, i1 true)
+ %6 = tail call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %5)
+ ret i16 %6
+}
+
+define signext i32 @sad_4x8_as_i32(<4 x i8> %a, <4 x i8> %b) {
+; CHECK-LABEL: sad_4x8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v9, v8
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v9, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <4 x i8> %a to <4 x i32>
+ %3 = zext <4 x i8> %b to <4 x i32>
+ %4 = sub nsw <4 x i32> %1, %3
+ %5 = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %5)
+ ret i32 %6
+}
+
+define signext i16 @sad_16x8_as_i16(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sad_16x8_as_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwredsumu.vs v8, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <16 x i8> %a to <16 x i16>
+ %3 = zext <16 x i8> %b to <16 x i16>
+ %4 = sub nsw <16 x i16> %1, %3
+ %5 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %4, i1 true)
+ %6 = tail call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %5)
+ ret i16 %6
+}
+
+define signext i32 @sad_16x8_as_i32(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: sad_16x8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v10, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v12, v8
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v12, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %1 = zext <16 x i8> %a to <16 x i32>
+ %3 = zext <16 x i8> %b to <16 x i32>
+ %4 = sub nsw <16 x i32> %1, %3
+ %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+ ret i32 %6
+}
+
+define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea, i32 signext %strideb) {
+; CHECK-LABEL: sad_2block_16xi8_as_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a1)
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vle8.v v11, (a1)
+; CHECK-NEXT: vminu.vv v12, v8, v9
+; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vminu.vv v9, v10, v11
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vle8.v v13, (a1)
+; CHECK-NEXT: vmaxu.vv v10, v10, v11
+; CHECK-NEXT: vsub.vv v9, v10, v9
+; CHECK-NEXT: vwaddu.vv v10, v9, v8
+; CHECK-NEXT: vminu.vv v8, v12, v13
+; CHECK-NEXT: vmaxu.vv v9, v12, v13
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v12, (a1)
+; CHECK-NEXT: vzext.vf2 v14, v8
+; CHECK-NEXT: vwaddu.vv v16, v14, v10
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vminu.vv v8, v9, v12
+; CHECK-NEXT: vmaxu.vv v9, v9, v12
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vwaddu.wv v16, v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: vredsum.vs v8, v16, v8
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+entry:
+ %idx.ext8 = sext i32 %strideb to i64
+ %idx.ext = sext i32 %stridea to i64
+ %0 = load <16 x i8>, ptr %a, align 1
+ %1 = zext <16 x i8> %0 to <16 x i32>
+ %2 = load <16 x i8>, ptr %b, align 1
+ %3 = zext <16 x i8> %2 to <16 x i32>
+ %4 = sub nsw <16 x i32> %1, %3
+ %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+ %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+ %add.ptr = getelementptr inbounds i8, ptr %a, i64 %idx.ext
+ %add.ptr9 = getelementptr inbounds i8, ptr %b, i64 %idx.ext8
+ %7 = load <16 x i8>, ptr %add.ptr, align 1
+ %8 = zext <16 x i8> %7 to <16 x i32>
+ %9 = load <16 x i8>, ptr %add.ptr9, align 1
+ %10 = zext <16 x i8> %9 to <16 x i32>
+ %11 = sub nsw <16 x i32> %8, %10
+ %12 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %11, i1 true)
+ %13 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %12)
+ %op.rdx.1 = add i32 %13, %6
+ %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+ %add.ptr9.1 = getelementptr inbounds i8, ptr %add.ptr9, i64 %idx.ext8
+ %14 = load <16 x i8>, ptr %add.ptr.1, align 1
+ %15 = zext <16 x i8> %14 to <16 x i32>
+ %16 = load <16 x i8>, ptr %add.ptr9.1, align 1
+ %17 = zext <16 x i8> %16 to <16 x i32>
+ %18 = sub nsw <16 x i32> %15, %17
+ %19 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %18, i1 true)
+ %20 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %19)
+ %op.rdx.2 = add i32 %20, %op.rdx.1
+ %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+ %add.ptr9.2 = getelementptr inbounds i8, ptr %add.ptr9.1, i64 %idx.ext8
+ %21 = load <16 x i8>, ptr %add.ptr.2, align 1
+ %22 = zext <16 x i8> %21 to <16 x i32>
+ %23 = load <16 x i8>, ptr %add.ptr9.2, align 1
+ %24 = zext <16 x i8> %23 to <16 x i32>
+ %25 = sub nsw <16 x i32> %22, %24
+ %26 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %25, i1 true)
+ %27 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %26)
+ %op.rdx.3 = add i32 %27, %op.rdx.2
+ ret i32 %op.rdx.3
+}
+
+declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare <4 x i16> @llvm.abs.v4i16(<4 x i16>, i1)
+declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
+
+declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index e5bef20fd9e2..98e6b8f2dd76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -19,14 +19,11 @@ define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; CHECK-LABEL: concat_4xv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v11
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v14, 2
-; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v11, 2
+; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: ret
%ab = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%cd = shufflevector <2 x i32> %c, <2 x i32> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -37,24 +34,18 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x i32> %d, <1 x i32> %e, <1 x i32> %f, <1 x i32> %g, <1 x i32> %h) {
; CHECK-LABEL: concat_8xv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v15
-; CHECK-NEXT: vmv1r.v v18, v13
-; CHECK-NEXT: vmv1r.v v20, v11
-; CHECK-NEXT: vmv1r.v v22, v9
-; CHECK-NEXT: vsetivli zero, 2, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v22, 1
-; CHECK-NEXT: vsetivli zero, 3, e32, m2, tu, ma
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v14, v15, 1
+; CHECK-NEXT: vslideup.vi v12, v13, 1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v12, v14, 2
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v11, 1
+; CHECK-NEXT: vslideup.vi v8, v9, 1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v10, 2
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v20, 3
-; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 4
-; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v18, 5
-; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v14, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 7
+; CHECK-NEXT: vslideup.vi v8, v12, 4
; CHECK-NEXT: ret
%ab = shufflevector <1 x i32> %a, <1 x i32> %b, <2 x i32> <i32 0, i32 1>
%cd = shufflevector <1 x i32> %c, <1 x i32> %d, <2 x i32> <i32 0, i32 1>
@@ -80,15 +71,14 @@ define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; CHECK-LABEL: concat_4xv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v11
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v20, v9
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v20, 4
-; CHECK-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 8
+; CHECK-NEXT: vmv1r.v v14, v11
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v12, v14, 4
+; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 12
+; CHECK-NEXT: vslideup.vi v8, v12, 8
; CHECK-NEXT: ret
%ab = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%cd = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -99,26 +89,18 @@ define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x
define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d, <2 x i32> %e, <2 x i32> %f, <2 x i32> %g, <2 x i32> %h) {
; CHECK-LABEL: concat_8xv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v15
-; CHECK-NEXT: vmv1r.v v20, v14
-; CHECK-NEXT: vmv1r.v v24, v13
-; CHECK-NEXT: vmv1r.v v28, v11
-; CHECK-NEXT: vmv1r.v v4, v10
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v0, 2
-; CHECK-NEXT: vsetivli zero, 6, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v4, 4
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v28, 6
-; CHECK-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 8
-; CHECK-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v24, 10
-; CHECK-NEXT: vsetivli zero, 14, e32, m4, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v20, 12
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v14, v15, 2
+; CHECK-NEXT: vslideup.vi v12, v13, 2
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v12, v14, 4
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v11, 2
+; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v16, 14
+; CHECK-NEXT: vslideup.vi v8, v12, 8
; CHECK-NEXT: ret
%ab = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%cd = shufflevector <2 x i32> %c, <2 x i32> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -152,29 +134,27 @@ define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; VLA-LABEL: concat_4xv8i32:
; VLA: # %bb.0:
-; VLA-NEXT: vmv2r.v v16, v14
-; VLA-NEXT: vmv2r.v v24, v12
-; VLA-NEXT: vmv2r.v v0, v10
-; VLA-NEXT: vsetivli zero, 16, e32, m8, tu, ma
-; VLA-NEXT: vslideup.vi v8, v0, 8
-; VLA-NEXT: vsetivli zero, 24, e32, m8, tu, ma
-; VLA-NEXT: vslideup.vi v8, v24, 16
+; VLA-NEXT: vmv2r.v v20, v14
+; VLA-NEXT: vmv2r.v v16, v12
+; VLA-NEXT: vmv2r.v v12, v10
+; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; VLA-NEXT: vslideup.vi v16, v20, 8
+; VLA-NEXT: vslideup.vi v8, v12, 8
; VLA-NEXT: li a0, 32
; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; VLA-NEXT: vslideup.vi v8, v16, 24
+; VLA-NEXT: vslideup.vi v8, v16, 16
; VLA-NEXT: ret
;
; VLS-LABEL: concat_4xv8i32:
; VLS: # %bb.0:
-; VLS-NEXT: vmv2r.v v16, v14
-; VLS-NEXT: vmv2r.v v24, v12
-; VLS-NEXT: vmv2r.v v0, v10
-; VLS-NEXT: vsetivli zero, 16, e32, m8, tu, ma
-; VLS-NEXT: vslideup.vi v8, v0, 8
-; VLS-NEXT: vsetivli zero, 24, e32, m8, tu, ma
-; VLS-NEXT: vslideup.vi v8, v24, 16
+; VLS-NEXT: vmv2r.v v20, v14
+; VLS-NEXT: vmv2r.v v16, v12
+; VLS-NEXT: vmv2r.v v12, v10
+; VLS-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; VLS-NEXT: vslideup.vi v16, v20, 8
+; VLS-NEXT: vslideup.vi v8, v12, 8
; VLS-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; VLS-NEXT: vslideup.vi v8, v16, 24
+; VLS-NEXT: vslideup.vi v8, v16, 16
; VLS-NEXT: ret
%ab = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%cd = shufflevector <8 x i32> %c, <8 x i32> %d, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -185,123 +165,49 @@ define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x
define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
; VLA-LABEL: concat_8xv4i32:
; VLA: # %bb.0:
-; VLA-NEXT: addi sp, sp, -16
-; VLA-NEXT: .cfi_def_cfa_offset 16
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 5
-; VLA-NEXT: sub sp, sp, a0
-; VLA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; VLA-NEXT: vmv1r.v v16, v15
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 3
-; VLA-NEXT: mv a1, a0
-; VLA-NEXT: slli a0, a0, 1
-; VLA-NEXT: add a0, a0, a1
-; VLA-NEXT: add a0, sp, a0
-; VLA-NEXT: addi a0, a0, 16
-; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; VLA-NEXT: vmv1r.v v16, v14
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 4
-; VLA-NEXT: add a0, sp, a0
-; VLA-NEXT: addi a0, a0, 16
-; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; VLA-NEXT: vmv1r.v v16, v13
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 3
-; VLA-NEXT: add a0, sp, a0
-; VLA-NEXT: addi a0, a0, 16
-; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLA-NEXT: vmv1r.v v18, v15
+; VLA-NEXT: vmv1r.v v20, v14
+; VLA-NEXT: vmv1r.v v22, v13
; VLA-NEXT: vmv1r.v v16, v12
-; VLA-NEXT: addi a0, sp, 16
-; VLA-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; VLA-NEXT: vmv1r.v v0, v11
-; VLA-NEXT: vmv1r.v v24, v10
-; VLA-NEXT: vmv1r.v v16, v9
-; VLA-NEXT: vsetivli zero, 8, e32, m8, tu, ma
-; VLA-NEXT: vslideup.vi v8, v16, 4
-; VLA-NEXT: vsetivli zero, 12, e32, m8, tu, ma
-; VLA-NEXT: vslideup.vi v8, v24, 8
-; VLA-NEXT: vsetivli zero, 16, e32, m8, tu, ma
-; VLA-NEXT: vslideup.vi v8, v0, 12
-; VLA-NEXT: vsetivli zero, 20, e32, m8, tu, ma
-; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLA-NEXT: vslideup.vi v8, v16, 16
-; VLA-NEXT: vsetivli zero, 24, e32, m8, tu, ma
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 3
-; VLA-NEXT: add a0, sp, a0
-; VLA-NEXT: addi a0, a0, 16
-; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLA-NEXT: vslideup.vi v8, v16, 20
-; VLA-NEXT: vsetivli zero, 28, e32, m8, tu, ma
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 4
-; VLA-NEXT: add a0, sp, a0
-; VLA-NEXT: addi a0, a0, 16
-; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLA-NEXT: vslideup.vi v8, v16, 24
+; VLA-NEXT: vmv1r.v v14, v11
+; VLA-NEXT: vmv1r.v v12, v10
+; VLA-NEXT: vmv1r.v v10, v9
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vslideup.vi v20, v18, 4
+; VLA-NEXT: vslideup.vi v16, v22, 4
+; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; VLA-NEXT: vslideup.vi v16, v20, 8
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vslideup.vi v12, v14, 4
+; VLA-NEXT: vslideup.vi v8, v10, 4
+; VLA-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; VLA-NEXT: vslideup.vi v8, v12, 8
; VLA-NEXT: li a0, 32
; VLA-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 3
-; VLA-NEXT: mv a1, a0
-; VLA-NEXT: slli a0, a0, 1
-; VLA-NEXT: add a0, a0, a1
-; VLA-NEXT: add a0, sp, a0
-; VLA-NEXT: addi a0, a0, 16
-; VLA-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLA-NEXT: vslideup.vi v8, v16, 28
-; VLA-NEXT: csrr a0, vlenb
-; VLA-NEXT: slli a0, a0, 5
-; VLA-NEXT: add sp, sp, a0
-; VLA-NEXT: addi sp, sp, 16
+; VLA-NEXT: vslideup.vi v8, v16, 16
; VLA-NEXT: ret
;
; VLS-LABEL: concat_8xv4i32:
; VLS: # %bb.0:
-; VLS-NEXT: addi sp, sp, -16
-; VLS-NEXT: .cfi_def_cfa_offset 16
-; VLS-NEXT: addi sp, sp, -512
-; VLS-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; VLS-NEXT: vmv1r.v v16, v15
-; VLS-NEXT: addi a0, sp, 400
-; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; VLS-NEXT: vmv1r.v v16, v14
-; VLS-NEXT: addi a0, sp, 272
-; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; VLS-NEXT: vmv1r.v v16, v13
-; VLS-NEXT: addi a0, sp, 144
-; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; VLS-NEXT: vmv1r.v v18, v15
+; VLS-NEXT: vmv1r.v v20, v14
+; VLS-NEXT: vmv1r.v v22, v13
; VLS-NEXT: vmv1r.v v16, v12
-; VLS-NEXT: addi a0, sp, 16
-; VLS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; VLS-NEXT: vmv1r.v v0, v11
-; VLS-NEXT: vmv1r.v v24, v10
-; VLS-NEXT: vmv1r.v v16, v9
-; VLS-NEXT: vsetivli zero, 8, e32, m8, tu, ma
-; VLS-NEXT: vslideup.vi v8, v16, 4
-; VLS-NEXT: vsetivli zero, 12, e32, m8, tu, ma
-; VLS-NEXT: vslideup.vi v8, v24, 8
-; VLS-NEXT: vsetivli zero, 16, e32, m8, tu, ma
-; VLS-NEXT: vslideup.vi v8, v0, 12
-; VLS-NEXT: vsetivli zero, 20, e32, m8, tu, ma
-; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLS-NEXT: vslideup.vi v8, v16, 16
-; VLS-NEXT: vsetivli zero, 24, e32, m8, tu, ma
-; VLS-NEXT: addi a0, sp, 144
-; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLS-NEXT: vslideup.vi v8, v16, 20
-; VLS-NEXT: vsetivli zero, 28, e32, m8, tu, ma
-; VLS-NEXT: addi a0, sp, 272
-; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLS-NEXT: vslideup.vi v8, v16, 24
+; VLS-NEXT: vmv1r.v v14, v11
+; VLS-NEXT: vmv1r.v v12, v10
+; VLS-NEXT: vmv1r.v v10, v9
+; VLS-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLS-NEXT: vslideup.vi v20, v18, 4
+; VLS-NEXT: vslideup.vi v16, v22, 4
+; VLS-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; VLS-NEXT: vslideup.vi v16, v20, 8
+; VLS-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLS-NEXT: vslideup.vi v12, v14, 4
+; VLS-NEXT: vslideup.vi v8, v10, 4
+; VLS-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; VLS-NEXT: vslideup.vi v8, v12, 8
; VLS-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; VLS-NEXT: addi a0, sp, 400
-; VLS-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; VLS-NEXT: vslideup.vi v8, v16, 28
-; VLS-NEXT: addi sp, sp, 512
-; VLS-NEXT: addi sp, sp, 16
+; VLS-NEXT: vslideup.vi v8, v16, 16
; VLS-NEXT: ret
%ab = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%cd = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 4ec2e59672ad..657d52354aa3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -27,13 +27,14 @@ define void @widen_3xv4i16(ptr %x, ptr %z) {
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi a2, a0, 8
-; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vle16.v v9, (a2)
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vle16.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
-; CHECK-NEXT: vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 8
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 4
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 8
+; CHECK-NEXT: vsetivli zero, 12, e16, m2, ta, ma
; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i16>, ptr %x
@@ -72,20 +73,18 @@ define void @widen_4xv4i16(ptr %x, ptr %z) {
define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
; CHECK-NO-MISALIGN-LABEL: widen_4xv4i16_unaligned:
; CHECK-NO-MISALIGN: # %bb.0:
-; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NO-MISALIGN-NEXT: vle8.v v8, (a0)
-; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 8
-; CHECK-NO-MISALIGN-NEXT: vle8.v v10, (a2)
; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 16
-; CHECK-NO-MISALIGN-NEXT: vle8.v v12, (a2)
+; CHECK-NO-MISALIGN-NEXT: vle8.v v10, (a2)
+; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 8
; CHECK-NO-MISALIGN-NEXT: addi a0, a0, 24
-; CHECK-NO-MISALIGN-NEXT: vle8.v v14, (a0)
-; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v10, 4
-; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v12, 8
+; CHECK-NO-MISALIGN-NEXT: vle8.v v9, (a0)
+; CHECK-NO-MISALIGN-NEXT: vle8.v v11, (a2)
+; CHECK-NO-MISALIGN-NEXT: vslideup.vi v10, v9, 4
+; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v11, 4
; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v14, 12
+; CHECK-NO-MISALIGN-NEXT: vslideup.vi v8, v10, 8
; CHECK-NO-MISALIGN-NEXT: vse16.v v8, (a1)
; CHECK-NO-MISALIGN-NEXT: ret
;
@@ -187,18 +186,17 @@ define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: addi a2, a0, 2
-; CHECK-NEXT: vle16.v v10, (a2)
; CHECK-NEXT: addi a2, a0, 6
-; CHECK-NEXT: vle16.v v12, (a2)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: addi a2, a0, 2
; CHECK-NEXT: addi a0, a0, 8
-; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e16, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
-; CHECK-NEXT: vsetivli zero, 12, e16, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v12, 8
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v11, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v10, v9, 4
+; CHECK-NEXT: vslideup.vi v8, v11, 4
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v14, 12
+; CHECK-NEXT: vslideup.vi v8, v10, 8
; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i16>, ptr %x
@@ -258,17 +256,16 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV32-NEXT: vle16.v v8, (a0)
; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: add a0, a0, a4
-; RV32-NEXT: vle16.v v12, (a0)
-; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: vle16.v v14, (a0)
-; RV32-NEXT: vsetivli zero, 8, e16, m2, tu, ma
-; RV32-NEXT: vslideup.vi v8, v10, 4
-; RV32-NEXT: vsetivli zero, 12, e16, m2, tu, ma
-; RV32-NEXT: vslideup.vi v8, v12, 8
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: vle16.v v10, (a4)
+; RV32-NEXT: add a2, a4, a2
+; RV32-NEXT: vle16.v v9, (a2)
+; RV32-NEXT: vle16.v v11, (a0)
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslideup.vi v10, v9, 4
+; RV32-NEXT: vslideup.vi v8, v11, 4
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v14, 12
+; RV32-NEXT: vslideup.vi v8, v10, 8
; RV32-NEXT: vse16.v v8, (a1)
; RV32-NEXT: ret
;
@@ -277,17 +274,16 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV64-NEXT: vle16.v v8, (a0)
; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: vle16.v v10, (a0)
-; RV64-NEXT: add a0, a0, a3
-; RV64-NEXT: vle16.v v12, (a0)
-; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: vle16.v v14, (a0)
-; RV64-NEXT: vsetivli zero, 8, e16, m2, tu, ma
-; RV64-NEXT: vslideup.vi v8, v10, 4
-; RV64-NEXT: vsetivli zero, 12, e16, m2, tu, ma
-; RV64-NEXT: vslideup.vi v8, v12, 8
+; RV64-NEXT: add a3, a0, a3
+; RV64-NEXT: vle16.v v10, (a3)
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: vle16.v v9, (a2)
+; RV64-NEXT: vle16.v v11, (a0)
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslideup.vi v10, v9, 4
+; RV64-NEXT: vslideup.vi v8, v11, 4
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslideup.vi v8, v14, 12
+; RV64-NEXT: vslideup.vi v8, v10, 8
; RV64-NEXT: vse16.v v8, (a1)
; RV64-NEXT: ret
;
@@ -296,17 +292,16 @@ define void @strided_runtime_mismatch_4xv4i16(ptr %x, ptr %z, i64 %s, i64 %t) {
; ZVE64F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVE64F-NEXT: vle16.v v8, (a0)
; ZVE64F-NEXT: add a0, a0, a2
-; ZVE64F-NEXT: vle16.v v10, (a0)
-; ZVE64F-NEXT: add a0, a0, a3
-; ZVE64F-NEXT: vle16.v v12, (a0)
-; ZVE64F-NEXT: add a0, a0, a2
-; ZVE64F-NEXT: vle16.v v14, (a0)
-; ZVE64F-NEXT: vsetivli zero, 8, e16, m2, tu, ma
-; ZVE64F-NEXT: vslideup.vi v8, v10, 4
-; ZVE64F-NEXT: vsetivli zero, 12, e16, m2, tu, ma
-; ZVE64F-NEXT: vslideup.vi v8, v12, 8
+; ZVE64F-NEXT: add a3, a0, a3
+; ZVE64F-NEXT: vle16.v v10, (a3)
+; ZVE64F-NEXT: add a2, a3, a2
+; ZVE64F-NEXT: vle16.v v9, (a2)
+; ZVE64F-NEXT: vle16.v v11, (a0)
+; ZVE64F-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVE64F-NEXT: vslideup.vi v10, v9, 4
+; ZVE64F-NEXT: vslideup.vi v8, v11, 4
; ZVE64F-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; ZVE64F-NEXT: vslideup.vi v8, v14, 12
+; ZVE64F-NEXT: vslideup.vi v8, v10, 8
; ZVE64F-NEXT: vse16.v v8, (a1)
; ZVE64F-NEXT: ret
%a = load <4 x i16>, ptr %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index 57a72c639b33..bc0bf5dd76ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -385,12 +385,12 @@ define <32 x i64> @vwaddu_v32i64(ptr %x, ptr %y) nounwind {
define <2 x i32> @vwaddu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -912,12 +912,12 @@ define <4 x i64> @crash(<4 x i16> %x, <4 x i16> %y) {
define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -930,12 +930,12 @@ define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -948,12 +948,12 @@ define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i16>, ptr %x
%b = load <2 x i16>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index bff7ef86c289..b97c9654ad3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -391,12 +391,12 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
define <2 x i32> @vwmulu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwmulu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwmulu.vv v8, v10, v11
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index eb7894ede046..b3bda5973eb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -441,57 +441,50 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; CHECK-V-NEXT: lhu s0, 24(a0)
-; CHECK-V-NEXT: lhu s1, 16(a0)
-; CHECK-V-NEXT: lhu s2, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
@@ -609,57 +602,50 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; CHECK-V-NEXT: lhu s0, 24(a0)
-; CHECK-V-NEXT: lhu s1, 16(a0)
-; CHECK-V-NEXT: lhu s2, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
@@ -787,60 +773,53 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; CHECK-V-NEXT: lhu s0, 24(a0)
-; CHECK-V-NEXT: lhu s1, 16(a0)
-; CHECK-V-NEXT: lhu s2, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: li a0, -1
; CHECK-V-NEXT: srli a0, a0, 32
-; CHECK-V-NEXT: vmin.vx v8, v10, a0
+; CHECK-V-NEXT: vmin.vx v8, v8, a0
; CHECK-V-NEXT: vmax.vx v10, v8, zero
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
@@ -1404,90 +1383,125 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s5, -56
; CHECK-V-NEXT: .cfi_offset s6, -64
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 2 * vlenb
-; CHECK-V-NEXT: lhu s0, 56(a0)
-; CHECK-V-NEXT: lhu s1, 48(a0)
-; CHECK-V-NEXT: lhu s2, 40(a0)
-; CHECK-V-NEXT: lhu s3, 32(a0)
-; CHECK-V-NEXT: lhu s4, 24(a0)
-; CHECK-V-NEXT: lhu s5, 16(a0)
-; CHECK-V-NEXT: lhu s6, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu s3, 24(a0)
+; CHECK-V-NEXT: lhu s4, 32(a0)
+; CHECK-V-NEXT: lhu s5, 40(a0)
+; CHECK-V-NEXT: lhu s6, 48(a0)
+; CHECK-V-NEXT: lhu a0, 56(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 4
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 5
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s0
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 6
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 7
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
@@ -1682,90 +1696,125 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s5, -56
; CHECK-V-NEXT: .cfi_offset s6, -64
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 2 * vlenb
-; CHECK-V-NEXT: lhu s0, 56(a0)
-; CHECK-V-NEXT: lhu s1, 48(a0)
-; CHECK-V-NEXT: lhu s2, 40(a0)
-; CHECK-V-NEXT: lhu s3, 32(a0)
-; CHECK-V-NEXT: lhu s4, 24(a0)
-; CHECK-V-NEXT: lhu s5, 16(a0)
-; CHECK-V-NEXT: lhu s6, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu s3, 24(a0)
+; CHECK-V-NEXT: lhu s4, 32(a0)
+; CHECK-V-NEXT: lhu s5, 40(a0)
+; CHECK-V-NEXT: lhu s6, 48(a0)
+; CHECK-V-NEXT: lhu a0, 56(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 4
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 5
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s0
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 6
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 7
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
@@ -1982,94 +2031,129 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s5, -56
; CHECK-V-NEXT: .cfi_offset s6, -64
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 2 * vlenb
-; CHECK-V-NEXT: lhu s0, 56(a0)
-; CHECK-V-NEXT: lhu s1, 48(a0)
-; CHECK-V-NEXT: lhu s2, 40(a0)
-; CHECK-V-NEXT: lhu s3, 32(a0)
-; CHECK-V-NEXT: lhu s4, 24(a0)
-; CHECK-V-NEXT: lhu s5, 16(a0)
-; CHECK-V-NEXT: lhu s6, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu s3, 24(a0)
+; CHECK-V-NEXT: lhu s4, 32(a0)
+; CHECK-V-NEXT: lhu s5, 40(a0)
+; CHECK-V-NEXT: lhu s6, 48(a0)
+; CHECK-V-NEXT: lhu a0, 56(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 4
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 5
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s0
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 6
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 7
+; CHECK-V-NEXT: vslideup.vi v8, v10, 4
; CHECK-V-NEXT: lui a0, 16
; CHECK-V-NEXT: addi a0, a0, -1
-; CHECK-V-NEXT: vmin.vx v8, v10, a0
+; CHECK-V-NEXT: vmin.vx v8, v8, a0
; CHECK-V-NEXT: vmax.vx v10, v8, zero
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
@@ -3723,57 +3807,50 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; CHECK-V-NEXT: lhu s0, 24(a0)
-; CHECK-V-NEXT: lhu s1, 16(a0)
-; CHECK-V-NEXT: lhu s2, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
@@ -3889,57 +3966,50 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; CHECK-V-NEXT: lhu s0, 24(a0)
-; CHECK-V-NEXT: lhu s1, 16(a0)
-; CHECK-V-NEXT: lhu s2, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
@@ -4066,60 +4136,53 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; CHECK-V-NEXT: lhu s0, 24(a0)
-; CHECK-V-NEXT: lhu s1, 16(a0)
-; CHECK-V-NEXT: lhu s2, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu a0, 24(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
+; CHECK-V-NEXT: vslideup.vi v8, v10, 2
; CHECK-V-NEXT: li a0, -1
; CHECK-V-NEXT: srli a0, a0, 32
-; CHECK-V-NEXT: vmin.vx v8, v10, a0
+; CHECK-V-NEXT: vmin.vx v8, v8, a0
; CHECK-V-NEXT: vmax.vx v10, v8, zero
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
@@ -4671,90 +4734,125 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s5, -56
; CHECK-V-NEXT: .cfi_offset s6, -64
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 2 * vlenb
-; CHECK-V-NEXT: lhu s0, 56(a0)
-; CHECK-V-NEXT: lhu s1, 48(a0)
-; CHECK-V-NEXT: lhu s2, 40(a0)
-; CHECK-V-NEXT: lhu s3, 32(a0)
-; CHECK-V-NEXT: lhu s4, 24(a0)
-; CHECK-V-NEXT: lhu s5, 16(a0)
-; CHECK-V-NEXT: lhu s6, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu s3, 24(a0)
+; CHECK-V-NEXT: lhu s4, 32(a0)
+; CHECK-V-NEXT: lhu s5, 40(a0)
+; CHECK-V-NEXT: lhu s6, 48(a0)
+; CHECK-V-NEXT: lhu a0, 56(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 4
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 5
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s0
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 6
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 7
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
@@ -4947,90 +5045,125 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s5, -56
; CHECK-V-NEXT: .cfi_offset s6, -64
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 2 * vlenb
-; CHECK-V-NEXT: lhu s0, 56(a0)
-; CHECK-V-NEXT: lhu s1, 48(a0)
-; CHECK-V-NEXT: lhu s2, 40(a0)
-; CHECK-V-NEXT: lhu s3, 32(a0)
-; CHECK-V-NEXT: lhu s4, 24(a0)
-; CHECK-V-NEXT: lhu s5, 16(a0)
-; CHECK-V-NEXT: lhu s6, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu s3, 24(a0)
+; CHECK-V-NEXT: lhu s4, 32(a0)
+; CHECK-V-NEXT: lhu s5, 40(a0)
+; CHECK-V-NEXT: lhu s6, 48(a0)
+; CHECK-V-NEXT: lhu a0, 56(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 4
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 5
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s0
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 6
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-V-NEXT: vmv.s.x v10, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 7
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
@@ -5246,94 +5379,129 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s5, -56
; CHECK-V-NEXT: .cfi_offset s6, -64
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a1, a1, 1
+; CHECK-V-NEXT: slli a1, a1, 2
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 2 * vlenb
-; CHECK-V-NEXT: lhu s0, 56(a0)
-; CHECK-V-NEXT: lhu s1, 48(a0)
-; CHECK-V-NEXT: lhu s2, 40(a0)
-; CHECK-V-NEXT: lhu s3, 32(a0)
-; CHECK-V-NEXT: lhu s4, 24(a0)
-; CHECK-V-NEXT: lhu s5, 16(a0)
-; CHECK-V-NEXT: lhu s6, 0(a0)
-; CHECK-V-NEXT: lhu a0, 8(a0)
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
+; CHECK-V-NEXT: lhu s0, 0(a0)
+; CHECK-V-NEXT: lhu s1, 8(a0)
+; CHECK-V-NEXT: lhu s2, 16(a0)
+; CHECK-V-NEXT: lhu s3, 24(a0)
+; CHECK-V-NEXT: lhu s4, 32(a0)
+; CHECK-V-NEXT: lhu s5, 40(a0)
+; CHECK-V-NEXT: lhu s6, 48(a0)
+; CHECK-V-NEXT: lhu a0, 56(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v10, 1
-; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 3
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 4
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 5
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-V-NEXT: fmv.w.x fa0, s0
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 6
-; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 1
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
+; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vslideup.vi v8, v9, 2
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-V-NEXT: csrr a0, vlenb
+; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: add a0, sp, a0
+; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 7
+; CHECK-V-NEXT: vslideup.vi v8, v10, 4
; CHECK-V-NEXT: lui a0, 16
; CHECK-V-NEXT: addi a0, a0, -1
-; CHECK-V-NEXT: vmin.vx v8, v10, a0
+; CHECK-V-NEXT: vmin.vx v8, v8, a0
; CHECK-V-NEXT: vmax.vx v10, v8, zero
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnsrl.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a0, a0, 1
+; CHECK-V-NEXT: slli a0, a0, 2
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index f3ae03af7c78..0b236f6d3ff3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -2136,17 +2136,18 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8>
; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v16, a1
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v16, v10
-; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
+; RV64-NEXT: vslidedown.vx v8, v16, a1
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a2
+; RV64-NEXT: vslidedown.vx v0, v8, a2
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf8 v16, v10
+; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
index c27488b18a01..d13d67fd0a88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
@@ -9,39 +9,38 @@ define <4 x float> @foo(ptr %0) nounwind {
; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
-; CHECK-NEXT: lhu s0, 6(a0)
-; CHECK-NEXT: lhu s1, 4(a0)
-; CHECK-NEXT: lhu s2, 0(a0)
-; CHECK-NEXT: lhu a0, 2(a0)
+; CHECK-NEXT: lhu s0, 0(a0)
+; CHECK-NEXT: lhu s1, 2(a0)
+; CHECK-NEXT: lhu s2, 4(a0)
+; CHECK-NEXT: lhu a0, 6(a0)
; CHECK-NEXT: fmv.w.x fa0, a0
; CHECK-NEXT: call __extendhfsf2
-; CHECK-NEXT: fsw fa0, 8(sp)
+; CHECK-NEXT: fsw fa0, 4(sp)
; CHECK-NEXT: fmv.w.x fa0, s2
; CHECK-NEXT: call __extendhfsf2
-; CHECK-NEXT: fsw fa0, 0(sp)
+; CHECK-NEXT: fsw fa0, 12(sp)
; CHECK-NEXT: fmv.w.x fa0, s1
; CHECK-NEXT: call __extendhfsf2
-; CHECK-NEXT: fsw fa0, 12(sp)
+; CHECK-NEXT: fsw fa0, 8(sp)
; CHECK-NEXT: fmv.w.x fa0, s0
; CHECK-NEXT: call __extendhfsf2
-; CHECK-NEXT: fsw fa0, 4(sp)
-; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: fsw fa0, 0(sp)
+; CHECK-NEXT: addi a0, sp, 4
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: mv a0, sp
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: addi a0, sp, 12
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: addi a0, sp, 4
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: mv a0, sp
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 3
+; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index 0544204cce79..52bd15742ef4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -16,8 +16,8 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */
- ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -37,8 +37,8 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
- ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */
- ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store unknown-size into %ir.p, align 8)
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
+ ; CHECK-NEXT: VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
; CHECK-NEXT: PseudoRET
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index a4aef577bc9a..7cc4a9da3d42 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -1187,3 +1187,30 @@ define <vscale x 2 x i32> @vmerge_larger_vl_false_becomes_tail(<vscale x 2 x i32
%b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %false, <vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i64 3)
ret <vscale x 2 x i32> %b
}
+
+; Test widening pseudos with their TIED variant (passthru same as first op).
+define <vscale x 2 x i64> @vpmerge_vwsub.w_tied(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vwsub.w_tied:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %vl.zext = zext i32 %vl to i64
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %passthru, <vscale x 2 x i32> %y, i64 %vl.zext)
+ %b = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %a, <vscale x 2 x i64> %passthru, i32 %vl)
+ ret <vscale x 2 x i64> %b
+}
+
+define <vscale x 2 x double> @vpmerge_vfwsub.w_tied(<vscale x 2 x double> %passthru, <vscale x 2 x double> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
+; CHECK-LABEL: vpmerge_vfwsub.w_tied:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+ %vl.zext = zext i32 %vl to i64
+ %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(<vscale x 2 x double> %passthru, <vscale x 2 x double> %passthru, <vscale x 2 x float> %y, i64 1, i64 %vl.zext)
+ %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %mask, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
+ ret <vscale x 2 x double> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index a320aecc6fce..6a712080fda7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -18,10 +18,10 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i
; CHECK-NEXT: vmerge.vim v14, v10, 1, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v8, v12, 0
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vnsrl.wi v10, v12, 0
; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: ret
%vec = load <vscale x 32 x i1>, ptr %p
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index ef4baf34d23f..d98597fabcd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,18 +8,18 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a0
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vnsrl.wi v12, v8, 8
-; CHECK-NEXT: vmsne.vi v8, v12, 0
+; CHECK-NEXT: vmerge.vim v14, v8, 1, v0
+; CHECK-NEXT: vnsrl.wi v10, v12, 0
+; CHECK-NEXT: vmsne.vi v8, v10, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: ret
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
@@ -102,12 +102,13 @@ define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v28, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: vmsne.vi v7, v24, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 8
; CHECK-NEXT: vnsrl.wi v28, v8, 8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v24, 0
+; CHECK-NEXT: vmsne.vi v9, v24, 0
+; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: ret
%retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.experimental.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 4aae8b8bd1dc..9a5e86d61c26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -101,40 +101,36 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
; CHECK-NEXT: vand.vi v26, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a2, v0.t
+; CHECK-NEXT: vmsne.vi v28, v26, 0
+; CHECK-NEXT: vsrl.vi v24, v24, 1
+; CHECK-NEXT: vmv1r.v v0, v28
+; CHECK-NEXT: vadd.vx v24, v24, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v24, v8, v6
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vrgatherei16.vv v0, v8, v24
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v6
+; CHECK-NEXT: vrgatherei16.vv v8, v16, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
index 972fa66917a5..e56dca0732bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
@@ -283,18 +283,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i8i32_multiple_users(ptr %x, ptr %y,
;
; FOLDING-LABEL: vwop_vscale_sext_i8i32_multiple_users:
; FOLDING: # %bb.0:
-; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
-; FOLDING-NEXT: vsext.vf4 v11, v8
-; FOLDING-NEXT: vsext.vf4 v8, v9
-; FOLDING-NEXT: vsext.vf4 v9, v10
-; FOLDING-NEXT: vmul.vv v8, v11, v8
-; FOLDING-NEXT: vadd.vv v10, v11, v9
-; FOLDING-NEXT: vsub.vv v9, v11, v9
-; FOLDING-NEXT: vor.vv v8, v8, v10
-; FOLDING-NEXT: vor.vv v8, v8, v9
+; FOLDING-NEXT: vsext.vf2 v11, v8
+; FOLDING-NEXT: vsext.vf2 v8, v9
+; FOLDING-NEXT: vsext.vf2 v9, v10
+; FOLDING-NEXT: vwmul.vv v10, v11, v8
+; FOLDING-NEXT: vwadd.vv v8, v11, v9
+; FOLDING-NEXT: vwsub.vv v12, v11, v9
+; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vor.vv v8, v10, v8
+; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = load <vscale x 2 x i8>, ptr %y
@@ -563,18 +564,19 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i8i32_multiple_users(ptr %x, ptr %y,
;
; FOLDING-LABEL: vwop_vscale_zext_i8i32_multiple_users:
; FOLDING: # %bb.0:
-; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
-; FOLDING-NEXT: vzext.vf4 v11, v8
-; FOLDING-NEXT: vzext.vf4 v8, v9
-; FOLDING-NEXT: vzext.vf4 v9, v10
-; FOLDING-NEXT: vmul.vv v8, v11, v8
-; FOLDING-NEXT: vadd.vv v10, v11, v9
-; FOLDING-NEXT: vsub.vv v9, v11, v9
-; FOLDING-NEXT: vor.vv v8, v8, v10
-; FOLDING-NEXT: vor.vv v8, v8, v9
+; FOLDING-NEXT: vzext.vf2 v11, v8
+; FOLDING-NEXT: vzext.vf2 v8, v9
+; FOLDING-NEXT: vzext.vf2 v9, v10
+; FOLDING-NEXT: vwmulu.vv v10, v11, v8
+; FOLDING-NEXT: vwaddu.vv v8, v11, v9
+; FOLDING-NEXT: vwsubu.vv v12, v11, v9
+; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; FOLDING-NEXT: vor.vv v8, v10, v8
+; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = load <vscale x 2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index a559fbf2bc8a..66e6883dd1d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i32:
@@ -421,10 +421,10 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i32(<vscale x 8 x i64> %va, i32
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -435,10 +435,10 @@ define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vsc
define <vscale x 1 x i64> @vwaddu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -451,10 +451,10 @@ define <vscale x 1 x i64> @vwadd_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -468,11 +468,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -485,9 +483,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -497,9 +495,9 @@ define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vsc
define <vscale x 1 x i64> @vwaddu_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -511,9 +509,9 @@ define <vscale x 1 x i64> @vwadd_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -527,9 +525,9 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -541,10 +539,10 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -555,10 +553,10 @@ define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vsc
define <vscale x 2 x i64> @vwaddu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -571,10 +569,10 @@ define <vscale x 2 x i64> @vwadd_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -588,11 +586,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -605,9 +601,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -617,9 +613,9 @@ define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vsc
define <vscale x 2 x i64> @vwaddu_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -631,9 +627,9 @@ define <vscale x 2 x i64> @vwadd_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -647,9 +643,9 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -661,10 +657,10 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -675,10 +671,10 @@ define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vsc
define <vscale x 4 x i64> @vwaddu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -691,10 +687,10 @@ define <vscale x 4 x i64> @vwadd_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -708,11 +704,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -725,9 +719,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -737,9 +731,9 @@ define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vsc
define <vscale x 4 x i64> @vwaddu_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -751,9 +745,9 @@ define <vscale x 4 x i64> @vwadd_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -767,9 +761,9 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -781,10 +775,10 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwadd_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -795,10 +789,10 @@ define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vsc
define <vscale x 8 x i64> @vwaddu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwaddu.vv v16, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -811,10 +805,10 @@ define <vscale x 8 x i64> @vwadd_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -828,11 +822,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK-LABEL: vwaddu_vx_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vwaddu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -845,9 +837,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwadd_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -857,9 +849,9 @@ define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vwaddu_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -871,9 +863,9 @@ define <vscale x 8 x i64> @vwadd_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -887,9 +879,9 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -901,10 +893,10 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -915,10 +907,10 @@ define <vscale x 1 x i64> @vwadd_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscal
define <vscale x 1 x i64> @vwaddu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -931,10 +923,10 @@ define <vscale x 1 x i64> @vwadd_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -948,11 +940,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -965,9 +955,9 @@ define <vscale x 1 x i64> @vwaddu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -977,9 +967,9 @@ define <vscale x 1 x i64> @vwadd_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vsca
define <vscale x 1 x i64> @vwaddu_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = add <vscale x 1 x i64> %va, %vc
@@ -991,9 +981,9 @@ define <vscale x 1 x i64> @vwadd_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwadd.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1007,9 +997,9 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwaddu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1021,10 +1011,10 @@ define <vscale x 1 x i64> @vwaddu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1035,10 +1025,10 @@ define <vscale x 2 x i64> @vwadd_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscal
define <vscale x 2 x i64> @vwaddu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwaddu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1051,10 +1041,10 @@ define <vscale x 2 x i64> @vwadd_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwadd.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1068,11 +1058,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1085,9 +1073,9 @@ define <vscale x 2 x i64> @vwaddu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -1097,9 +1085,9 @@ define <vscale x 2 x i64> @vwadd_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vsca
define <vscale x 2 x i64> @vwaddu_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = add <vscale x 2 x i64> %va, %vc
@@ -1111,9 +1099,9 @@ define <vscale x 2 x i64> @vwadd_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwadd.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1127,9 +1115,9 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwaddu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1141,10 +1129,10 @@ define <vscale x 2 x i64> @vwaddu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1155,10 +1143,10 @@ define <vscale x 4 x i64> @vwadd_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscal
define <vscale x 4 x i64> @vwaddu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vwaddu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1171,10 +1159,10 @@ define <vscale x 4 x i64> @vwadd_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwadd.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1188,11 +1176,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vadd.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1205,9 +1191,9 @@ define <vscale x 4 x i64> @vwaddu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -1217,9 +1203,9 @@ define <vscale x 4 x i64> @vwadd_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vsca
define <vscale x 4 x i64> @vwaddu_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = add <vscale x 4 x i64> %va, %vc
@@ -1231,9 +1217,9 @@ define <vscale x 4 x i64> @vwadd_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwadd.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1247,9 +1233,9 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwaddu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1261,10 +1247,10 @@ define <vscale x 4 x i64> @vwaddu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwadd_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1275,10 +1261,10 @@ define <vscale x 8 x i64> @vwadd_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscal
define <vscale x 8 x i64> @vwaddu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwaddu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwaddu.vv v16, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1291,10 +1277,10 @@ define <vscale x 8 x i64> @vwadd_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwadd.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1308,11 +1294,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
; CHECK-LABEL: vwaddu_vx_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwaddu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vadd.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1325,9 +1309,9 @@ define <vscale x 8 x i64> @vwaddu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwadd_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -1337,9 +1321,9 @@ define <vscale x 8 x i64> @vwadd_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vsca
define <vscale x 8 x i64> @vwaddu_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwaddu_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = add <vscale x 8 x i64> %va, %vc
@@ -1351,9 +1335,9 @@ define <vscale x 8 x i64> @vwadd_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwadd.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1367,9 +1351,9 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vadd.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwaddu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1377,3 +1361,108 @@ define <vscale x 8 x i64> @vwaddu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
%vc = add <vscale x 8 x i64> %va, %vb
ret <vscale x 8 x i64> %vc
}
+
+; Make sure that we don't introduce any V{S,Z}EXT_VL nodes with i1 types from
+; combineBinOp_VLToVWBinOp_VL, since they can't be selected.
+define <vscale x 1 x i64> @i1_zext(<vscale x 1 x i1> %va, <vscale x 1 x i64> %vb, ptr %p) {
+; RV32-LABEL: i1_zext:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vmv.v.i v9, 0
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: li a1, 42
+; RV32-NEXT: sh a1, 0(a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: i1_zext:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; RV64-NEXT: vadd.vi v8, v8, 1, v0.t
+; RV64-NEXT: li a1, 42
+; RV64-NEXT: sh a1, 0(a0)
+; RV64-NEXT: ret
+ %vc = zext <vscale x 1 x i1> %va to <vscale x 1 x i64>
+ %vd = add <vscale x 1 x i64> %vc, %vb
+
+; Introduce an illegal type so that the DAG changes after legalizing
+; types. Otherwise the legalize vector ops phase will be run immediately after
+; the legalize types phase, and the zext will already be in non-i1 form by the
+; time combineBinOp_VLToVWBinOp_VL is called.
+ store i9 42, ptr %p
+ ret <vscale x 1 x i64> %vd
+}
+
+; %x.i32 and %y.i32 are disjoint, so DAGCombiner will combine it into an or.
+; FIXME: We should be able to recover the or into vwaddu.vv if the disjoint
+; flag is set.
+define <vscale x 2 x i32> @vwaddu_vv_disjoint_or_add(<vscale x 2 x i8> %x.i8, <vscale x 2 x i8> %y.i8) {
+; CHECK-LABEL: vwaddu_vv_disjoint_or_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v8
+; CHECK-NEXT: vsll.vi v10, v10, 8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwaddu.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %x.i16 = zext <vscale x 2 x i8> %x.i8 to <vscale x 2 x i16>
+ %x.shl = shl <vscale x 2 x i16> %x.i16, shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 8, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer)
+ %x.i32 = zext <vscale x 2 x i16> %x.shl to <vscale x 2 x i32>
+ %y.i32 = zext <vscale x 2 x i8> %y.i8 to <vscale x 2 x i32>
+ %add = add <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %add
+}
+
+; TODO: We could select vwaddu.vv, but when both arms of the or are the same
+; DAGCombiner::hoistLogicOpWithSameOpcodeHands moves the zext above the or.
+define <vscale x 2 x i32> @vwaddu_vv_disjoint_or(<vscale x 2 x i16> %x.i16, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwaddu_vv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v9, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %x.i32 = zext <vscale x 2 x i16> %x.i16 to <vscale x 2 x i32>
+ %y.i32 = zext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+; TODO: We could select vwadd.vv, but when both arms of the or are the same
+; DAGCombiner::hoistLogicOpWithSameOpcodeHands moves the zext above the or.
+define <vscale x 2 x i32> @vwadd_vv_disjoint_or(<vscale x 2 x i16> %x.i16, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwadd_vv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v9, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v9
+; CHECK-NEXT: ret
+ %x.i32 = sext <vscale x 2 x i16> %x.i16 to <vscale x 2 x i32>
+ %y.i32 = sext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+define <vscale x 2 x i32> @vwaddu_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwaddu_wv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwaddu.wv v8, v8, v9
+; CHECK-NEXT: ret
+ %y.i32 = zext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
+
+define <vscale x 2 x i32> @vwadd_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vscale x 2 x i16> %y.i16) {
+; CHECK-LABEL: vwadd_wv_disjoint_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwadd.wv v8, v8, v9
+; CHECK-NEXT: ret
+ %y.i32 = sext <vscale x 2 x i16> %y.i16 to <vscale x 2 x i32>
+ %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
+ ret <vscale x 2 x i32> %or
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
index 3634162eefd6..28fc53f37ba1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
@@ -341,10 +341,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i32(<vscale x 8 x i32> %va, i3
define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -355,10 +355,10 @@ define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vsc
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -369,10 +369,10 @@ define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vs
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -385,10 +385,10 @@ define <vscale x 1 x i64> @vwmul_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -402,11 +402,9 @@ define <vscale x 1 x i64> @vwmulu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -421,10 +419,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
@@ -437,10 +435,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i1
define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -451,10 +449,10 @@ define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vsc
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -465,10 +463,10 @@ define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vs
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -481,10 +479,10 @@ define <vscale x 2 x i64> @vwmul_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -498,11 +496,9 @@ define <vscale x 2 x i64> @vwmulu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf2 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -517,10 +513,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vzext.vf2 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
@@ -533,10 +529,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i1
define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -547,10 +543,10 @@ define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vsc
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -561,10 +557,10 @@ define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vs
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vzext.vf2 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -577,10 +573,10 @@ define <vscale x 4 x i64> @vwmul_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -594,11 +590,9 @@ define <vscale x 4 x i64> @vwmulu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf2 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -613,10 +607,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vzext.vf4 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vzext.vf2 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -629,10 +623,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i1
define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -643,10 +637,10 @@ define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vsc
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vv v16, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -657,10 +651,10 @@ define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vs
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vzext.vf2 v20, v10
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -673,10 +667,10 @@ define <vscale x 8 x i64> @vwmul_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -690,11 +684,9 @@ define <vscale x 8 x i64> @vwmulu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK-LABEL: vwmulu_vx_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v10, a0
+; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf2 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -709,10 +701,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vzext.vf4 v24, v10
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vzext.vf2 v20, v10
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -725,10 +717,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i1
define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -739,10 +731,10 @@ define <vscale x 1 x i64> @vwmul_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscal
define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -753,10 +745,10 @@ define <vscale x 1 x i64> @vwmulu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vsca
define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -769,10 +761,10 @@ define <vscale x 1 x i64> @vwmul_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -786,11 +778,9 @@ define <vscale x 1 x i64> @vwmulu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v9, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -805,10 +795,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v8, v9
-; CHECK-NEXT: vmul.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
@@ -821,10 +811,10 @@ define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %
define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -835,10 +825,10 @@ define <vscale x 2 x i64> @vwmul_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscal
define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vwmulu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -849,10 +839,10 @@ define <vscale x 2 x i64> @vwmulu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vsca
define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -865,10 +855,10 @@ define <vscale x 2 x i64> @vwmul_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwmul.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -882,11 +872,9 @@ define <vscale x 2 x i64> @vwmulu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vzext.vf4 v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -901,10 +889,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vzext.vf8 v12, v9
-; CHECK-NEXT: vmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vzext.vf4 v11, v9
+; CHECK-NEXT: vwmulsu.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
@@ -917,10 +905,10 @@ define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %
define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -931,10 +919,10 @@ define <vscale x 4 x i64> @vwmul_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscal
define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vwmulu.vv v12, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -945,10 +933,10 @@ define <vscale x 4 x i64> @vwmulu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vsca
define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vzext.vf4 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -961,10 +949,10 @@ define <vscale x 4 x i64> @vwmul_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwmul.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -978,11 +966,9 @@ define <vscale x 4 x i64> @vwmulu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v12, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vzext.vf4 v8, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -997,10 +983,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vzext.vf8 v16, v9
-; CHECK-NEXT: vmul.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vzext.vf4 v14, v9
+; CHECK-NEXT: vwmulsu.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
@@ -1013,10 +999,10 @@ define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %
define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmul_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1027,10 +1013,10 @@ define <vscale x 8 x i64> @vwmul_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscal
define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmulu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vv v16, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1041,10 +1027,10 @@ define <vscale x 8 x i64> @vwmulu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vsca
define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwmulsu_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vzext.vf4 v20, v9
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1057,10 +1043,10 @@ define <vscale x 8 x i64> @vwmul_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwmul.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -1074,11 +1060,9 @@ define <vscale x 8 x i64> @vwmulu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
; CHECK-LABEL: vwmulu_vx_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vzext.vf4 v8, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -1093,10 +1077,10 @@ define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vzext.vf8 v24, v9
-; CHECK-NEXT: vmul.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vzext.vf4 v20, v9
+; CHECK-NEXT: vwmulsu.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
index 123469ade0ed..852814d648bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll
@@ -421,10 +421,10 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i32(<vscale x 8 x i64> %va, i32
define <vscale x 1 x i64> @vwsub_vv_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
@@ -451,10 +451,10 @@ define <vscale x 1 x i64> @vwsub_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -483,9 +483,9 @@ define <vscale x 1 x i64> @vwsubu_vx_nxv1i64_nxv1i16(<vscale x 1 x i16> %va, i16
define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -495,9 +495,9 @@ define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vsc
define <vscale x 1 x i64> @vwsubu_wv_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i16> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -509,9 +509,9 @@ define <vscale x 1 x i64> @vwsub_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -525,9 +525,9 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf4 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf2 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
@@ -539,10 +539,10 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i16(<vscale x 1 x i64> %va, i16
define <vscale x 2 x i64> @vwsub_vv_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
@@ -569,10 +569,10 @@ define <vscale x 2 x i64> @vwsub_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v10, v8
-; CHECK-NEXT: vsext.vf4 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v10, v8
+; CHECK-NEXT: vsext.vf2 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -601,9 +601,9 @@ define <vscale x 2 x i64> @vwsubu_vx_nxv2i64_nxv2i16(<vscale x 2 x i16> %va, i16
define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -613,9 +613,9 @@ define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vsc
define <vscale x 2 x i64> @vwsubu_wv_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i16> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -627,9 +627,9 @@ define <vscale x 2 x i64> @vwsub_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -643,9 +643,9 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf4 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf2 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
@@ -657,10 +657,10 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i16(<vscale x 2 x i64> %va, i16
define <vscale x 4 x i64> @vwsub_vv_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
@@ -687,10 +687,10 @@ define <vscale x 4 x i64> @vwsub_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vsext.vf4 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v12, v8
+; CHECK-NEXT: vsext.vf2 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -719,9 +719,9 @@ define <vscale x 4 x i64> @vwsubu_vx_nxv4i64_nxv4i16(<vscale x 4 x i16> %va, i16
define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -731,9 +731,9 @@ define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vsc
define <vscale x 4 x i64> @vwsubu_wv_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i16> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -745,9 +745,9 @@ define <vscale x 4 x i64> @vwsub_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -761,9 +761,9 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf4 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf2 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
@@ -775,10 +775,10 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i16(<vscale x 4 x i64> %va, i16
define <vscale x 8 x i64> @vwsub_vv_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsub_vv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
@@ -805,10 +805,10 @@ define <vscale x 8 x i64> @vwsub_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v16, v8
-; CHECK-NEXT: vsext.vf4 v24, v10
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v8
+; CHECK-NEXT: vsext.vf2 v20, v10
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -837,9 +837,9 @@ define <vscale x 8 x i64> @vwsubu_vx_nxv8i64_nxv8i16(<vscale x 8 x i16> %va, i16
define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsub_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -849,9 +849,9 @@ define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vsc
define <vscale x 8 x i64> @vwsubu_wv_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i16> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -863,9 +863,9 @@ define <vscale x 8 x i64> @vwsub_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf2 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -879,9 +879,9 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf4 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf2 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> poison, i16 %b, i16 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
@@ -893,10 +893,10 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i16(<vscale x 8 x i64> %va, i16
define <vscale x 1 x i64> @vwsub_vv_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %va to <vscale x 1 x i64>
%vd = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
@@ -923,10 +923,10 @@ define <vscale x 1 x i64> @vwsub_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v8, v9
-; CHECK-NEXT: vsub.vv v8, v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -955,9 +955,9 @@ define <vscale x 1 x i64> @vwsubu_vx_nxv1i64_nxv1i8(<vscale x 1 x i8> %va, i8 %b
define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -967,9 +967,9 @@ define <vscale x 1 x i64> @vwsub_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vsca
define <vscale x 1 x i64> @vwsubu_wv_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%vc = zext <vscale x 1 x i8> %vb to <vscale x 1 x i64>
%vd = sub <vscale x 1 x i64> %va, %vc
@@ -981,9 +981,9 @@ define <vscale x 1 x i64> @vwsub_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v9
+; CHECK-NEXT: vwsub.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -997,9 +997,9 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vzext.vf8 v10, v9
-; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vzext.vf4 v10, v9
+; CHECK-NEXT: vwsubu.wv v8, v8, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
@@ -1011,10 +1011,10 @@ define <vscale x 1 x i64> @vwsubu_wx_nxv1i64_nxv1i8(<vscale x 1 x i64> %va, i8 %
define <vscale x 2 x i64> @vwsub_vv_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %va to <vscale x 2 x i64>
%vd = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
@@ -1041,10 +1041,10 @@ define <vscale x 2 x i64> @vwsub_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v10, v8
-; CHECK-NEXT: vsext.vf8 v12, v9
-; CHECK-NEXT: vsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v10, v8
+; CHECK-NEXT: vsext.vf4 v11, v9
+; CHECK-NEXT: vwsub.vv v8, v10, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1073,9 +1073,9 @@ define <vscale x 2 x i64> @vwsubu_vx_nxv2i64_nxv2i8(<vscale x 2 x i8> %va, i8 %b
define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -1085,9 +1085,9 @@ define <vscale x 2 x i64> @vwsub_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vsca
define <vscale x 2 x i64> @vwsubu_wv_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%vc = zext <vscale x 2 x i8> %vb to <vscale x 2 x i64>
%vd = sub <vscale x 2 x i64> %va, %vc
@@ -1099,9 +1099,9 @@ define <vscale x 2 x i64> @vwsub_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v11, v10
+; CHECK-NEXT: vwsub.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1115,9 +1115,9 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf8 v12, v10
-; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vzext.vf4 v11, v10
+; CHECK-NEXT: vwsubu.wv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
@@ -1129,10 +1129,10 @@ define <vscale x 2 x i64> @vwsubu_wx_nxv2i64_nxv2i8(<vscale x 2 x i64> %va, i8 %
define <vscale x 4 x i64> @vwsub_vv_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %va to <vscale x 4 x i64>
%vd = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
@@ -1159,10 +1159,10 @@ define <vscale x 4 x i64> @vwsub_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v12, v8
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsub.vv v8, v12, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v12, v8
+; CHECK-NEXT: vsext.vf4 v14, v9
+; CHECK-NEXT: vwsub.vv v8, v12, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1191,9 +1191,9 @@ define <vscale x 4 x i64> @vwsubu_vx_nxv4i64_nxv4i8(<vscale x 4 x i8> %va, i8 %b
define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -1203,9 +1203,9 @@ define <vscale x 4 x i64> @vwsub_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vsca
define <vscale x 4 x i64> @vwsubu_wv_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%vc = zext <vscale x 4 x i8> %vb to <vscale x 4 x i64>
%vd = sub <vscale x 4 x i64> %va, %vc
@@ -1217,9 +1217,9 @@ define <vscale x 4 x i64> @vwsub_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vsext.vf4 v14, v12
+; CHECK-NEXT: vwsub.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1233,9 +1233,9 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vzext.vf8 v16, v12
-; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vzext.vf4 v14, v12
+; CHECK-NEXT: vwsubu.wv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
@@ -1247,10 +1247,10 @@ define <vscale x 4 x i64> @vwsubu_wx_nxv4i64_nxv4i8(<vscale x 4 x i64> %va, i8 %
define <vscale x 8 x i64> @vwsub_vv_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsub_vv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %va to <vscale x 8 x i64>
%vd = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
@@ -1277,10 +1277,10 @@ define <vscale x 8 x i64> @vwsub_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsext.vf8 v24, v9
-; CHECK-NEXT: vsub.vv v8, v16, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v16, v8
+; CHECK-NEXT: vsext.vf4 v20, v9
+; CHECK-NEXT: vwsub.vv v8, v16, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1309,9 +1309,9 @@ define <vscale x 8 x i64> @vwsubu_vx_nxv8i64_nxv8i8(<vscale x 8 x i8> %va, i8 %b
define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsub_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -1321,9 +1321,9 @@ define <vscale x 8 x i64> @vwsub_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vsca
define <vscale x 8 x i64> @vwsubu_wv_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vwsubu_wv_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%vc = zext <vscale x 8 x i8> %vb to <vscale x 8 x i64>
%vd = sub <vscale x 8 x i64> %va, %vc
@@ -1335,9 +1335,9 @@ define <vscale x 8 x i64> @vwsub_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %b
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsext.vf4 v20, v16
+; CHECK-NEXT: vwsub.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
@@ -1351,9 +1351,9 @@ define <vscale x 8 x i64> @vwsubu_wx_nxv8i64_nxv8i8(<vscale x 8 x i64> %va, i8 %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vzext.vf8 v24, v16
-; CHECK-NEXT: vsub.vv v8, v8, v24
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vzext.vf4 v20, v16
+; CHECK-NEXT: vwsubu.wv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> poison, i8 %b, i8 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/spill-fill-fold.ll b/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
index a9a0cc5cf94d..8cf5f55ad5c5 100644
--- a/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fill-fold.ll
@@ -290,8 +290,8 @@ define double @spill_i64_to_double(i64 %a) nounwind {
; RV32ID-NEXT: fsd fs9, 40(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs10, 32(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs11, 24(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: sw a1, 20(sp)
; RV32ID-NEXT: sw a0, 16(sp)
+; RV32ID-NEXT: sw a1, 20(sp)
; RV32ID-NEXT: fld fa5, 16(sp)
; RV32ID-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
; RV32ID-NEXT: #APP
@@ -804,13 +804,15 @@ define double @fill_i64_to_double(i64 %a) nounwind {
; RV32ID-NEXT: fsd fs9, 40(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs10, 32(sp) # 8-byte Folded Spill
; RV32ID-NEXT: fsd fs11, 24(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: sw a1, 20(sp)
-; RV32ID-NEXT: sw a0, 16(sp)
-; RV32ID-NEXT: fld fa5, 16(sp)
-; RV32ID-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
+; RV32ID-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
+; RV32ID-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32ID-NEXT: #APP
; RV32ID-NEXT: #NO_APP
-; RV32ID-NEXT: fld fa0, 8(sp) # 8-byte Folded Reload
+; RV32ID-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: sw a0, 16(sp)
+; RV32ID-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: sw a0, 20(sp)
+; RV32ID-NEXT: fld fa0, 16(sp)
; RV32ID-NEXT: lw ra, 172(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s0, 168(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s1, 164(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/strip-w-suffix.ll b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
new file mode 100644
index 000000000000..4124b3d0d360
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=STRIP %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+no-strip-w-suffix -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=NO-STRIP %s
+
+define i32 @addiw(i32 %a) {
+; STRIP-LABEL: addiw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: lui a1, 1
+; STRIP-NEXT: addi a1, a1, -1
+; STRIP-NEXT: addw a0, a0, a1
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: addiw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: lui a1, 1
+; NO-STRIP-NEXT: addiw a1, a1, -1
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: ret
+ %ret = add i32 %a, 4095
+ ret i32 %ret
+}
+
+define i32 @addw(i32 %a, i32 %b) {
+; STRIP-LABEL: addw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: add a0, a0, a1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: addw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %add = add i32 %a, %b
+ %ret = add i32 %add, 1024
+ ret i32 %ret
+}
+
+define i32 @mulw(i32 %a, i32 %b) {
+; STRIP-LABEL: mulw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: mul a0, a0, a1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: mulw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: mulw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %mul = mul i32 %a, %b
+ %ret = add i32 %mul, 1024
+ ret i32 %ret
+}
+
+define i32 @slliw(i32 %a) {
+; STRIP-LABEL: slliw:
+; STRIP: # %bb.0:
+; STRIP-NEXT: slli a0, a0, 1
+; STRIP-NEXT: addiw a0, a0, 1024
+; STRIP-NEXT: ret
+;
+; NO-STRIP-LABEL: slliw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: slliw a0, a0, 1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+ %shl = shl i32 %a, 1
+ %ret = add i32 %shl, 1024
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
index e5c2e0180ee0..73ace2033985 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
@@ -3,7 +3,8 @@
define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-LABEL: func:
; RV32: # %bb.0: # %entry
-; RV32-NEXT: cm.push {ra, s0-s1}, -24
+; RV32-NEXT: cm.push {ra, s0-s1}, -16
+; RV32-NEXT: addi sp, sp, -8
; RV32-NEXT: .cfi_def_cfa_offset 24
; RV32-NEXT: .cfi_offset ra, -12
; RV32-NEXT: .cfi_offset s0, -8
@@ -31,7 +32,8 @@ define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
; RV32-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: sb a0, 0(s0)
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: cm.popret {ra, s0-s1}, 24
+; RV32-NEXT: addi sp, sp, 8
+; RV32-NEXT: cm.popret {ra, s0-s1}, 16
entry:
br label %while.body
diff --git a/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
new file mode 100644
index 000000000000..8596a65c378c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zdinx-large-spill.mir
@@ -0,0 +1,74 @@
+# NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+# RUN: llc %s -mtriple=riscv32 -mattr=+zdinx -start-before=prologepilog -o - | FileCheck %s
+
+# We want to make sure eliminateFrameIndex doesn't fold sp+2044 as an offset in
+# a GPR pair spill/reload instruction. When we split the pair spill, we would be
+# unable to add 4 to the immediate without overflowing simm12.
+
+--- |
+ define void @foo() {
+ ; CHECK-LABEL: foo:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: addi sp, sp, -2048
+ ; CHECK-NEXT: addi sp, sp, -16
+ ; CHECK-NEXT: .cfi_def_cfa_offset 2064
+ ; CHECK-NEXT: lui t0, 1
+ ; CHECK-NEXT: add t0, sp, t0
+ ; CHECK-NEXT: sw a0, -2040(t0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a1, -2036(t0) # 4-byte Folded Spill
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: sw a2, -2048(a0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a3, -2044(a0) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a4, 2040(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a5, 2044(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a6, 2032(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: sw a7, 2036(sp) # 4-byte Folded Spill
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: lw a1, -2036(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a0, -2040(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lui a0, 1
+ ; CHECK-NEXT: add a0, sp, a0
+ ; CHECK-NEXT: lw a2, -2048(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a3, -2044(a0) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a4, 2040(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a5, 2044(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a6, 2032(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: lw a7, 2036(sp) # 4-byte Folded Reload
+ ; CHECK-NEXT: addi sp, sp, 2032
+ ; CHECK-NEXT: addi sp, sp, 32
+ ; CHECK-NEXT: ret
+ ret void
+ }
+...
+---
+name: foo
+tracksRegLiveness: true
+tracksDebugUserValues: true
+frameInfo:
+ maxAlignment: 4
+stack:
+ - { id: 0, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 1, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 2, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 3, type: spill-slot, size: 8, alignment: 4 }
+ - { id: 4, type: spill-slot, size: 2024, alignment: 4 }
+machineFunctionInfo:
+ varArgsFrameIndex: 0
+ varArgsSaveSize: 0
+body: |
+ bb.0:
+ liveins: $x10_x11, $x12_x13, $x14_x15, $x16_x17
+
+ PseudoRV32ZdinxSD killed renamable $x10_x11, %stack.0, 0 :: (store (s64) into %stack.0, align 4)
+ PseudoRV32ZdinxSD killed renamable $x12_x13, %stack.1, 0 :: (store (s64) into %stack.1, align 4)
+ PseudoRV32ZdinxSD killed renamable $x14_x15, %stack.2, 0 :: (store (s64) into %stack.2, align 4)
+ PseudoRV32ZdinxSD killed renamable $x16_x17, %stack.3, 0 :: (store (s64) into %stack.3, align 4)
+ renamable $x10_x11 = PseudoRV32ZdinxLD %stack.0, 0 :: (load (s64) from %stack.0, align 4)
+ renamable $x12_x13 = PseudoRV32ZdinxLD %stack.1, 0 :: (load (s64) from %stack.1, align 4)
+ renamable $x14_x15 = PseudoRV32ZdinxLD %stack.2, 0 :: (load (s64) from %stack.2, align 4)
+ renamable $x16_x17 = PseudoRV32ZdinxLD %stack.3, 0 :: (load (s64) from %stack.3, align 4)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll b/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
index 3dfdeac7adaa..ec660b77368e 100644
--- a/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
+++ b/llvm/test/CodeGen/SPIRV/LinkOnceODR.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-EXT: Capability Linkage
; CHECK-SPIRV-EXT: Extension "SPV_KHR_linkonce_odr"
diff --git a/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll b/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
index 7505c3fc277e..42170dce5d13 100644
--- a/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
+++ b/llvm/test/CodeGen/SPIRV/LinkOnceODRFun.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_linkonce_odr %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV-EXT: Capability Linkage
; CHECK-SPIRV-EXT: Extension "SPV_KHR_linkonce_odr"
diff --git a/llvm/test/CodeGen/SPIRV/assume.ll b/llvm/test/CodeGen/SPIRV/assume.ll
index 6099955e4afb..fbf12ef184a8 100644
--- a/llvm/test/CodeGen/SPIRV/assume.ll
+++ b/llvm/test/CodeGen/SPIRV/assume.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
-; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
+; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
+; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=EXT,CHECK %s
; RUN: llc -mtriple=spirv32-unknown-unknown < %s | FileCheck --check-prefixes=NOEXT,CHECK %s
; RUN: llc -mtriple=spirv64-unknown-unknown < %s | FileCheck --check-prefixes=NOEXT,CHECK %s
diff --git a/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll b/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
index 473794a1ac97..721e825a1c98 100644
--- a/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
+++ b/llvm/test/CodeGen/SPIRV/exec_mode_float_control_khr.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=SPV
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --mattr=+spirv1.3 --spirv-extensions=SPV_KHR_float_controls -o - | FileCheck %s --check-prefixes=SPVEXT
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --mattr=+spirv1.3 --spirv-ext=+SPV_KHR_float_controls -o - | FileCheck %s --check-prefixes=SPVEXT
define dso_local dllexport spir_kernel void @k_float_controls_0(i32 %ibuf, i32 %obuf) local_unnamed_addr {
entry:
diff --git a/llvm/test/CodeGen/SPIRV/expect.ll b/llvm/test/CodeGen/SPIRV/expect.ll
index 51555cd15552..82c1ec7dc916 100644
--- a/llvm/test/CodeGen/SPIRV/expect.ll
+++ b/llvm/test/CodeGen/SPIRV/expect.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
-; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
+; RUN: llc -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
+; RUN: llc -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_expect_assume < %s | FileCheck --check-prefixes=CHECK,EXT %s
; RUN: llc -mtriple=spirv32-unknown-unknown < %s | FileCheck --check-prefixes=CHECK,NOEXT %s
; RUN: llc -mtriple=spirv64-unknown-unknown < %s | FileCheck --check-prefixes=CHECK,NOEXT %s
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
index 1bfa556affce..e7b66798d3b3 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_double.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
index 627b59fee1a6..4fb99d9dfc76 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
index fffda4bd1128..2f536dc53843 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_half.ll
@@ -1,7 +1,7 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR1
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR2
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR2
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_add --spirv-extensions=SPV_EXT_shader_atomic_float16_add %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_add,+SPV_EXT_shader_atomic_float16_add %s -o - | FileCheck %s
; CHECK-ERROR1: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_add
; CHECK-ERROR2: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float16_add
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
index 3c6fa276cb76..7654c3618fd4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_double.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
index cc52e4c72055..8a3599002822 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_float.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
index b406aee528b2..45baaa8e9749 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_min_max/atomicrmw_fminfmax_half.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_shader_atomic_float_min_max %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: The atomic float instruction requires the following SPIR-V extension: SPV_EXT_shader_atomic_float_min_max
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
index b68fb363ad85..f49367c50e0e 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_arbitrary_precision_integers.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
define i6 @getConstantI6() {
ret i6 2
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
index 2f3c859db346..4326d8df8820 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative1.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
index 8ab84d63d485..57f52b97c4cc 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative2.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
index 20a8042ad9c2..2cb229eb7ef7 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative3.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
index efbd50b77dc6..eb5a2c729102 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv-negative4.ll
@@ -1,4 +1,4 @@
-; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: result and argument must have the same number of components
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
index 2bd59b22322f..91fa340e4611 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bfloat16_conversion/bfloat16-conv.ll
@@ -1,5 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_bfloat16_conversion %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bfloat16_conversion %s -o - -filetype=obj | spirv-val %}
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
; CHECK-ERROR: the builtin requires the following SPIR-V extension: SPV_INTEL_bfloat16_conversion
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
index 0bd1b5d776a9..5f073e95cb68 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Int8
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
index 89de098dead9..b7fecefe9a58 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_two_calls.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpCapability Int8
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
index afbcaec9c5c5..a611be8eb6ee 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_optnone.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_optnone %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-EXTENSION
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_optnone %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-EXTENSION
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NO-EXTENSION
; CHECK-EXTENSION: OpCapability OptNoneINTEL
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
index 0e0b2a4dd6ec..df17ec435ad3 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_subgroups/cl_intel_sub_groups.ll
@@ -37,7 +37,7 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_subgroups %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_subgroups %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: intel_sub_group_shuffle: the builtin requires the following SPIR-V extension: SPV_INTEL_subgroups
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
index 30c16350bf2b..b5df462bd8fa 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
@@ -1,7 +1,7 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_usm_storage_classes/intel_usm_addrspaces.ll
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-WITHOUT
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
index 897aab70852d..8a54d22a539d 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr.ll
@@ -1,8 +1,8 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_variable_length_array/basic.ll
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
; CHECK-ERROR: LLVM ERROR: array allocation: this instruction requires the following SPIR-V extension: SPV_INTEL_variable_length_array
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
index fbac43e51f39..7b9f75d74db9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_variable_length_array/vararr_spec_const.ll
@@ -1,7 +1,7 @@
; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_variable_length_array/vla_spec_const.ll
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_variable_length_array %s -o - -filetype=obj | spirv-val %}
; CHECK-SPIRV: Capability VariableLengthArrayINTEL
; CHECK-SPIRV: Extension "SPV_INTEL_variable_length_array"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
index 95395d5efb55..100f02faba85 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --spirv-extensions=SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --spirv-ext=+SPV_KHR_bit_instructions -o - | FileCheck %s --check-prefix=CHECK-EXTENSION
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-NO-EXTENSION
; CHECK-EXTENSION: OpCapability BitInstructions
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
index e74dd99617f9..0d9ab4ab65ce 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_no_integer_wrap_decoration.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s
; CHECK-DAG: OpExtension "SPV_KHR_no_integer_wrap_decoration"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
index b1d6a09c7fe3..63aade4f7f8d 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
; CHECK-ERROR: LLVM ERROR: OpGroupNonUniformRotateKHR instruction requires the following SPIR-V extension: SPV_KHR_subgroup_rotate
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
index 39bf63ddae4f..0de654be8ed7 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_uniform_group_instructions/uniform-group-instructions.ll
@@ -1,6 +1,6 @@
; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_uniform_group_instructions %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_uniform_group_instructions %s -o - | FileCheck %s
; CHECK-ERROR: LLVM ERROR: __spirv_GroupBitwiseAndKHR: the builtin requires the following SPIR-V extension: SPV_KHR_uniform_group_instructions
diff --git a/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll b/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll
new file mode 100644
index 000000000000..fc07cca4dd24
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/both-allowed-disallowed-extension-error.ll
@@ -0,0 +1,7 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_arbitrary_precision_integers,-SPV_INTEL_arbitrary_precision_integers %s -o %t.spvt 2>&1 | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=-SPV_INTEL_arbitrary_precision_integers,+SPV_INTEL_arbitrary_precision_integers %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Extension cannot be allowed and disallowed at the same time: SPV_INTEL_arbitrary_precision_integers
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll
new file mode 100644
index 000000000000..973a5e6f6056
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions-but-one.ll
@@ -0,0 +1,9 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=all,-SPV_INTEL_arbitrary_precision_integers %s -o - | FileCheck %s
+
+define i6 @foo() {
+ %call = tail call i32 @llvm.bitreverse.i32(i32 42)
+ ret i6 2
+}
+
+; CHECK-NOT: OpExtension "SPV_INTEL_arbitrary_precision_integers"
+; CHECK-DAG: OpExtension "SPV_KHR_bit_instructions"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll
new file mode 100644
index 000000000000..a5b979469b93
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/enable-all-extensions.ll
@@ -0,0 +1,7 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=all %s -o - | FileCheck %s
+
+define i6 @getConstantI6() {
+ ret i6 2
+}
+
+; CHECK: OpExtension "SPV_INTEL_arbitrary_precision_integers"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll b/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll
new file mode 100644
index 000000000000..207ed4bf2572
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/invalid-extension-list-format.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=UNKNOWN_EXTENSION %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Invalid extension list format: UNKNOWN_EXTENSION
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll b/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll
new file mode 100644
index 000000000000..f4f54247c2ad
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/unknown-extension-name.ll
@@ -0,0 +1,6 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+UNKNOWN_EXTENSION %s -o %t.spvt 2>&1 | FileCheck %s
+; CHECK: Unknown SPIR-V extension: +UNKNOWN_EXTENSION
+
+define i8 @foo() {
+ ret i8 2
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll
new file mode 100644
index 000000000000..ec35690ac154
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/WaveGetLaneIndex.ll
@@ -0,0 +1,68 @@
+; RUN: llc -O0 -mtriple=spirv-vulkan-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-unknown %s -o - -filetype=obj | spirv-val %}
+
+; This file generated from the following command:
+; clang -cc1 -triple spirv-vulkan-compute -x hlsl -emit-llvm -finclude-default-header -o - - <<EOF
+; [numthreads(1, 1, 1)]
+; void main() {
+; int idx = WaveGetLaneIndex();
+; }
+; EOF
+
+; CHECK-DAG: OpCapability Shader
+; CHECK-DAG: OpCapability GroupNonUniform
+; CHECK-DAG: OpDecorate %[[#var:]] BuiltIn SubgroupLocalInvocationId
+; CHECK-DAG: %[[#int:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#ptri:]] = OpTypePointer Input %[[#int]]
+; CHECK-DAG: %[[#ptrf:]] = OpTypePointer Function %[[#int]]
+; CHECK-DAG: %[[#var]] = OpVariable %[[#ptri]] Input
+
+; CHECK-NOT: OpDecorate %[[#var]] LinkageAttributes
+
+
+; ModuleID = '-'
+source_filename = "-"
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
+target triple = "spirv-unknown-vulkan-compute"
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define internal spir_func void @main() #0 {
+entry:
+ %0 = call token @llvm.experimental.convergence.entry()
+ %idx = alloca i32, align 4
+; CHECK: %[[#idx:]] = OpVariable %[[#ptrf]] Function
+
+ %1 = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %0) ]
+; CHECK: %[[#tmp:]] = OpLoad %[[#int]] %[[#var]]
+
+ store i32 %1, ptr %idx, align 4
+; CHECK: OpStore %[[#idx]] %[[#tmp]]
+
+ ret void
+}
+
+; Function Attrs: norecurse
+define void @main.1() #1 {
+entry:
+ call void @main()
+ ret void
+}
+
+; Function Attrs: convergent
+declare i32 @__hlsl_wave_get_lane_index() #2
+
+; Function Attrs: convergent nocallback nofree nosync nounwind willreturn memory(none)
+declare token @llvm.experimental.convergence.entry() #3
+
+attributes #0 = { convergent noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #1 = { norecurse "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #2 = { convergent }
+attributes #3 = { convergent nocallback nofree nosync nounwind willreturn memory(none) }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 4, !"dx.disable_optimizations", i32 1}
+!2 = !{!"clang version 19.0.0git (/usr/local/google/home/nathangauer/projects/llvm-project/clang bc6fd04b73a195981ee77823cf1382d04ab96c44)"}
+
diff --git a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
index 641e2bf0649c..31cd8bd45929 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
@@ -1,7 +1,13 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s --translator-compatibility-mode -o - | FileCheck %s --check-prefix=CHECK-COMPAT
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s --translator-compatibility-mode -o - -filetype=obj | spirv-val %}
; CHECK-DAG: OpName [[EQ:%.*]] "test_eq"
; CHECK-DAG: OpName [[NE:%.*]] "test_ne"
+; CHECK-COMPAT-DAG: OpName [[EQ:%.*]] "test_eq"
+; CHECK-COMPAT-DAG: OpName [[NE:%.*]] "test_ne"
; CHECK-DAG: OpName [[ULT:%.*]] "test_ult"
; CHECK-DAG: OpName [[SLT:%.*]] "test_slt"
; CHECK-DAG: OpName [[ULE:%.*]] "test_ule"
@@ -19,6 +25,9 @@
; CHECK-NEXT: [[R:%.*]] = OpPtrEqual {{%.+}} [[A]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
+; CHECK-COMPAT: [[EQ]] = OpFunction
+; CHECK-COMPAT-NOT: OpPtrEqual
+; CHECK-COMPAT: OpFunctionEnd
define i1 @test_eq(i16* %a, i16* %b) {
%r = icmp eq i16* %a, %b
ret i1 %r
@@ -31,6 +40,9 @@ define i1 @test_eq(i16* %a, i16* %b) {
; CHECK-NEXT: [[R:%.*]] = OpPtrNotEqual {{%.+}} [[A]] [[B]]
; CHECK-NEXT: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
+; CHECK-COMPAT: [[NE]] = OpFunction
+; CHECK-COMPAT-NOT: OpPtrNotEqual
+; CHECK-COMPAT: OpFunctionEnd
define i1 @test_ne(i16* %a, i16* %b) {
%r = icmp ne i16* %a, %b
ret i1 %r
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
index 93190f972d9b..e0c84ee3a3f1 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/add.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
index aa879b22e057..12a4a86fa4a8 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
index a0d18d5a750f..459bc6bdcdaf 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fadd.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
index 247ebcca2dd8..4f9cd29cd05d 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
index 13f44108733c..837bea0fbe62 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmaximum.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
index 13ef11862166..475da2e1ec31 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
index 93ef79affdda..b525c849c3b5 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fminimum.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
index afe30d5b51e0..0985be992ca7 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/fmul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
index 9b397aee30cb..1a700577e46b 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
index a62bb0c376f3..90c6cf5562a9 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
index 3fc2bcc86054..4551fa31681d 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
index 9459946a408f..a0d257bb1318 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/smin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
index cce7189c173c..ba5dba76aeec 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umax.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
index bcc49c50d480..e16bde88ef50 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/umin.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
index 26bc96b41a98..cf887bb358ac 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/xor.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
target triple = "spir64-unknown-unknown"
diff --git a/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll
new file mode 100644
index 000000000000..77b895c7762f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/nested-struct-opaque-pointers.ll
@@ -0,0 +1,20 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-NOT: OpTypeInt 8 0
+
+@GI = addrspace(1) constant i64 42
+
+@GS = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GI, ptr addrspace(1) @GI }
+@GS2 = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GS, ptr addrspace(1) @GS }
+@GS3 = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @GS2, ptr addrspace(1) @GS2 }
+
+@GPS = addrspace(1) global ptr addrspace(1) @GS3
+
+@GPI1 = addrspace(1) global ptr addrspace(1) @GI
+@GPI2 = addrspace(1) global ptr addrspace(1) @GPI1
+@GPI3 = addrspace(1) global ptr addrspace(1) @GPI2
+
+define spir_kernel void @foo() {
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
index d426fc4dfd4e..6d4913f802c2 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
@@ -1,14 +1,14 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
-; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[TyInt8:.*]] = OpTypeInt 8 0
-; CHECK: %[[TyInt8Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt8]]
-; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt8Ptr]] %[[TyInt8Ptr]]
+; CHECK: %[[TyInt64:.*]] = OpTypeInt 64 0
+; CHECK: %[[TyInt64Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt64]]
+; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt64Ptr]] %[[TyInt64Ptr]]
; CHECK: %[[ConstStruct:.*]] = OpConstantComposite %[[TyStruct]] %[[ConstField:.*]] %[[ConstField]]
; CHECK: %[[TyStructPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyStruct]]
; CHECK: OpVariable %[[TyStructPtr]] {{[a-zA-Z]+}} %[[ConstStruct]]
-@a = addrspace(1) constant i32 123
+@a = addrspace(1) constant i64 42
@struct = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @a, ptr addrspace(1) @a }
define spir_kernel void @foo() {
diff --git a/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll
new file mode 100644
index 000000000000..1071d3443056
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/type-deduce-by-call-chain.ll
@@ -0,0 +1,57 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-DAG: OpName %[[ArgCum:.*]] "_arg_cum"
+; CHECK-SPIRV-DAG: OpName %[[FunTest:.*]] "test"
+; CHECK-SPIRV-DAG: OpName %[[Addr:.*]] "addr"
+; CHECK-SPIRV-DAG: OpName %[[StubObj:.*]] "stub_object"
+; CHECK-SPIRV-DAG: OpName %[[MemOrder:.*]] "mem_order"
+; CHECK-SPIRV-DAG: OpName %[[FooStub:.*]] "foo_stub"
+; CHECK-SPIRV-DAG: OpName %[[FooObj:.*]] "foo_object"
+; CHECK-SPIRV-DAG: OpName %[[FooMemOrder:.*]] "mem_order"
+; CHECK-SPIRV-DAG: OpName %[[FooFunc:.*]] "foo"
+; CHECK-SPIRV-DAG: %[[TyLong:.*]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[TyVoid:.*]] = OpTypeVoid
+; CHECK-SPIRV-DAG: %[[TyPtrLong:.*]] = OpTypePointer CrossWorkgroup %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[TyFunPtrLong:.*]] = OpTypeFunction %[[TyVoid]] %[[TyPtrLong]]
+; CHECK-SPIRV-DAG: %[[TyGenPtrLong:.*]] = OpTypePointer Generic %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[TyFunGenPtrLongLong:.*]] = OpTypeFunction %[[TyVoid]] %[[TyGenPtrLong]] %[[TyLong]]
+; CHECK-SPIRV-DAG: %[[Const3:.*]] = OpConstant %[[TyLong]] 3
+; CHECK-SPIRV: %[[FunTest]] = OpFunction %[[TyVoid]] None %[[TyFunPtrLong]]
+; CHECK-SPIRV: %[[ArgCum]] = OpFunctionParameter %[[TyPtrLong]]
+; CHECK-SPIRV: OpFunctionCall %[[TyVoid]] %[[FooFunc]] %[[Addr]] %[[Const3]]
+; CHECK-SPIRV: %[[FooStub]] = OpFunction %[[TyVoid]] None %[[TyFunGenPtrLongLong]]
+; CHECK-SPIRV: %[[StubObj]] = OpFunctionParameter %[[TyGenPtrLong]]
+; CHECK-SPIRV: %[[MemOrder]] = OpFunctionParameter %[[TyLong]]
+; CHECK-SPIRV: %[[FooFunc]] = OpFunction %[[TyVoid]] None %[[TyFunGenPtrLongLong]]
+; CHECK-SPIRV: %[[FooObj]] = OpFunctionParameter %[[TyGenPtrLong]]
+; CHECK-SPIRV: %[[FooMemOrder]] = OpFunctionParameter %[[TyLong]]
+; CHECK-SPIRV: OpFunctionCall %[[TyVoid]] %[[FooStub]] %[[FooObj]] %[[FooMemOrder]]
+
+define spir_kernel void @test(ptr addrspace(1) noundef align 4 %_arg_cum) {
+entry:
+ %lptr = getelementptr inbounds i32, ptr addrspace(1) %_arg_cum, i64 1
+ %addr = addrspacecast ptr addrspace(1) %lptr to ptr addrspace(4)
+ %object = bitcast ptr addrspace(4) %addr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %object, i32 3)
+ %halfptr = getelementptr inbounds half, ptr addrspace(1) %_arg_cum, i64 1
+ %halfaddr = addrspacecast ptr addrspace(1) %halfptr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %halfaddr, i32 3)
+ %dblptr = getelementptr inbounds double, ptr addrspace(1) %_arg_cum, i64 1
+ %dbladdr = addrspacecast ptr addrspace(1) %dblptr to ptr addrspace(4)
+ call spir_func void @foo(ptr addrspace(4) %dbladdr, i32 3)
+ ret void
+}
+
+define void @foo_stub(ptr addrspace(4) noundef %stub_object, i32 noundef %mem_order) {
+entry:
+ %object.addr = alloca ptr addrspace(4)
+ %object.addr.ascast = addrspacecast ptr %object.addr to ptr addrspace(4)
+ store ptr addrspace(4) %stub_object, ptr addrspace(4) %object.addr.ascast
+ ret void
+}
+
+define void @foo(ptr addrspace(4) noundef %foo_object, i32 noundef %mem_order) {
+ tail call void @foo_stub(ptr addrspace(4) noundef %foo_object, i32 noundef %mem_order)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
index 329399bab3e5..2ea5c767730e 100644
--- a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
+++ b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
@@ -1,5 +1,6 @@
; RUN: llc -mtriple=spirv-unknown-unknown -O0 %s -o - | FileCheck %s
+; CHECK-DAG: OpDecorate %[[#SubgroupLocalInvocationId:]] BuiltIn SubgroupLocalInvocationId
; CHECK-DAG: %[[#bool:]] = OpTypeBool
; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
@@ -37,10 +38,10 @@ l1_continue:
; CHECK-NEXT: OpBranch %[[#l1_header]]
l1_end:
- %call = call spir_func i32 @_Z3absi(i32 0) [ "convergencectrl"(token %tl1) ]
+ %call = call i32 @__hlsl_wave_get_lane_index() [ "convergencectrl"(token %tl1) ]
br label %end
; CHECK-DAG: %[[#l1_end]] = OpLabel
-; CHECK-DAG: %[[#]] = OpFunctionCall
+; CHECK-DAG: %[[#]] = OpLoad %[[#]] %[[#SubgroupLocalInvocationId]]
; CHECK-NEXT: OpBranch %[[#end:]]
l2:
@@ -76,6 +77,4 @@ declare token @llvm.experimental.convergence.entry()
declare token @llvm.experimental.convergence.control()
declare token @llvm.experimental.convergence.loop()
-; This intrinsic is not convergent. This is only because the backend doesn't
-; support convergent operations yet.
-declare spir_func i32 @_Z3absi(i32) convergent
+declare i32 @__hlsl_wave_get_lane_index() convergent
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
index 02d1250399f9..e405ef0ed58a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
@@ -7,7 +7,7 @@
;;
;; Positive tests:
;;
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
;;
;; Negative tests:
;;
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
index 3551030843d0..e0172ec3c1bd 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
@@ -1,6 +1,5 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
;
-; CHECK-SPIRV-DAG: %[[#i8:]] = OpTypeInt 8 0
; CHECK-SPIRV-DAG: %[[#i32:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#one:]] = OpConstant %[[#i32]] 1
; CHECK-SPIRV-DAG: %[[#two:]] = OpConstant %[[#i32]] 2
@@ -13,7 +12,6 @@
; CHECK-SPIRV: %[[#test_arr2:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
; CHECK-SPIRV: %[[#test_arr:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
-; CHECK-SPIRV-DAG: %[[#const_i8_ptr:]] = OpTypePointer UniformConstant %[[#i8]]
; CHECK-SPIRV-DAG: %[[#i32x3_ptr:]] = OpTypePointer Function %[[#i32x3]]
; CHECK-SPIRV: %[[#arr:]] = OpVariable %[[#i32x3_ptr]] Function
diff --git a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
index 3b308ce3d0d2..adeec15b1755 100644
--- a/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
+++ b/llvm/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
@@ -25,6 +25,8 @@
name: autogen_SD21418
alignment: 4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
registers:
- { id: 0, class: vr128bit }
- { id: 1, class: vr128bit }
diff --git a/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll b/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
index 8290dbfe2310..81aedc1a1d7f 100644
--- a/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
+++ b/llvm/test/CodeGen/SystemZ/call-zos-vararg.ll
@@ -88,13 +88,15 @@ entry:
ret i64 %retval
}
+;; TODO: The extra COPY after LGDR is unnecessary (machine-scheduler introduces the overlap).
; CHECK-LABEL: call_vararg_both0:
; CHECK: stmg 6, 7, 1872(4)
; CHECK-NEXT: aghi 4, -192
; CHECK-NEXT: lg 6, 40(5)
; CHECK-NEXT: lg 5, 32(5)
+; CHECK-NEXT: lgdr 0, 0
; CHECK-NEXT: lgr 2, 1
-; CHECK-NEXT: lgdr 1, 0
+; CHECK-NEXT: lgr 1, 0
; CHECK-NEXT: basr 7, 6
; CHECK-NEXT: bcr 0, 0
; CHECK-NEXT: lg 7, 2072(4)
diff --git a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
index 7ff7d9b8b709..197c3d8551fc 100644
--- a/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
+++ b/llvm/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
@@ -157,6 +157,7 @@ registers:
- { id: 129, class: grx32bit }
- { id: 130, class: fp64bit }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0:
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-04.mir b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
index 23fd2739698a..ab4a14cfaee8 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-04.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-04.mir
@@ -65,12 +65,10 @@ body: |
CHIMux %3, 0, implicit-def $cc
%0 = LOCRMux undef %0, %5, 14, 6, implicit $cc
%0 = LOCRMux %0, %2, 14, 6, implicit killed $cc
- ADJCALLSTACKDOWN 0, 0
%7 = LGFR %0
$r3d = LGHI 0
$r4d = COPY %7
CallBRASL @foo, undef $r2d, killed $r3d, killed $r4d, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d
- ADJCALLSTACKUP 0, 0
J %bb.1
...
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-08.mir b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
index 64c6d0697992..2ea67dcce067 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-08.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-08.mir
@@ -155,9 +155,7 @@ body: |
J %bb.4
bb.4.bb33:
- ADJCALLSTACKDOWN 0, 0
CallBRASL @fun, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
- ADJCALLSTACKUP 0, 0
STRL %4, @globvar :: (store (s32) into @globvar)
CLFIMux undef %23:grx32bit, 1, implicit-def $cc
%25:grx32bit = LHIMux 0
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
index 2701a1dc034a..8a7929c9eb2c 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints-02.mir
@@ -45,11 +45,9 @@ body: |
%11:gr32bit = SELRMux %8, %9:grx32bit, 14, 6, implicit killed $cc
CHIMux %6, 2, implicit-def $cc
%0:gr32bit = SELRMux %11, %5, 14, 8, implicit killed $cc
- ADJCALLSTACKDOWN 0, 0
%10:gr64bit = LGFR %0
$r2d = COPY %10
CallBRASL @foo, killed $r2d, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit $fpc
- ADJCALLSTACKUP 0, 0
J %bb.1
...
diff --git a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
index c98ffda83727..009fd6ce8267 100644
--- a/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
+++ b/llvm/test/CodeGen/SystemZ/cond-move-regalloc-hints.mir
@@ -200,18 +200,12 @@ body: |
%32:gr64bit = COPY $r3d
%0:gr64bit = COPY $r2d
- ADJCALLSTACKDOWN 0, 0
CallBRASL @sre_malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
%1:addr64bit = COPY $r2d
- ADJCALLSTACKUP 0, 0
- ADJCALLSTACKDOWN 0, 0
CallBRASL @sre_malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
%2:addr64bit = COPY $r2d
- ADJCALLSTACKUP 0, 0
%3:gr32bit = AHIMuxK %0.subreg_l32, -1, implicit-def dead $cc
- ADJCALLSTACKDOWN 0, 0
CallBRASL @malloc, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc
- ADJCALLSTACKUP 0, 0
%55:gr32bit = AHIMuxK %0.subreg_l32, 3, implicit-def dead $cc
%56:addr64bit = LGHI 0
%57:gr64bit = COPY %0
diff --git a/llvm/test/CodeGen/SystemZ/frame-28.mir b/llvm/test/CodeGen/SystemZ/frame-28.mir
index 13337dba6ec5..254b8a2cf246 100644
--- a/llvm/test/CodeGen/SystemZ/frame-28.mir
+++ b/llvm/test/CodeGen/SystemZ/frame-28.mir
@@ -179,9 +179,7 @@ body: |
VST64 renamable $f16d, %stack.0, 0, $noreg
VST64 renamable $f16d, %stack.0, 0, $noreg
VST64 renamable $f16d, %stack.1, 0, $noreg
- ADJCALLSTACKDOWN 0, 0
CallBRASL @foo, csr_systemz_elf, implicit-def dead $r14d, implicit-def dead $cc, implicit $fpc, implicit-def $r2l
- ADJCALLSTACKUP 0, 0
$f17d = IMPLICIT_DEF
VST64 renamable $f17d, %stack.1, 0, $noreg
Return
diff --git a/llvm/test/CodeGen/SystemZ/frame-adjstack.ll b/llvm/test/CodeGen/SystemZ/frame-adjstack.ll
new file mode 100644
index 000000000000..7edacaa3d7d7
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/frame-adjstack.ll
@@ -0,0 +1,16 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -verify-machineinstrs | FileCheck %s
+;
+; Test that inserting a new MBB near a call during finalize isel custom
+; insertion does not cause all frame instructions to be missed. That would
+; result in a missing to set the AdjustsStack flag.
+
+; CHECK-LABEL: fun
+define void @fun(i1 %cc) {
+ %sel = select i1 %cc, i32 5, i32 0
+ tail call void @input_report_abs(i32 %sel)
+ %sel2 = select i1 %cc, i32 6, i32 1
+ tail call void @input_report_abs(i32 %sel2)
+ ret void
+}
+
+declare void @input_report_abs(i32)
diff --git a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
index e52fd44ae47d..3e00b6065eb9 100644
--- a/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
+++ b/llvm/test/CodeGen/SystemZ/int-cmp-56.mir
@@ -48,6 +48,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -125,6 +126,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -202,6 +204,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
@@ -279,6 +282,7 @@ liveins:
- { reg: '$r2d', virtual-reg: '%0' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/CodeGen/SystemZ/readcyclecounter.ll b/llvm/test/CodeGen/SystemZ/readcyclecounter.ll
new file mode 100644
index 000000000000..34b6d34143f8
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/readcyclecounter.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=s390x-ibm-linux | FileCheck %s
+
+; Verify that we correctly lower ISD::READCYCLECOUNTER.
+
+define i64 @test_builtin_readcyclecounter1() {
+; CHECK-LABEL: test_builtin_readcyclecounter1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: aghi %r15, -168
+; CHECK-NEXT: .cfi_def_cfa_offset 328
+; CHECK-NEXT: stckf 160(%r15)
+; CHECK-NEXT: lg %r2, 160(%r15)
+; CHECK-NEXT: aghi %r15, 168
+; CHECK-NEXT: br %r14
+ %1 = tail call i64 @llvm.readcyclecounter()
+ ret i64 %1
+}
+
+define void @test_builtin_readcyclecounter2(ptr %ptr) {
+; CHECK-LABEL: test_builtin_readcyclecounter2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stckf 0(%r2)
+; CHECK-NEXT: br %r14
+ %1 = tail call i64 @llvm.readcyclecounter()
+ store i64 %1, ptr %ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
index f709b70ff1b7..bf5855010bf9 100644
--- a/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
+++ b/llvm/test/CodeGen/SystemZ/regcoal-subranges-update.mir
@@ -49,6 +49,8 @@ body: |
---
name: segfault
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
liveins: []
body: |
; CHECK-LABEL: name: segfault
diff --git a/llvm/test/CodeGen/SystemZ/swifterror.ll b/llvm/test/CodeGen/SystemZ/swifterror.ll
index 3ea29f1d830e..1b18287cac14 100644
--- a/llvm/test/CodeGen/SystemZ/swifterror.ll
+++ b/llvm/test/CodeGen/SystemZ/swifterror.ll
@@ -30,8 +30,8 @@ entry:
define float @caller(ptr %error_ref) {
; CHECK-LABEL: caller:
; Make a copy of error_ref because r2 is getting clobbered
-; CHECK: lgr %r[[REG1:[0-9]+]], %r2
-; CHECK: lghi %r9, 0
+; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
+; CHECK-DAG: lghi %r9, 0
; CHECK: brasl %r14, foo
; CHECK: %r2, %r9
; CHECK: jlh
@@ -197,7 +197,7 @@ define void @foo_sret(ptr sret(%struct.S) %agg.result, i32 %val1, ptr swifterror
; CHECK-LABEL: foo_sret:
; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
; CHECK-DAG: lr %r[[REG2:[0-9]+]], %r3
-; CHECK: lghi %r2, 16
+; CHECK-DAG: lghi %r2, 16
; CHECK: brasl %r14, malloc
; CHECK: mvi 8(%r2), 1
; CHECK: st %r[[REG2]], 4(%r[[REG1]])
@@ -280,7 +280,7 @@ define float @caller_with_multiple_swifterror_values(ptr %error_ref, ptr %error_
; CHECK-DAG: lgr %r[[REG1:[0-9]+]], %r2
; CHECK-DAG: lgr %r[[REG2:[0-9]+]], %r3
; The first swifterror value:
-; CHECK: lghi %r9, 0
+; CHECK-DAG: lghi %r9, 0
; CHECK: brasl %r14, foo
; CHECK: ltgr %r2, %r9
; CHECK: jlh
diff --git a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
index 69e1c2f4aa0a..9d77744f18ca 100644
--- a/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/SystemZ/vector-constrained-fp-intrinsics.ll
@@ -1649,8 +1649,8 @@ define <2 x double> @constrained_vector_powi_v2f64() #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI36_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: ldr %f2, %f8
@@ -1707,14 +1707,14 @@ define <3 x float> @constrained_vector_powi_v3f32() #0 {
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: larl %r1, .LCPI37_1
; S390X-NEXT: le %f1, 0(%r1)
-; S390X-NEXT: ler %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ler %f8, %f0
; S390X-NEXT: ler %f0, %f1
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: larl %r1, .LCPI37_2
; S390X-NEXT: le %f1, 0(%r1)
-; S390X-NEXT: ler %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ler %f9, %f0
; S390X-NEXT: ler %f0, %f1
; S390X-NEXT: brasl %r14, __powisf2@PLT
; S390X-NEXT: ler %f2, %f9
@@ -1784,14 +1784,14 @@ define void @constrained_vector_powi_v3f64(ptr %a) #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI38_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI38_2
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: std %f0, 16(%r13)
@@ -1865,20 +1865,20 @@ define <4 x double> @constrained_vector_powi_v4f64() #0 {
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_1
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f8, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_2
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f9, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: larl %r1, .LCPI39_3
; S390X-NEXT: ld %f1, 0(%r1)
-; S390X-NEXT: ldr %f10, %f0
; S390X-NEXT: lghi %r2, 3
+; S390X-NEXT: ldr %f10, %f0
; S390X-NEXT: ldr %f0, %f1
; S390X-NEXT: brasl %r14, __powidf2@PLT
; S390X-NEXT: ldr %f2, %f10
diff --git a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
index 767b7028a967..a0f8374e074d 100644
--- a/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
+++ b/llvm/test/CodeGen/Thumb2/aligned-nonfallthrough.ll
@@ -42,9 +42,8 @@ define i64 @loopif(ptr nocapture readonly %x, i32 %y, i32 %n) {
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: blt .LBB1_4
; CHECK-NEXT: @ %bb.1: @ %for.body.lr.ph
-; CHECK-NEXT: mov lr, r2
-; CHECK-NEXT: mov r12, r0
; CHECK-NEXT: dls lr, r2
+; CHECK-NEXT: mov r12, r0
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: movs r3, #0
; CHECK-NEXT: .p2align 2
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index 4ab569777b2a..93cab25c2cb7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -542,9 +542,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(ptr noalias nocapture reado
; CHECK-NEXT: .pad #28
; CHECK-NEXT: sub sp, #28
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #4] @ 8-byte Folded Spill
; CHECK-NEXT: blt .LBB11_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload
@@ -661,9 +659,7 @@ define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture read
; CHECK-NEXT: .pad #136
; CHECK-NEXT: sub sp, #136
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #64] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #68] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #64] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB12_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
@@ -952,11 +948,9 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture read
; CHECK-NEXT: vstrw.32 q1, [sp, #152] @ 16-byte Spill
; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload
; CHECK-NEXT: vstrw.32 q0, [sp, #168] @ 16-byte Spill
-; CHECK-NEXT: vmov q0, q2
-; CHECK-NEXT: vmov q3, q5
-; CHECK-NEXT: vadd.i32 q1, q1, r0
; CHECK-NEXT: vldrw.u32 q0, [sp, #248] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q3, [sp, #216] @ 16-byte Reload
+; CHECK-NEXT: vadd.i32 q1, q1, r0
; CHECK-NEXT: vstrw.32 q5, [sp, #120] @ 16-byte Spill
; CHECK-NEXT: vadd.i32 q0, q0, r0
; CHECK-NEXT: subs.w r11, r11, #16
@@ -1243,9 +1237,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: .pad #64
; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: cmp r2, #1
-; CHECK-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
-; CHECK-NEXT: str r2, [sp, #60] @ 4-byte Spill
+; CHECK-NEXT: strd r1, r2, [sp, #56] @ 8-byte Folded Spill
; CHECK-NEXT: blt.w .LBB14_5
; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
; CHECK-NEXT: adr r5, .LCPI14_3
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index 18c8a8a22ef2..7b8b884576d1 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -609,7 +609,6 @@ define dso_local void @arm_mat_mult_q15(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: strd r0, r2, [sp, #24] @ 8-byte Folded Spill
; CHECK-NEXT: cmp r3, #0
; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: mov r0, r3
; CHECK-NEXT: itt ne
; CHECK-NEXT: ldrne r0, [sp, #136]
; CHECK-NEXT: cmpne r0, #0
diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
index 9987ff940b5a..77980be90520 100644
--- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving-reduct.ll
@@ -108,9 +108,7 @@ define void @correlate(ptr nocapture noundef readonly %ID, ptr nocapture noundef
; CHECK-NEXT: .pad #12
; CHECK-NEXT: sub sp, #12
; CHECK-NEXT: cmp r3, #1
-; CHECK-NEXT: strd r0, r1, [sp] @ 8-byte Folded Spill
-; CHECK-NEXT: mov r1, r3
-; CHECK-NEXT: str r3, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: stm.w sp, {r0, r1, r3} @ 12-byte Folded Spill
; CHECK-NEXT: blt .LBB4_12
; CHECK-NEXT: @ %bb.1: @ %for.body.lr.ph
; CHECK-NEXT: ldr r1, [sp, #48]
diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
index 82a186bcc73d..c03339b52f26 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
@@ -1062,9 +1062,8 @@ define arm_aapcs_vfpcc void @_Z37_arm_radix4_butterfly_inverse_f32_mvePK21arm_cf
; CHECK-NEXT: .pad #40
; CHECK-NEXT: sub sp, #40
; CHECK-NEXT: cmp r2, #8
-; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: vstr s0, [sp] @ 4-byte Spill
-; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: blo .LBB7_9
; CHECK-NEXT: @ %bb.1:
diff --git a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
index 219541cffb94..2e51e9e059f6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vldst4.ll
@@ -95,14 +95,13 @@ define void @vldst4(ptr nocapture readonly %pIn, ptr nocapture %pOut, i32 %numRo
; CHECK-NEXT: vmovx.f16 s8, s27
; CHECK-NEXT: vins.f16 s12, s24
; CHECK-NEXT: vins.f16 s13, s25
+; CHECK-NEXT: vins.f16 s2, s10
; CHECK-NEXT: vins.f16 s3, s11
; CHECK-NEXT: vins.f16 s1, s9
-; CHECK-NEXT: vins.f16 s2, s10
; CHECK-NEXT: vins.f16 s22, s8
; CHECK-NEXT: vmov q2, q3
-; CHECK-NEXT: vmov.f32 s17, s0
-; CHECK-NEXT: vmov.f32 s10, s4
; CHECK-NEXT: vmov q6, q0
+; CHECK-NEXT: vmov.f32 s10, s4
; CHECK-NEXT: vmov.f32 s11, s7
; CHECK-NEXT: vmov.f32 s9, s0
; CHECK-NEXT: vmov.f32 s17, s2
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
index f28311e6563f..f9b175ed80fb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-optimisations.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode -run-pass arm-mve-vpt-opts %s -o - | FileCheck %s
+# RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode -run-pass arm-mve-vpt-opts -verify-machineinstrs %s -o - | FileCheck %s
---
name: vcmp_with_opposite_cond
@@ -1021,3 +1021,26 @@ body: |
%16:mqpr = MVE_VORR %15, %15, 1, %10, $noreg, undef %16
%17:mqpr = MVE_VORR %16, %16, 1, %11, $noreg, undef %17
...
+---
+name: reuse_kill_flags
+alignment: 4
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: reuse_kill_flags
+ ; CHECK: [[t2MOVi:%[0-9]+]]:tgpreven = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vccr = COPY [[t2MOVi]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:mqpr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MVE_VORR:%[0-9]+]]:mqpr = MVE_VORR [[DEF]], [[DEF]], 1, [[COPY]], $noreg, undef [[MVE_VORR]]
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:mqpr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[MVE_VORR1:%[0-9]+]]:mqpr = MVE_VORR [[DEF1]], [[DEF1]], 1, killed [[COPY]], $noreg, undef [[MVE_VORR1]]
+ ; CHECK-NEXT: tBX_RET 14 /* CC::al */, $noreg, implicit [[DEF1]]
+ %0:tgpreven = t2MOVi 0, 14, $noreg, $noreg
+ %1:vccr = COPY %0:tgpreven
+ %2:mqpr = IMPLICIT_DEF
+ %3:mqpr = MVE_VORR %2:mqpr, %2:mqpr, 1, killed %1, $noreg, undef %3
+ %4:vccr = COPY %0:tgpreven
+ %5:mqpr = IMPLICIT_DEF
+ %6:mqpr = MVE_VORR %5:mqpr, %5:mqpr, 1, killed %4, $noreg, undef %6
+ tBX_RET 14 /* CC::al */, $noreg, implicit %5:mqpr
+
+...
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
index aa4d87756c87..4a63c812d6ae 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
@@ -59,12 +59,12 @@ entry:
%call = call i32 @setjmp(ptr %buf) #0
call void @longjmp(ptr %buf, i32 1) #1
unreachable
-; SJLJ: call saveSetjmp
+; SJLJ: call __wasm_setjmp
; SJLJ: i32.const emscripten_longjmp
; SJLJ-NOT: i32.const emscripten_longjmp_jmpbuf
; SJLJ: call invoke_vii
; SJLJ-NOT: call "__invoke_void_ptr_i32"
-; SJLJ: call testSetjmp
+; SJLJ: call __wasm_setjmp_test
; NONE: call setjmp
; NONE: call longjmp
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
index 7cf05cc922cd..32942cd92e68 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj.ll
@@ -49,7 +49,7 @@ try.cont: ; preds = %lpad, %entry
; longjmp checking part
; CHECK: if.then1:
-; CHECK: call i32 @testSetjmp
+; CHECK: call i32 @__wasm_setjmp_test
}
; @foo can either throw an exception or longjmp. Because this function doesn't
@@ -117,7 +117,6 @@ if.end: ; preds = %entry
; CHECK: rethrow.exn:
; CHECK-NEXT: %exn = call ptr @__cxa_find_matching_catch_2()
-; CHECK-NEXT: call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__resumeException(ptr %exn)
; CHECK-NEXT: unreachable
@@ -147,7 +146,6 @@ throw: ; preds = %if.end, %entry
unreachable
; CHECK: throw:
-; CHECK-NEXT: call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__cxa_throw(ptr null, ptr null, ptr null)
; CHECK-NEXT: unreachable
}
@@ -208,7 +206,6 @@ return: ; preds = %entry, %if.end
; CHECK: rethrow.exn:
; CHECK-NEXT: %exn = call ptr @__cxa_find_matching_catch_2()
-; CHECK-NEXT: tail call void @free(ptr %setjmpTable{{.*}})
; CHECK-NEXT: call void @__resumeException(ptr %exn)
; CHECK-NEXT: unreachable
}
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
index 1a85a63e44ad..79ae16191d6b 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
@@ -12,7 +12,7 @@ target triple = "wasm32-unknown-emscripten"
; CHECK-LABEL: @malloc_test
define void @malloc_test() {
entry:
- ; CHECK: call ptr @malloc
+ ; CHECK: alloca i32
%retval = alloca i32, align 4
%jmp = alloca [1 x %struct.__jmp_buf_tag], align 16
store i32 0, ptr %retval, align 4
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
index 4f694151c761..fec9836a1607 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-debuginfo.ll
@@ -16,25 +16,22 @@ entry:
call void @foo(), !dbg !7
ret void, !dbg !8
; CHECK: entry:
- ; CHECK-NEXT: call ptr @malloc(i32 40), !dbg ![[DL0:.*]]
+ ; CHECK-NEXT: %functionInvocationId = alloca i32, align 4, !dbg ![[DL0:.*]]
; CHECK: entry.split:
; CHECK: alloca {{.*}}, !dbg ![[DL0]]
- ; CHECK: call ptr @saveSetjmp{{.*}}, !dbg ![[DL1:.*]]
- ; CHECK-NEXT: call i32 @getTempRet0{{.*}}, !dbg ![[DL1]]
+ ; CHECK: call void @__wasm_setjmp{{.*}}, !dbg ![[DL1:.*]]
; CHECK-NEXT: br {{.*}}, !dbg ![[DL2:.*]]
; CHECK: entry.split.split:
; CHECK: call {{.*}} void @__invoke_void{{.*}}, !dbg ![[DL2]]
; CHECK: entry.split.split.split:
- ; CHECK-NEXT: call void @free{{.*}}, !dbg ![[DL3:.*]]
; CHECK: if.then1:
- ; CHECK: call i32 @testSetjmp{{.*}}, !dbg ![[DL2]]
+ ; CHECK: call i32 @__wasm_setjmp_test{{.*}}, !dbg ![[DL2]]
; CHECK: if.end:
- ; CHECK: call i32 @getTempRet0{{.*}}, !dbg ![[DL2]]
; CHECK: call.em.longjmp:
; CHECK: call void @emscripten_longjmp{{.*}}, !dbg ![[DL2]]
@@ -43,26 +40,6 @@ entry:
; CHECK: call void @setTempRet0{{.*}}, !dbg ![[DL2]]
}
-; No instruction has debug info but the current function (setjmp_debug_info2)
-; and the called function (malloc / free) have DISubprograms, so the newly
-; generated calls should have debug info attached. We don't have an instruction
-; to take debug info from, so we create dummy debug info.
-define void @setjmp_debug_info1() !dbg !9 {
-; CHECK-LABEL: @setjmp_debug_info1
-entry:
- %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
- %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], ptr %buf, i32 0, i32 0
- %call = call i32 @setjmp(ptr %arraydecay) #0
- call void @foo()
- ret void
- ; CHECK: call ptr @malloc(i32 40), !dbg ![[DL_DUMMY:.*]]
- ; CHECK: call void @free{{.*}}, !dbg ![[DL_DUMMY]]
-}
-
-; Note that these functions have DISubprograms.
-declare !dbg !10 ptr @malloc(i32)
-declare !dbg !11 void @free(ptr)
-
declare void @foo()
; Function Attrs: returns_twice
declare i32 @setjmp(ptr) #0
@@ -79,9 +56,3 @@ declare i32 @setjmp(ptr) #0
!6 = !DILocation(line:4, scope: !3)
!7 = !DILocation(line:5, scope: !3)
!8 = !DILocation(line:6, scope: !3)
-!9 = distinct !DISubprogram(name: "setjmp_debug_info1", unit:!2, file: !1, line: 50)
-!10 = !DISubprogram(name: "malloc", file: !1, line: 10, isDefinition: false)
-!11 = !DISubprogram(name: "free", file: !1, line: 20, isDefinition: false)
-
-; Dummy debug info generated
-; CHECK: ![[DL_DUMMY]] = !DILocation(line: 50, column: 1, scope: !9)
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
index 7115b01ed161..27ec95a2c462 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj.ll
@@ -22,15 +22,12 @@ entry:
call void @longjmp(ptr %buf, i32 1) #1
unreachable
; CHECK: entry:
-; CHECK-NEXT: %[[MALLOCCALL:.*]] = tail call ptr @malloc([[PTR]] 40)
-; CHECK-NEXT: store i32 0, ptr %[[MALLOCCALL]]
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE:.*]] = add i32 4, 0
+; CHECK-NEXT: %functionInvocationId = alloca i32, align 4
; CHECK-NEXT: br label %entry.split
; CHECK: entry.split
; CHECK-NEXT: %[[BUF:.*]] = alloca [1 x %struct.__jmp_buf_tag]
-; CHECK-NEXT: %[[SETJMP_TABLE1:.*]] = call ptr @saveSetjmp(ptr %[[BUF]], i32 1, ptr %[[MALLOCCALL]], i32 %[[SETJMP_TABLE_SIZE]])
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE1:.*]] = call i32 @getTempRet0()
+; CHECK-NEXT: call void @__wasm_setjmp(ptr %[[BUF]], i32 1, ptr %functionInvocationId)
; CHECK-NEXT: br label %entry.split.split
; CHECK: entry.split.split:
@@ -51,8 +48,7 @@ entry:
; CHECK: if.then1:
; CHECK-NEXT: %[[__THREW__VAL_P:.*]] = inttoptr [[PTR]] %[[__THREW__VAL]] to ptr
-; CHECK-NEXT: %[[__THREW__VAL_P_LOADED:.*]] = load [[PTR]], ptr %[[__THREW__VAL_P]]
-; CHECK-NEXT: %[[LABEL:.*]] = call i32 @testSetjmp([[PTR]] %[[__THREW__VAL_P_LOADED]], ptr %[[SETJMP_TABLE1]], i32 %[[SETJMP_TABLE_SIZE1]])
+; CHECK-NEXT: %[[LABEL:.*]] = call i32 @__wasm_setjmp_test(ptr %[[__THREW__VAL_P]], ptr %functionInvocationId)
; CHECK-NEXT: %[[CMP:.*]] = icmp eq i32 %[[LABEL]], 0
; CHECK-NEXT: br i1 %[[CMP]], label %call.em.longjmp, label %if.end2
@@ -69,7 +65,6 @@ entry:
; CHECK: call.em.longjmp:
; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %[[__THREW__VAL]], %if.then1 ]
; CHECK-NEXT: %threwvalue.phi = phi i32 [ %[[THREWVALUE_VAL]], %if.then1 ]
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE1]])
; CHECK-NEXT: call void @emscripten_longjmp([[PTR]] %threw.phi, i32 %threwvalue.phi)
; CHECK-NEXT: unreachable
@@ -87,13 +82,12 @@ entry:
call void @foo()
ret void
; CHECK: entry:
-; CHECK: %[[SETJMP_TABLE:.*]] = call ptr @saveSetjmp(
+; CHECK: call void @__wasm_setjmp(
; CHECK: entry.split.split:
; CHECK: @__invoke_void(ptr @foo)
; CHECK: entry.split.split.split:
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE]])
; CHECK-NEXT: ret void
}
@@ -110,9 +104,8 @@ entry:
call void @foo()
ret void
; CHECK: call.em.longjmp:
-; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %__THREW__.val, %if.then1 ], [ %__THREW__.val4, %if.then15 ]
-; CHECK-NEXT: %threwvalue.phi = phi i32 [ %__threwValue.val, %if.then1 ], [ %__threwValue.val8, %if.then15 ]
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMP_TABLE1]])
+; CHECK-NEXT: %threw.phi = phi [[PTR]] [ %__THREW__.val, %if.then1 ], [ %__THREW__.val2, %if.then13 ]
+; CHECK-NEXT: %threwvalue.phi = phi i32 [ %__threwValue.val, %if.then1 ], [ %__threwValue.val6, %if.then13 ]
; CHECK-NEXT: call void @emscripten_longjmp([[PTR]] %threw.phi, i32 %threwvalue.phi)
; CHECK-NEXT: unreachable
}
@@ -145,7 +138,6 @@ entry:
%cmp = icmp sgt i32 %n, 5
br i1 %cmp, label %if.then, label %if.end
; CHECK: entry:
-; CHECK: %[[SETJMP_TABLE_SIZE0:.*]] = add i32 4, 0
if.then: ; preds = %entry
%0 = load i32, ptr @global_var, align 4
@@ -154,13 +146,10 @@ if.then: ; preds = %entry
br label %if.end
; CHECK: if.then:
; CHECK: %[[VAR0:.*]] = load i32, ptr @global_var, align 4
-; CHECK: %[[SETJMP_TABLE1:.*]] = call ptr @saveSetjmp(
-; CHECK-NEXT: %[[SETJMP_TABLE_SIZE1:.*]] = call i32 @getTempRet0()
+; CHECK: call void @__wasm_setjmp(
; CHECK: if.then.split:
-; CHECK: %[[VAR1:.*]] = phi i32 [ %[[VAR2:.*]], %if.end3 ], [ %[[VAR0]], %if.then ]
-; CHECK: %[[SETJMP_TABLE_SIZE2:.*]] = phi i32 [ %[[SETJMP_TABLE_SIZE1]], %if.then ], [ %[[SETJMP_TABLE_SIZE3:.*]], %if.end3 ]
-; CHECK: %[[SETJMP_TABLE2:.*]] = phi ptr [ %[[SETJMP_TABLE1]], %if.then ], [ %[[SETJMP_TABLE3:.*]], %if.end3 ]
+; CHECK: %[[VAR1:.*]] = phi i32 [ %[[VAR2:.*]], %if.end1 ], [ %[[VAR0]], %if.then ]
; CHECK: store i32 %[[VAR1]], ptr @global_var, align 4
if.end: ; preds = %if.then, %entry
@@ -168,8 +157,6 @@ if.end: ; preds = %if.then, %entry
unreachable
; CHECK: if.end:
; CHECK: %[[VAR2]] = phi i32 [ %[[VAR1]], %if.then.split ], [ undef, %entry.split ]
-; CHECK: %[[SETJMP_TABLE_SIZE3]] = phi i32 [ %[[SETJMP_TABLE_SIZE2]], %if.then.split ], [ %[[SETJMP_TABLE_SIZE0]], %entry.split ]
-; CHECK: %[[SETJMP_TABLE3]] = phi ptr [ %[[SETJMP_TABLE2]], %if.then.split ], [ %setjmpTable, %entry.split ]
}
; Test a case when a function only calls other functions that are neither setjmp nor longjmp
@@ -296,8 +283,8 @@ declare void @free(ptr)
; JS glue functions and invoke wrappers declaration
; CHECK-DAG: declare i32 @getTempRet0()
; CHECK-DAG: declare void @setTempRet0(i32)
-; CHECK-DAG: declare ptr @saveSetjmp(ptr, i32, ptr, i32)
-; CHECK-DAG: declare i32 @testSetjmp([[PTR]], ptr, i32)
+; CHECK-DAG: declare void @__wasm_setjmp(ptr, i32, ptr)
+; CHECK-DAG: declare i32 @__wasm_setjmp_test(ptr, ptr)
; CHECK-DAG: declare void @emscripten_longjmp([[PTR]], i32)
; CHECK-DAG: declare void @__invoke_void(ptr)
@@ -308,8 +295,8 @@ attributes #3 = { allocsize(0) }
; CHECK-DAG: attributes #{{[0-9]+}} = { nounwind "wasm-import-module"="env" "wasm-import-name"="getTempRet0" }
; CHECK-DAG: attributes #{{[0-9]+}} = { nounwind "wasm-import-module"="env" "wasm-import-name"="setTempRet0" }
; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__invoke_void" }
-; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="saveSetjmp" }
-; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="testSetjmp" }
+; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__wasm_setjmp" }
+; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__wasm_setjmp_test" }
; CHECK-DAG: attributes #{{[0-9]+}} = { noreturn "wasm-import-module"="env" "wasm-import-name"="emscripten_longjmp" }
; CHECK-DAG: attributes #{{[0-9]+}} = { "wasm-import-module"="env" "wasm-import-name"="__invoke_ptr_i32_ptr" }
; CHECK-DAG: attributes #[[ALLOCSIZE_ATTR]] = { allocsize(1) }
diff --git a/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
index 25471eb50081..bd8db83a0e57 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-wasm-ehsjlj.ll
@@ -108,8 +108,8 @@ catch: ; preds = %catch.start
call void @__cxa_end_catch() [ "funclet"(token %2) ]
catchret from %2 to label %catchret.dest
; CHECK: catch: ; preds = %catch.start
-; CHECK-NEXT: %exn = load ptr, ptr %exn.slot15, align 4
-; CHECK-NEXT: %5 = call ptr @__cxa_begin_catch(ptr %exn) #7 [ "funclet"(token %2) ]
+; CHECK-NEXT: %exn = load ptr, ptr %exn.slot6, align 4
+; CHECK-NEXT: %5 = call ptr @__cxa_begin_catch(ptr %exn) #6 [ "funclet"(token %2) ]
; CHECK-NEXT: invoke void @__cxa_end_catch() [ "funclet"(token %2) ]
; CHECK-NEXT: to label %.noexc unwind label %catch.dispatch.longjmp
@@ -265,7 +265,7 @@ ehcleanup: ; preds = %entry
; (cleanuppad), whose parent is 'none', so we should unwind directly to
; %catch.dispatch.longjmp.
%call2 = call noundef ptr @_ZN4TempD2Ev(ptr noundef %t) #2 [ "funclet"(token %0) ]
-; CHECK: %call13 = invoke {{.*}} ptr @_ZN4TempD2Ev(ptr
+; CHECK: %call11 = invoke {{.*}} ptr @_ZN4TempD2Ev(ptr
; CHECK-NEXT: to label {{.*}} unwind label %catch.dispatch.longjmp
cleanupret from %0 unwind to caller
}
diff --git a/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll b/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
index b8d2230fac9f..82c04e24b72f 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-wasm-sjlj.ll
@@ -25,16 +25,12 @@ entry:
unreachable
; CHECK: entry:
-; CHECK-NEXT: %setjmpTable = tail call ptr @malloc([[PTR]] 40)
-; CHECK-NEXT: store i32 0, ptr %setjmpTable, align 4
-; CHECK-NEXT: %setjmpTableSize = add i32 4, 0
+; CHECK-NEXT: %functionInvocationId = alloca i32, align 4
; CHECK-NEXT: br label %setjmp.dispatch
; CHECK: setjmp.dispatch:
; CHECK-NEXT: %[[VAL2:.*]] = phi i32 [ %val, %if.end ], [ undef, %entry ]
; CHECK-NEXT: %[[BUF:.*]] = phi ptr [ %[[BUF2:.*]], %if.end ], [ undef, %entry ]
-; CHECK-NEXT: %[[SETJMPTABLESIZE2:.*]] = phi i32 [ %[[SETJMPTABLESIZE3:.*]], %if.end ], [ %setjmpTableSize, %entry ]
-; CHECK-NEXT: %[[SETJMPTABLE2:.*]] = phi ptr [ %[[SETJMPTABLE3:.*]], %if.end ], [ %setjmpTable, %entry ]
; CHECK-NEXT: %label.phi = phi i32 [ %label, %if.end ], [ -1, %entry ]
; CHECK-NEXT: switch i32 %label.phi, label %entry.split [
; CHECK-NEXT: i32 1, label %entry.split.split
@@ -42,14 +38,11 @@ entry:
; CHECK: entry.split:
; CHECK-NEXT: %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-; CHECK-NEXT: %[[SETJMPTABLE4:.*]] = call ptr @saveSetjmp(ptr %buf, i32 1, ptr %[[SETJMPTABLE2]], i32 %[[SETJMPTABLESIZE2]])
-; CHECK-NEXT: %[[SETJMPTABLESIZE4:.*]] = call i32 @getTempRet0()
+; CHECK-NEXT: call void @__wasm_setjmp(ptr %buf, i32 1, ptr %functionInvocationId)
; CHECK-NEXT: br label %entry.split.split
; CHECK: entry.split.split:
; CHECK-NEXT: %[[BUF2]] = phi ptr [ %[[BUF]], %setjmp.dispatch ], [ %buf, %entry.split ]
-; CHECK-NEXT: %[[SETJMPTABLESIZE3]] = phi i32 [ %[[SETJMPTABLESIZE4]], %entry.split ], [ %[[SETJMPTABLESIZE2]], %setjmp.dispatch ]
-; CHECK-NEXT: %[[SETJMPTABLE3]] = phi ptr [ %[[SETJMPTABLE4]], %entry.split ], [ %[[SETJMPTABLE2]], %setjmp.dispatch ]
; CHECK-NEXT: %setjmp.ret = phi i32 [ 0, %entry.split ], [ %[[VAL2]], %setjmp.dispatch ]
; CHECK-NEXT: invoke void @__wasm_longjmp(ptr %[[BUF2]], i32 1)
; CHECK-NEXT: to label %.noexc unwind label %catch.dispatch.longjmp
@@ -67,13 +60,11 @@ entry:
; CHECK-NEXT: %val_gep = getelementptr { ptr, i32 }, ptr %thrown, i32 0, i32 1
; CHECK-NEXT: %env = load ptr, ptr %env_gep, align {{.*}}
; CHECK-NEXT: %val = load i32, ptr %val_gep, align 4
-; CHECK-NEXT: %setjmp.id = load [[PTR]], ptr %env, align {{.*}}
-; CHECK-NEXT: %label = call i32 @testSetjmp([[PTR]] %setjmp.id, ptr %[[SETJMPTABLE3]], i32 %[[SETJMPTABLESIZE3]]) [ "funclet"(token %1) ]
+; CHECK-NEXT: %label = call i32 @__wasm_setjmp_test(ptr %env, ptr %functionInvocationId) [ "funclet"(token %1) ]
; CHECK-NEXT: %2 = icmp eq i32 %label, 0
; CHECK-NEXT: br i1 %2, label %if.then, label %if.end
; CHECK: if.then:
-; CHECK-NEXT: tail call void @free(ptr %[[SETJMPTABLE3]]) [ "funclet"(token %1) ]
; CHECK-NEXT: call void @__wasm_longjmp(ptr %env, i32 %val) [ "funclet"(token %1) ]
; CHECK-NEXT: unreachable
@@ -142,10 +133,9 @@ declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
declare void @free(ptr)
-; JS glue function declarations
-; CHECK-DAG: declare i32 @getTempRet0()
-; CHECK-DAG: declare ptr @saveSetjmp(ptr, i32, ptr, i32)
-; CHECK-DAG: declare i32 @testSetjmp([[PTR]], ptr, i32)
+; Runtime glue function declarations
+; CHECK-DAG: declare void @__wasm_setjmp(ptr, i32, ptr)
+; CHECK-DAG: declare i32 @__wasm_setjmp_test(ptr, ptr)
; CHECK-DAG: declare void @__wasm_longjmp(ptr, i32)
attributes #0 = { returns_twice }
diff --git a/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll b/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
index 535450a52ff6..695a2d0cd806 100644
--- a/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
+++ b/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
@@ -9,11 +9,11 @@ define <2 x i64> @_mm_insert_epi16(<2 x i64> %a, i32 %b, i32 %imm) nounwind read
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $32, %esp
-; X86-NEXT: movzwl 8(%ebp), %eax
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: andl $7, %ecx
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movzwl 8(%ebp), %ecx
+; X86-NEXT: andl $7, %eax
; X86-NEXT: movaps %xmm0, (%esp)
-; X86-NEXT: movw %ax, (%esp,%ecx,2)
+; X86-NEXT: movw %cx, (%esp,%eax,2)
; X86-NEXT: movaps (%esp), %xmm0
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir
new file mode 100644
index 000000000000..e0fb0fc9d11d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-icmp-vec.mir
@@ -0,0 +1,25 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - 2>%t | FileCheck %s
+# RUN: FileCheck -check-prefix=ILLEGAL %s < %t
+
+# ILLEGAL: remark: <unknown>:0:0: unable to legalize instruction: %2:_(<4 x s1>) = G_ICMP intpred(sle), %0:_(<4 x s64>), %1:_ (in function: test_icmp_v4i64)
+
+# PR86203
+---
+name: test_icmp_v4i64
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: test_icmp_v4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<4 x s1>) = G_ICMP intpred(sle), [[DEF]](<4 x s64>), [[DEF1]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[ICMP]](<4 x s1>)
+ ; CHECK-NEXT: $xmm0 = COPY [[ANYEXT]](<4 x s32>)
+ ; CHECK-NEXT: RET 0, implicit $xmm0
+ %0:_(<4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<4 x s64>) = G_IMPLICIT_DEF
+ %3:_(<4 x s1>) = G_ICMP intpred(sle), %0(<4 x s64>), %1
+ %4:_(<4 x s32>) = G_ANYEXT %3(<4 x s1>)
+ $xmm0 = COPY %4(<4 x s32>)
+ RET 0, implicit $xmm0
+...
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
index ea548c296dca..20b8b671ac5a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-trap.mir
@@ -23,6 +23,6 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: trap
; CHECK: TRAP
- G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.trap)
+ G_TRAP
...
diff --git a/llvm/test/CodeGen/X86/addcarry.ll b/llvm/test/CodeGen/X86/addcarry.ll
index 3fc4ed99fad0..f8d32fc2d292 100644
--- a/llvm/test/CodeGen/X86/addcarry.ll
+++ b/llvm/test/CodeGen/X86/addcarry.ll
@@ -1490,3 +1490,26 @@ define { i64, i64 } @addcarry_commutative_2(i64 %x0, i64 %x1, i64 %y0, i64 %y1)
%r1 = insertvalue { i64, i64 } %r0, i64 %b1s, 1
ret { i64, i64 } %r1
}
+
+define i1 @pr84831(i64 %arg) {
+; CHECK-LABEL: pr84831:
+; CHECK: # %bb.0:
+; CHECK-NEXT: testq %rdi, %rdi
+; CHECK-NEXT: setne %al
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: addb $-1, %al
+; CHECK-NEXT: adcq $1, %rcx
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: retq
+ %a = icmp ult i64 0, %arg
+ %add1 = add i64 0, 1
+ %carryout1 = icmp ult i64 %add1, 0
+ %b = zext i1 %a to i64
+ %add2 = add i64 %add1, %b
+ %carryout2 = icmp ult i64 %add2, %add1
+ %zc1 = zext i1 %carryout1 to i63
+ %zc2 = zext i1 %carryout2 to i63
+ %or = or i63 %zc1, %zc2
+ %trunc = trunc i63 %or to i1
+ ret i1 %trunc
+}
diff --git a/llvm/test/CodeGen/X86/allow-check.ll b/llvm/test/CodeGen/X86/allow-check.ll
index 3f0bb1837b22..602e5a94ee3e 100644
--- a/llvm/test/CodeGen/X86/allow-check.ll
+++ b/llvm/test/CodeGen/X86/allow-check.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=x86_64 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64 -global-isel | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64 -fast-isel | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=0 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=1 -fast-isel=0 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64 -global-isel=0 -fast-isel=1 | FileCheck %s
define i1 @test_runtime() local_unnamed_addr {
; CHECK-LABEL: test_runtime:
diff --git a/llvm/test/CodeGen/X86/domain-reassignment-ndd.mir b/llvm/test/CodeGen/X86/apx/domain-reassignment.mir
index dcd435619990..7352aa2b307f 100644
--- a/llvm/test/CodeGen/X86/domain-reassignment-ndd.mir
+++ b/llvm/test/CodeGen/X86/apx/domain-reassignment.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -run-pass x86-domain-reassignment -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq -o - %s | FileCheck %s
+# RUN: llc -run-pass x86-domain-reassignment -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+ndd -o - %s | FileCheck %s
--- |
; ModuleID = '../test/CodeGen/X86/gpr-to-mask.ll'
source_filename = "../test/CodeGen/X86/gpr-to-mask.ll"
@@ -302,13 +302,13 @@ body: |
%6 = COPY %5
%7 = COPY %6.sub_8bit
- %12 = SHR8ri %7, 2, implicit-def dead $eflags
- %13 = SHL8ri %12, 1, implicit-def dead $eflags
- %14 = NOT8r %13
- %15 = OR8rr %14, %12, implicit-def dead $eflags
- %16 = AND8rr %15, %13, implicit-def dead $eflags
- %17 = XOR8rr %16, %12, implicit-def dead $eflags
- %18 = ADD8rr %17, %14, implicit-def dead $eflags
+ %12 = SHR8ri_ND %7, 2, implicit-def dead $eflags
+ %13 = SHL8ri_ND %12, 1, implicit-def dead $eflags
+ %14 = NOT8r_ND %13
+ %15 = OR8rr_ND %14, %12, implicit-def dead $eflags
+ %16 = AND8rr_ND %15, %13, implicit-def dead $eflags
+ %17 = XOR8rr_ND %16, %12, implicit-def dead $eflags
+ %18 = ADD8rr_ND %17, %14, implicit-def dead $eflags
%8 = IMPLICIT_DEF
%9 = INSERT_SUBREG %8, %18, %subreg.sub_8bit_hi
@@ -421,12 +421,12 @@ body: |
%6 = COPY %5
%7 = COPY %6.sub_16bit
- %12 = SHR16ri %7, 2, implicit-def dead $eflags
- %13 = SHL16ri %12, 1, implicit-def dead $eflags
- %14 = NOT16r %13
- %15 = OR16rr %14, %12, implicit-def dead $eflags
- %16 = AND16rr %15, %13, implicit-def dead $eflags
- %17 = XOR16rr %16, %12, implicit-def dead $eflags
+ %12 = SHR16ri_ND %7, 2, implicit-def dead $eflags
+ %13 = SHL16ri_ND %12, 1, implicit-def dead $eflags
+ %14 = NOT16r_ND %13
+ %15 = OR16rr_ND %14, %12, implicit-def dead $eflags
+ %16 = AND16rr_ND %15, %13, implicit-def dead $eflags
+ %17 = XOR16rr_ND %16, %12, implicit-def dead $eflags
%8 = IMPLICIT_DEF
%9 = INSERT_SUBREG %8, %17, %subreg.sub_16bit
@@ -524,14 +524,14 @@ body: |
%2 = COPY $zmm1
%5 = MOV32rm %0, 1, $noreg, 0, $noreg
- %6 = SHR32ri %5, 2, implicit-def dead $eflags
- %7 = SHL32ri %6, 1, implicit-def dead $eflags
- %8 = NOT32r %7
- %9 = OR32rr %8, %6, implicit-def dead $eflags
- %10 = AND32rr %9, %7, implicit-def dead $eflags
- %11 = XOR32rr %10, %6, implicit-def dead $eflags
+ %6 = SHR32ri_ND %5, 2, implicit-def dead $eflags
+ %7 = SHL32ri_ND %6, 1, implicit-def dead $eflags
+ %8 = NOT32r_ND %7
+ %9 = OR32rr_ND %8, %6, implicit-def dead $eflags
+ %10 = AND32rr_ND %9, %7, implicit-def dead $eflags
+ %11 = XOR32rr_ND %10, %6, implicit-def dead $eflags
%12 = ANDN32rr %11, %9, implicit-def dead $eflags
- %13 = ADD32rr %12, %11, implicit-def dead $eflags
+ %13 = ADD32rr_ND %12, %11, implicit-def dead $eflags
%3 = COPY %13
%4 = VMOVDQU16Zrrk %2, killed %3, %1
@@ -627,14 +627,14 @@ body: |
%2 = COPY $zmm1
%5 = MOV64rm %0, 1, $noreg, 0, $noreg
- %6 = SHR64ri %5, 2, implicit-def dead $eflags
- %7 = SHL64ri %6, 1, implicit-def dead $eflags
- %8 = NOT64r %7
- %9 = OR64rr %8, %6, implicit-def dead $eflags
- %10 = AND64rr %9, %7, implicit-def dead $eflags
- %11 = XOR64rr %10, %6, implicit-def dead $eflags
+ %6 = SHR64ri_ND %5, 2, implicit-def dead $eflags
+ %7 = SHL64ri_ND %6, 1, implicit-def dead $eflags
+ %8 = NOT64r_ND %7
+ %9 = OR64rr_ND %8, %6, implicit-def dead $eflags
+ %10 = AND64rr_ND %9, %7, implicit-def dead $eflags
+ %11 = XOR64rr_ND %10, %6, implicit-def dead $eflags
%12 = ANDN64rr %11, %9, implicit-def dead $eflags
- %13 = ADD64rr %12, %11, implicit-def dead $eflags
+ %13 = ADD64rr_ND %12, %11, implicit-def dead $eflags
%3 = COPY %13
%4 = VMOVDQU8Zrrk %2, killed %3, %1
@@ -712,7 +712,7 @@ body: |
%2 = COPY $zmm1
%5 = MOVZX16rm8 %0, 1, $noreg, 0, $noreg
- %6 = NOT16r %5
+ %6 = NOT16r_ND %5
%3 = COPY %6
%4 = VMOVAPSZrrk %2, killed %3, %1
@@ -785,7 +785,7 @@ body: |
%5 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg
%6 = MOVZX32rm16 %0, 1, $noreg, 0, $noreg
- %7 = ADD32rr %5, %6, implicit-def dead $eflags
+ %7 = ADD32rr_ND %5, %6, implicit-def dead $eflags
%3 = COPY %7
%4 = VMOVDQU16Zrrk %2, killed %3, %1
@@ -858,7 +858,7 @@ body: |
%5 = MOVZX64rm8 %0, 1, $noreg, 0, $noreg
%6 = MOVZX64rm16 %0, 1, $noreg, 0, $noreg
- %7 = ADD64rr %5, %6, implicit-def dead $eflags
+ %7 = ADD64rr_ND %5, %6, implicit-def dead $eflags
%3 = COPY %7
%4 = VMOVDQU8Zrrk %2, killed %3, %1
diff --git a/llvm/test/CodeGen/X86/apx/foldimmediate.mir b/llvm/test/CodeGen/X86/apx/foldimmediate.mir
new file mode 100644
index 000000000000..310fc64841f7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/apx/foldimmediate.mir
@@ -0,0 +1,70 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt %s -o - | FileCheck %s
+--- |
+ define void @foldImmediate() { ret void }
+...
+---
+# Check that immediates can be folded into ALU instructions.
+name: foldImmediate
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+ - { id: 3, class: gr32 }
+ - { id: 4, class: gr32 }
+ - { id: 5, class: gr32 }
+ - { id: 6, class: gr32 }
+ - { id: 7, class: gr64 }
+ - { id: 8, class: gr64 }
+ - { id: 9, class: gr64 }
+ - { id: 10, class: gr64 }
+ - { id: 11, class: gr64 }
+ - { id: 12, class: gr64 }
+ - { id: 13, class: gr64 }
+ - { id: 14, class: gr64 }
+ - { id: 15, class: gr64 }
+ - { id: 16, class: gr32 }
+ - { id: 17, class: gr64 }
+ - { id: 18, class: gr32 }
+
+body: |
+ bb.0:
+ liveins: $rdi, $rsi
+
+ ; CHECK-LABEL: name: foldImmediate
+ ; CHECK: liveins: $rdi, $rsi
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 81
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK-NEXT: CTEST32ri [[COPY]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP32ri [[COPY]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[MOV32ri]], %subreg.sub_32bit
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK-NEXT: CTEST64ri32 [[COPY1]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP64ri32 [[COPY1]], 81, 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ ; CHECK-NEXT: CCMP64rr [[SUBREG_TO_REG]], [[COPY1]], 2, 10, implicit-def $eflags, implicit $eflags
+ ; CHECK-NEXT: NOOP implicit $eflags
+ %0 = MOV32ri 81
+ %1 = COPY $edi
+
+ CTEST32rr %0, %1, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ CCMP32rr %1, %0, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ %7 = SUBREG_TO_REG 0, killed %0:gr32, %subreg.sub_32bit
+ %8 = COPY $rsi
+
+ CTEST64rr %8, %7, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+
+ CCMP64rr %8, %7, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+ CCMP64rr %7, %8, 2, 10, implicit-def $eflags, implicit $eflags
+ NOOP implicit $eflags
+...
diff --git a/llvm/test/CodeGen/X86/avgceilu.ll b/llvm/test/CodeGen/X86/avgceilu.ll
index 3a74fca23773..dee1a5a720f9 100644
--- a/llvm/test/CodeGen/X86/avgceilu.ll
+++ b/llvm/test/CodeGen/X86/avgceilu.ll
@@ -319,18 +319,11 @@ define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) {
;
; AVX1-LABEL: test_fixed_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i8:
@@ -392,15 +385,11 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) {
;
; AVX1-LABEL: test_fixed_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpavgw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i16:
@@ -959,29 +948,16 @@ define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) {
;
; AVX1-LABEL: test_fixed_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm6
-; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
-; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubb %xmm6, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubb %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v64i8:
@@ -1054,24 +1030,16 @@ define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) {
;
; AVX1-LABEL: test_fixed_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpsubw %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpavgw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpavgw %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpavgw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i16:
diff --git a/llvm/test/CodeGen/X86/callbr-asm-kill.mir b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
index 86c58c4715ed..0dded37c97af 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-kill.mir
+++ b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
@@ -45,6 +45,7 @@ liveins:
- { reg: '$rsi', virtual-reg: '%3' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/CodeGen/X86/combine-pavg.ll b/llvm/test/CodeGen/X86/combine-pavg.ll
index 9bb7fec7eeac..7a8ddf5178d3 100644
--- a/llvm/test/CodeGen/X86/combine-pavg.ll
+++ b/llvm/test/CodeGen/X86/combine-pavg.ll
@@ -80,3 +80,33 @@ define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
%trunc = trunc <16 x i16> %shuffle to <16 x i8>
ret <16 x i8> %trunc
}
+
+define <8 x i16> @combine_pavgw_demandedelts(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_pavgw_demandedelts:
+; SSE: # %bb.0:
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,12,13,12,13]
+; SSE-NEXT: pavgw %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: combine_pavgw_demandedelts:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,0,1,0,1,8,9,8,9,12,13,12,13]
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_pavgw_demandedelts:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %s0 = shufflevector <8 x i16> %a0, <8 x i16> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %avg = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %s0, <8 x i16> %a1)
+ %shuffle = shufflevector <8 x i16> %avg, <8 x i16> poison, <8 x i32> zeroinitializer
+ ret <8 x i16> %shuffle
+}
+
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 42b325dd4c22..734abfe55a4e 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -322,5 +322,132 @@ define void @g(i32 %a) nounwind {
ret void
}
+define i32 @shift_zext_shl(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $64, %eax
+; X86-NEXT: shll $9, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shift_zext_shl:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $64, %eax
+; X64-NEXT: shll $9, %eax
+; X64-NEXT: retq
+ %a = and i8 %x, 64
+ %b = zext i8 %a to i16
+ %c = shl i16 %b, 9
+ %d = zext i16 %c to i32
+ ret i32 %d
+}
+
+define i32 @shift_zext_shl2(i8 zeroext %x) {
+; X86-LABEL: shift_zext_shl2:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $64, %eax
+; X86-NEXT: shll $9, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shift_zext_shl2:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $64, %eax
+; X64-NEXT: shll $9, %eax
+; X64-NEXT: retq
+ %a = and i8 %x, 64
+ %b = zext i8 %a to i32
+ %c = shl i32 %b, 9
+ ret i32 %c
+}
+
+define <4 x i32> @shift_zext_shl_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl_vec:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $64, %ecx
+; X86-NEXT: shll $9, %ecx
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: andl $31, %esi
+; X86-NEXT: shll $7, %esi
+; X86-NEXT: andl $23, %edi
+; X86-NEXT: shll $6, %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X64-LABEL: shift_zext_shl_vec:
+; X64: # %bb.0:
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: retq
+ %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+ %b = zext <4 x i8> %a to <4 x i16>
+ %c = shl <4 x i16> %b, <i16 9, i16 8, i16 7, i16 6>
+ %d = zext <4 x i16> %c to <4 x i32>
+ ret <4 x i32> %d
+}
+
+define <4 x i32> @shift_zext_shl2_vec(<4 x i8> %x) nounwind {
+; X86-LABEL: shift_zext_shl2_vec:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: andl $23, %edi
+; X86-NEXT: andl $31, %esi
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: andl $64, %ecx
+; X86-NEXT: shll $9, %ecx
+; X86-NEXT: shll $8, %edx
+; X86-NEXT: shll $7, %esi
+; X86-NEXT: shll $6, %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X64-LABEL: shift_zext_shl2_vec:
+; X64: # %bb.0:
+; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT: retq
+ %a = and <4 x i8> %x, <i8 64, i8 63, i8 31, i8 23>
+ %b = zext <4 x i8> %a to <4 x i32>
+ %c = shl <4 x i32> %b, <i32 9, i32 8, i32 7, i32 6>
+ ret <4 x i32> %c
+}
+
declare dso_local void @f(i64)
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 9d573ef2a8fa..022b25a24153 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
@@ -7,23 +7,16 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define i32 @t(ptr %val) nounwind {
-; X32-SSE2-LABEL: t:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl 8(%eax), %eax
+; X86-SSE2-NEXT: retl
;
-; X64-SSSE3-LABEL: t:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm0, %eax
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: t:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: movl 8(%rdi), %eax
-; X64-AVX-NEXT: retq
+; X64-LABEL: t:
+; X64: # %bb.0:
+; X64-NEXT: movl 8(%rdi), %eax
+; X64-NEXT: retq
%tmp2 = load <2 x i64>, ptr %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
@@ -33,9 +26,9 @@ define i32 @t(ptr %val) nounwind {
; Case where extractelement of load ends up as undef.
; (Making sure this doesn't crash.)
define i32 @t2(ptr %xp) {
-; X32-SSE2-LABEL: t2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
@@ -51,12 +44,12 @@ define i32 @t2(ptr %xp) {
; narrow load.
define void @t3(ptr %a0) {
-; X32-SSE2-LABEL: t3:
-; X32-SSE2: # %bb.0: # %bb
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movups (%eax), %xmm0
-; X32-SSE2-NEXT: movhps %xmm0, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t3:
+; X86-SSE2: # %bb.0: # %bb
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movups (%eax), %xmm0
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t3:
; X64-SSSE3: # %bb.0: # %bb
@@ -81,14 +74,12 @@ bb:
; This is testing for an assertion - the extraction was assuming that the undef
; second shuffle operand was a post-bitcast type instead of a pre-bitcast type.
define i64 @t4(ptr %a) {
-; X32-SSE2-LABEL: t4:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movdqa (%eax), %xmm0
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %edx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t4:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movl 4(%ecx), %edx
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: t4:
; X64: # %bb.0:
@@ -103,13 +94,13 @@ define i64 @t4(ptr %a) {
; Don't extract from a volatile.
define void @t5(ptr%a0, ptr%a1) {
-; X32-SSE2-LABEL: t5:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movaps (%ecx), %xmm0
-; X32-SSE2-NEXT: movhps %xmm0, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t5:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movaps (%ecx), %xmm0
+; X86-SSE2-NEXT: movhps %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t5:
; X64-SSSE3: # %bb.0:
@@ -130,24 +121,24 @@ define void @t5(ptr%a0, ptr%a1) {
; Check for multiuse.
define float @t6(ptr%a0) {
-; X32-SSE2-LABEL: t6:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %eax
-; X32-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movaps (%eax), %xmm0
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%esp)
-; X32-SSE2-NEXT: flds (%esp)
-; X32-SSE2-NEXT: popl %eax
-; X32-SSE2-NEXT: .cfi_def_cfa_offset 4
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: t6:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movaps (%eax), %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpeqss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: t6:
; X64-SSSE3: # %bb.0:
@@ -184,20 +175,20 @@ define float @t6(ptr%a0) {
}
define void @PR43971(ptr%a0, ptr%a1) {
-; X32-SSE2-LABEL: PR43971:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movaps 16(%ecx), %xmm0
-; X32-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpltss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%eax)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR43971:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movaps 16(%ecx), %xmm0
+; X86-SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpltss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%eax)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971:
; X64-SSSE3: # %bb.0: # %entry
@@ -231,22 +222,22 @@ entry:
}
define float @PR43971_1(ptr%a0) nounwind {
-; X32-SSE2-LABEL: PR43971_1:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: pushl %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movaps (%eax), %xmm0
-; X32-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: xorps %xmm1, %xmm1
-; X32-SSE2-NEXT: cmpeqss %xmm0, %xmm1
-; X32-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
-; X32-SSE2-NEXT: andps %xmm1, %xmm2
-; X32-SSE2-NEXT: andnps %xmm0, %xmm1
-; X32-SSE2-NEXT: orps %xmm2, %xmm1
-; X32-SSE2-NEXT: movss %xmm1, (%esp)
-; X32-SSE2-NEXT: flds (%esp)
-; X32-SSE2-NEXT: popl %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR43971_1:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movaps (%eax), %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: xorps %xmm1, %xmm1
+; X86-SSE2-NEXT: cmpeqss %xmm0, %xmm1
+; X86-SSE2-NEXT: movss {{.*#+}} xmm2 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: andps %xmm1, %xmm2
+; X86-SSE2-NEXT: andnps %xmm0, %xmm1
+; X86-SSE2-NEXT: orps %xmm2, %xmm1
+; X86-SSE2-NEXT: movss %xmm1, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: PR43971_1:
; X64-SSSE3: # %bb.0: # %entry
@@ -283,17 +274,48 @@ entry:
ret float %cond
}
+define i32 @PR85419(ptr %p0) {
+; X86-SSE2-LABEL: PR85419:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %edx
+; X86-SSE2-NEXT: xorl %eax, %eax
+; X86-SSE2-NEXT: orl 4(%ecx), %edx
+; X86-SSE2-NEXT: je .LBB8_2
+; X86-SSE2-NEXT: # %bb.1:
+; X86-SSE2-NEXT: movl 8(%ecx), %eax
+; X86-SSE2-NEXT: .LBB8_2:
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: PR85419:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq $0, (%rdi)
+; X64-NEXT: je .LBB8_2
+; X64-NEXT: # %bb.1:
+; X64-NEXT: movl 8(%rdi), %eax
+; X64-NEXT: .LBB8_2:
+; X64-NEXT: retq
+ %load = load <2 x i64>, ptr %p0, align 16
+ %vecext.i = extractelement <2 x i64> %load, i64 0
+ %cmp = icmp eq i64 %vecext.i, 0
+ %.cast = bitcast <2 x i64> %load to <4 x i32>
+ %vecext.i2 = extractelement <4 x i32> %.cast, i64 2
+ %retval.0 = select i1 %cmp, i32 0, i32 %vecext.i2
+ ret i32 %retval.0
+}
+
; Test for bad extractions from a VBROADCAST_LOAD of the <2 x i16> non-uniform constant bitcast as <4 x i32>.
define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture %1, ptr nocapture %2) nounwind {
-; X32-SSE2-LABEL: subextract_broadcast_load_constant:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
-; X32-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
-; X32-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: subextract_broadcast_load_constant:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl $-1583308898, (%edx) # imm = 0xA1A09F9E
+; X86-SSE2-NEXT: movw $-24674, (%ecx) # imm = 0x9F9E
+; X86-SSE2-NEXT: movw $-24160, (%eax) # imm = 0xA1A0
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: subextract_broadcast_load_constant:
; X64: # %bb.0:
@@ -319,15 +341,15 @@ define void @subextract_broadcast_load_constant(ptr nocapture %0, ptr nocapture
; A scalar load is favored over a XMM->GPR register transfer in this example.
define i32 @multi_use_load_scalarization(ptr %p) nounwind {
-; X32-SSE2-LABEL: multi_use_load_scalarization:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl (%ecx), %eax
-; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: psubd %xmm1, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: multi_use_load_scalarization:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %eax
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-SSE2-NEXT: psubd %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, (%ecx)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_load_scalarization:
; X64-SSSE3: # %bb.0:
@@ -354,15 +376,15 @@ define i32 @multi_use_load_scalarization(ptr %p) nounwind {
}
define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
-; X32-SSE2-LABEL: multi_use_volatile_load_scalarization:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movdqu (%ecx), %xmm0
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: psubd %xmm1, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, (%ecx)
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: multi_use_volatile_load_scalarization:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-SSE2-NEXT: movd %xmm0, %eax
+; X86-SSE2-NEXT: psubd %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm0, (%ecx)
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: multi_use_volatile_load_scalarization:
; X64-SSSE3: # %bb.0:
@@ -398,41 +420,41 @@ define i32 @multi_use_volatile_load_scalarization(ptr %p) nounwind {
@zero = internal unnamed_addr global <8 x i32> zeroinitializer, align 32
define i32 @main() nounwind {
-; X32-SSE2-LABEL: main:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebp
-; X32-SSE2-NEXT: movl %esp, %ebp
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: andl $-32, %esp
-; X32-SSE2-NEXT: subl $64, %esp
-; X32-SSE2-NEXT: movdqa zero, %xmm0
-; X32-SSE2-NEXT: movaps n1+16, %xmm1
-; X32-SSE2-NEXT: movaps n1, %xmm2
-; X32-SSE2-NEXT: movaps %xmm2, zero
-; X32-SSE2-NEXT: movaps %xmm1, zero+16
-; X32-SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
-; X32-SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: movaps %xmm1, (%esp)
-; X32-SSE2-NEXT: movdqa (%esp), %xmm1
-; X32-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm2, %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X32-SSE2-NEXT: movd %xmm2, %ecx
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: divl %ecx
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; X32-SSE2-NEXT: movd %xmm0, %esi
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: divl %esi
-; X32-SSE2-NEXT: addl %ecx, %eax
-; X32-SSE2-NEXT: leal -4(%ebp), %esp
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %ebp
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: main:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: andl $-32, %esp
+; X86-SSE2-NEXT: subl $64, %esp
+; X86-SSE2-NEXT: movaps n1+16, %xmm0
+; X86-SSE2-NEXT: movaps n1, %xmm1
+; X86-SSE2-NEXT: movl zero+4, %ecx
+; X86-SSE2-NEXT: movl zero+8, %eax
+; X86-SSE2-NEXT: movaps %xmm1, zero
+; X86-SSE2-NEXT: movaps %xmm0, zero+16
+; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [2,2,2,2]
+; X86-SSE2-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movaps %xmm0, (%esp)
+; X86-SSE2-NEXT: movdqa (%esp), %xmm0
+; X86-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; X86-SSE2-NEXT: movd %xmm1, %esi
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: divl %esi
+; X86-SSE2-NEXT: movl %eax, %esi
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE2-NEXT: movd %xmm0, %edi
+; X86-SSE2-NEXT: movl %ecx, %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: divl %edi
+; X86-SSE2-NEXT: addl %esi, %eax
+; X86-SSE2-NEXT: leal -8(%ebp), %esp
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
;
; X64-SSSE3-LABEL: main:
; X64-SSSE3: # %bb.0:
@@ -440,31 +462,29 @@ define i32 @main() nounwind {
; X64-SSSE3-NEXT: movq %rsp, %rbp
; X64-SSSE3-NEXT: andq $-32, %rsp
; X64-SSSE3-NEXT: subq $64, %rsp
-; X64-SSSE3-NEXT: movdqa zero(%rip), %xmm0
; X64-SSSE3-NEXT: movq n1@GOTPCREL(%rip), %rax
-; X64-SSSE3-NEXT: movaps (%rax), %xmm1
-; X64-SSSE3-NEXT: movaps 16(%rax), %xmm2
-; X64-SSSE3-NEXT: movaps %xmm1, zero(%rip)
-; X64-SSSE3-NEXT: movaps %xmm2, zero+16(%rip)
-; X64-SSSE3-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
-; X64-SSSE3-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
-; X64-SSSE3-NEXT: movaps %xmm1, (%rsp)
-; X64-SSSE3-NEXT: movdqa (%rsp), %xmm1
-; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm2, %eax
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X64-SSSE3-NEXT: movd %xmm2, %ecx
+; X64-SSSE3-NEXT: movaps (%rax), %xmm0
+; X64-SSSE3-NEXT: movaps 16(%rax), %xmm1
+; X64-SSSE3-NEXT: movl zero+4(%rip), %ecx
+; X64-SSSE3-NEXT: movl zero+8(%rip), %eax
+; X64-SSSE3-NEXT: movaps %xmm0, zero(%rip)
+; X64-SSSE3-NEXT: movaps %xmm1, zero+16(%rip)
+; X64-SSSE3-NEXT: movaps {{.*#+}} xmm0 = [2,2,2,2]
+; X64-SSSE3-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSSE3-NEXT: movaps %xmm0, (%rsp)
+; X64-SSSE3-NEXT: movdqa (%rsp), %xmm0
+; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; X64-SSSE3-NEXT: movd %xmm1, %esi
; X64-SSSE3-NEXT: xorl %edx, %edx
-; X64-SSSE3-NEXT: divl %ecx
-; X64-SSSE3-NEXT: movl %eax, %ecx
+; X64-SSSE3-NEXT: divl %esi
+; X64-SSSE3-NEXT: movl %eax, %esi
; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X64-SSSE3-NEXT: movd %xmm0, %eax
-; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; X64-SSSE3-NEXT: movd %xmm0, %esi
+; X64-SSSE3-NEXT: movd %xmm0, %edi
+; X64-SSSE3-NEXT: movl %ecx, %eax
; X64-SSSE3-NEXT: xorl %edx, %edx
-; X64-SSSE3-NEXT: divl %esi
-; X64-SSSE3-NEXT: addl %ecx, %eax
+; X64-SSSE3-NEXT: divl %edi
+; X64-SSSE3-NEXT: addl %esi, %eax
; X64-SSSE3-NEXT: movq %rbp, %rsp
; X64-SSSE3-NEXT: popq %rbp
; X64-SSSE3-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/huge-stack-offset.ll b/llvm/test/CodeGen/X86/huge-stack-offset.ll
index 68dcfa748b0c..e825328ccd89 100644
--- a/llvm/test/CodeGen/X86/huge-stack-offset.ll
+++ b/llvm/test/CodeGen/X86/huge-stack-offset.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-linux-unknown | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -mtriple=i386-linux-unknown | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -mtriple=x86_64-linux-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-64
+; RUN: llc < %s -mtriple=i386-linux-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-32
; Test that a large stack offset uses a single add/sub instruction to
; adjust the stack pointer.
diff --git a/llvm/test/CodeGen/X86/huge-stack-offset2.ll b/llvm/test/CodeGen/X86/huge-stack-offset2.ll
index 3bf0260cc12a..053643eb3686 100644
--- a/llvm/test/CodeGen/X86/huge-stack-offset2.ll
+++ b/llvm/test/CodeGen/X86/huge-stack-offset2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s --check-prefix=CHECK
; Test how we handle pathologically large stack frames when RAX is live through
; the prologue and epilogue.
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index 8ed8495d7a46..5420e6b5ce86 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1009,18 +1009,19 @@ define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $48, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %eax
-; X86AVX2-NEXT: movl 12(%ebp), %ecx
-; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: movl 8(%ebp), %edx
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: addl %ecx, %ecx
+; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $3, %esi
-; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
-; X86AVX2-NEXT: andl $3, %eax
-; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
+; X86AVX2-NEXT: incl %ecx
+; X86AVX2-NEXT: andl $3, %ecx
+; X86AVX2-NEXT: movl %eax, 16(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -1362,12 +1363,13 @@ define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
-; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: addl %eax, %eax
+; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $3, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
@@ -1742,18 +1744,19 @@ define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $96, %esp
-; X86AVX2-NEXT: movl 8(%ebp), %eax
-; X86AVX2-NEXT: movl 12(%ebp), %ecx
-; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: movl 8(%ebp), %edx
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: addl %ecx, %ecx
+; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $7, %esi
-; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
-; X86AVX2-NEXT: andl $7, %eax
-; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
+; X86AVX2-NEXT: incl %ecx
+; X86AVX2-NEXT: andl $7, %ecx
+; X86AVX2-NEXT: movl %eax, 32(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
@@ -2128,12 +2131,13 @@ define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
-; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: addl %eax, %eax
+; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $7, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
diff --git a/llvm/test/CodeGen/X86/isel-traps.ll b/llvm/test/CodeGen/X86/isel-traps.ll
new file mode 100644
index 000000000000..c207387166a6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/isel-traps.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=ALL,GISEL-X64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,X86
+; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,X86
+; RUN: llc < %s -global-isel -global-isel-abort=1 -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=ALL,GISEL-X86
+
+declare void @llvm.trap()
+
+define void @test_trap() {
+; ALL-LABEL: test_trap:
+; ALL: # %bb.0:
+; ALL-NEXT: ud2
+; ALL-NEXT: ret{{[l|q]}}
+ tail call void @llvm.trap()
+ ret void
+}
+
+define void @test_debugtrap() {
+; ALL-LABEL: test_debugtrap:
+; ALL: # %bb.0:
+; ALL-NEXT: int3
+; ALL-NEXT: ret{{[l|q]}}
+ tail call void @llvm.debugtrap()
+ ret void
+}
+
+define void @test_ubsantrap() {
+; ALL-LABEL: test_ubsantrap:
+; ALL: # %bb.0:
+; ALL-NEXT: ud1l 12(%eax), %eax
+; ALL-NEXT: ret{{[l|q]}}
+ call void @llvm.ubsantrap(i8 12)
+ ret void
+}
+
+define void @test_ubsantrap_custom() nounwind {
+; X64-LABEL: test_ubsantrap_custom:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $42, %edi
+; X64-NEXT: callq guide@PLT
+; X64-NEXT: popq %rax
+; X64-NEXT: retq
+;
+; GISEL-X64-LABEL: test_ubsantrap_custom:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: movl $42, %edi
+; GISEL-X64-NEXT: callq guide
+; GISEL-X64-NEXT: popq %rax
+; GISEL-X64-NEXT: retq
+;
+; X86-LABEL: test_ubsantrap_custom:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl $42, (%esp)
+; X86-NEXT: calll guide
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; GISEL-X86-LABEL: test_ubsantrap_custom:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: subl $12, %esp
+; GISEL-X86-NEXT: movl $42, %eax
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: calll guide
+; GISEL-X86-NEXT: addl $12, %esp
+; GISEL-X86-NEXT: retl
+ call void @llvm.ubsantrap(i8 42) "trap-func-name"="guide"
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/known-never-zero.ll b/llvm/test/CodeGen/X86/known-never-zero.ll
index cc9862769f2b..39d02f9112f4 100644
--- a/llvm/test/CodeGen/X86/known-never-zero.ll
+++ b/llvm/test/CodeGen/X86/known-never-zero.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
;; Use cttz to test if we properly prove never-zero. There is a very
;; simple transform from cttz -> cttz_zero_undef if its operand is
@@ -9,50 +10,82 @@ declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i32 @llvm.umax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.smin.i32(i32, i32)
+declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.smax.i32(i32, i32)
+declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.bswap.i32(i32)
declare i32 @llvm.bitreverse.i32(i32)
declare i32 @llvm.ctpop.i32(i32)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
declare i32 @llvm.abs.i32(i32, i1)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i32 @llvm.fshr.i32(i32, i32, i32)
define i32 @or_known_nonzero(i32 %x) {
-; CHECK-LABEL: or_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%z = or i32 %x, 1
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @or_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: or_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl %esi, %edi
-; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB1_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB1_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl %esi, %edi
+; X64-NEXT: je .LBB1_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB1_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = or i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @select_known_nonzero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movl $122, %eax
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $122, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: movl $122, %eax
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 122
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -60,20 +93,36 @@ define i32 @select_known_nonzero(i1 %c, i32 %x) {
}
define i32 @select_maybe_zero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB3_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl $1, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB3_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB3_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB3_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB3_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 0
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -81,28 +130,45 @@ define i32 @select_maybe_zero(i1 %c, i32 %x) {
}
define i32 @shl_known_nonzero_1s_bit_set(i32 %x) {
-; CHECK-LABEL: shl_known_nonzero_1s_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $123, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_1s_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $123, %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_1s_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $123, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = shl i32 123, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -110,14 +176,23 @@ define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -125,67 +200,116 @@ define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @shl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: shl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB7_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB7_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB7_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB7_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB7_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB7_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = shl nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_known_nonzero(i32 %x) {
-; CHECK-LABEL: uaddsat_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: incl %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovnel %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: incl %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 1)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: uaddsat_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovael %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB9_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB9_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovael %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB9_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovael %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB9_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umax_known_nonzero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cmpl %eax, %edi
-; CHECK-NEXT: cmoval %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: cmpl %edx, %eax
+; X86-NEXT: cmoval %eax, %edx
+; X86-NEXT: rep bsfl %edx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cmpl %eax, %edi
+; X64-NEXT: cmoval %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%yy = shl nuw i32 4, %y
%z = call i32 @llvm.umax.i32(i32 %x, i32 %yy)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -193,35 +317,62 @@ define i32 @umax_known_nonzero(i32 %x, i32 %y) {
}
define i32 @umax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: cmoval %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB11_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB11_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: cmoval %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB11_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB11_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: cmoval %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB11_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB11_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umax.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: umin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovbl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovbl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovbl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.umin.i32(i32 %x, i32 %y)
@@ -230,36 +381,63 @@ define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @umin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB13_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB13_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB13_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB13_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovbl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB13_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB13_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovll %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovll %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovll %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smin.i32(i32 %x, i32 %y)
@@ -267,37 +445,120 @@ define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
ret i32 %r
}
+define i32 @smin_known_zero(i32 %x, i32 %y) {
+; X86-LABEL: smin_known_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $-54, %eax
+; X86-NEXT: movl $-54, %ecx
+; X86-NEXT: cmovll %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $-54, %edi
+; X64-NEXT: movl $-54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+ %z = call i32 @llvm.smin.i32(i32 %x, i32 -54)
+ %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+ ret i32 %r
+}
+
+define <4 x i32> @smin_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
+; X86-LABEL: smin_known_zero_vec:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967242,4294967273,4294967284,4294967295]
+; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: pcmpgtd %xmm0, %xmm2
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pandn %xmm1, %xmm2
+; X86-NEXT: por %xmm2, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: paddd %xmm0, %xmm1
+; X86-NEXT: pand %xmm1, %xmm0
+; X86-NEXT: pxor %xmm1, %xmm1
+; X86-NEXT: pcmpeqd %xmm1, %xmm0
+; X86-NEXT: psrld $31, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_zero_vec:
+; X64: # %bb.0:
+; X64-NEXT: vpminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpsrld $31, %xmm0, %xmm0
+; X64-NEXT: retq
+ %z = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %x, <4 x i32> <i32 -54, i32 -23, i32 -12, i32 -1>)
+ %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
+ %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
+ %ret = zext <4 x i1> %3 to <4 x i32>
+ ret <4 x i32> %ret
+}
+
define i32 @smin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovll %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB15_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB15_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovll %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB17_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB17_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB17_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB17_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovgl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovgl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovgl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smax.i32(i32 %x, i32 %y)
@@ -306,35 +567,125 @@ define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @smax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $55, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovgel %edi, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $55, %eax
+; X86-NEXT: movl $54, %ecx
+; X86-NEXT: cmovgel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $55, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovgel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smax.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
+define <4 x i32> @smax_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
+; X86-LABEL: smax_known_zero_vec:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [54,23,12,1]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pcmpgtd %xmm1, %xmm2
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pandn %xmm1, %xmm2
+; X86-NEXT: por %xmm2, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: paddd %xmm0, %xmm1
+; X86-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT: pxor %xmm1, %xmm0
+; X86-NEXT: pcmpgtd %xmm1, %xmm0
+; X86-NEXT: psrld $31, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_zero_vec:
+; X64: # %bb.0:
+; X64-NEXT: vpmaxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpminud %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: retq
+ %z = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %x, <4 x i32> <i32 54, i32 23, i32 12, i32 1>)
+ %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
+ %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
+ %ret = zext <4 x i1> %3 to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+define i32 @smax_known_zero(i32 %x, i32 %y) {
+; X86-LABEL: smax_known_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovnsl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB21_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB21_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_zero:
+; X64: # %bb.0:
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnsl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB21_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB21_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
+ %z = call i32 @llvm.smax.i32(i32 %x, i32 -1)
+ %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
+ ret i32 %r
+}
+
define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB18_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB18_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB22_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB22_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB22_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB22_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
@@ -345,19 +696,33 @@ define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB19_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB19_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB23_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB23_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB23_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB23_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
%shl = shl i32 %x, %sub
@@ -367,14 +732,23 @@ define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -382,39 +756,68 @@ define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_with_fshr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB21_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB21_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB25_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB25_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB25_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB25_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB22_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB22_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB26_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB26_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB26_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB26_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
@@ -425,19 +828,33 @@ define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB23_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB23_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB27_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB27_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB27_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB27_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
%shr = lshr i32 %x, %sub
@@ -447,14 +864,23 @@ define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -462,47 +888,78 @@ define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_with_fshl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB25_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB25_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB29_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB29_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB29_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB29_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: sra_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = ashr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: sra_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -510,47 +967,78 @@ define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @sra_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sra_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB28_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB28_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB32_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB32_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB32_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB32_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: srl_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = lshr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: srl_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -558,33 +1046,56 @@ define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @srl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: srl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB31_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB31_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB35_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB35_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB35_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB35_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: udiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -592,33 +1103,56 @@ define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @udiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: udiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB33_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB33_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB37_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB37_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB37_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB37_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: sdiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -626,31 +1160,53 @@ define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @sdiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sdiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB35_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB35_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB39_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB39_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB39_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB39_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @add_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nuw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -658,17 +1214,30 @@ define i32 @add_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @add_maybe_zero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: je .LBB37_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB37_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB41_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB41_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: je .LBB41_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB41_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nsw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -676,15 +1245,24 @@ define i32 @add_maybe_zero(i32 %xx, i32 %y) {
}
define i32 @sub_known_nonzero_neg_case(i32 %xx) {
-; CHECK-LABEL: sub_known_nonzero_neg_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_neg_case:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_neg_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i32 256, %xx
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -692,14 +1270,24 @@ define i32 @sub_known_nonzero_neg_case(i32 %xx) {
}
define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
-; CHECK-LABEL: sub_known_nonzero_ne_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: andl $-65, %edi
-; CHECK-NEXT: subl %eax, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_ne_case:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: orl $64, %ecx
+; X86-NEXT: andl $-65, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_ne_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: andl $-65, %edi
+; X64-NEXT: subl %eax, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%y = and i32 %xx, -65
%z = sub i32 %y, %x
@@ -708,18 +1296,32 @@ define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
}
define i32 @sub_maybe_zero(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: subl %edi, %eax
-; CHECK-NEXT: je .LBB40_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB40_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: je .LBB44_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB44_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: je .LBB44_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB44_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 64
%z = sub i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -727,34 +1329,60 @@ define i32 @sub_maybe_zero(i32 %x) {
}
define i32 @sub_maybe_zero2(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: negl %edi
-; CHECK-NEXT: je .LBB41_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB41_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: je .LBB45_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB45_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero2:
+; X64: # %bb.0:
+; X64-NEXT: negl %edi
+; X64-NEXT: je .LBB45_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB45_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB42_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB42_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB46_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB46_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB46_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB46_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -762,18 +1390,32 @@ define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB43_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB43_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB47_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB47_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB47_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB47_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -781,36 +1423,63 @@ define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @mul_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: mul_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: imull %esi, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB44_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB44_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB48_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB48_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: imull %esi, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB48_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB48_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = mul nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
-; CHECK-LABEL: bitcast_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; CHECK-NEXT: pslld $23, %xmm0
-; CHECK-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; CHECK-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: pslld $23, %xmm0
+; X86-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: cvttps2dq %xmm0, %xmm0
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; X86-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: bsfl %eax, %ecx
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT: vpslld $23, %xmm0, %xmm0
+; X64-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: bsfl %eax, %ecx
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: cmovnel %ecx, %eax
+; X64-NEXT: retq
%x = shl nuw nsw <2 x i16> <i16 256, i16 256>, %xx
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -818,49 +1487,83 @@ define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
}
define i32 @bitcast_maybe_zero(<2 x i16> %x) {
-; CHECK-LABEL: bitcast_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB46_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB46_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB50_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB50_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB50_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB50_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_from_float(float %x) {
-; CHECK-LABEL: bitcast_from_float:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB47_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB47_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_from_float:
+; X86: # %bb.0:
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB51_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB51_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_from_float:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB51_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB51_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast float %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @zext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: zext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -868,32 +1571,54 @@ define i32 @zext_known_nonzero(i16 %xx) {
}
define i32 @zext_maybe_zero(i16 %x) {
-; CHECK-LABEL: zext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB49_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movzwl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB49_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testw %ax, %ax
+; X86-NEXT: je .LBB53_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB53_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB53_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB53_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: sext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cwtl
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: cwtl
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cwtl
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -901,17 +1626,29 @@ define i32 @sext_known_nonzero(i16 %xx) {
}
define i32 @sext_maybe_zero(i16 %x) {
-; CHECK-LABEL: sext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB51_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movswl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB51_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB55_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB55_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB55_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB55_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
diff --git a/llvm/test/CodeGen/X86/late-remat-update.mir b/llvm/test/CodeGen/X86/late-remat-update.mir
index 84a78f84728c..dd4e99c6df14 100644
--- a/llvm/test/CodeGen/X86/late-remat-update.mir
+++ b/llvm/test/CodeGen/X86/late-remat-update.mir
@@ -66,6 +66,7 @@ registers:
liveins:
- { reg: '$edi', virtual-reg: '%0' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/limit-split-cost.mir b/llvm/test/CodeGen/X86/limit-split-cost.mir
index 6f5329e5b332..7ec0404e0f73 100644
--- a/llvm/test/CodeGen/X86/limit-split-cost.mir
+++ b/llvm/test/CodeGen/X86/limit-split-cost.mir
@@ -86,6 +86,7 @@ registers:
liveins:
- { reg: '$edi', virtual-reg: '%0' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index 898b34e969b1..6aa0a81c9020 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -12,7 +12,7 @@
; vXf64
;
-define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val) {
+define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val) nounwind {
; SSE-LABEL: store_v1f64_v1i64:
; SSE: ## %bb.0:
; SSE-NEXT: testq %rdi, %rdi
@@ -46,7 +46,7 @@ define void @store_v1f64_v1i64(<1 x i64> %trigger, ptr %addr, <1 x double> %val)
ret void
}
-define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val) {
+define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val) nounwind {
; SSE-LABEL: store_v2f64_v2i64:
; SSE: ## %bb.0:
; SSE-NEXT: movmskpd %xmm0, %eax
@@ -106,7 +106,7 @@ define void @store_v2f64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x double> %val)
ret void
}
-define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val) {
+define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val) nounwind {
; SSE2-LABEL: store_v4f64_v4i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -222,7 +222,7 @@ define void @store_v4f64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x double> %val)
; vXf32
;
-define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val) {
+define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val) nounwind {
; SSE2-LABEL: store_v2f32_v2i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -314,7 +314,7 @@ define void @store_v2f32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x float> %val)
ret void
}
-define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i32> %mask) {
+define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i32> %mask) nounwind {
; SSE2-LABEL: store_v4f32_v4i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm2, %eax
@@ -425,7 +425,7 @@ define void @store_v4f32_v4i32(<4 x float> %x, ptr %ptr, <4 x float> %y, <4 x i3
ret void
}
-define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i32> %mask) {
+define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i32> %mask) nounwind {
; SSE2-LABEL: store_v8f32_v8i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: packssdw %xmm5, %xmm4
@@ -605,7 +605,7 @@ define void @store_v8f32_v8i32(<8 x float> %x, ptr %ptr, <8 x float> %y, <8 x i3
ret void
}
-define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16 x i32> %mask) {
+define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16 x i32> %mask) nounwind {
; SSE2-LABEL: store_v16f32_v16i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4
@@ -914,7 +914,7 @@ define void @store_v16f32_v16i32(<16 x float> %x, ptr %ptr, <16 x float> %y, <16
; vXi64
;
-define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) {
+define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) nounwind {
; SSE2-LABEL: store_v2i64_v2i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskpd %xmm0, %eax
@@ -998,7 +998,7 @@ define void @store_v2i64_v2i64(<2 x i64> %trigger, ptr %addr, <2 x i64> %val) {
ret void
}
-define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) {
+define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) nounwind {
; SSE2-LABEL: store_v4i64_v4i64:
; SSE2: ## %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
@@ -1122,7 +1122,7 @@ define void @store_v4i64_v4i64(<4 x i64> %trigger, ptr %addr, <4 x i64> %val) {
; vXi32
;
-define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) {
+define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) nounwind {
; SSE-LABEL: store_v1i32_v1i32:
; SSE: ## %bb.0:
; SSE-NEXT: testl %edi, %edi
@@ -1156,7 +1156,7 @@ define void @store_v1i32_v1i32(<1 x i32> %trigger, ptr %addr, <1 x i32> %val) {
ret void
}
-define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) {
+define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) nounwind {
; SSE2-LABEL: store_v2i32_v2i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -1256,7 +1256,7 @@ define void @store_v2i32_v2i32(<2 x i32> %trigger, ptr %addr, <2 x i32> %val) {
ret void
}
-define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
+define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) nounwind {
; SSE2-LABEL: store_v4i32_v4i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1370,7 +1370,7 @@ define void @store_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
ret void
}
-define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) {
+define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) nounwind {
; SSE2-LABEL: store_v8i32_v8i32:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -1560,7 +1560,7 @@ define void @store_v8i32_v8i32(<8 x i32> %trigger, ptr %addr, <8 x i32> %val) {
; vXi16
;
-define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) {
+define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) nounwind {
; SSE2-LABEL: store_v8i16_v8i16:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -1907,7 +1907,7 @@ define void @store_v8i16_v8i16(<8 x i16> %trigger, ptr %addr, <8 x i16> %val) {
ret void
}
-define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val) {
+define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val) nounwind {
; SSE2-LABEL: store_v16i16_v16i16:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -2676,7 +2676,7 @@ define void @store_v16i16_v16i16(<16 x i16> %trigger, ptr %addr, <16 x i16> %val
; vXi8
;
-define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) {
+define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) nounwind {
; SSE2-LABEL: store_v16i8_v16i8:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
@@ -3273,7 +3273,7 @@ define void @store_v16i8_v16i8(<16 x i8> %trigger, ptr %addr, <16 x i8> %val) {
ret void
}
-define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
+define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) nounwind {
; SSE2-LABEL: store_v32i8_v32i8:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
@@ -4670,7 +4670,7 @@ define void @store_v32i8_v32i8(<32 x i8> %trigger, ptr %addr, <32 x i8> %val) {
;;; Stores with Constant Masks
-define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) {
+define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i32> %val) nounwind {
; SSE-LABEL: mstore_constmask_v4i32_v4i32:
; SSE: ## %bb.0:
; SSE-NEXT: movups %xmm1, (%rdi)
@@ -4693,7 +4693,7 @@ define void @mstore_constmask_v4i32_v4i32(<4 x i32> %trigger, ptr %addr, <4 x i3
; Make sure we are able to detect all ones constant mask after type legalization
; to avoid masked stores.
-define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16 x i64> %val) {
+define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16 x i64> %val) nounwind {
; SSE2-LABEL: mstore_constmask_allones_split:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
@@ -4810,7 +4810,7 @@ define void @mstore_constmask_allones_split(<16 x i64> %trigger, ptr %addr, <16
; When only one element of the mask is set, reduce to a scalar store.
-define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) {
+define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) nounwind {
; SSE-LABEL: one_mask_bit_set1:
; SSE: ## %bb.0:
; SSE-NEXT: movss %xmm0, (%rdi)
@@ -4832,7 +4832,7 @@ define void @one_mask_bit_set1(ptr %addr, <4 x i32> %val) {
; Choose a different element to show that the correct address offset is produced.
-define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
+define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) nounwind {
; SSE2-LABEL: one_mask_bit_set2:
; SSE2: ## %bb.0:
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
@@ -4860,7 +4860,7 @@ define void @one_mask_bit_set2(ptr %addr, <4 x float> %val) {
; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
-define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) {
+define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) nounwind {
; SSE-LABEL: one_mask_bit_set3:
; SSE: ## %bb.0:
; SSE-NEXT: movlps %xmm1, 16(%rdi)
@@ -4886,7 +4886,7 @@ define void @one_mask_bit_set3(ptr %addr, <4 x i64> %val) {
; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
-define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) {
+define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) nounwind {
; SSE-LABEL: one_mask_bit_set4:
; SSE: ## %bb.0:
; SSE-NEXT: movhps %xmm1, 24(%rdi)
@@ -4912,7 +4912,7 @@ define void @one_mask_bit_set4(ptr %addr, <4 x double> %val) {
; Try a 512-bit vector to make sure AVX doesn't die and AVX512 works as expected.
-define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) {
+define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) nounwind {
; SSE-LABEL: one_mask_bit_set5:
; SSE: ## %bb.0:
; SSE-NEXT: movlps %xmm3, 48(%rdi)
@@ -4944,7 +4944,7 @@ define void @one_mask_bit_set5(ptr %addr, <8 x double> %val) {
}
; Try one elt in each half of a vector that needs to split
-define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) {
+define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) nounwind {
; SSE2-LABEL: one_mask_bit_set6:
; SSE2: ## %bb.0:
; SSE2-NEXT: movlps %xmm3, 48(%rdi)
@@ -4999,7 +4999,7 @@ define void @one_mask_bit_set6(ptr %addr, <16 x i64> %val) {
ret void
}
-define void @top_bits_unset_stack() {
+define void @top_bits_unset_stack() nounwind {
; SSE-LABEL: top_bits_unset_stack:
; SSE: ## %bb.0: ## %entry
; SSE-NEXT: xorps %xmm0, %xmm0
@@ -5047,7 +5047,6 @@ define void @top_bits_unset_stack() {
; X86-AVX512-LABEL: top_bits_unset_stack:
; X86-AVX512: ## %bb.0: ## %entry
; X86-AVX512-NEXT: subl $76, %esp
-; X86-AVX512-NEXT: .cfi_def_cfa_offset 80
; X86-AVX512-NEXT: vxorpd %xmm0, %xmm0, %xmm0
; X86-AVX512-NEXT: movb $63, %al
; X86-AVX512-NEXT: kmovd %eax, %k1
@@ -5064,7 +5063,7 @@ entry:
; SimplifyDemandedBits eliminates an ashr here.
-define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <4 x i32> %masksrc) {
+define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <4 x i32> %masksrc) nounwind {
; SSE-LABEL: masked_store_bool_mask_demand_trunc_sext:
; SSE: ## %bb.0:
; SSE-NEXT: pslld $31, %xmm2
@@ -5160,7 +5159,7 @@ define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, ptr %p, <
; PR26697
-define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %mask) {
+define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %mask) nounwind {
; SSE2-LABEL: one_mask_bit_set1_variable:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm1, %eax
@@ -5267,7 +5266,7 @@ define void @one_mask_bit_set1_variable(ptr %addr, <4 x float> %val, <4 x i32> %
; This needs to be widened to v4i32.
; This used to assert in type legalization. PR38436
; FIXME: The codegen for AVX512 should use KSHIFT to zero the upper bits of the mask.
-define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
+define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) nounwind {
; SSE2-LABEL: widen_masked_store:
; SSE2: ## %bb.0:
; SSE2-NEXT: andb $1, %sil
@@ -5448,7 +5447,7 @@ define void @widen_masked_store(<3 x i32> %v, ptr %p, <3 x i1> %mask) {
ret void
}
-define void @zero_mask(ptr %addr, <2 x double> %val) {
+define void @zero_mask(ptr %addr, <2 x double> %val) nounwind {
; SSE-LABEL: zero_mask:
; SSE: ## %bb.0:
; SSE-NEXT: retq
@@ -5464,7 +5463,7 @@ define void @zero_mask(ptr %addr, <2 x double> %val) {
ret void
}
-define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask) {
+define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask) nounwind {
; SSE2-LABEL: PR11210:
; SSE2: ## %bb.0:
; SSE2-NEXT: movmskps %xmm2, %eax
@@ -5638,492 +5637,248 @@ define void @PR11210(<4 x float> %x, ptr %ptr, <4 x float> %y, <2 x i64> %mask)
ret void
}
-define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigger.ptr, ptr %val.ptr, ptr %dst) {
-; SSE2-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
-; SSE2: ## %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm6
-; SSE2-NEXT: movdqa 32(%rdi), %xmm7
-; SSE2-NEXT: movdqa 64(%rdi), %xmm8
-; SSE2-NEXT: movl 80(%rsi), %eax
-; SSE2-NEXT: movl 64(%rsi), %r8d
-; SSE2-NEXT: movl 48(%rsi), %r9d
-; SSE2-NEXT: movl 32(%rsi), %r10d
-; SSE2-NEXT: movl 16(%rsi), %r11d
-; SSE2-NEXT: movdqa 80(%rsi), %xmm0
-; SSE2-NEXT: movdqa 64(%rsi), %xmm1
-; SSE2-NEXT: movdqa 48(%rsi), %xmm2
-; SSE2-NEXT: movdqa 32(%rsi), %xmm3
-; SSE2-NEXT: movdqa 16(%rsi), %xmm4
-; SSE2-NEXT: movdqa (%rsi), %xmm5
-; SSE2-NEXT: packssdw 48(%rdi), %xmm7
-; SSE2-NEXT: packssdw 16(%rdi), %xmm6
-; SSE2-NEXT: packsswb %xmm7, %xmm6
-; SSE2-NEXT: packssdw 80(%rdi), %xmm8
-; SSE2-NEXT: packsswb %xmm8, %xmm8
-; SSE2-NEXT: pmovmskb %xmm6, %edi
-; SSE2-NEXT: andl $21845, %edi ## imm = 0x5555
-; SSE2-NEXT: pmovmskb %xmm8, %ecx
-; SSE2-NEXT: andl $85, %ecx
-; SSE2-NEXT: shll $16, %ecx
-; SSE2-NEXT: orl %edi, %ecx
-; SSE2-NEXT: testb $1, %cl
-; SSE2-NEXT: jne LBB31_1
-; SSE2-NEXT: ## %bb.2: ## %else
-; SSE2-NEXT: testb $2, %cl
-; SSE2-NEXT: jne LBB31_3
-; SSE2-NEXT: LBB31_4: ## %else2
-; SSE2-NEXT: testb $4, %cl
-; SSE2-NEXT: jne LBB31_5
-; SSE2-NEXT: LBB31_6: ## %else4
-; SSE2-NEXT: testb $8, %cl
-; SSE2-NEXT: jne LBB31_7
-; SSE2-NEXT: LBB31_8: ## %else6
-; SSE2-NEXT: testb $16, %cl
-; SSE2-NEXT: jne LBB31_9
-; SSE2-NEXT: LBB31_10: ## %else8
-; SSE2-NEXT: testb $32, %cl
-; SSE2-NEXT: jne LBB31_11
-; SSE2-NEXT: LBB31_12: ## %else10
-; SSE2-NEXT: testb $64, %cl
-; SSE2-NEXT: jne LBB31_13
-; SSE2-NEXT: LBB31_14: ## %else12
-; SSE2-NEXT: testb %cl, %cl
-; SSE2-NEXT: js LBB31_15
-; SSE2-NEXT: LBB31_16: ## %else14
-; SSE2-NEXT: testl $256, %ecx ## imm = 0x100
-; SSE2-NEXT: jne LBB31_17
-; SSE2-NEXT: LBB31_18: ## %else16
-; SSE2-NEXT: testl $512, %ecx ## imm = 0x200
-; SSE2-NEXT: jne LBB31_19
-; SSE2-NEXT: LBB31_20: ## %else18
-; SSE2-NEXT: testl $1024, %ecx ## imm = 0x400
-; SSE2-NEXT: jne LBB31_21
-; SSE2-NEXT: LBB31_22: ## %else20
-; SSE2-NEXT: testl $2048, %ecx ## imm = 0x800
-; SSE2-NEXT: jne LBB31_23
-; SSE2-NEXT: LBB31_24: ## %else22
-; SSE2-NEXT: testl $4096, %ecx ## imm = 0x1000
-; SSE2-NEXT: jne LBB31_25
-; SSE2-NEXT: LBB31_26: ## %else24
-; SSE2-NEXT: testl $8192, %ecx ## imm = 0x2000
-; SSE2-NEXT: jne LBB31_27
-; SSE2-NEXT: LBB31_28: ## %else26
-; SSE2-NEXT: testl $16384, %ecx ## imm = 0x4000
-; SSE2-NEXT: jne LBB31_29
-; SSE2-NEXT: LBB31_30: ## %else28
-; SSE2-NEXT: testw %cx, %cx
-; SSE2-NEXT: js LBB31_31
-; SSE2-NEXT: LBB31_32: ## %else30
-; SSE2-NEXT: testl $65536, %ecx ## imm = 0x10000
-; SSE2-NEXT: jne LBB31_33
-; SSE2-NEXT: LBB31_34: ## %else32
-; SSE2-NEXT: testl $131072, %ecx ## imm = 0x20000
-; SSE2-NEXT: jne LBB31_35
-; SSE2-NEXT: LBB31_36: ## %else34
-; SSE2-NEXT: testl $262144, %ecx ## imm = 0x40000
-; SSE2-NEXT: jne LBB31_37
-; SSE2-NEXT: LBB31_38: ## %else36
-; SSE2-NEXT: testl $524288, %ecx ## imm = 0x80000
-; SSE2-NEXT: jne LBB31_39
-; SSE2-NEXT: LBB31_40: ## %else38
-; SSE2-NEXT: testl $1048576, %ecx ## imm = 0x100000
-; SSE2-NEXT: jne LBB31_41
-; SSE2-NEXT: LBB31_42: ## %else40
-; SSE2-NEXT: testl $2097152, %ecx ## imm = 0x200000
-; SSE2-NEXT: jne LBB31_43
-; SSE2-NEXT: LBB31_44: ## %else42
-; SSE2-NEXT: testl $4194304, %ecx ## imm = 0x400000
-; SSE2-NEXT: je LBB31_46
-; SSE2-NEXT: LBB31_45: ## %cond.store43
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movl %eax, 88(%rdx)
-; SSE2-NEXT: LBB31_46: ## %else44
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: testb %al, %al
-; SSE2-NEXT: jne LBB31_48
-; SSE2-NEXT: ## %bb.47: ## %cond.store45
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movl %eax, 92(%rdx)
-; SSE2-NEXT: LBB31_48: ## %else46
-; SSE2-NEXT: retq
-; SSE2-NEXT: LBB31_1: ## %cond.store
-; SSE2-NEXT: movl (%rsi), %esi
-; SSE2-NEXT: movl %esi, (%rdx)
-; SSE2-NEXT: testb $2, %cl
-; SSE2-NEXT: je LBB31_4
-; SSE2-NEXT: LBB31_3: ## %cond.store1
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,1,1]
-; SSE2-NEXT: movd %xmm6, %esi
-; SSE2-NEXT: movl %esi, 4(%rdx)
-; SSE2-NEXT: testb $4, %cl
-; SSE2-NEXT: je LBB31_6
-; SSE2-NEXT: LBB31_5: ## %cond.store3
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
-; SSE2-NEXT: movd %xmm6, %esi
-; SSE2-NEXT: movl %esi, 8(%rdx)
-; SSE2-NEXT: testb $8, %cl
-; SSE2-NEXT: je LBB31_8
-; SSE2-NEXT: LBB31_7: ## %cond.store5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 12(%rdx)
-; SSE2-NEXT: testb $16, %cl
-; SSE2-NEXT: je LBB31_10
-; SSE2-NEXT: LBB31_9: ## %cond.store7
-; SSE2-NEXT: movl %r11d, 16(%rdx)
-; SSE2-NEXT: testb $32, %cl
-; SSE2-NEXT: je LBB31_12
-; SSE2-NEXT: LBB31_11: ## %cond.store9
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,1,1]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 20(%rdx)
-; SSE2-NEXT: testb $64, %cl
-; SSE2-NEXT: je LBB31_14
-; SSE2-NEXT: LBB31_13: ## %cond.store11
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; SSE2-NEXT: movd %xmm5, %esi
-; SSE2-NEXT: movl %esi, 24(%rdx)
-; SSE2-NEXT: testb %cl, %cl
-; SSE2-NEXT: jns LBB31_16
-; SSE2-NEXT: LBB31_15: ## %cond.store13
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 28(%rdx)
-; SSE2-NEXT: testl $256, %ecx ## imm = 0x100
-; SSE2-NEXT: je LBB31_18
-; SSE2-NEXT: LBB31_17: ## %cond.store15
-; SSE2-NEXT: movl %r10d, 32(%rdx)
-; SSE2-NEXT: testl $512, %ecx ## imm = 0x200
-; SSE2-NEXT: je LBB31_20
-; SSE2-NEXT: LBB31_19: ## %cond.store17
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,1,1]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 36(%rdx)
-; SSE2-NEXT: testl $1024, %ecx ## imm = 0x400
-; SSE2-NEXT: je LBB31_22
-; SSE2-NEXT: LBB31_21: ## %cond.store19
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
-; SSE2-NEXT: movd %xmm4, %esi
-; SSE2-NEXT: movl %esi, 40(%rdx)
-; SSE2-NEXT: testl $2048, %ecx ## imm = 0x800
-; SSE2-NEXT: je LBB31_24
-; SSE2-NEXT: LBB31_23: ## %cond.store21
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 44(%rdx)
-; SSE2-NEXT: testl $4096, %ecx ## imm = 0x1000
-; SSE2-NEXT: je LBB31_26
-; SSE2-NEXT: LBB31_25: ## %cond.store23
-; SSE2-NEXT: movl %r9d, 48(%rdx)
-; SSE2-NEXT: testl $8192, %ecx ## imm = 0x2000
-; SSE2-NEXT: je LBB31_28
-; SSE2-NEXT: LBB31_27: ## %cond.store25
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 52(%rdx)
-; SSE2-NEXT: testl $16384, %ecx ## imm = 0x4000
-; SSE2-NEXT: je LBB31_30
-; SSE2-NEXT: LBB31_29: ## %cond.store27
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE2-NEXT: movd %xmm3, %esi
-; SSE2-NEXT: movl %esi, 56(%rdx)
-; SSE2-NEXT: testw %cx, %cx
-; SSE2-NEXT: jns LBB31_32
-; SSE2-NEXT: LBB31_31: ## %cond.store29
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 60(%rdx)
-; SSE2-NEXT: testl $65536, %ecx ## imm = 0x10000
-; SSE2-NEXT: je LBB31_34
-; SSE2-NEXT: LBB31_33: ## %cond.store31
-; SSE2-NEXT: movl %r8d, 64(%rdx)
-; SSE2-NEXT: testl $131072, %ecx ## imm = 0x20000
-; SSE2-NEXT: je LBB31_36
-; SSE2-NEXT: LBB31_35: ## %cond.store33
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 68(%rdx)
-; SSE2-NEXT: testl $262144, %ecx ## imm = 0x40000
-; SSE2-NEXT: je LBB31_38
-; SSE2-NEXT: LBB31_37: ## %cond.store35
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movd %xmm2, %esi
-; SSE2-NEXT: movl %esi, 72(%rdx)
-; SSE2-NEXT: testl $524288, %ecx ## imm = 0x80000
-; SSE2-NEXT: je LBB31_40
-; SSE2-NEXT: LBB31_39: ## %cond.store37
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; SSE2-NEXT: movd %xmm1, %esi
-; SSE2-NEXT: movl %esi, 76(%rdx)
-; SSE2-NEXT: testl $1048576, %ecx ## imm = 0x100000
-; SSE2-NEXT: je LBB31_42
-; SSE2-NEXT: LBB31_41: ## %cond.store39
-; SSE2-NEXT: movl %eax, 80(%rdx)
-; SSE2-NEXT: testl $2097152, %ecx ## imm = 0x200000
-; SSE2-NEXT: je LBB31_44
-; SSE2-NEXT: LBB31_43: ## %cond.store41
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movl %eax, 84(%rdx)
-; SSE2-NEXT: testl $4194304, %ecx ## imm = 0x400000
-; SSE2-NEXT: jne LBB31_45
-; SSE2-NEXT: jmp LBB31_46
-;
-; SSE4-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
-; SSE4: ## %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: .cfi_def_cfa_offset 16
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: .cfi_def_cfa_offset 24
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: .cfi_def_cfa_offset 32
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: .cfi_def_cfa_offset 40
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: .cfi_def_cfa_offset 48
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: .cfi_def_cfa_offset 56
-; SSE4-NEXT: .cfi_offset %rbx, -56
-; SSE4-NEXT: .cfi_offset %r12, -48
-; SSE4-NEXT: .cfi_offset %r13, -40
-; SSE4-NEXT: .cfi_offset %r14, -32
-; SSE4-NEXT: .cfi_offset %r15, -24
-; SSE4-NEXT: .cfi_offset %rbp, -16
-; SSE4-NEXT: movdqa (%rdi), %xmm1
-; SSE4-NEXT: movdqa 32(%rdi), %xmm2
-; SSE4-NEXT: movdqa 64(%rdi), %xmm0
-; SSE4-NEXT: movl 92(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 88(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 84(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 80(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 76(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 72(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 68(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 64(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 60(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 56(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: movl 52(%rsi), %eax
-; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
-; SSE4-NEXT: packssdw 48(%rdi), %xmm2
-; SSE4-NEXT: packssdw 16(%rdi), %xmm1
-; SSE4-NEXT: packsswb %xmm2, %xmm1
-; SSE4-NEXT: packssdw 80(%rdi), %xmm0
-; SSE4-NEXT: packsswb %xmm0, %xmm0
-; SSE4-NEXT: pmovmskb %xmm1, %eax
-; SSE4-NEXT: andl $21845, %eax ## imm = 0x5555
-; SSE4-NEXT: pmovmskb %xmm0, %edi
-; SSE4-NEXT: andl $85, %edi
-; SSE4-NEXT: shll $16, %edi
-; SSE4-NEXT: orl %eax, %edi
-; SSE4-NEXT: movl 48(%rsi), %r13d
-; SSE4-NEXT: testb $1, %dil
-; SSE4-NEXT: movl 44(%rsi), %eax
-; SSE4-NEXT: movl 40(%rsi), %ecx
-; SSE4-NEXT: movl 36(%rsi), %r8d
-; SSE4-NEXT: movl 32(%rsi), %r9d
-; SSE4-NEXT: movl 28(%rsi), %r10d
-; SSE4-NEXT: movl 24(%rsi), %r11d
-; SSE4-NEXT: movl 20(%rsi), %ebx
-; SSE4-NEXT: movl 16(%rsi), %ebp
-; SSE4-NEXT: movl 12(%rsi), %r14d
-; SSE4-NEXT: movl 8(%rsi), %r15d
-; SSE4-NEXT: movl 4(%rsi), %r12d
-; SSE4-NEXT: jne LBB31_1
-; SSE4-NEXT: ## %bb.2: ## %else
-; SSE4-NEXT: testb $2, %dil
-; SSE4-NEXT: jne LBB31_3
-; SSE4-NEXT: LBB31_4: ## %else2
-; SSE4-NEXT: testb $4, %dil
-; SSE4-NEXT: jne LBB31_5
-; SSE4-NEXT: LBB31_6: ## %else4
-; SSE4-NEXT: testb $8, %dil
-; SSE4-NEXT: jne LBB31_7
-; SSE4-NEXT: LBB31_8: ## %else6
-; SSE4-NEXT: testb $16, %dil
-; SSE4-NEXT: jne LBB31_9
-; SSE4-NEXT: LBB31_10: ## %else8
-; SSE4-NEXT: testb $32, %dil
-; SSE4-NEXT: jne LBB31_11
-; SSE4-NEXT: LBB31_12: ## %else10
-; SSE4-NEXT: testb $64, %dil
-; SSE4-NEXT: jne LBB31_13
-; SSE4-NEXT: LBB31_14: ## %else12
-; SSE4-NEXT: testb %dil, %dil
-; SSE4-NEXT: js LBB31_15
-; SSE4-NEXT: LBB31_16: ## %else14
-; SSE4-NEXT: testl $256, %edi ## imm = 0x100
-; SSE4-NEXT: jne LBB31_17
-; SSE4-NEXT: LBB31_18: ## %else16
-; SSE4-NEXT: testl $512, %edi ## imm = 0x200
-; SSE4-NEXT: jne LBB31_19
-; SSE4-NEXT: LBB31_20: ## %else18
-; SSE4-NEXT: testl $1024, %edi ## imm = 0x400
-; SSE4-NEXT: jne LBB31_21
-; SSE4-NEXT: LBB31_22: ## %else20
-; SSE4-NEXT: testl $2048, %edi ## imm = 0x800
-; SSE4-NEXT: jne LBB31_23
-; SSE4-NEXT: LBB31_24: ## %else22
-; SSE4-NEXT: testl $4096, %edi ## imm = 0x1000
-; SSE4-NEXT: jne LBB31_25
-; SSE4-NEXT: LBB31_26: ## %else24
-; SSE4-NEXT: testl $8192, %edi ## imm = 0x2000
-; SSE4-NEXT: jne LBB31_27
-; SSE4-NEXT: LBB31_28: ## %else26
-; SSE4-NEXT: testl $16384, %edi ## imm = 0x4000
-; SSE4-NEXT: jne LBB31_29
-; SSE4-NEXT: LBB31_30: ## %else28
-; SSE4-NEXT: testw %di, %di
-; SSE4-NEXT: js LBB31_31
-; SSE4-NEXT: LBB31_32: ## %else30
-; SSE4-NEXT: testl $65536, %edi ## imm = 0x10000
-; SSE4-NEXT: jne LBB31_33
-; SSE4-NEXT: LBB31_34: ## %else32
-; SSE4-NEXT: testl $131072, %edi ## imm = 0x20000
-; SSE4-NEXT: jne LBB31_35
-; SSE4-NEXT: LBB31_36: ## %else34
-; SSE4-NEXT: testl $262144, %edi ## imm = 0x40000
-; SSE4-NEXT: jne LBB31_37
-; SSE4-NEXT: LBB31_38: ## %else36
-; SSE4-NEXT: testl $524288, %edi ## imm = 0x80000
-; SSE4-NEXT: jne LBB31_39
-; SSE4-NEXT: LBB31_40: ## %else38
-; SSE4-NEXT: testl $1048576, %edi ## imm = 0x100000
-; SSE4-NEXT: jne LBB31_41
-; SSE4-NEXT: LBB31_42: ## %else40
-; SSE4-NEXT: testl $2097152, %edi ## imm = 0x200000
-; SSE4-NEXT: jne LBB31_43
-; SSE4-NEXT: LBB31_44: ## %else42
-; SSE4-NEXT: testl $4194304, %edi ## imm = 0x400000
-; SSE4-NEXT: je LBB31_46
-; SSE4-NEXT: LBB31_45: ## %cond.store43
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 88(%rdx)
-; SSE4-NEXT: LBB31_46: ## %else44
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: testb %al, %al
-; SSE4-NEXT: jne LBB31_48
-; SSE4-NEXT: ## %bb.47: ## %cond.store45
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 92(%rdx)
-; SSE4-NEXT: LBB31_48: ## %else46
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
-; SSE4-NEXT: retq
-; SSE4-NEXT: LBB31_1: ## %cond.store
-; SSE4-NEXT: movl (%rsi), %esi
-; SSE4-NEXT: movl %esi, (%rdx)
-; SSE4-NEXT: testb $2, %dil
-; SSE4-NEXT: je LBB31_4
-; SSE4-NEXT: LBB31_3: ## %cond.store1
-; SSE4-NEXT: movl %r12d, 4(%rdx)
-; SSE4-NEXT: testb $4, %dil
-; SSE4-NEXT: je LBB31_6
-; SSE4-NEXT: LBB31_5: ## %cond.store3
-; SSE4-NEXT: movl %r15d, 8(%rdx)
-; SSE4-NEXT: testb $8, %dil
-; SSE4-NEXT: je LBB31_8
-; SSE4-NEXT: LBB31_7: ## %cond.store5
-; SSE4-NEXT: movl %r14d, 12(%rdx)
-; SSE4-NEXT: testb $16, %dil
-; SSE4-NEXT: je LBB31_10
-; SSE4-NEXT: LBB31_9: ## %cond.store7
-; SSE4-NEXT: movl %ebp, 16(%rdx)
-; SSE4-NEXT: testb $32, %dil
-; SSE4-NEXT: je LBB31_12
-; SSE4-NEXT: LBB31_11: ## %cond.store9
-; SSE4-NEXT: movl %ebx, 20(%rdx)
-; SSE4-NEXT: testb $64, %dil
-; SSE4-NEXT: je LBB31_14
-; SSE4-NEXT: LBB31_13: ## %cond.store11
-; SSE4-NEXT: movl %r11d, 24(%rdx)
-; SSE4-NEXT: testb %dil, %dil
-; SSE4-NEXT: jns LBB31_16
-; SSE4-NEXT: LBB31_15: ## %cond.store13
-; SSE4-NEXT: movl %r10d, 28(%rdx)
-; SSE4-NEXT: testl $256, %edi ## imm = 0x100
-; SSE4-NEXT: je LBB31_18
-; SSE4-NEXT: LBB31_17: ## %cond.store15
-; SSE4-NEXT: movl %r9d, 32(%rdx)
-; SSE4-NEXT: testl $512, %edi ## imm = 0x200
-; SSE4-NEXT: je LBB31_20
-; SSE4-NEXT: LBB31_19: ## %cond.store17
-; SSE4-NEXT: movl %r8d, 36(%rdx)
-; SSE4-NEXT: testl $1024, %edi ## imm = 0x400
-; SSE4-NEXT: je LBB31_22
-; SSE4-NEXT: LBB31_21: ## %cond.store19
-; SSE4-NEXT: movl %ecx, 40(%rdx)
-; SSE4-NEXT: testl $2048, %edi ## imm = 0x800
-; SSE4-NEXT: je LBB31_24
-; SSE4-NEXT: LBB31_23: ## %cond.store21
-; SSE4-NEXT: movl %eax, 44(%rdx)
-; SSE4-NEXT: testl $4096, %edi ## imm = 0x1000
-; SSE4-NEXT: je LBB31_26
-; SSE4-NEXT: LBB31_25: ## %cond.store23
-; SSE4-NEXT: movl %r13d, 48(%rdx)
-; SSE4-NEXT: testl $8192, %edi ## imm = 0x2000
-; SSE4-NEXT: je LBB31_28
-; SSE4-NEXT: LBB31_27: ## %cond.store25
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 52(%rdx)
-; SSE4-NEXT: testl $16384, %edi ## imm = 0x4000
-; SSE4-NEXT: je LBB31_30
-; SSE4-NEXT: LBB31_29: ## %cond.store27
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 56(%rdx)
-; SSE4-NEXT: testw %di, %di
-; SSE4-NEXT: jns LBB31_32
-; SSE4-NEXT: LBB31_31: ## %cond.store29
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 60(%rdx)
-; SSE4-NEXT: testl $65536, %edi ## imm = 0x10000
-; SSE4-NEXT: je LBB31_34
-; SSE4-NEXT: LBB31_33: ## %cond.store31
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 64(%rdx)
-; SSE4-NEXT: testl $131072, %edi ## imm = 0x20000
-; SSE4-NEXT: je LBB31_36
-; SSE4-NEXT: LBB31_35: ## %cond.store33
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 68(%rdx)
-; SSE4-NEXT: testl $262144, %edi ## imm = 0x40000
-; SSE4-NEXT: je LBB31_38
-; SSE4-NEXT: LBB31_37: ## %cond.store35
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 72(%rdx)
-; SSE4-NEXT: testl $524288, %edi ## imm = 0x80000
-; SSE4-NEXT: je LBB31_40
-; SSE4-NEXT: LBB31_39: ## %cond.store37
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 76(%rdx)
-; SSE4-NEXT: testl $1048576, %edi ## imm = 0x100000
-; SSE4-NEXT: je LBB31_42
-; SSE4-NEXT: LBB31_41: ## %cond.store39
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 80(%rdx)
-; SSE4-NEXT: testl $2097152, %edi ## imm = 0x200000
-; SSE4-NEXT: je LBB31_44
-; SSE4-NEXT: LBB31_43: ## %cond.store41
-; SSE4-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
-; SSE4-NEXT: movl %eax, 84(%rdx)
-; SSE4-NEXT: testl $4194304, %edi ## imm = 0x400000
-; SSE4-NEXT: jne LBB31_45
-; SSE4-NEXT: jmp LBB31_46
+define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigger.ptr, ptr %val.ptr, ptr %dst) nounwind {
+; SSE-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
+; SSE: ## %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movdqa (%rdi), %xmm1
+; SSE-NEXT: movdqa 32(%rdi), %xmm2
+; SSE-NEXT: movdqa 64(%rdi), %xmm0
+; SSE-NEXT: movl 92(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 88(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 84(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 80(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 76(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 72(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 68(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 64(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 60(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 56(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: movl 52(%rsi), %eax
+; SSE-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
+; SSE-NEXT: packssdw 48(%rdi), %xmm2
+; SSE-NEXT: packssdw 16(%rdi), %xmm1
+; SSE-NEXT: packsswb %xmm2, %xmm1
+; SSE-NEXT: packssdw 80(%rdi), %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: andl $21845, %eax ## imm = 0x5555
+; SSE-NEXT: pmovmskb %xmm0, %edi
+; SSE-NEXT: andl $85, %edi
+; SSE-NEXT: shll $16, %edi
+; SSE-NEXT: orl %eax, %edi
+; SSE-NEXT: movl 48(%rsi), %r13d
+; SSE-NEXT: testb $1, %dil
+; SSE-NEXT: movl 44(%rsi), %eax
+; SSE-NEXT: movl 40(%rsi), %ecx
+; SSE-NEXT: movl 36(%rsi), %r8d
+; SSE-NEXT: movl 32(%rsi), %r9d
+; SSE-NEXT: movl 28(%rsi), %r10d
+; SSE-NEXT: movl 24(%rsi), %r11d
+; SSE-NEXT: movl 20(%rsi), %ebx
+; SSE-NEXT: movl 16(%rsi), %ebp
+; SSE-NEXT: movl 12(%rsi), %r14d
+; SSE-NEXT: movl 8(%rsi), %r15d
+; SSE-NEXT: movl 4(%rsi), %r12d
+; SSE-NEXT: jne LBB31_1
+; SSE-NEXT: ## %bb.2: ## %else
+; SSE-NEXT: testb $2, %dil
+; SSE-NEXT: jne LBB31_3
+; SSE-NEXT: LBB31_4: ## %else2
+; SSE-NEXT: testb $4, %dil
+; SSE-NEXT: jne LBB31_5
+; SSE-NEXT: LBB31_6: ## %else4
+; SSE-NEXT: testb $8, %dil
+; SSE-NEXT: jne LBB31_7
+; SSE-NEXT: LBB31_8: ## %else6
+; SSE-NEXT: testb $16, %dil
+; SSE-NEXT: jne LBB31_9
+; SSE-NEXT: LBB31_10: ## %else8
+; SSE-NEXT: testb $32, %dil
+; SSE-NEXT: jne LBB31_11
+; SSE-NEXT: LBB31_12: ## %else10
+; SSE-NEXT: testb $64, %dil
+; SSE-NEXT: jne LBB31_13
+; SSE-NEXT: LBB31_14: ## %else12
+; SSE-NEXT: testb %dil, %dil
+; SSE-NEXT: js LBB31_15
+; SSE-NEXT: LBB31_16: ## %else14
+; SSE-NEXT: testl $256, %edi ## imm = 0x100
+; SSE-NEXT: jne LBB31_17
+; SSE-NEXT: LBB31_18: ## %else16
+; SSE-NEXT: testl $512, %edi ## imm = 0x200
+; SSE-NEXT: jne LBB31_19
+; SSE-NEXT: LBB31_20: ## %else18
+; SSE-NEXT: testl $1024, %edi ## imm = 0x400
+; SSE-NEXT: jne LBB31_21
+; SSE-NEXT: LBB31_22: ## %else20
+; SSE-NEXT: testl $2048, %edi ## imm = 0x800
+; SSE-NEXT: jne LBB31_23
+; SSE-NEXT: LBB31_24: ## %else22
+; SSE-NEXT: testl $4096, %edi ## imm = 0x1000
+; SSE-NEXT: jne LBB31_25
+; SSE-NEXT: LBB31_26: ## %else24
+; SSE-NEXT: testl $8192, %edi ## imm = 0x2000
+; SSE-NEXT: jne LBB31_27
+; SSE-NEXT: LBB31_28: ## %else26
+; SSE-NEXT: testl $16384, %edi ## imm = 0x4000
+; SSE-NEXT: jne LBB31_29
+; SSE-NEXT: LBB31_30: ## %else28
+; SSE-NEXT: testw %di, %di
+; SSE-NEXT: js LBB31_31
+; SSE-NEXT: LBB31_32: ## %else30
+; SSE-NEXT: testl $65536, %edi ## imm = 0x10000
+; SSE-NEXT: jne LBB31_33
+; SSE-NEXT: LBB31_34: ## %else32
+; SSE-NEXT: testl $131072, %edi ## imm = 0x20000
+; SSE-NEXT: jne LBB31_35
+; SSE-NEXT: LBB31_36: ## %else34
+; SSE-NEXT: testl $262144, %edi ## imm = 0x40000
+; SSE-NEXT: jne LBB31_37
+; SSE-NEXT: LBB31_38: ## %else36
+; SSE-NEXT: testl $524288, %edi ## imm = 0x80000
+; SSE-NEXT: jne LBB31_39
+; SSE-NEXT: LBB31_40: ## %else38
+; SSE-NEXT: testl $1048576, %edi ## imm = 0x100000
+; SSE-NEXT: jne LBB31_41
+; SSE-NEXT: LBB31_42: ## %else40
+; SSE-NEXT: testl $2097152, %edi ## imm = 0x200000
+; SSE-NEXT: jne LBB31_43
+; SSE-NEXT: LBB31_44: ## %else42
+; SSE-NEXT: testl $4194304, %edi ## imm = 0x400000
+; SSE-NEXT: je LBB31_46
+; SSE-NEXT: LBB31_45: ## %cond.store43
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 88(%rdx)
+; SSE-NEXT: LBB31_46: ## %else44
+; SSE-NEXT: movb $1, %al
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: jne LBB31_48
+; SSE-NEXT: ## %bb.47: ## %cond.store45
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 92(%rdx)
+; SSE-NEXT: LBB31_48: ## %else46
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+; SSE-NEXT: LBB31_1: ## %cond.store
+; SSE-NEXT: movl (%rsi), %esi
+; SSE-NEXT: movl %esi, (%rdx)
+; SSE-NEXT: testb $2, %dil
+; SSE-NEXT: je LBB31_4
+; SSE-NEXT: LBB31_3: ## %cond.store1
+; SSE-NEXT: movl %r12d, 4(%rdx)
+; SSE-NEXT: testb $4, %dil
+; SSE-NEXT: je LBB31_6
+; SSE-NEXT: LBB31_5: ## %cond.store3
+; SSE-NEXT: movl %r15d, 8(%rdx)
+; SSE-NEXT: testb $8, %dil
+; SSE-NEXT: je LBB31_8
+; SSE-NEXT: LBB31_7: ## %cond.store5
+; SSE-NEXT: movl %r14d, 12(%rdx)
+; SSE-NEXT: testb $16, %dil
+; SSE-NEXT: je LBB31_10
+; SSE-NEXT: LBB31_9: ## %cond.store7
+; SSE-NEXT: movl %ebp, 16(%rdx)
+; SSE-NEXT: testb $32, %dil
+; SSE-NEXT: je LBB31_12
+; SSE-NEXT: LBB31_11: ## %cond.store9
+; SSE-NEXT: movl %ebx, 20(%rdx)
+; SSE-NEXT: testb $64, %dil
+; SSE-NEXT: je LBB31_14
+; SSE-NEXT: LBB31_13: ## %cond.store11
+; SSE-NEXT: movl %r11d, 24(%rdx)
+; SSE-NEXT: testb %dil, %dil
+; SSE-NEXT: jns LBB31_16
+; SSE-NEXT: LBB31_15: ## %cond.store13
+; SSE-NEXT: movl %r10d, 28(%rdx)
+; SSE-NEXT: testl $256, %edi ## imm = 0x100
+; SSE-NEXT: je LBB31_18
+; SSE-NEXT: LBB31_17: ## %cond.store15
+; SSE-NEXT: movl %r9d, 32(%rdx)
+; SSE-NEXT: testl $512, %edi ## imm = 0x200
+; SSE-NEXT: je LBB31_20
+; SSE-NEXT: LBB31_19: ## %cond.store17
+; SSE-NEXT: movl %r8d, 36(%rdx)
+; SSE-NEXT: testl $1024, %edi ## imm = 0x400
+; SSE-NEXT: je LBB31_22
+; SSE-NEXT: LBB31_21: ## %cond.store19
+; SSE-NEXT: movl %ecx, 40(%rdx)
+; SSE-NEXT: testl $2048, %edi ## imm = 0x800
+; SSE-NEXT: je LBB31_24
+; SSE-NEXT: LBB31_23: ## %cond.store21
+; SSE-NEXT: movl %eax, 44(%rdx)
+; SSE-NEXT: testl $4096, %edi ## imm = 0x1000
+; SSE-NEXT: je LBB31_26
+; SSE-NEXT: LBB31_25: ## %cond.store23
+; SSE-NEXT: movl %r13d, 48(%rdx)
+; SSE-NEXT: testl $8192, %edi ## imm = 0x2000
+; SSE-NEXT: je LBB31_28
+; SSE-NEXT: LBB31_27: ## %cond.store25
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 52(%rdx)
+; SSE-NEXT: testl $16384, %edi ## imm = 0x4000
+; SSE-NEXT: je LBB31_30
+; SSE-NEXT: LBB31_29: ## %cond.store27
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 56(%rdx)
+; SSE-NEXT: testw %di, %di
+; SSE-NEXT: jns LBB31_32
+; SSE-NEXT: LBB31_31: ## %cond.store29
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 60(%rdx)
+; SSE-NEXT: testl $65536, %edi ## imm = 0x10000
+; SSE-NEXT: je LBB31_34
+; SSE-NEXT: LBB31_33: ## %cond.store31
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 64(%rdx)
+; SSE-NEXT: testl $131072, %edi ## imm = 0x20000
+; SSE-NEXT: je LBB31_36
+; SSE-NEXT: LBB31_35: ## %cond.store33
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 68(%rdx)
+; SSE-NEXT: testl $262144, %edi ## imm = 0x40000
+; SSE-NEXT: je LBB31_38
+; SSE-NEXT: LBB31_37: ## %cond.store35
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 72(%rdx)
+; SSE-NEXT: testl $524288, %edi ## imm = 0x80000
+; SSE-NEXT: je LBB31_40
+; SSE-NEXT: LBB31_39: ## %cond.store37
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 76(%rdx)
+; SSE-NEXT: testl $1048576, %edi ## imm = 0x100000
+; SSE-NEXT: je LBB31_42
+; SSE-NEXT: LBB31_41: ## %cond.store39
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 80(%rdx)
+; SSE-NEXT: testl $2097152, %edi ## imm = 0x200000
+; SSE-NEXT: je LBB31_44
+; SSE-NEXT: LBB31_43: ## %cond.store41
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax ## 4-byte Reload
+; SSE-NEXT: movl %eax, 84(%rdx)
+; SSE-NEXT: testl $4194304, %edi ## imm = 0x400000
+; SSE-NEXT: jne LBB31_45
+; SSE-NEXT: jmp LBB31_46
;
; AVX1-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
; AVX1: ## %bb.0:
@@ -6266,7 +6021,7 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
}
; From https://reviews.llvm.org/rGf8d9097168b7#1165311
-define void @undefshuffle(<8 x i1> %i0, ptr %src, ptr %dst) #0 {
+define void @undefshuffle(<8 x i1> %i0, ptr %src, ptr %dst) nounwind {
; SSE2-LABEL: undefshuffle:
; SSE2: ## %bb.0: ## %else
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 5da18ee6ad7c..01056a8b2c24 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -2369,6 +2369,31 @@ define void @PR41097() {
ret void
}
+; FIXME - should use INSERTPS
+define <2 x float> @PR86068(<2 x float> %0, <2 x float> %1) {
+; SSE2-LABEL: PR86068:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,1]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: PR86068:
+; SSE42: # %bb.0: # %entry
+; SSE42-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: PR86068:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
+; AVX-NEXT: retq
+entry:
+ %3 = shufflevector <2 x float> %1, <2 x float> poison, <2 x i32> <i32 1, i32 poison>
+ %4 = shufflevector <2 x float> %3, <2 x float> %0, <2 x i32> <i32 0, i32 3>
+ ret <2 x float> %4
+}
+
define void @D107009(ptr %input, ptr %output) {
; SSE-LABEL: D107009:
; SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 1bd427c4a4b0..81dafdffe311 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -489,7 +489,6 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: jb LBB1_4
; CHECK-NEXT: ## %bb.5: ## %bb9
; CHECK-NEXT: ## in Loop: Header=BB1_4 Depth=1
-; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: incl %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: addl %edi, %edx
diff --git a/llvm/test/CodeGen/X86/pr45378.ll b/llvm/test/CodeGen/X86/pr45378.ll
index 426f4eed662a..6a5770a4b4ad 100644
--- a/llvm/test/CodeGen/X86/pr45378.ll
+++ b/llvm/test/CodeGen/X86/pr45378.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK,AVX
declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
@@ -71,28 +71,12 @@ define i1 @parseHeaders2_scalar_or(ptr %ptr) nounwind {
}
define i1 @parseHeaders2_scalar_and(ptr %ptr) nounwind {
-; SSE2-LABEL: parseHeaders2_scalar_and:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: testq %rax, (%rdi)
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: parseHeaders2_scalar_and:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movq (%rdi), %rax
-; SSE41-NEXT: testq %rax, 8(%rdi)
-; SSE41-NEXT: sete %al
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: parseHeaders2_scalar_and:
-; AVX: # %bb.0:
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: testq %rax, 8(%rdi)
-; AVX-NEXT: sete %al
-; AVX-NEXT: retq
+; CHECK-LABEL: parseHeaders2_scalar_and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: testq %rax, 8(%rdi)
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
%vload = load <2 x i64>, ptr %ptr, align 8
%v1 = extractelement <2 x i64> %vload, i32 0
%v2 = extractelement <2 x i64> %vload, i32 1
diff --git a/llvm/test/CodeGen/X86/pr86305.ll b/llvm/test/CodeGen/X86/pr86305.ll
new file mode 100644
index 000000000000..79b42bb2532c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr86305.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16 | FileCheck %s
+
+define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind {
+; CHECK-LABEL: add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdx, %rbx
+; CHECK-NEXT: movzwl (%rsi), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: vmovd %eax, %xmm0
+; CHECK-NEXT: movzwl (%rdi), %eax
+; CHECK-NEXT: shll $16, %eax
+; CHECK-NEXT: vmovd %eax, %xmm1
+; CHECK-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, (%rbx)
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ %a = load bfloat, ptr %pa
+ %b = load bfloat, ptr %pb
+ %add = fadd bfloat %a, %b
+ store bfloat %add, ptr %pc
+ ret void
+}
+
+define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind {
+; CHECK-LABEL: fptrunc_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $72, %rsp
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, %ebx
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %ebp
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %r14d
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vpextrw $0, %xmm0, %r15d
+; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq __truncsfbf2@PLT
+; CHECK-NEXT: vpextrw $0, %xmm0, %eax
+; CHECK-NEXT: vmovd %r15d, %xmm0
+; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $2, %r14d, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $3, %ebp, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $4, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $5, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $6, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: vpinsrw $7, %ebx, %xmm0, %xmm0
+; CHECK-NEXT: addq $72, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
+ %b = fptrunc <4 x float> %a to <4 x bfloat>
+ ret <4 x bfloat> %b
+}
diff --git a/llvm/test/CodeGen/X86/pr86880.mir b/llvm/test/CodeGen/X86/pr86880.mir
new file mode 100644
index 000000000000..92ebf9a265bb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr86880.mir
@@ -0,0 +1,21 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=x86_64-- -run-pass=machine-cp -o - %s | FileCheck %s
+
+---
+name: foo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $eax
+
+ ; CHECK-LABEL: name: foo
+ ; CHECK: liveins: $eax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: INLINEASM &"", 0 /* attdialect */, 10 /* regdef */, implicit-def dead $eax, 2686986 /* regdef:GR32_NOREX2 */, def renamable $r15d, 10 /* regdef */, implicit-def dead $ecx, 10 /* regdef */, implicit-def dead $edx, 2147483657 /* reguse tiedto:$0 */, $eax(tied-def 3)
+ ; CHECK-NEXT: renamable $ecx = COPY killed renamable $r15d
+ ; CHECK-NEXT: NOOP implicit $ecx
+ INLINEASM &"", 0 /* attdialect */, 10 /* regdef */, implicit-def dead $eax, 2686986 /* regdef:GR32_NOREX2 */, def renamable $r15d, 10 /* regdef */, implicit-def dead $ecx, 10 /* regdef */, implicit-def dead $edx, 2147483657 /* reguse tiedto:$0 */, $eax(tied-def 3)
+ renamable $ecx = COPY killed renamable $r15d
+ NOOP implicit $ecx
+
+...
diff --git a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
index 13b5a541fa22..d09bcd6a6b40 100644
--- a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
+++ b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
@@ -103,6 +103,7 @@ registers:
- { id: 82, class: gr32 }
frameInfo:
maxAlignment: 4
+ adjustsStack: true
hasCalls: true
fixedStack:
- { id: 0, size: 4, alignment: 4, stack-id: default, isImmutable: true }
diff --git a/llvm/test/CodeGen/X86/sar_fold.ll b/llvm/test/CodeGen/X86/sar_fold.ll
index 21655e19440a..0f1396954b03 100644
--- a/llvm/test/CodeGen/X86/sar_fold.ll
+++ b/llvm/test/CodeGen/X86/sar_fold.ll
@@ -44,3 +44,44 @@ define i32 @shl24sar25(i32 %a) #0 {
%2 = ashr exact i32 %1, 25
ret i32 %2
}
+
+define void @shl144sar48(ptr %p) #0 {
+; CHECK-LABEL: shl144sar48:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movswl (%eax), %ecx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: sarl $31, %edx
+; CHECK-NEXT: shldl $2, %ecx, %edx
+; CHECK-NEXT: shll $2, %ecx
+; CHECK-NEXT: movl %ecx, 12(%eax)
+; CHECK-NEXT: movl %edx, 16(%eax)
+; CHECK-NEXT: movl $0, 8(%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: retl
+ %a = load i160, ptr %p
+ %1 = shl i160 %a, 144
+ %2 = ashr exact i160 %1, 46
+ store i160 %2, ptr %p
+ ret void
+}
+
+define void @shl144sar2(ptr %p) #0 {
+; CHECK-LABEL: shl144sar2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movswl (%eax), %ecx
+; CHECK-NEXT: shll $14, %ecx
+; CHECK-NEXT: movl %ecx, 16(%eax)
+; CHECK-NEXT: movl $0, 8(%eax)
+; CHECK-NEXT: movl $0, 12(%eax)
+; CHECK-NEXT: movl $0, 4(%eax)
+; CHECK-NEXT: movl $0, (%eax)
+; CHECK-NEXT: retl
+ %a = load i160, ptr %p
+ %1 = shl i160 %a, 144
+ %2 = ashr exact i160 %1, 2
+ store i160 %2, ptr %p
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
index 2187c653f76c..97c3c2040b29 100644
--- a/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
+++ b/llvm/test/CodeGen/X86/setcc-non-simple-type.ll
@@ -60,36 +60,30 @@ define void @failing(ptr %0, ptr %1) nounwind {
; CHECK-NEXT: .LBB0_2: # %vector.body
; CHECK-NEXT: # Parent Loop BB0_1 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movdqu 1024(%rdx,%rdi), %xmm5
-; CHECK-NEXT: movdqu 1040(%rdx,%rdi), %xmm6
-; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; CHECK-NEXT: movq %xmm5, %r8
-; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
-; CHECK-NEXT: movq %xmm5, %r9
-; CHECK-NEXT: cmpq 1040(%rdx,%rdi), %rsi
-; CHECK-NEXT: movq %rcx, %r10
-; CHECK-NEXT: sbbq %r9, %r10
-; CHECK-NEXT: setge %r9b
-; CHECK-NEXT: movzbl %r9b, %r9d
-; CHECK-NEXT: andl $1, %r9d
-; CHECK-NEXT: negq %r9
-; CHECK-NEXT: movq %r9, %xmm5
; CHECK-NEXT: cmpq 1024(%rdx,%rdi), %rsi
-; CHECK-NEXT: movq %rcx, %r9
-; CHECK-NEXT: sbbq %r8, %r9
+; CHECK-NEXT: movq %rcx, %r8
+; CHECK-NEXT: sbbq 1032(%rdx,%rdi), %r8
+; CHECK-NEXT: setge %r8b
+; CHECK-NEXT: movzbl %r8b, %r8d
+; CHECK-NEXT: andl $1, %r8d
+; CHECK-NEXT: negq %r8
+; CHECK-NEXT: movq %r8, %xmm5
+; CHECK-NEXT: cmpq 1040(%rdx,%rdi), %rsi
+; CHECK-NEXT: movq %rcx, %r8
+; CHECK-NEXT: sbbq 1048(%rdx,%rdi), %r8
; CHECK-NEXT: setge %r8b
; CHECK-NEXT: movzbl %r8b, %r8d
; CHECK-NEXT: andl $1, %r8d
; CHECK-NEXT: negq %r8
; CHECK-NEXT: movq %r8, %xmm6
-; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm5[0]
-; CHECK-NEXT: movdqa %xmm1, %xmm5
-; CHECK-NEXT: psllq %xmm4, %xmm5
+; CHECK-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
+; CHECK-NEXT: movdqa %xmm1, %xmm6
+; CHECK-NEXT: psllq %xmm4, %xmm6
; CHECK-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3]
; CHECK-NEXT: movdqa %xmm1, %xmm8
; CHECK-NEXT: psllq %xmm7, %xmm8
-; CHECK-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1]
-; CHECK-NEXT: andpd %xmm6, %xmm8
+; CHECK-NEXT: movsd {{.*#+}} xmm8 = xmm6[0],xmm8[1]
+; CHECK-NEXT: andpd %xmm5, %xmm8
; CHECK-NEXT: orpd %xmm8, %xmm3
; CHECK-NEXT: paddq %xmm2, %xmm4
; CHECK-NEXT: addq $32, %rdi
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index 2610f4322c8e..62051d170994 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -1983,91 +1983,75 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movzwl 16(%eax), %edx
; X86-SSE-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-SSE-NEXT: movdqa (%eax), %xmm3
-; X86-SSE-NEXT: movdqa (%ecx), %xmm0
-; X86-SSE-NEXT: movdqa 16(%ecx), %xmm1
-; X86-SSE-NEXT: pxor %xmm5, %xmm5
-; X86-SSE-NEXT: movdqa %xmm3, %xmm2
-; X86-SSE-NEXT: pextrw $7, %xmm3, %eax
-; X86-SSE-NEXT: pextrw $4, %xmm3, %edi
-; X86-SSE-NEXT: pextrw $0, %xmm3, %ebp
-; X86-SSE-NEXT: pextrw $1, %xmm3, %esi
-; X86-SSE-NEXT: pextrw $3, %xmm3, %ebx
-; X86-SSE-NEXT: movdqa %xmm3, %xmm4
-; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; X86-SSE-NEXT: movd %xmm3, %ecx
+; X86-SSE-NEXT: movdqa (%eax), %xmm2
+; X86-SSE-NEXT: pxor %xmm1, %xmm1
+; X86-SSE-NEXT: movdqa %xmm2, %xmm0
+; X86-SSE-NEXT: pextrw $7, %xmm2, %eax
+; X86-SSE-NEXT: pextrw $4, %xmm2, %esi
+; X86-SSE-NEXT: pextrw $1, %xmm2, %edi
+; X86-SSE-NEXT: pextrw $0, %xmm2, %ebx
+; X86-SSE-NEXT: pextrw $3, %xmm2, %ebp
+; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl 28(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X86-SSE-NEXT: movd %xmm3, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
+; X86-SSE-NEXT: divl 24(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm5, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm5, %ecx
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm5
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; X86-SSE-NEXT: divl 16(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm1
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT: movd %xmm0, %eax
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl 20(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; X86-SSE-NEXT: movl %edi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT: divl 16(%edi)
+; X86-SSE-NEXT: divl 4(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm2, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm1, %ecx
+; X86-SSE-NEXT: movl %ebx, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; X86-SSE-NEXT: divl (%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X86-SSE-NEXT: movl %ebp, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl (%edi)
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X86-SSE-NEXT: movd %xmm2, %ecx
-; X86-SSE-NEXT: movl %esi, %eax
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; X86-SSE-NEXT: movd %xmm2, %ecx
-; X86-SSE-NEXT: movl %ebx, %eax
+; X86-SSE-NEXT: divl 12(%ecx)
+; X86-SSE-NEXT: movd %edx, %xmm3
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X86-SSE-NEXT: movd %xmm2, %eax
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
+; X86-SSE-NEXT: divl 8(%ecx)
; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm4, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X86-SSE-NEXT: movd %xmm0, %ecx
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %ecx
-; X86-SSE-NEXT: movd %edx, %xmm0
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; X86-SSE-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl 32(%edi)
+; X86-SSE-NEXT: divl 32(%ecx)
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; X86-SSE-NEXT: pmuludq %xmm2, %xmm4
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X86-SSE-NEXT: pmuludq %xmm2, %xmm0
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm3
; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm2, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT: pmuludq %xmm2, %xmm3
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X86-SSE-NEXT: movl %eax, (%eax)
-; X86-SSE-NEXT: movdqa %xmm3, (%eax)
+; X86-SSE-NEXT: movdqa %xmm1, (%eax)
; X86-SSE-NEXT: movdqa %xmm0, (%eax)
; X86-SSE-NEXT: addl $4, %esp
; X86-SSE-NEXT: popl %esi
@@ -2204,91 +2188,76 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X64-SSE-LABEL: PR34947:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movzwl 16(%rdi), %ecx
-; X64-SSE-NEXT: movdqa (%rdi), %xmm3
-; X64-SSE-NEXT: movdqa (%rsi), %xmm0
-; X64-SSE-NEXT: movdqa 16(%rsi), %xmm1
-; X64-SSE-NEXT: pxor %xmm5, %xmm5
-; X64-SSE-NEXT: movdqa %xmm3, %xmm2
-; X64-SSE-NEXT: pextrw $7, %xmm3, %eax
-; X64-SSE-NEXT: pextrw $4, %xmm3, %r8d
-; X64-SSE-NEXT: pextrw $0, %xmm3, %r10d
-; X64-SSE-NEXT: pextrw $1, %xmm3, %edi
-; X64-SSE-NEXT: pextrw $3, %xmm3, %r9d
-; X64-SSE-NEXT: movdqa %xmm3, %xmm4
-; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; X64-SSE-NEXT: movd %xmm3, %r11d
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r11d
-; X64-SSE-NEXT: movd %edx, %xmm3
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm5, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm5, %r11d
+; X64-SSE-NEXT: movdqa (%rdi), %xmm2
+; X64-SSE-NEXT: pxor %xmm1, %xmm1
+; X64-SSE-NEXT: movdqa %xmm2, %xmm0
+; X64-SSE-NEXT: pextrw $7, %xmm2, %eax
+; X64-SSE-NEXT: pextrw $4, %xmm2, %edi
+; X64-SSE-NEXT: pextrw $1, %xmm2, %r8d
+; X64-SSE-NEXT: pextrw $0, %xmm2, %r9d
+; X64-SSE-NEXT: pextrw $3, %xmm2, %r10d
+; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r11d
-; X64-SSE-NEXT: movd %edx, %xmm5
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
-; X64-SSE-NEXT: movl %r8d, %eax
+; X64-SSE-NEXT: divl 28(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm3, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl 16(%rsi)
+; X64-SSE-NEXT: divl 24(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm3
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm2, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm1, %r8d
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r8d
-; X64-SSE-NEXT: movd %edx, %xmm1
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; X64-SSE-NEXT: movl %r10d, %eax
+; X64-SSE-NEXT: movl %edi, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl (%rsi)
+; X64-SSE-NEXT: divl 16(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X64-SSE-NEXT: movd %xmm2, %r8d
-; X64-SSE-NEXT: movl %edi, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT: movd %xmm0, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %r8d
-; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; X64-SSE-NEXT: movd %xmm2, %edi
+; X64-SSE-NEXT: divl 20(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm0
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X64-SSE-NEXT: movl %r8d, %eax
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl 4(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm0
; X64-SSE-NEXT: movl %r9d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %edi
-; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm4, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X64-SSE-NEXT: movd %xmm0, %edi
+; X64-SSE-NEXT: divl (%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm3
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-SSE-NEXT: movl %r10d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %edi
+; X64-SSE-NEXT: divl 12(%rsi)
; X64-SSE-NEXT: movd %edx, %xmm0
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X64-SSE-NEXT: movd %xmm2, %eax
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl 8(%rsi)
+; X64-SSE-NEXT: movd %edx, %xmm2
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; X64-SSE-NEXT: movl %ecx, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl 32(%rsi)
; X64-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; X64-SSE-NEXT: pmuludq %xmm0, %xmm3
; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-SSE-NEXT: pmuludq %xmm0, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X64-SSE-NEXT: movl %eax, (%rax)
-; X64-SSE-NEXT: movdqa %xmm3, (%rax)
; X64-SSE-NEXT: movdqa %xmm1, (%rax)
+; X64-SSE-NEXT: movdqa %xmm3, (%rax)
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: PR34947:
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index a277f9f862ab..f4f3ae4f55f2 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -1,6 +1,7 @@
; RUN: llc -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-I386 %s
; RUN: llc -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-X64 %s
; RUN: llc -code-model=kernel -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck --check-prefix=LINUX-KERNEL-X64 %s
+; RUN: llc -code-model=kernel -mtriple=x86_64-unknown-freebsd < %s -o - | FileCheck --check-prefix=FREEBSD-KERNEL-X64 %s
; RUN: llc -mtriple=x86_64-apple-darwin < %s -o - | FileCheck --check-prefix=DARWIN-X64 %s
; RUN: llc -mtriple=amd64-pc-openbsd < %s -o - | FileCheck --check-prefix=OPENBSD-AMD64 %s
; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-I386 %s
@@ -75,6 +76,10 @@ entry:
; LINUX-X64: mov{{l|q}} %fs:
; LINUX-X64: callq __stack_chk_fail
+; FREEBSD-KERNEL-X64-LABEL: test1b:
+; FREEBSD-KERNEL-X64-NOT: mov{{l|q}} __stack_chk_guard@GOTPCREL
+; FREEBSD-KERNEL-X64: callq __stack_chk_fail
+
; LINUX-KERNEL-X64-LABEL: test1b:
; LINUX-KERNEL-X64: mov{{l|q}} %gs:
; LINUX-KERNEL-X64: callq __stack_chk_fail
@@ -118,6 +123,10 @@ entry:
; LINUX-X64: mov{{l|q}} %fs:
; LINUX-X64: callq __stack_chk_fail
+; FREEBSD-KERNEL-X64-LABEL: test1c:
+; FREEBSD-KERNEL-X64: mov{{l|q}} __stack_chk_guard(%rip)
+; FREEBSD-KERNEL-X64: callq __stack_chk_fail
+
; LINUX-KERNEL-X64-LABEL: test1c:
; LINUX-KERNEL-X64: mov{{l|q}} %gs:
; LINUX-KERNEL-X64: callq __stack_chk_fail
diff --git a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
index 02c931067300..8bac14018a7d 100644
--- a/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fastregalloc.mir
@@ -6,6 +6,8 @@
---
name: test_relocate
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0.entry:
liveins: $rdi
@@ -25,6 +27,8 @@ body: |
---
name: test_relocate_multi_regmasks
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
body: |
bb.0.entry:
liveins: $rdi
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
index 11968f17c70a..5f05270729fd 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
@@ -231,7 +231,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 1
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
index aae2f3870138..cf9128260f19 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-hoist-copies.mir
@@ -398,7 +398,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 1
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
index 87f5f0f96c50..fcebc69d9b2e 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-inline-spiller.mir
@@ -175,7 +175,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
index 49253968fcca..8bb39a03f7e3 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
@@ -226,7 +226,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
index 858ff3f1888b..da651039ce21 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra.mir
@@ -172,7 +172,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 4
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
index e24d5e8af1f5..d40a9a06d162 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
@@ -114,7 +114,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 8
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/CodeGen/X86/tls-loads-control3.ll b/llvm/test/CodeGen/X86/tls-loads-control3.ll
index 82daac5a9bae..4e521b1c696a 100644
--- a/llvm/test/CodeGen/X86/tls-loads-control3.ll
+++ b/llvm/test/CodeGen/X86/tls-loads-control3.ll
@@ -183,7 +183,6 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
; HOIST0-NEXT: # %bb.1: # %while.body.preheader
; HOIST0-NEXT: leaq _ZZ2f2iE2st.0@TLSLD(%rip), %rdi
; HOIST0-NEXT: callq __tls_get_addr@PLT
-; HOIST0-NEXT: movq %rax, %rcx
; HOIST0-NEXT: leaq _ZZ2f2iE2st.0@DTPOFF(%rax), %r15
; HOIST0-NEXT: leaq _ZZ2f2iE2st.1@DTPOFF(%rax), %r12
; HOIST0-NEXT: .p2align 4, 0x90
@@ -245,9 +244,7 @@ define i32 @_Z2f2i(i32 %c) local_unnamed_addr #0 {
; HOIST2-NEXT: movq %rax, %r14
; HOIST2-NEXT: addb %bpl, _ZZ2f2iE2st.0@DTPOFF(%rax)
; HOIST2-NEXT: callq _Z5gfuncv@PLT
-; HOIST2-NEXT: movl %eax, %ecx
-; HOIST2-NEXT: movq %r14, %rax
-; HOIST2-NEXT: addl %ecx, _ZZ2f2iE2st.1@DTPOFF(%r14)
+; HOIST2-NEXT: addl %eax, _ZZ2f2iE2st.1@DTPOFF(%r14)
; HOIST2-NEXT: decl %ebx
; HOIST2-NEXT: jne .LBB1_2
; HOIST2-NEXT: .LBB1_3: # %while.end
diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll
index 99a3821bb9ba..f2240a946844 100644
--- a/llvm/test/CodeGen/X86/var-permute-128.ll
+++ b/llvm/test/CodeGen/X86/var-permute-128.ll
@@ -1101,17 +1101,13 @@ define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %in
define void @indices_convert() {
; SSE3-LABEL: indices_convert:
; SSE3: # %bb.0: # %bb
-; SSE3-NEXT: movdqa (%rax), %xmm0
-; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE3-NEXT: movd %xmm1, %eax
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps (%rax), %xmm0
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movl (%rax), %eax
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE3-NEXT: andl $3, %eax
-; SSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SSE3-NEXT: movd %xmm1, %ecx
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSE3-NEXT: andl $3, %ecx
; SSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE3-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
@@ -1120,17 +1116,13 @@ define void @indices_convert() {
;
; SSSE3-LABEL: indices_convert:
; SSSE3: # %bb.0: # %bb
-; SSSE3-NEXT: movdqa (%rax), %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps (%rax), %xmm0
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movl (%rax), %eax
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: andl $3, %eax
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SSSE3-NEXT: movd %xmm1, %ecx
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
-; SSSE3-NEXT: andl $3, %ecx
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 7bbcdee9a680..e26de4be7066 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -2911,23 +2911,12 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
;
define <2 x double> @sitofp_load_2i64_to_2f64(ptr%a) {
-; SSE2-LABEL: sitofp_load_2i64_to_2f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sd %rax, %xmm1
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sitofp_load_2i64_to_2f64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: cvtsi2sdq 8(%rdi), %xmm1
-; SSE41-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: sitofp_load_2i64_to_2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtsi2sdq 8(%rdi), %xmm1
+; SSE-NEXT: cvtsi2sdq (%rdi), %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_2i64_to_2f64:
; VEX: # %bb.0:
@@ -3093,35 +3082,16 @@ define <2 x double> @sitofp_load_2i8_to_2f64(ptr%a) {
}
define <4 x double> @sitofp_load_4i64_to_4f64(ptr%a) {
-; SSE2-LABEL: sitofp_load_4i64_to_4f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm2
-; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sd %rax, %xmm1
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2sdq 16(%rdi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2sd %rax, %xmm2
-; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sitofp_load_4i64_to_4f64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: cvtsi2sdq 8(%rdi), %xmm1
-; SSE41-NEXT: cvtsi2sdq (%rdi), %xmm0
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT: cvtsi2sdq 24(%rdi), %xmm2
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: cvtsi2sdq 16(%rdi), %xmm1
-; SSE41-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE41-NEXT: retq
+; SSE-LABEL: sitofp_load_4i64_to_4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: cvtsi2sdq 8(%rdi), %xmm1
+; SSE-NEXT: cvtsi2sdq (%rdi), %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: cvtsi2sdq 24(%rdi), %xmm2
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: cvtsi2sdq 16(%rdi), %xmm1
+; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE-NEXT: retq
;
; VEX-LABEL: sitofp_load_4i64_to_4f64:
; VEX: # %bb.0:
@@ -3865,22 +3835,14 @@ define <4 x double> @uitofp_load_4i8_to_4f64(ptr%a) {
define <4 x float> @sitofp_load_4i64_to_4f32(ptr%a) {
; SSE2-LABEL: sitofp_load_4i64_to_4f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 24(%rdi), %xmm0
+; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 8(%rdi), %xmm2
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: sitofp_load_4i64_to_4f32:
@@ -4015,39 +3977,24 @@ define <4 x float> @sitofp_load_4i8_to_4f32(ptr%a) {
define <8 x float> @sitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-LABEL: sitofp_load_8i64_to_8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movdqa 32(%rdi), %xmm2
-; SSE2-NEXT: movdqa 48(%rdi), %xmm3
-; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 24(%rdi), %xmm0
+; SSE2-NEXT: cvtsi2ssq 16(%rdi), %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: cvtsi2ssq 8(%rdi), %xmm2
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: xorps %xmm4, %xmm4
-; SSE2-NEXT: cvtsi2ssq 48(%rdi), %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: cvtsi2ssq 56(%rdi), %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ssq 48(%rdi), %xmm2
+; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: cvtsi2ssq 40(%rdi), %xmm3
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: cvtsi2ssq 32(%rdi), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: sitofp_load_8i64_to_8f32:
@@ -4256,70 +4203,64 @@ define <8 x float> @sitofp_load_8i8_to_8f32(ptr%a) {
define <4 x float> @uitofp_load_4i64_to_4f32(ptr%a) {
; SSE2-LABEL: uitofp_load_4i64_to_4f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_1
; SSE2-NEXT: # %bb.2:
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: jmp .LBB83_3
; SSE2-NEXT: .LBB83_1:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
+; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_3:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB83_6
; SSE2-NEXT: .LBB83_4:
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: .LBB83_6:
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq 8(%rdi), %rcx
+; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: js .LBB83_7
+; SSE2-NEXT: # %bb.8:
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
+; SSE2-NEXT: jmp .LBB83_9
+; SSE2-NEXT: .LBB83_7:
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: shrq %rdx
; SSE2-NEXT: andl $1, %ecx
; SSE2-NEXT: orq %rdx, %rcx
; SSE2-NEXT: cvtsi2ss %rcx, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm2
-; SSE2-NEXT: .LBB83_6:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: testq %rax, %rax
-; SSE2-NEXT: js .LBB83_7
-; SSE2-NEXT: # %bb.8:
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: jmp .LBB83_9
-; SSE2-NEXT: .LBB83_7:
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shrq %rcx
-; SSE2-NEXT: andl $1, %eax
-; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_9:
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB83_10
; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: jmp .LBB83_12
; SSE2-NEXT: .LBB83_10:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: addss %xmm2, %xmm2
+; SSE2-NEXT: xorps %xmm0, %xmm0
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
+; SSE2-NEXT: addss %xmm0, %xmm0
; SSE2-NEXT: .LBB83_12:
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -4591,8 +4532,7 @@ define <4 x float> @uitofp_load_4i8_to_4f32(ptr%a) {
define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-LABEL: uitofp_load_8i64_to_8f32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa 16(%rdi), %xmm0
-; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: movq 24(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_1
; SSE2-NEXT: # %bb.2:
@@ -4606,127 +4546,114 @@ define <8 x float> @uitofp_load_8i64_to_8f32(ptr%a) {
; SSE2-NEXT: cvtsi2ss %rax, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm2
; SSE2-NEXT: .LBB87_3:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 16(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB87_6
; SSE2-NEXT: .LBB87_4:
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: shrq %rdx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm1
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm1
; SSE2-NEXT: .LBB87_6:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: movq (%rdi), %rax
+; SSE2-NEXT: movq 8(%rdi), %rcx
+; SSE2-NEXT: testq %rcx, %rcx
; SSE2-NEXT: js .LBB87_7
; SSE2-NEXT: # %bb.8:
-; SSE2-NEXT: xorps %xmm0, %xmm0
-; SSE2-NEXT: cvtsi2ss %rax, %xmm0
-; SSE2-NEXT: jmp .LBB87_9
-; SSE2-NEXT: .LBB87_7:
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm3
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: jns .LBB87_11
+; SSE2-NEXT: .LBB87_10:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: addss %xmm0, %xmm0
-; SSE2-NEXT: .LBB87_9:
-; SSE2-NEXT: movq 48(%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
-; SSE2-NEXT: js .LBB87_10
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm4
; SSE2-NEXT: jmp .LBB87_12
-; SSE2-NEXT: .LBB87_10:
+; SSE2-NEXT: .LBB87_7:
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: shrq %rdx
; SSE2-NEXT: andl $1, %ecx
; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm4
-; SSE2-NEXT: addss %xmm4, %xmm4
+; SSE2-NEXT: cvtsi2ss %rcx, %xmm3
+; SSE2-NEXT: addss %xmm3, %xmm3
+; SSE2-NEXT: testq %rax, %rax
+; SSE2-NEXT: js .LBB87_10
+; SSE2-NEXT: .LBB87_11:
+; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: .LBB87_12:
-; SSE2-NEXT: movdqa 48(%rdi), %xmm5
+; SSE2-NEXT: movq 56(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_13
; SSE2-NEXT: # %bb.14:
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: cvtsi2ss %rax, %xmm3
+; SSE2-NEXT: cvtsi2ss %rax, %xmm5
; SSE2-NEXT: jmp .LBB87_15
; SSE2-NEXT: .LBB87_13:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: cvtsi2ss %rax, %xmm3
-; SSE2-NEXT: addss %xmm3, %xmm3
+; SSE2-NEXT: cvtsi2ss %rax, %xmm5
+; SSE2-NEXT: addss %xmm5, %xmm5
; SSE2-NEXT: .LBB87_15:
-; SSE2-NEXT: movq 32(%rdi), %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm5, %rcx
-; SSE2-NEXT: testq %rcx, %rcx
+; SSE2-NEXT: movq 48(%rdi), %rax
+; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_16
; SSE2-NEXT: # %bb.17:
-; SSE2-NEXT: xorps %xmm5, %xmm5
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm5
+; SSE2-NEXT: cvtsi2ss %rax, %xmm4
; SSE2-NEXT: jmp .LBB87_18
; SSE2-NEXT: .LBB87_16:
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: shrq %rdx
-; SSE2-NEXT: andl $1, %ecx
-; SSE2-NEXT: orq %rdx, %rcx
-; SSE2-NEXT: xorps %xmm5, %xmm5
-; SSE2-NEXT: cvtsi2ss %rcx, %xmm5
-; SSE2-NEXT: addss %xmm5, %xmm5
+; SSE2-NEXT: movq %rax, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: orq %rcx, %rax
+; SSE2-NEXT: cvtsi2ss %rax, %xmm4
+; SSE2-NEXT: addss %xmm4, %xmm4
; SSE2-NEXT: .LBB87_18:
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa 32(%rdi), %xmm4
+; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movq 40(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_19
; SSE2-NEXT: # %bb.20:
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm2
; SSE2-NEXT: jmp .LBB87_21
; SSE2-NEXT: .LBB87_19:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm1, %xmm1
-; SSE2-NEXT: cvtsi2ss %rax, %xmm1
-; SSE2-NEXT: addss %xmm1, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: addss %xmm2, %xmm2
; SSE2-NEXT: .LBB87_21:
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: movq 32(%rdi), %rax
; SSE2-NEXT: testq %rax, %rax
; SSE2-NEXT: js .LBB87_22
; SSE2-NEXT: # %bb.23:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
; SSE2-NEXT: jmp .LBB87_24
; SSE2-NEXT: .LBB87_22:
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: shrq %rcx
; SSE2-NEXT: andl $1, %eax
; SSE2-NEXT: orq %rcx, %rax
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: cvtsi2ss %rax, %xmm2
-; SSE2-NEXT: addss %xmm2, %xmm2
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: cvtsi2ss %rax, %xmm1
+; SSE2-NEXT: addss %xmm1, %xmm1
; SSE2-NEXT: .LBB87_24:
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: uitofp_load_8i64_to_8f32:
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index ba21af231985..563cf0165013 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -4989,3 +4989,257 @@ define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
%ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret <4 x i32> %ext
}
+
+define <4 x i32> @fptosi_4f16_to_4i32(<4 x half> %a) nounwind {
+; AVX-LABEL: fptosi_4f16_to_4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: subq $72, %rsp
+; AVX-NEXT: vmovdqa %xmm0, %xmm1
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT: callq __extendhfsf2@PLT
+; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $72, %rsp
+; AVX-NEXT: retq
+;
+; F16C-LABEL: fptosi_4f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm0
+; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; F16C-NEXT: vzeroupper
+; F16C-NEXT: retq
+;
+; AVX512-LABEL: fptosi_4f16_to_4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %cvt = fptosi <4 x half> %a to <4 x i32>
+ ret <4 x i32> %cvt
+}
+
+define <4 x i32> @fptoui_2f16_to_4i32(<2 x half> %a) nounwind {
+; AVX1-LABEL: fptoui_2f16_to_4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $40, %rsp
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX1-NEXT: addq $40, %rsp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fptoui_2f16_to_4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
+;
+; F16C-LABEL: fptoui_2f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vpsrld $16, %xmm0, %xmm1
+; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
+; F16C-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
+; F16C-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; F16C-NEXT: vcvttps2dq %xmm0, %xmm1
+; F16C-NEXT: vpsrad $31, %xmm1, %xmm2
+; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
+; F16C-NEXT: vpand %xmm2, %xmm0, %xmm0
+; F16C-NEXT: vpor %xmm0, %xmm1, %xmm0
+; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; F16C-NEXT: retq
+;
+; AVX512F-LABEL: fptoui_2f16_to_4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512-FASTLANE-LABEL: fptoui_2f16_to_4i32:
+; AVX512-FASTLANE: # %bb.0:
+; AVX512-FASTLANE-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-FASTLANE-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-FASTLANE-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512-FASTLANE-NEXT: vcvttps2udq %xmm0, %xmm0
+; AVX512-FASTLANE-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX512-FASTLANE-NEXT: retq
+ %cvt = fptoui <2 x half> %a to <2 x i32>
+ %ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %ext
+}
+
+define <4 x i32> @fptoui_4f16_to_4i32(<4 x half> %a) nounwind {
+; AVX1-LABEL: fptoui_4f16_to_4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: subq $72, %rsp
+; AVX1-NEXT: vmovdqa %xmm0, %xmm1
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: callq __extendhfsf2@PLT
+; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX1-NEXT: addq $72, %rsp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: fptoui_4f16_to_4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: subq $72, %rsp
+; AVX2-NEXT: vmovdqa %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vpsrlq $48, %xmm1, %xmm0
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-NEXT: callq __extendhfsf2@PLT
+; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
+; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX2-NEXT: addq $72, %rsp
+; AVX2-NEXT: retq
+;
+; F16C-LABEL: fptoui_4f16_to_4i32:
+; F16C: # %bb.0:
+; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm1
+; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; F16C-NEXT: vcvttps2dq %ymm0, %ymm0
+; F16C-NEXT: vorps %ymm0, %ymm1, %ymm0
+; F16C-NEXT: vblendvps %ymm1, %ymm0, %ymm1, %ymm0
+; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; F16C-NEXT: vzeroupper
+; F16C-NEXT: retq
+;
+; AVX512F-LABEL: fptoui_4f16_to_4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512-FASTLANE-LABEL: fptoui_4f16_to_4i32:
+; AVX512-FASTLANE: # %bb.0:
+; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %ymm0
+; AVX512-FASTLANE-NEXT: vcvttps2udq %ymm0, %ymm0
+; AVX512-FASTLANE-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-FASTLANE-NEXT: vzeroupper
+; AVX512-FASTLANE-NEXT: retq
+ %cvt = fptoui <4 x half> %a to <4 x i32>
+ ret <4 x i32> %cvt
+}
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 88144e7880e3..de34e48c01d7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -3004,428 +3004,412 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-LABEL: store_i8_stride6_vf32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512BW-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512BW-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512BW-NEXT: vmovdqa (%r9), %ymm1
-; AVX512BW-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm9, %xmm7
-; AVX512BW-NEXT: vmovdqa (%rdi), %xmm10
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm11, %xmm8
-; AVX512BW-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512BW-NEXT: vpshufb %xmm6, %xmm12, %xmm13
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512BW-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512BW-NEXT: vmovdqa (%rdi), %ymm9
+; AVX512BW-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512BW-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512BW-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512BW-NEXT: vmovdqa (%r8), %ymm7
+; AVX512BW-NEXT: vmovdqa (%r9), %ymm8
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[16],ymm10[16],ymm9[17],ymm10[17],ymm9[18],ymm10[18],ymm9[19],ymm10[19],ymm9[20],ymm10[20],ymm9[21],ymm10[21],ymm9[22],ymm10[22],ymm9[23],ymm10[23]
+; AVX512BW-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512BW-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-NEXT: vpermw %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512BW-NEXT: vpermw %ymm5, %ymm6, %ymm5
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512BW-NEXT: vprold $16, %ymm6, %ymm6
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %ymm7, %ymm8 {%k1}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-NEXT: vpermw %ymm8, %ymm13, %ymm8
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-NEXT: vprold $16, %xmm13, %xmm13
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-NEXT: movw $9362, %cx # imm = 0x2492
+; AVX512BW-NEXT: vmovdqu16 %zmm5, %zmm0 {%k1}
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512BW-NEXT: vmovdqa (%r9), %xmm5
+; AVX512BW-NEXT: vmovdqa (%r8), %xmm6
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm14 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vmovdqu16 %ymm13, %ymm8 {%k2}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-NEXT: vmovdqa (%r9), %xmm13
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm13, %xmm14
-; AVX512BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
-; AVX512BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512BW-NEXT: kmovd %ecx, %k3
-; AVX512BW-NEXT: vmovdqu16 %zmm14, %zmm7 {%k3}
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-NEXT: vpermw %ymm14, %ymm16, %ymm14
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-NEXT: vprold $16, %ymm16, %ymm16
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512BW-NEXT: vmovdqu16 %ymm16, %ymm14 {%k2}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512BW-NEXT: vpermw %zmm13, %zmm14, %zmm0 {%k2}
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm10, %ymm14
+; AVX512BW-NEXT: vpshufb %ymm13, %ymm9, %ymm13
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15],ymm11[24],ymm12[24],ymm11[25],ymm12[25],ymm11[26],ymm12[26],ymm11[27],ymm12[27],ymm11[28],ymm12[28],ymm11[29],ymm12[29],ymm11[30],ymm12[30],ymm11[31],ymm12[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm15 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512BW-NEXT: vpshufb %ymm14, %ymm11, %ymm11
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15],ymm9[24],ymm10[24],ymm9[25],ymm10[25],ymm9[26],ymm10[26],ymm9[27],ymm10[27],ymm9[28],ymm10[28],ymm9[29],ymm10[29],ymm9[30],ymm10[30],ymm9[31],ymm10[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512BW-NEXT: vpermw %ymm9, %ymm10, %ymm9
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-NEXT: vpermw %ymm11, %ymm10, %ymm9 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
-; AVX512BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512BW-NEXT: movl $1227114788, %ecx # imm = 0x49244924
; AVX512BW-NEXT: kmovd %ecx, %k2
-; AVX512BW-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512BW-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm4, %ymm4
-; AVX512BW-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512BW-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512BW-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vmovdqu16 %zmm13, %zmm9 {%k2}
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX512BW-NEXT: vpshufb %ymm10, %ymm7, %ymm12
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512BW-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm11, %zmm7
; AVX512BW-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
-; AVX512BW-NEXT: kmovd %ecx, %k1
-; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm7, %zmm9 {%k2}
+; AVX512BW-NEXT: vpshufb %xmm14, %xmm3, %xmm7
+; AVX512BW-NEXT: vpshufb %xmm14, %xmm4, %xmm8
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-NEXT: vpermw %ymm8, %ymm11, %ymm8
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512BW-NEXT: vprold $16, %xmm2, %xmm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512BW-NEXT: movl $1227105426, %ecx # imm = 0x49242492
+; AVX512BW-NEXT: kmovd %ecx, %k2
+; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k2}
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512BW-NEXT: vpshufb %xmm10, %xmm6, %xmm2
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm9, 128(%rax)
+; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i8_stride6_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%r9), %ymm1
-; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm9
-; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm7
+; AVX512BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[16],ymm7[16],ymm4[17],ymm7[17],ymm4[18],ymm7[18],ymm4[19],ymm7[19],ymm4[20],ymm7[20],ymm4[21],ymm7[21],ymm4[22],ymm7[22],ymm4[23],ymm7[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[4],ymm4[4],ymm9[5],ymm4[5],ymm9[6],ymm4[6],ymm9[7],ymm4[7],ymm9[16],ymm4[16],ymm9[17],ymm4[17],ymm9[18],ymm4[18],ymm9[19],ymm4[19],ymm9[20],ymm4[20],ymm9[21],ymm4[21],ymm9[22],ymm4[22],ymm9[23],ymm4[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm9, %ymm10, %ymm9
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512BW-FCP-NEXT: movl $1227114788, %r10d # imm = 0x49244924
+; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
+; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm10
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512BW-FCP-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm9, %zmm4 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm8
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512BW-FCP-NEXT: movw $9362, %cx # imm = 0x2492
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vpermw %ymm6, %ymm7, %ymm8 {%k2}
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
-; AVX512BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm13
-; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512BW-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %xmm13
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm14, %zmm8
+; AVX512BW-FCP-NEXT: movl $1227105426, %ecx # imm = 0x49242492
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm6, %ymm13 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm6
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm13
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm14
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm10, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa (%r9), %xmm10
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
; AVX512BW-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512BW-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3],xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm16 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm16, %ymm14
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512BW-FCP-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm6 {%k3}
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512BW-FCP-NEXT: vpermw %ymm14, %ymm17, %ymm16 {%k2}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-FCP-NEXT: vpermw %ymm9, %ymm11, %ymm10 {%k1}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm2
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512BW-FCP-NEXT: vpermw %zmm5, %zmm3, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm10[8],xmm15[9],xmm10[9],xmm15[10],xmm10[10],xmm15[11],xmm10[11],xmm15[12],xmm10[12],xmm15[13],xmm10[13],xmm15[14],xmm10[14],xmm15[15],xmm10[15]
+; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
; AVX512BW-FCP-NEXT: movl $1227133513, %ecx # imm = 0x49249249
-; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512BW-FCP-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512BW-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512BW-FCP-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-FCP-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512BW-FCP-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i8_stride6_vf32:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm4
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm0
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm9
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm9, %xmm7
-; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm10
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm10, %xmm6
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,0,0,1]
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm6 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm11
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm11, %xmm8
-; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm12
-; AVX512DQ-BW-NEXT: vpshufb %xmm6, %xmm12, %xmm13
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm13[0],xmm8[0],xmm13[1],xmm8[1],xmm13[2],xmm8[2],xmm13[3],xmm8[3],xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512DQ-BW-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm9
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %ymm10
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm11
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %ymm12
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %ymm7
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %ymm8
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[16],ymm10[16],ymm9[17],ymm10[17],ymm9[18],ymm10[18],ymm9[19],ymm10[19],ymm9[20],ymm10[20],ymm9[21],ymm10[21],ymm9[22],ymm10[22],ymm9[23],ymm10[23]
+; AVX512DQ-BW-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %xmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512DQ-BW-NEXT: vpermw %zmm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqa (%rcx), %xmm3
+; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %xmm4
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm6 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512DQ-BW-NEXT: vpermw %ymm5, %ymm6, %ymm5
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512DQ-BW-NEXT: vprold $16, %ymm6, %ymm6
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512DQ-BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm7, %ymm8 {%k1}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm13 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512DQ-BW-NEXT: vpermw %ymm8, %ymm13, %ymm8
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-BW-NEXT: vprold $16, %xmm13, %xmm13
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512DQ-BW-NEXT: movw $9362, %cx # imm = 0x2492
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm5, %zmm0 {%k1}
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[16],ymm8[16],ymm7[17],ymm8[17],ymm7[18],ymm8[18],ymm7[19],ymm8[19],ymm7[20],ymm8[20],ymm7[21],ymm8[21],ymm7[22],ymm8[22],ymm7[23],ymm8[23]
+; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm5
+; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm6
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm14 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm13, %ymm8 {%k2}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512DQ-BW-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm13, %xmm14
-; AVX512DQ-BW-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512DQ-BW-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
-; AVX512DQ-BW-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm14, %zmm7 {%k3}
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm16 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm16, %ymm14
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-NEXT: vprold $16, %ymm16, %ymm16
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm16, %ymm14 {%k2}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
+; AVX512DQ-BW-NEXT: vpermw %zmm13, %zmm14, %zmm0 {%k2}
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm10, %ymm14
+; AVX512DQ-BW-NEXT: vpshufb %ymm13, %ymm9, %ymm13
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[4],ymm14[4],ymm13[5],ymm14[5],ymm13[6],ymm14[6],ymm13[7],ymm14[7],ymm13[16],ymm14[16],ymm13[17],ymm14[17],ymm13[18],ymm14[18],ymm13[19],ymm14[19],ymm13[20],ymm14[20],ymm13[21],ymm14[21],ymm13[22],ymm14[22],ymm13[23],ymm14[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm14 = ymm11[8],ymm12[8],ymm11[9],ymm12[9],ymm11[10],ymm12[10],ymm11[11],ymm12[11],ymm11[12],ymm12[12],ymm11[13],ymm12[13],ymm11[14],ymm12[14],ymm11[15],ymm12[15],ymm11[24],ymm12[24],ymm11[25],ymm12[25],ymm11[26],ymm12[26],ymm11[27],ymm12[27],ymm11[28],ymm12[28],ymm11[29],ymm12[29],ymm11[30],ymm12[30],ymm11[31],ymm12[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm15 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm12, %ymm12
+; AVX512DQ-BW-NEXT: vpshufb %ymm14, %ymm11, %ymm11
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[4],ymm12[4],ymm11[5],ymm12[5],ymm11[6],ymm12[6],ymm11[7],ymm12[7],ymm11[16],ymm12[16],ymm11[17],ymm12[17],ymm11[18],ymm12[18],ymm11[19],ymm12[19],ymm11[20],ymm12[20],ymm11[21],ymm12[21],ymm11[22],ymm12[22],ymm11[23],ymm12[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15],ymm9[24],ymm10[24],ymm9[25],ymm10[25],ymm9[26],ymm10[26],ymm9[27],ymm10[27],ymm9[28],ymm10[28],ymm9[29],ymm10[29],ymm9[30],ymm10[30],ymm9[31],ymm10[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
; AVX512DQ-BW-NEXT: vpermw %ymm9, %ymm10, %ymm9
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm10 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512DQ-BW-NEXT: vpermw %ymm11, %ymm10, %ymm9 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm14[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
-; AVX512DQ-BW-NEXT: movl $1227133513, %ecx # imm = 0x49249249
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512DQ-BW-NEXT: movl $1227114788, %ecx # imm = 0x49244924
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-BW-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm4, %ymm4
-; AVX512DQ-BW-NEXT: vpshufb %ymm6, %ymm2, %ymm2
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-BW-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512DQ-BW-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512DQ-BW-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm13, %zmm9 {%k2}
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm8, %ymm11
+; AVX512DQ-BW-NEXT: vpshufb %ymm10, %ymm7, %ymm12
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} ymm11 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[16],ymm11[16],ymm12[17],ymm11[17],ymm12[18],ymm11[18],ymm12[19],ymm11[19],ymm12[20],ymm11[20],ymm12[21],ymm11[21],ymm12[22],ymm11[22],ymm12[23],ymm11[23]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15],ymm7[24],ymm8[24],ymm7[25],ymm8[25],ymm7[26],ymm8[26],ymm7[27],ymm8[27],ymm7[28],ymm8[28],ymm7[29],ymm8[29],ymm7[30],ymm8[30],ymm7[31],ymm8[31]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm8 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512DQ-BW-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm11, %zmm7
; AVX512DQ-BW-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
-; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 64(%rax)
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm7, %zmm9 {%k2}
+; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm3, %xmm7
+; AVX512DQ-BW-NEXT: vpshufb %xmm14, %xmm4, %xmm8
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512DQ-BW-NEXT: vpermw %ymm8, %ymm11, %ymm8
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512DQ-BW-NEXT: vpbroadcastq {{.*#+}} xmm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm1, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm8, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512DQ-BW-NEXT: vprold $16, %xmm2, %xmm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512DQ-BW-NEXT: movl $1227105426, %ecx # imm = 0x49242492
+; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k2}
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm5, %xmm1
+; AVX512DQ-BW-NEXT: vpshufb %xmm10, %xmm6, %xmm2
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512DQ-BW-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512DQ-BW-NEXT: vpmovsxbw {{.*#+}} ymm3 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512DQ-BW-NEXT: vpermw %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, (%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, 128(%rax)
+; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i8_stride6_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm3
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm5
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm2
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm4
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %ymm3
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm5
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %ymm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %ymm1
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm9
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm10
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm4 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[16],ymm7[16],ymm4[17],ymm7[17],ymm4[18],ymm7[18],ymm4[19],ymm7[19],ymm4[20],ymm7[20],ymm4[21],ymm7[21],ymm4[22],ymm7[22],ymm4[23],ymm7[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm6[8],ymm5[9],ymm6[9],ymm5[10],ymm6[10],ymm5[11],ymm6[11],ymm5[12],ymm6[12],ymm5[13],ymm6[13],ymm5[14],ymm6[14],ymm5[15],ymm6[15],ymm5[24],ymm6[24],ymm5[25],ymm6[25],ymm5[26],ymm6[26],ymm5[27],ymm6[27],ymm5[28],ymm6[28],ymm5[29],ymm6[29],ymm5[30],ymm6[30],ymm5[31],ymm6[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm7
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm4
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm5, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[4],ymm4[4],ymm9[5],ymm4[5],ymm9[6],ymm4[6],ymm9[7],ymm4[7],ymm9[16],ymm4[16],ymm9[17],ymm4[17],ymm9[18],ymm4[18],ymm9[19],ymm4[19],ymm9[20],ymm4[20],ymm9[21],ymm4[21],ymm9[22],ymm4[22],ymm9[23],ymm4[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm9, %ymm10, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm4, %zmm4
+; AVX512DQ-BW-FCP-NEXT: movl $1227114788, %r10d # imm = 0x49244924
+; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm9
+; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm0, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[16],ymm9[16],ymm10[17],ymm9[17],ymm10[18],ymm9[18],ymm10[19],ymm9[19],ymm10[20],ymm9[20],ymm10[21],ymm9[21],ymm10[22],ymm9[22],ymm10[23],ymm9[23]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %r10d # imm = 0x92492492
+; AVX512DQ-BW-FCP-NEXT: kmovd %r10d, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm9, %zmm4 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rsi), %xmm9
+; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm9, %xmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %xmm12
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm8 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm7, %ymm8, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm7 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512DQ-BW-FCP-NEXT: movw $9362, %cx # imm = 0x2492
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm6, %ymm7, %ymm8 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} xmm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm11, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm6, %xmm12, %xmm6
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: movw $18724, %cx # imm = 0x4924
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rcx), %xmm11
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %xmm13
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm11, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm15 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm15, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm14, %zmm8
+; AVX512DQ-BW-FCP-NEXT: movl $1227105426, %ecx # imm = 0x49242492
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm6, %ymm13 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm0, %zmm6
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm8 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
-; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm13
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm14
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm10, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r9), %xmm10
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm10, %xmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%r8), %xmm15
-; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm16
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm16[0],xmm14[0],xmm16[1],xmm14[1],xmm16[2],xmm14[2],xmm16[3],xmm14[3],xmm16[4],xmm14[4],xmm16[5],xmm14[5],xmm16[6],xmm14[6],xmm16[7],xmm14[7]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm16 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3],xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm16, %zmm14
+; AVX512DQ-BW-FCP-NEXT: vpshufb %xmm7, %xmm15, %xmm7
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3],xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm16 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4]
+; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm16, %ymm14
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm14, %zmm7
; AVX512DQ-BW-FCP-NEXT: movl $613566756, %ecx # imm = 0x24924924
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k3
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm14, %zmm6 {%k3}
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm14 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm16 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [8,11,10,9,8,11,10,9,8,11,10,9,12,13,14,13]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm16, %ymm17, %ymm16
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm17 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm14, %ymm17, %ymm16 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm11, %ymm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm11 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm9, %ymm11, %ymm10 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm9 = zmm10[0,1,2,3],zmm14[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm11 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
+; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
+; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm7, %zmm8 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[2],ymm6[2],ymm5[3],ymm6[3],ymm5[4],ymm6[4],ymm5[5],ymm6[5],ymm5[6],ymm6[6],ymm5[7],ymm6[7],ymm5[16],ymm6[16],ymm5[17],ymm6[17],ymm5[18],ymm6[18],ymm5[19],ymm6[19],ymm5[20],ymm6[20],ymm5[21],ymm6[21],ymm5[22],ymm6[22],ymm5[23],ymm6[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm2, %zmm3, %zmm2
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm3 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm5, %zmm3, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm10[8],xmm15[9],xmm10[9],xmm15[10],xmm10[10],xmm15[11],xmm10[11],xmm15[12],xmm10[12],xmm15[13],xmm10[13],xmm15[14],xmm10[14],xmm15[15],xmm10[15]
+; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} zmm1 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
; AVX512DQ-BW-FCP-NEXT: movl $1227133513, %ecx # imm = 0x49249249
-; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
-; AVX512DQ-BW-FCP-NEXT: vpermw %zmm10, %zmm11, %zmm9 {%k2}
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm10 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm11, %ymm12, %ymm11
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm12 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm10, %ymm12, %ymm11 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm0, %zmm10
-; AVX512DQ-BW-FCP-NEXT: vpbroadcastq {{.*#+}} ymm11 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm2
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm1, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb %ymm8, %ymm0, %ymm4
-; AVX512DQ-BW-FCP-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbw {{.*#+}} ymm1 = [10,13,12,11,10,13,12,11,10,13,12,11,14,13,14,15]
-; AVX512DQ-BW-FCP-NEXT: vpermw %ymm0, %ymm1, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512DQ-BW-FCP-NEXT: movl $-1840700270, %ecx # imm = 0x92492492
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm0, %zmm2 {%k1}
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 128(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, 64(%rax)
-; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
index dfa7f2dbdaee..c981d973fef3 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -177,6 +177,36 @@ define <16 x float> @shuffle_v16f32_02_03_16_17_06_07_20_21_10_11_24_25_14_15_28
ret <16 x float> %shuffle
}
+; PR86076
+define <16 x float> @shuffle_f32_v16f32_00_08_01_09_02_10_03_11_04_12_05_13_06_14_07_15(float %a0, float %a1) {
+; ALL-LABEL: shuffle_f32_v16f32_00_08_01_09_02_10_03_11_04_12_05_13_06_14_07_15:
+; ALL: # %bb.0:
+; ALL-NEXT: vbroadcastss %xmm0, %ymm0
+; ALL-NEXT: vbroadcastss %xmm1, %ymm1
+; ALL-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; ALL-NEXT: retq
+ %v0 = insertelement <8 x float> poison, float %a0, i64 0
+ %v1 = insertelement <8 x float> poison, float %a1, i64 0
+ %b0 = shufflevector <8 x float> %v0, <8 x float> poison, <8 x i32> zeroinitializer
+ %b1 = shufflevector <8 x float> %v1, <8 x float> poison, <8 x i32> zeroinitializer
+ %r = shufflevector <8 x float> %b0, <8 x float> %b1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <16 x float> %r
+}
+
+; PR86076
+define <16 x float> @shuffle_f32_v16f32_00_08_00_08_00_08_00_08_00_08_00_08_00_08_00_08(float %a0, float %a1) {
+; ALL-LABEL: shuffle_f32_v16f32_00_08_00_08_00_08_00_08_00_08_00_08_00_08_00_08:
+; ALL: # %bb.0:
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
+; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
+; ALL-NEXT: retq
+ %v0 = insertelement <8 x float> poison, float %a0, i64 0
+ %v1 = insertelement <8 x float> poison, float %a1, i64 0
+ %sv = shufflevector <8 x float> %v0, <8 x float> %v1, <16 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
+ ret <16 x float> %sv
+}
+
define <16 x i32> @shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00:
; ALL: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 68f2ed436804..be249ddc0cca 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fadd_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fadd_v2f32_v4f32:
@@ -364,3 +364,86 @@ define void @widen_fadd_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
+define <8 x float> @widen_fadd_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fadd_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fadd_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
+define <16 x float> @widen_fadd_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fadd_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm4, %xmm1
+; SSE-NEXT: addps %xmm4, %xmm2
+; SSE-NEXT: addps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fadd_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fadd <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fadd <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %z2 = fadd <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %w2 = fadd <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index ac208da9ee11..9aa9d63e7fcb 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fmul_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fmul_v2f32_v4f32:
@@ -364,3 +364,86 @@ define void @widen_fmul_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
+define <8 x float> @widen_fmul_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fmul_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: mulps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX2-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX512F-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fmul_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
+define <16 x float> @widen_fmul_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fmul_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm4, %xmm1
+; SSE-NEXT: mulps %xmm4, %xmm2
+; SSE-NEXT: mulps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0,3.0E+0]
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vmulps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fmul_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fmul <4 x float> %x, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %y2 = fmul <4 x float> %y, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %z2 = fmul <4 x float> %z, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %w2 = fmul <4 x float> %w, <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index 90cf455ba61f..60e54ab71abc 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VL
define void @widen_fsub_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fsub_v2f32_v4f32:
@@ -364,3 +364,86 @@ define void @widen_fsub_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
store <2 x float> %vc14, ptr %c14, align 4
ret void
}
+
+define <8 x float> @widen_fsub_v4f32_v8f32_const(<4 x float> %x, <4 x float> %y) {
+; SSE-LABEL: widen_fsub_v4f32_v8f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: subps %xmm2, %xmm0
+; SSE-NEXT: subps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX1: # %bb.0:
+; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX512F-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: widen_fsub_v4f32_v8f32_const:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+ %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x float> %r
+}
+
+define <16 x float> @widen_fsub_v4f32_v16f32_const(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) {
+; SSE-LABEL: widen_fsub_v4f32_v16f32_const:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm4 = [2.0E+0,2.0E+0,2.0E+0,2.0E+0]
+; SSE-NEXT: subps %xmm4, %xmm0
+; SSE-NEXT: subps %xmm4, %xmm1
+; SSE-NEXT: subps %xmm4, %xmm2
+; SSE-NEXT: subps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1OR2-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vbroadcastss {{.*#+}} ymm1 = [-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0,-2.0E+0]
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1OR2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: widen_fsub_v4f32_v16f32_const:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %x2 = fsub <4 x float> %x, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %y2 = fsub <4 x float> %y, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %z2 = fsub <4 x float> %z, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %w2 = fsub <4 x float> %w, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+ %r0 = shufflevector <4 x float> %x2, <4 x float> %y2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r1 = shufflevector <4 x float> %z2, <4 x float> %w2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %r = shufflevector <8 x float> %r0, <8 x float> %r1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x float> %r
+}
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir
index cece656d0897..5ebd1a89ae92 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-folding-tieddef.mir
@@ -100,6 +100,7 @@ registers:
- { id: 38, class: gr8 }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir
index f0af93804048..b0bff30c4c82 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/memory-operand-load-folding.mir
@@ -87,6 +87,7 @@ liveins:
- { reg: '$edi', virtual-reg: '%0' }
- { reg: '$xmm0', virtual-reg: '%1' }
frameInfo:
+ adjustsStack: true
hasCalls: true
body: |
bb.0.if.then:
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir
index 51d3f7e1a6a4..d73d8a375906 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalesce-subreg.mir
@@ -97,6 +97,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%4' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir
index bc1c7ebac6ce..6460263c6025 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-coalescing.mir
@@ -106,6 +106,7 @@ liveins:
- { reg: '$rsi', virtual-reg: '%5' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir
index d59333e73fbc..68c9bf6c89dd 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced.mir
@@ -70,6 +70,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir
index ab2647d3b45a..cf17af4ba430 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-on-stack-coalesced2.mir
@@ -71,6 +71,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir
index 0fe80980e4e6..cb35bd892eea 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-regallocd-to-stack.mir
@@ -65,6 +65,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir b/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir
index 2a031b295a1e..61dcec49b74c 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/phi-through-regalloc.mir
@@ -94,6 +94,7 @@ liveins:
- { reg: '$esi', virtual-reg: '%2' }
frameInfo:
maxAlignment: 1
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir b/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
index 47a7b460e43e..e80ed2e3e8eb 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/stack-coloring-dbg-phi.mir
@@ -106,6 +106,7 @@ liveins:
- { reg: '$rdi', virtual-reg: '%15' }
frameInfo:
maxAlignment: 8
+ adjustsStack: true
hasCalls: true
stack:
- { id: 0, size: 8, alignment: 8 }
diff --git a/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir b/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir
index 3e806e43ca9e..6dbd2cd4faa7 100644
--- a/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir
+++ b/llvm/test/DebugInfo/MIR/InstrRef/survives-livedebugvars.mir
@@ -113,6 +113,7 @@ liveins:
- { reg: '$rdi', virtual-reg: '%2' }
- { reg: '$esi', virtual-reg: '%4' }
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir b/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir
index 35ab906efc90..5df70096e930 100644
--- a/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir
+++ b/llvm/test/DebugInfo/MIR/Mips/livedebugvars-stop-trimming-loc.mir
@@ -72,6 +72,8 @@
name: fn2
alignment: 4
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
registers:
- { id: 0, class: gpr32, preferred-register: '' }
- { id: 1, class: gpr32, preferred-register: '' }
diff --git a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
index 3cb9da8fdfe3..4d48774a78dd 100644
--- a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
+++ b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
@@ -116,7 +116,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
index 35d12b52af89..e618f48f527b 100644
--- a/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
+++ b/llvm/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
@@ -114,7 +114,7 @@ frameInfo:
stackSize: 0
offsetAdjustment: 0
maxAlignment: 0
- adjustsStack: false
+ adjustsStack: true
hasCalls: true
stackProtector: ''
maxCallFrameSize: 4294967295
diff --git a/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir b/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir
index 037306a2ca95..42ee73dde965 100644
--- a/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir
+++ b/llvm/test/DebugInfo/MIR/X86/livedebugvars-crossbb-interval.mir
@@ -100,6 +100,7 @@ liveins:
- { reg: '$rdi', virtual-reg: '%2' }
- { reg: '$esi', virtual-reg: '%4' }
frameInfo:
+ adjustsStack: true
hasCalls: true
machineFunctionInfo: {}
body: |
diff --git a/llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll b/llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll
new file mode 100644
index 000000000000..e61b7256345c
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/dbg-value-funcarg-duplicates.ll
@@ -0,0 +1,65 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -start-after=codegenprepare -stop-before=finalize-isel -o - %s -experimental-debug-variable-locations=false | FileCheck %s
+
+; Input to this test was created by reducing a Swift file using bugpoint
+
+; CHECK-DAG: ![[LHS:.*]] = !DILocalVariable(name: "lhs"
+
+define hidden i64 @"_wideDivide42"(ptr %0, ptr %1, ptr %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8) local_unnamed_addr !dbg !16 {
+; CHECK-LABEL: name: _wideDivide42
+; CHECK-NOT: DBG_VALUE
+; CHECK: DBG_VALUE $rcx, $noreg, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 0, 64)
+; CHECK-NEXT: DBG_VALUE $r8, $noreg, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 64, 64)
+; CHECK-NEXT: DBG_VALUE $r9, $noreg, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 128, 64)
+; CHECK-NEXT: DBG_VALUE %fixed-stack.{{.+}}, ![[LHS]], !DIExpression(DW_OP_LLVM_fragment, 192, 64)
+; The duplicates should be removed:
+; CHECK-NOT: DBG_VALUE
+
+entry:
+ %9 = alloca i64, align 8
+ call void @llvm.dbg.value(metadata i64 %3, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %4, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %3, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %4, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 64, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %5, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 128, 64)), !dbg !67
+ call void @llvm.dbg.value(metadata i64 %6, metadata !24, metadata !DIExpression(DW_OP_LLVM_fragment, 192, 64)), !dbg !67
+ br i1 poison, label %11, label %10, !dbg !68
+
+10: ; preds = %entry
+ tail call void asm sideeffect "", "n"(i32 7) #7
+ unreachable
+
+11: ; preds = %entry
+ tail call void @abort()
+ unreachable
+}
+
+declare void @abort()
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+attributes #7 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!13}
+!llvm.linker.options = !{!14, !15}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_Swift, file: !1, producer: "Swift", isOptimized: true, runtimeVersion: 6, emissionKind: FullDebug)
+!1 = !DIFile(filename: "Int128.swift", directory: "")
+!13 = !{i32 2, !"Debug Info Version", i32 3}
+!14 = !{!"-lswiftCore"}
+!15 = !{!"-lobjc"}
+!16 = distinct !DISubprogram(name: "_wideDivide42", scope: !0, file: !1, line: 222, type: !17, scopeLine: 222, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !23)
+!17 = !DISubroutineType(types: !18)
+!18 = !{!19, !20, !20, !20, !20, !20, !20}
+!19 = !DICompositeType(tag: DW_TAG_structure_type, name: "4 x UInt64", flags: DIFlagFwdDecl, runtimeLang: DW_LANG_Swift)
+!20 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "UInt64", scope: !1, file: !1, size: 64, elements: !22, runtimeLang: DW_LANG_Swift)
+!22 = !{}
+!23 = !{!24, !27}
+!24 = !DILocalVariable(name: "lhs", arg: 1, scope: !16, file: !1, line: 223, type: !25, flags: DIFlagArtificial)
+!25 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !26)
+!26 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "2 x 2 x UInt64", file: !1, size: 256, elements: !22, runtimeLang: DW_LANG_Swift)
+!27 = !DILocalVariable(name: "rhs", arg: 2, scope: !16, file: !1, line: 223, type: !28, flags: DIFlagArtificial)
+!28 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !29)
+!29 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "2 x UInt64", file: !1, size: 128, elements: !22, runtimeLang: DW_LANG_Swift)
+!67 = !DILocation(line: 0, scope: !16)
+!68 = !DILocation(line: 225, column: 9, scope: !16)
diff --git a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
index c5b6d7381ace..3beaf8996e4f 100644
--- a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
+++ b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
@@ -99,6 +99,8 @@
---
name: f1
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: x.addr, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
@@ -127,6 +129,8 @@ body: |
---
name: f2
tracksRegLiveness: true
+frameInfo:
+ adjustsStack: true
stack:
- { id: 0, name: x.addr, type: default, offset: 0, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s
new file mode 100644
index 000000000000..f8e7ba96f006
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_section_start_and_stop_symbols.s
@@ -0,0 +1,43 @@
+# RUN: llvm-mc -triple=aarch64-unknown-linux-gnu -position-independent \
+# RUN: -filetype=obj -o %t.o %s
+# RUN: llvm-jitlink -noexec -check %s %t.o
+
+ .text
+ .file "elf_section_start_stop.c"
+ .globl main
+ .p2align 2
+ .type main,@function
+main:
+ adrp x8, z
+ adrp x9, y
+ ldr w8, [x8, :lo12:z]
+ ldr w9, [x9, :lo12:y]
+ sub w0, w8, w9
+ ret
+.Lfunc_end0:
+ .size main, .Lfunc_end0-main
+
+ .type x,@object
+ .section custom_section,"aw",@progbits
+ .globl x
+ .p2align 2
+x:
+ .word 42
+ .size x, 4
+
+# jitlink-check: *{8}z = (*{8}y) + 4
+
+ .type y,@object
+ .data
+ .globl y
+ .p2align 3, 0x0
+y:
+ .xword __start_custom_section
+ .size y, 8
+
+ .type z,@object
+ .globl z
+ .p2align 3, 0x0
+z:
+ .xword __stop_custom_section
+ .size z, 8
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s
new file mode 100644
index 000000000000..8862dd8a28fa
--- /dev/null
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_section_start_and_stop_symbols.s
@@ -0,0 +1,30 @@
+# RUN: llvm-mc -triple=arm64-apple-darwin24 -filetype=obj -o %t.o %s
+# RUN: llvm-jitlink -noexec -check %s %t.o
+
+# jitlink-check: *{8}_z = (*{8}_y) + 4
+
+ .section __TEXT,__text,regular,pure_instructions
+ .globl _main
+ .p2align 2
+_main:
+ mov w0, #0
+ ret
+
+ .section __DATA,__custom_section
+ .globl _x
+ .p2align 2, 0x0
+_x:
+ .long 42
+
+ .section __DATA,__data
+ .globl _y
+ .p2align 3, 0x0
+_y:
+ .quad section$start$__DATA$__custom_section
+
+ .globl _z
+ .p2align 3, 0x0
+_z:
+ .quad section$end$__DATA$__custom_section
+
+.subsections_via_symbols
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll b/llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll
new file mode 100644
index 000000000000..f9040afd1c01
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/globals-access.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --global-value-regex "x" --version 4
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64-linux-gnu -hwasan-globals=0 | FileCheck %s --check-prefixes=NOGLOB
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64-linux-gnu -hwasan-globals=1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+@x = dso_local global i32 0, align 4
+
+;.
+; NOGLOB: @x = dso_local global i32 0, align 4
+;.
+; CHECK: @x = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @x.hwasan to i64), i64 5260204364768739328) to ptr)
+;.
+define dso_local noundef i32 @_Z3tmpv() sanitize_hwaddress {
+; NOGLOB-LABEL: define dso_local noundef i32 @_Z3tmpv(
+; NOGLOB-SAME: ) #[[ATTR0:[0-9]+]] {
+; NOGLOB-NEXT: entry:
+; NOGLOB-NEXT: [[TMP0:%.*]] = load i32, ptr @x, align 4
+; NOGLOB-NEXT: ret i32 [[TMP0]]
+;
+; CHECK-LABEL: define dso_local noundef i32 @_Z3tmpv(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__hwasan_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[TMP12]], 4294967295
+; CHECK-NEXT: [[HWASAN_SHADOW:%.*]] = add i64 [[TMP1]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to ptr
+; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 ptrtoint (ptr @x to i64), 56
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i8
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 ptrtoint (ptr @x to i64), 72057594037927935
+; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP5]], 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP4]], [[TMP8]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF2:![0-9]+]]
+; CHECK: 10:
+; CHECK-NEXT: call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[TMP2]], ptr @x, i32 2)
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: 11:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @x, align 4
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %0 = load i32, ptr @x, align 4
+ ret i32 %0
+}
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
index 4bb846bff274..62fd7a167156 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
@@ -48,13 +48,12 @@ define dso_local noundef i1 @_Z6targetv() sanitize_hwaddress {
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP26]], i8 [[TMP22]], i64 256, i1 false)
; CHECK-NEXT: [[CALL:%.*]] = call i32 @setjmp(ptr noundef @jbuf)
; CHECK-NEXT: switch i32 [[CALL]], label [[WHILE_BODY:%.*]] [
-; CHECK-NEXT: i32 1, label [[RETURN:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB1:%.*]]
+; CHECK-NEXT: i32 1, label [[RETURN:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB1:%.*]]
; CHECK-NEXT: ]
; CHECK: sw.bb1:
; CHECK-NEXT: br label [[RETURN]]
; CHECK: while.body:
-; CHECK-NEXT: call void @llvm.hwasan.check.memaccess(ptr [[TMP16]], ptr @stackbuf, i32 19)
; CHECK-NEXT: store ptr [[BUF_HWASAN]], ptr @stackbuf, align 8
; CHECK-NEXT: call void @may_jump()
; CHECK-NEXT: br label [[RETURN]]
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
index 96ac4b6088c3..9133b329deb2 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -758,7 +758,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -808,7 +808,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -851,7 +851,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -901,7 +901,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -936,7 +936,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -986,7 +986,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1021,7 +1021,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1071,7 +1071,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1106,7 +1106,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1156,7 +1156,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1191,7 +1191,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1241,7 +1241,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1276,7 +1276,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1326,7 +1326,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1361,7 +1361,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1411,7 +1411,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1446,7 +1446,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1496,7 +1496,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1531,7 +1531,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1581,7 +1581,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1616,7 +1616,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1666,7 +1666,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1701,7 +1701,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1751,7 +1751,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index 1535fccfc211..e0b5907719af 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -39,7 +39,7 @@ define i64 @foo(i64 %guard, ...) #1 {
; Only 56 bytes of the register save area is copied, because of
; "use-soft-float".
-; CHECK: call void @llvm.va_start(ptr %vl)
+; CHECK: call void @llvm.va_start.p0(ptr %vl)
; CHECK: [[VlAddr:%.*]] = ptrtoint ptr %vl to i64
; CHECK: [[RegSaveAreaAddrAddr:%.*]] = add i64 [[VlAddr]], 24
; CHECK: [[RegSaveAreaAddr:%.*]] = inttoptr i64 [[RegSaveAreaAddrAddr]] to ptr
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
index aff4d2c55ad6..205101564dfe 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -560,7 +560,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -580,7 +580,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -623,7 +623,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -643,7 +643,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -678,7 +678,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -698,7 +698,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -733,7 +733,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -753,7 +753,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -788,7 +788,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -808,7 +808,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -843,7 +843,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -863,7 +863,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -898,7 +898,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -918,7 +918,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -953,7 +953,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -973,7 +973,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1008,7 +1008,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1028,7 +1028,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1063,7 +1063,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1083,7 +1083,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1118,7 +1118,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1138,7 +1138,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
@@ -1173,7 +1173,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -1193,7 +1193,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index 21f3311a57ef..f07f3ad06e60 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -542,7 +542,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[TMP27]], 17592186044416, !dbg [[DBG11]]
; CHECK-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr, !dbg [[DBG11]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT: call void @llvm.va_start(ptr [[VA]]), !dbg [[DBG11]]
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]), !dbg [[DBG11]]
; CHECK-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]]
; CHECK-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], 16, !dbg [[DBG11]]
; CHECK-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG11]]
diff --git a/llvm/test/MC/AArch64/coff-relocations.s b/llvm/test/MC/AArch64/coff-relocations.s
index fb67a21992c6..2370fd9fb436 100644
--- a/llvm/test/MC/AArch64/coff-relocations.s
+++ b/llvm/test/MC/AArch64/coff-relocations.s
@@ -1,7 +1,11 @@
// RUN: llvm-mc -triple aarch64-windows -filetype obj -o %t.obj %s
-// RUN: llvm-readobj -r %t.obj | FileCheck %s
+// RUN: llvm-mc -triple arm64ec-windows -filetype obj -o %t-ec.obj %s
+// RUN: llvm-readobj -r %t.obj | FileCheck %s --check-prefixes=CHECK,CHECK-ARM64
+// RUN: llvm-readobj -r %t-ec.obj | FileCheck %s --check-prefixes=CHECK,CHECK-ARM64EC
// RUN: llvm-objdump --no-print-imm-hex -d %t.obj | FileCheck %s --check-prefix=DISASM
+// RUN: llvm-objdump --no-print-imm-hex -d %t-ec.obj | FileCheck %s --check-prefix=DISASM
// RUN: llvm-objdump -s %t.obj | FileCheck %s --check-prefix=DATA
+// RUN: llvm-objdump -s %t-ec.obj | FileCheck %s --check-prefix=DATA
// IMAGE_REL_ARM64_ADDR32
.Linfo_foo:
@@ -71,8 +75,10 @@ tbz x0, #0, target
// IMAGE_REL_ARM64_REL32 because IMAGE_REL_ARM64_REL64 does not exist.
.xword .Linfo_foo - .Ltable
-// CHECK: Format: COFF-ARM64
-// CHECK: Arch: aarch64
+// CHECK-ARM64: Format: COFF-ARM64
+// CHECK-ARM64EC: Format: COFF-ARM64EC
+// CHECK-ARM64: Arch: aarch64
+// CHECK-ARM64EC: Arch: aarch64
// CHECK: AddressSize: 64bit
// CHECK: Relocations [
// CHECK: Section (1) .text {
diff --git a/llvm/test/MC/AArch64/constant-pool-sizes.s b/llvm/test/MC/AArch64/constant-pool-sizes.s
new file mode 100644
index 000000000000..279402af025f
--- /dev/null
+++ b/llvm/test/MC/AArch64/constant-pool-sizes.s
@@ -0,0 +1,25 @@
+// RUN: llvm-mc -triple aarch64-none-linux-gnu %s | FileCheck %s
+
+ ldr w0, =symbol
+ ldr x1, =symbol
+
+ ldr w2, =1234567890
+ ldr x3, =1234567890
+
+// CHECK: ldr w0, .Ltmp0
+// CHECK: ldr x1, .Ltmp1
+// CHECK: ldr w2, .Ltmp2
+// CHECK: ldr x3, .Ltmp3
+
+// CHECK: .p2align 2, 0x0
+// CHECK-NEXT:.Ltmp0:
+// CHECK-NEXT: .word symbol
+// CHECK: .p2align 3, 0x0
+// CHECK-NEXT:.Ltmp1:
+// CHECK-NEXT: .xword symbol
+// CHECK: .p2align 2, 0x0
+// CHECK-NEXT:.Ltmp2:
+// CHECK-NEXT: .word 1234567890
+// CHECK: .p2align 3, 0x0
+// CHECK-NEXT:.Ltmp3:
+// CHECK-NEXT: .xword 1234567890
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s b/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s
deleted file mode 100644
index fdfbf65c0e3c..000000000000
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vinterp.s
+++ /dev/null
@@ -1,278 +0,0 @@
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -show-encoding %s | FileCheck -check-prefix=GCN %s
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s | FileCheck -check-prefix=GCN %s
-
-v_interp_p10_f32 v0, v1, v2, v3
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v1, v10, v20, v30
-// GCN: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04]
-
-v_interp_p10_f32 v2, v11, v21, v31
-// GCN: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04]
-
-v_interp_p10_f32 v3, v12, v22, v32
-// GCN: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, -v1, v2, v3
-// GCN: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p10_f32 v0, v1, -v2, v3
-// GCN: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p10_f32 v0, v1, v2, -v3
-// GCN: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7
-// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v1, v10, v20, v30
-// GCN: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04]
-
-v_interp_p2_f32 v2, v11, v21, v31
-// GCN: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04]
-
-v_interp_p2_f32 v3, v12, v22, v32
-// GCN: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, -v1, v2, v3
-// GCN: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p2_f32 v0, v1, -v2, v3
-// GCN: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p2_f32 v0, v1, v2, -v3
-// GCN: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7
-// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p10_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p10_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p2_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p2_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24]
-
-v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0 ; encoding: [0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0 ; encoding: [0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0 ; encoding: [0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0 ; encoding: [0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1]
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0 ; encoding: [0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04]
-
-v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5
-// GCN: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5 ; encoding: [0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4]
diff --git a/llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s b/llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s
new file mode 100644
index 000000000000..4623500987be
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-amdgpu-exprs.s
@@ -0,0 +1,27 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// OBJDUMP: 0000 00000000 0f000000 00000000 00000000
+
+.text
+
+.p2align 8
+.type caller,@function
+caller:
+ s_endpgm
+
+.rodata
+
+.p2align 6
+.amdhsa_kernel caller
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_private_segment_fixed_size max(7, callee1.private_seg_size, callee2.private_seg_size)
+.end_amdhsa_kernel
+
+.set callee1.private_seg_size, 4
+.set callee2.private_seg_size, 15
+
+// ASM: .amdhsa_private_segment_fixed_size max(7, callee1.private_seg_size, callee2.private_seg_size)
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s b/llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s
new file mode 100644
index 000000000000..fab3e893352b
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-expr-failure.s
@@ -0,0 +1,281 @@
+// RUN: not llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a %s 2>&1 | FileCheck --check-prefix=ASM %s
+
+// Some expression currently require (immediately) solvable expressions, i.e.,
+// they don't depend on yet-unknown symbolic values.
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type user_sgpr_count,@function
+user_sgpr_count:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_count
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_count defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_count
+
+.p2align 8
+.type user_sgpr_private_segment_buffer,@function
+user_sgpr_private_segment_buffer:
+ s_endpgm
+
+.amdhsa_kernel user_sgpr_private_segment_buffer
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_private_segment_buffer defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer
+
+.p2align 8
+.type user_sgpr_kernarg_preload_length,@function
+user_sgpr_kernarg_preload_length:
+ s_endpgm
+
+.amdhsa_kernel user_sgpr_kernarg_preload_length
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_kernarg_preload_length defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length defined_boolean
+
+.p2align 8
+.type user_sgpr_kernarg_preload_offset,@function
+user_sgpr_kernarg_preload_offset:
+ s_endpgm
+
+.amdhsa_kernel user_sgpr_kernarg_preload_offset
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_kernarg_preload_offset defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset defined_boolean
+
+.p2align 8
+.type user_sgpr_dispatch_ptr,@function
+user_sgpr_dispatch_ptr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_dispatch_ptr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_dispatch_ptr defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr
+
+.p2align 8
+.type user_sgpr_queue_ptr,@function
+user_sgpr_queue_ptr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_queue_ptr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_queue_ptr defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr
+
+.p2align 8
+.type user_sgpr_kernarg_segment_ptr,@function
+user_sgpr_kernarg_segment_ptr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_kernarg_segment_ptr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_kernarg_segment_ptr defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr
+
+.p2align 8
+.type user_sgpr_dispatch_id,@function
+user_sgpr_dispatch_id:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_dispatch_id
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_dispatch_id defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id
+
+.p2align 8
+.type user_sgpr_flat_scratch_init,@function
+user_sgpr_flat_scratch_init:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_flat_scratch_init
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_flat_scratch_init defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init
+
+.p2align 8
+.type user_sgpr_private_segment_size,@function
+user_sgpr_private_segment_size:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel user_sgpr_private_segment_size
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_user_sgpr_private_segment_size defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size
+
+.p2align 8
+.type wavefront_size32,@function
+wavefront_size32:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel wavefront_size32
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_wavefront_size32 defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_wavefront_size32
+
+.p2align 8
+.type next_free_vgpr,@function
+next_free_vgpr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel next_free_vgpr
+ .amdhsa_next_free_vgpr defined_boolean
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_next_free_vgpr
+
+.p2align 8
+.type next_free_sgpr,@function
+next_free_sgpr:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel next_free_sgpr
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr defined_boolean
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_next_free_sgpr
+
+.p2align 8
+.type accum_offset,@function
+accum_offset:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel accum_offset
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_accum_offset
+
+.p2align 8
+.type reserve_vcc,@function
+reserve_vcc:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel reserve_vcc
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_reserve_vcc defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_reserve_vcc
+
+.p2align 8
+.type reserve_flat_scratch,@function
+reserve_flat_scratch:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel reserve_flat_scratch
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_reserve_flat_scratch defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_reserve_flat_scratch
+
+.p2align 8
+.type shared_vgpr_count,@function
+shared_vgpr_count:
+ s_endpgm
+
+.p2align 6
+.amdhsa_kernel shared_vgpr_count
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+ .amdhsa_shared_vgpr_count defined_boolean
+.end_amdhsa_kernel
+
+// ASM: error: directive should have resolvable expression
+// ASM-NEXT: .amdhsa_shared_vgpr_count
+
+.set defined_boolean, 1
+
+// ASM: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s
new file mode 100644
index 000000000000..95af59c413ae
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx10.s
@@ -0,0 +1,190 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1010 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups),
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0afe4 801f007f 000c0000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0afe4 801f007f 000c0000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1)>>0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&32)>>5
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_wavefront_size32 (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1024)>>10
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_workgroup_processor_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&536870912)>>29
+// ASM-NEXT: .amdhsa_memory_ordered (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&1073741824)>>30
+// ASM-NEXT: .amdhsa_forward_progress (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2147483648)>>31
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_wavefront_size32 1
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_workgroup_processor_mode 1
+// ASM-NEXT: .amdhsa_memory_ordered 1
+// ASM-NEXT: .amdhsa_forward_progress 1
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s
new file mode 100644
index 000000000000..e1107fb69ba4
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx11.s
@@ -0,0 +1,186 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1100 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0afe4 811f007f 000c0000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0afe4 811f007f 000c0000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_wavefront_size32 (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1024)>>10
+// ASM-NEXT: .amdhsa_enable_private_segment (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_workgroup_processor_mode (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&536870912)>>29
+// ASM-NEXT: .amdhsa_memory_ordered (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&1073741824)>>30
+// ASM-NEXT: .amdhsa_forward_progress (((((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~63))|(0<<0))&(~960))|(0<<6))&2147483648)>>31
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_wavefront_size32 1
+// ASM-NEXT: .amdhsa_enable_private_segment 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_workgroup_processor_mode 1
+// ASM-NEXT: .amdhsa_memory_ordered 1
+// ASM-NEXT: .amdhsa_forward_progress 1
+// ASM-NEXT: .amdhsa_shared_vgpr_count 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s
new file mode 100644
index 000000000000..449616d35186
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx12.s
@@ -0,0 +1,184 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx1200 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f02fe4 811f007f 000c0000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f02fe4 811f007f 000c0000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_round_robin_scheduling defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_workgroup_processor_mode defined_boolean
+ .amdhsa_memory_ordered defined_boolean
+ .amdhsa_forward_progress defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_round_robin_scheduling defined_boolean
+ .amdhsa_enable_private_segment defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_wavefront_size32 (((((0&(~1024))|(1<<10))&(~2048))|(defined_boolean<<11))&1024)>>10
+// ASM-NEXT: .amdhsa_enable_private_segment (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_workgroup_processor_mode (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&536870912)>>29
+// ASM-NEXT: .amdhsa_memory_ordered (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&1073741824)>>30
+// ASM-NEXT: .amdhsa_forward_progress (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&2147483648)>>31
+// ASM-NEXT: .amdhsa_round_robin_scheduling (((((((((((((((((((((((((((((0&(~786432))|(3<<18))&(~536870912))|(1<<29))&(~1073741824))|(1<<30))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~67108864))|(defined_boolean<<26))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~2147483648))|(defined_boolean<<31))&(~2097152))|(defined_boolean<<21))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_wavefront_size32 1
+// ASM-NEXT: .amdhsa_enable_private_segment 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_workgroup_processor_mode 1
+// ASM-NEXT: .amdhsa_memory_ordered 1
+// ASM-NEXT: .amdhsa_forward_progress 1
+// ASM-NEXT: .amdhsa_round_robin_scheduling 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s
new file mode 100644
index 000000000000..c7e05441b45f
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx7.s
@@ -0,0 +1,168 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx700 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0af00 801f007f 00080000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0af00 801f007f 00080000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer (((0&(~2048))|(defined_boolean<<11))&1)>>0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((0&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((0&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((0&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((0&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init (((0&(~2048))|(defined_boolean<<11))&32)>>5
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((0&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s
new file mode 100644
index 000000000000..49a5015987a6
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx8.s
@@ -0,0 +1,171 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx801 < %s | FileCheck --check-prefix=ASM %s
+
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx801 -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 2b000000 2c000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0030 00f0af00 801f007f 00080000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 2a000000 2b000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0070 00f0af00 801f007f 00080000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_group_segment_fixed_size defined_value+2
+ .amdhsa_private_segment_fixed_size defined_value+3
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+.set defined_value, 41
+.set defined_2_bits, 3
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_group_segment_fixed_size defined_value+1
+ .amdhsa_private_segment_fixed_size defined_value+2
+ .amdhsa_system_vgpr_workitem_id defined_2_bits
+ .amdhsa_float_round_mode_32 defined_2_bits
+ .amdhsa_float_round_mode_16_64 defined_2_bits
+ .amdhsa_float_denorm_mode_32 defined_2_bits
+ .amdhsa_float_denorm_mode_16_64 defined_2_bits
+ .amdhsa_system_sgpr_workgroup_id_x defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_y defined_boolean
+ .amdhsa_system_sgpr_workgroup_id_z defined_boolean
+ .amdhsa_system_sgpr_workgroup_info defined_boolean
+ .amdhsa_exception_fp_ieee_invalid_op defined_boolean
+ .amdhsa_exception_fp_denorm_src defined_boolean
+ .amdhsa_exception_fp_ieee_div_zero defined_boolean
+ .amdhsa_exception_fp_ieee_overflow defined_boolean
+ .amdhsa_exception_fp_ieee_underflow defined_boolean
+ .amdhsa_exception_fp_ieee_inexact defined_boolean
+ .amdhsa_exception_int_div_zero defined_boolean
+ .amdhsa_uses_dynamic_stack defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size defined_value+2
+// ASM-NEXT: .amdhsa_private_segment_fixed_size defined_value+3
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer (((0&(~2048))|(defined_boolean<<11))&1)>>0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr (((0&(~2048))|(defined_boolean<<11))&2)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr (((0&(~2048))|(defined_boolean<<11))&4)>>2
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr (((0&(~2048))|(defined_boolean<<11))&8)>>3
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id (((0&(~2048))|(defined_boolean<<11))&16)>>4
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init (((0&(~2048))|(defined_boolean<<11))&32)>>5
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size (((0&(~2048))|(defined_boolean<<11))&64)>>6
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~12288))|(defined_2_bits<<12))&(~49152))|(defined_2_bits<<14))&(~196608))|(defined_2_bits<<16))&(~786432))|(defined_2_bits<<18))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((((((((((((((((((((((((0&(~128))|(1<<7))&(~6144))|(defined_2_bits<<11))&(~128))|(defined_boolean<<7))&(~256))|(defined_boolean<<8))&(~512))|(defined_boolean<<9))&(~1024))|(defined_boolean<<10))&(~16777216))|(defined_boolean<<24))&(~33554432))|(defined_boolean<<25))&(~67108864))|(defined_boolean<<26))&(~134217728))|(defined_boolean<<27))&(~268435456))|(defined_boolean<<28))&(~536870912))|(defined_boolean<<29))&(~1073741824))|(defined_boolean<<30))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_value, 41
+// ASM-NEXT: .no_dead_strip defined_value
+// ASM-NEXT: .set defined_2_bits, 3
+// ASM-NEXT: .no_dead_strip defined_2_bits
+// ASM-NEXT: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 42
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 43
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 1
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 3
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 3
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 3
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 1
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 1
+// ASM-NEXT: .amdhsa_exception_int_div_zero 1
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s
new file mode 100644
index 000000000000..b7f89239160f
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-sym-exprs-gfx90a.s
@@ -0,0 +1,148 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// When going from asm -> asm, the expressions should remain the same (i.e., symbolic).
+// When going from asm -> obj, the expressions should get resolved (through fixups).
+
+// OBJDUMP: Contents of section .rodata
+// expr_defined_later
+// OBJDUMP-NEXT: 0000 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000100
+// OBJDUMP-NEXT: 0030 0000ac04 81000000 00000000 00000000
+// expr_defined
+// OBJDUMP-NEXT: 0040 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0050 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0060 00000000 00000000 00000000 00000100
+// OBJDUMP-NEXT: 0070 0000ac04 81000000 00000000 00000000
+
+.text
+// ASM: .text
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type expr_defined_later,@function
+expr_defined_later:
+ s_endpgm
+
+.p2align 8
+.type expr_defined,@function
+expr_defined:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel expr_defined_later
+ .amdhsa_system_sgpr_private_segment_wavefront_offset defined_boolean
+ .amdhsa_dx10_clamp defined_boolean
+ .amdhsa_ieee_mode defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_tg_split defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+.set defined_boolean, 1
+
+.p2align 6
+.amdhsa_kernel expr_defined
+ .amdhsa_system_sgpr_private_segment_wavefront_offset defined_boolean
+ .amdhsa_dx10_clamp defined_boolean
+ .amdhsa_ieee_mode defined_boolean
+ .amdhsa_fp16_overflow defined_boolean
+ .amdhsa_tg_split defined_boolean
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel expr_defined_later
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&62)>>1
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1)>>0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&128)>>7
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&256)>>8
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&512)>>9
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1024)>>10
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&6144)>>11
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_accum_offset (((((((0&(~65536))|(defined_boolean<<16))&(~63))|(0<<0))&63)>>0)+1)*4
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&12288)>>12
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&49152)>>14
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&196608)>>16
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&786432)>>18
+// ASM-NEXT: .amdhsa_dx10_clamp (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&2097152)>>21
+// ASM-NEXT: .amdhsa_ieee_mode (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&8388608)>>23
+// ASM-NEXT: .amdhsa_fp16_overflow (((((((((((((((((0&(~786432))|(3<<18))&(~2097152))|(1<<21))&(~8388608))|(1<<23))&(~2097152))|(defined_boolean<<21))&(~8388608))|(defined_boolean<<23))&(~67108864))|(defined_boolean<<26))&(~63))|(0<<0))&(~960))|(0<<6))&67108864)>>26
+// ASM-NEXT: .amdhsa_tg_split (((((0&(~65536))|(defined_boolean<<16))&(~63))|(0<<0))&65536)>>16
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&16777216)>>24
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&33554432)>>25
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&67108864)>>26
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&134217728)>>27
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&268435456)>>28
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&536870912)>>29
+// ASM-NEXT: .amdhsa_exception_int_div_zero (((((((0&(~128))|(1<<7))&(~1))|(defined_boolean<<0))&(~62))|(0<<1))&1073741824)>>30
+// ASM-NEXT: .end_amdhsa_kernel
+
+// ASM: .set defined_boolean, 1
+// ASM-NEXT: .no_dead_strip defined_boolean
+
+// ASM: .amdhsa_kernel expr_defined
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 0
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 0
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_accum_offset 4
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 0
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 1
+// ASM-NEXT: .amdhsa_tg_split 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 0
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 0
+// ASM-NEXT: .amdhsa_exception_int_div_zero 0
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/hsa-tg-split.s b/llvm/test/MC/AMDGPU/hsa-tg-split.s
new file mode 100644
index 000000000000..5a4d3e2c279c
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/hsa-tg-split.s
@@ -0,0 +1,74 @@
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -mattr=+xnack,+tgsplit < %s | FileCheck --check-prefix=ASM %s
+// RUN: llvm-mc -triple amdgcn-amd-amdhsa -mcpu=gfx90a -mattr=+xnack,+tgsplit -filetype=obj < %s > %t
+// RUN: llvm-objdump -s -j .rodata %t | FileCheck --check-prefix=OBJDUMP %s
+
+// OBJDUMP: Contents of section .rodata
+// OBJDUMP-NEXT: 0000 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0010 00000000 00000000 00000000 00000000
+// OBJDUMP-NEXT: 0020 00000000 00000000 00000000 00000100
+// OBJDUMP-NEXT: 0030 0000ac00 80000000 00000000 00000000
+
+.text
+// ASM: .text
+
+.amdgcn_target "amdgcn-amd-amdhsa--gfx90a:xnack+"
+// ASM: .amdgcn_target "amdgcn-amd-amdhsa--gfx90a:xnack+"
+
+.amdhsa_code_object_version 4
+// ASM: .amdhsa_code_object_version 4
+
+.p2align 8
+.type minimal,@function
+minimal:
+ s_endpgm
+
+.rodata
+// ASM: .rodata
+
+.p2align 6
+.amdhsa_kernel minimal
+ .amdhsa_next_free_vgpr 0
+ .amdhsa_next_free_sgpr 0
+ .amdhsa_accum_offset 4
+.end_amdhsa_kernel
+
+// ASM: .amdhsa_kernel minimal
+// ASM-NEXT: .amdhsa_group_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_private_segment_fixed_size 0
+// ASM-NEXT: .amdhsa_kernarg_size 0
+// ASM-NEXT: .amdhsa_user_sgpr_count 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_buffer 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_queue_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_segment_ptr 0
+// ASM-NEXT: .amdhsa_user_sgpr_dispatch_id 0
+// ASM-NEXT: .amdhsa_user_sgpr_flat_scratch_init 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_length 0
+// ASM-NEXT: .amdhsa_user_sgpr_kernarg_preload_offset 0
+// ASM-NEXT: .amdhsa_user_sgpr_private_segment_size 0
+// ASM-NEXT: .amdhsa_system_sgpr_private_segment_wavefront_offset 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_x 1
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_y 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_id_z 0
+// ASM-NEXT: .amdhsa_system_sgpr_workgroup_info 0
+// ASM-NEXT: .amdhsa_system_vgpr_workitem_id 0
+// ASM-NEXT: .amdhsa_next_free_vgpr 0
+// ASM-NEXT: .amdhsa_next_free_sgpr 0
+// ASM-NEXT: .amdhsa_accum_offset 4
+// ASM-NEXT: .amdhsa_reserve_xnack_mask 1
+// ASM-NEXT: .amdhsa_float_round_mode_32 0
+// ASM-NEXT: .amdhsa_float_round_mode_16_64 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_32 0
+// ASM-NEXT: .amdhsa_float_denorm_mode_16_64 3
+// ASM-NEXT: .amdhsa_dx10_clamp 1
+// ASM-NEXT: .amdhsa_ieee_mode 1
+// ASM-NEXT: .amdhsa_fp16_overflow 0
+// ASM-NEXT: .amdhsa_tg_split 1
+// ASM-NEXT: .amdhsa_exception_fp_ieee_invalid_op 0
+// ASM-NEXT: .amdhsa_exception_fp_denorm_src 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_div_zero 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_overflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_underflow 0
+// ASM-NEXT: .amdhsa_exception_fp_ieee_inexact 0
+// ASM-NEXT: .amdhsa_exception_int_div_zero 0
+// ASM-NEXT: .end_amdhsa_kernel
diff --git a/llvm/test/MC/AMDGPU/vinterp-fake16.s b/llvm/test/MC/AMDGPU/vinterp-fake16.s
new file mode 100644
index 000000000000..33dacdd92c31
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/vinterp-fake16.s
@@ -0,0 +1,182 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -show-encoding %s | FileCheck -check-prefix=GCN %s
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -show-encoding %s | FileCheck -check-prefix=GCN %s
+
+v_interp_p10_f32 v0, v1, v2, v3
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v1, v10, v20, v30
+// GCN: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04]
+
+v_interp_p10_f32 v2, v11, v21, v31
+// GCN: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04]
+
+v_interp_p10_f32 v3, v12, v22, v32
+// GCN: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 clamp
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, -v1, v2, v3
+// GCN: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p10_f32 v0, v1, -v2, v3
+// GCN: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p10_f32 v0, v1, v2, -v3
+// GCN: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7
+// GCN: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v1, v10, v20, v30
+// GCN: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0 ; encoding: [0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04]
+
+v_interp_p2_f32 v2, v11, v21, v31
+// GCN: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0 ; encoding: [0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04]
+
+v_interp_p2_f32 v3, v12, v22, v32
+// GCN: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0 ; encoding: [0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 clamp
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, -v1, v2, v3
+// GCN: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p2_f32 v0, v1, -v2, v3
+// GCN: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p2_f32 v0, v1, v2, -v3
+// GCN: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7
+// GCN: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7 ; encoding: [0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p10_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p10_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p2_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p2_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24]
+
+v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0 ; encoding: [0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1 ; encoding: [0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7 ; encoding: [0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04]
+
+v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
+// GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04]
diff --git a/llvm/test/MC/ARM/basic-arm-instructions.s b/llvm/test/MC/ARM/basic-arm-instructions.s
index 84a7cf52fa30..9f3a5cd4afa7 100644
--- a/llvm/test/MC/ARM/basic-arm-instructions.s
+++ b/llvm/test/MC/ARM/basic-arm-instructions.s
@@ -1202,6 +1202,10 @@ Lforward:
@ CHECK: ldrex r1, [r7] @ encoding: [0x9f,0x1f,0x97,0xe1]
@ CHECK: ldrexd r6, r7, [r8] @ encoding: [0x9f,0x6f,0xb8,0xe1]
+@ GNU alias
+ ldrexd r6, [r8]
+@ CHECK: ldrexd r6, r7, [r8] @ encoding: [0x9f,0x6f,0xb8,0xe1]
+
@------------------------------------------------------------------------------
@ LDRHT
@------------------------------------------------------------------------------
@@ -2904,6 +2908,10 @@ Lforward:
@ CHECK: strex r2, r1, [r7] @ encoding: [0x91,0x2f,0x87,0xe1]
@ CHECK: strexd r6, r2, r3, [r8] @ encoding: [0x92,0x6f,0xa8,0xe1]
+@ GNU alias
+ strexd r6, r2, [r8]
+@ CHECK: strexd r6, r2, r3, [r8] @ encoding: [0x92,0x6f,0xa8,0xe1]
+
@------------------------------------------------------------------------------
@ STR
@------------------------------------------------------------------------------
diff --git a/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s b/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s
index be8d3c324e68..b802bfdb28f7 100644
--- a/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s
+++ b/llvm/test/MC/ARM/load-store-acquire-release-v8-thumb.s
@@ -14,6 +14,10 @@
@ CHECK-V7: error: instruction requires: acquire/release
@ CHECK-V7: error: instruction requires: acquire/release
+@ GNU alias
+ ldaexd r6, [r8]
+@ CHECK: ldaexd r6, r7, [r8] @ encoding: [0xd8,0xe8,0xff,0x67]
+
stlexb r1, r3, [r4]
stlexh r4, r2, [r5]
stlex r2, r1, [r7]
@@ -27,6 +31,10 @@
@ CHECK-V7: error: instruction requires: acquire/release
@ CHECK-V7: error: instruction requires: acquire/release
+@ GNU alias
+ stlexd r6, r2, [r8]
+@ CHECK: stlexd r6, r2, r3, [r8] @ encoding: [0xc8,0xe8,0xf6,0x23]
+
lda r5, [r6]
ldab r5, [r6]
ldah r12, [r9]
diff --git a/llvm/test/MC/ARM/load-store-acquire-release-v8.s b/llvm/test/MC/ARM/load-store-acquire-release-v8.s
index 273519e050b1..edfe14c93e4e 100644
--- a/llvm/test/MC/ARM/load-store-acquire-release-v8.s
+++ b/llvm/test/MC/ARM/load-store-acquire-release-v8.s
@@ -1,4 +1,5 @@
-@ RUN: llvm-mc -triple=armv8 -show-encoding < %s | FileCheck %s
+@ RUN: not llvm-mc -triple=armv8 -show-encoding < %s 2> %t | FileCheck %s
+@ RUN: FileCheck %s < %t --check-prefix=CHECK-ERROR
@ RUN: not llvm-mc -triple=armv7 -show-encoding < %s 2>&1 | FileCheck %s --check-prefix=CHECK-V7
ldaexb r3, [r4]
ldaexh r2, [r5]
@@ -14,6 +15,13 @@
@ CHECK-V7: instruction requires: acquire/release
@ CHECK-V7: instruction requires: acquire/release
+ ldaexd r2, r4, [r8]
+@ CHECK-ERROR: error: destination operands must be sequential
+
+@ GNU alias
+ ldaexd r6, [r8]
+@ CHECK: ldaexd r6, r7, [r8] @ encoding: [0x9f,0x6e,0xb8,0xe1]
+
stlexb r1, r3, [r4]
stlexh r4, r2, [r5]
stlex r2, r1, [r7]
@@ -27,6 +35,13 @@
@ CHECK-V7: instruction requires: acquire/release
@ CHECK-V7: instruction requires: acquire/release
+ stlexd r6, r2, r4, [r8]
+@ CHECK-ERROR: error: source operands must be sequential
+
+@ GNU alias
+ stlexd r6, r2, [r8]
+@ CHECK: stlexd r6, r2, r3, [r8] @ encoding: [0x92,0x6e,0xa8,0xe1]
+
lda r5, [r6]
ldab r5, [r6]
ldah r12, [r9]
diff --git a/llvm/test/MC/COFF/dwarf5lineinfo.s b/llvm/test/MC/COFF/dwarf5lineinfo.s
new file mode 100644
index 000000000000..f0789feb2085
--- /dev/null
+++ b/llvm/test/MC/COFF/dwarf5lineinfo.s
@@ -0,0 +1,13 @@
+// RUN: llvm-mc -filetype=obj -triple x86_64-pc-windows-gnu %s -o - | llvm-readobj -r - | FileCheck %s
+
+// CHECK: Relocations [
+// CHECK: Section (4) .debug_line {
+// CHECK: 0x22 IMAGE_REL_AMD64_SECREL .debug_line_str (8)
+// CHECK: 0x2C IMAGE_REL_AMD64_SECREL .debug_line_str (8)
+// CHECK: 0x36 IMAGE_REL_AMD64_ADDR64 .text (0)
+// CHECK: }
+
+main:
+ .file 0 "/" "test.c"
+ .loc 0 1 0
+ retq
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt
deleted file mode 100644
index b22fd5e289fa..000000000000
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vinterp.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble %s | FileCheck -strict-whitespace -check-prefix=GFX11 %s
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# Check that unused bits in the encoding are ignored.
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x80,0xcd,0x01,0x05,0x0e,0x1c
-
-# GFX11: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX11: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX11: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX11: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX11: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX11: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt
index 0c4427cff63e..1be97b242284 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_features.txt
@@ -12,8 +12,13 @@
# GFX12: v_add3_u32_e64_dpp v5, v1, 42, v0 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x54,0x01,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x55,0xd6,0xe9,0x54,0x01,0x04,0x01,0x77,0x39,0x05
-# GFX1150: v_add3_u32_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05]
+# GFX12: v_add3_u32_e64_dpp v5, v1, s2, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05]
0x05,0x00,0x55,0xd6,0xe9,0x04,0x0c,0x00,0x01,0x77,0x39,0x05
-# GFX1150: v_cmp_ne_i32_e64_dpp vcc_lo, v1, s2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x6a,0x00,0x45,0xd4,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
+# GFX12: v_cmp_ne_i32_e64_dpp vcc_lo, v1, s2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x6a,0x00,0x45,0xd4,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05]
0x6a,0x00,0x45,0xd4,0xe9,0x04,0x00,0x00,0x01,0x77,0x39,0x05
+
+# Check that unused bits in the encoding are ignored.
+# This is more strict than the check in vinterp-fake16.txt and is GFX12 specific.
+# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0 ; encoding: [0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04]
+0x00,0x00,0xe0,0xcd,0x01,0x05,0x0e,0x1c
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt
deleted file mode 100644
index 977cd732947c..000000000000
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vinterp.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -disassemble %s | FileCheck -strict-whitespace -check-prefix=GFX12 %s
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# Check that unused bits in the encoding are ignored.
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0xe0,0xcd,0x01,0x05,0x0e,0x1c
-
-# GFX12: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX12: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX12: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}}
-0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04
-
-# GFX12: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}}
-0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04
-
-# GFX12: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}}
-0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
-0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
-0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
-0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
-0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
-0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
-0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
-0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
-0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
-0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
-0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04
-
-# GFX12: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
-0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4
diff --git a/llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt b/llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt
new file mode 100644
index 000000000000..239f1d8b3058
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/vinterp-fake16.txt
@@ -0,0 +1,252 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -disassemble %s | FileCheck -strict-whitespace -check-prefix=CHECK %s
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -disassemble %s | FileCheck -strict-whitespace -check-prefix=CHECK %s
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# Check that unused bits in the encoding are ignored.
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x80,0xcd,0x01,0x05,0x0e,0x1c
+
+# CHECK: v_interp_p10_f32 v1, v10, v20, v30 wait_exp:0{{$}}
+0x01,0x00,0x00,0xcd,0x0a,0x29,0x7a,0x04
+
+# CHECK: v_interp_p10_f32 v2, v11, v21, v31 wait_exp:0{{$}}
+0x02,0x00,0x00,0xcd,0x0b,0x2b,0x7e,0x04
+
+# CHECK: v_interp_p10_f32 v3, v12, v22, v32 wait_exp:0{{$}}
+0x03,0x00,0x00,0xcd,0x0c,0x2d,0x82,0x04
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p10_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x00,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
+0x00,0x87,0x00,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v1, v10, v20, v30 wait_exp:0{{$}}
+0x01,0x00,0x01,0xcd,0x0a,0x29,0x7a,0x04
+
+# CHECK: v_interp_p2_f32 v2, v11, v21, v31 wait_exp:0{{$}}
+0x02,0x00,0x01,0xcd,0x0b,0x2b,0x7e,0x04
+
+# CHECK: v_interp_p2_f32 v3, v12, v22, v32 wait_exp:0{{$}}
+0x03,0x00,0x01,0xcd,0x0c,0x2d,0x82,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p2_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x01,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f32 v0, v1, v2, v3 clamp wait_exp:7{{$}}
+0x00,0x87,0x01,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x02,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x02,0xcd,0x01,0x05,0x0e,0xe4
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x03,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x03,0xcd,0x01,0x05,0x0e,0xe4
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x04,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p10_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x04,0xcd,0x01,0x05,0x0e,0xe4
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, -v1, v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x24
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, -v2, v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x44
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, -v3 wait_exp:0{{$}}
+0x00,0x00,0x05,0xcd,0x01,0x05,0x0e,0x84
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp wait_exp:0{{$}}
+0x00,0x80,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:1{{$}}
+0x00,0x01,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 wait_exp:7{{$}}
+0x00,0x07,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,0] wait_exp:0{{$}}
+0x00,0x08,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,1,0,0] wait_exp:0{{$}}
+0x00,0x10,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,1,0] wait_exp:0{{$}}
+0x00,0x20,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[0,0,0,1] wait_exp:0{{$}}
+0x00,0x40,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,1,1,1] wait_exp:0{{$}}
+0x00,0x78,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0x4d,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, v1, v2, v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0x04
+
+# CHECK: v_interp_p2_rtz_f16_f32 v0, -v1, -v2, -v3 clamp op_sel:[1,0,0,1] wait_exp:5{{$}}
+0x00,0xcd,0x05,0xcd,0x01,0x05,0x0e,0xe4
diff --git a/llvm/test/MC/Disassembler/X86/apx/imulzu.txt b/llvm/test/MC/Disassembler/X86/apx/imulzu.txt
new file mode 100644
index 000000000000..86142e054097
--- /dev/null
+++ b/llvm/test/MC/Disassembler/X86/apx/imulzu.txt
@@ -0,0 +1,50 @@
+# RUN: llvm-mc -triple x86_64 -disassemble %s | FileCheck %s --check-prefix=ATT
+# RUN: llvm-mc -triple x86_64 -disassemble -output-asm-variant=1 %s | FileCheck %s --check-prefix=INTEL
+
+# ATT: imulzuw $123, %dx, %dx
+# INTEL: imulzu dx, dx, 123
+0x62,0xf4,0x7d,0x18,0x6b,0xd2,0x7b
+
+# ATT: imulzul $123, %ecx, %ecx
+# INTEL: imulzu ecx, ecx, 123
+0x62,0xf4,0x7c,0x18,0x6b,0xc9,0x7b
+
+# ATT: imulzuq $123, %r9, %r9
+# INTEL: imulzu r9, r9, 123
+0x62,0x54,0xfc,0x18,0x6b,0xc9,0x7b
+
+# ATT: imulzuw $123, 291(%r8,%rax,4), %dx
+# INTEL: imulzu dx, word ptr [r8 + 4*rax + 291], 123
+0x62,0xd4,0x7d,0x18,0x6b,0x94,0x80,0x23,0x01,0x00,0x00,0x7b
+
+# ATT: imulzul $123, 291(%r8,%rax,4), %ecx
+# INTEL: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123
+0x62,0xd4,0x7c,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b
+
+# ATT: imulzuq $123, 291(%r8,%rax,4), %r9
+# INTEL: imulzu r9, qword ptr [r8 + 4*rax + 291], 123
+0x62,0x54,0xfc,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b
+
+# ATT: imulzuw $1234, %dx, %dx
+# INTEL: imulzu dx, dx, 1234
+0x62,0xf4,0x7d,0x18,0x69,0xd2,0xd2,0x04
+
+# ATT: imulzuw $1234, 291(%r8,%rax,4), %dx
+# INTEL: imulzu dx, word ptr [r8 + 4*rax + 291], 1234
+0x62,0xd4,0x7d,0x18,0x69,0x94,0x80,0x23,0x01,0x00,0x00,0xd2,0x04
+
+# ATT: imulzul $123456, %ecx, %ecx
+# INTEL: imulzu ecx, ecx, 123456
+0x62,0xf4,0x7c,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00
+
+# ATT: imulzuq $123456, %r9, %r9
+# INTEL: imulzu r9, r9, 123456
+0x62,0x54,0xfc,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00
+
+# ATT: imulzul $123456, 291(%r8,%rax,4), %ecx
+# INTEL: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123456
+0x62,0xd4,0x7c,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00
+
+# ATT: imulzuq $123456, 291(%r8,%rax,4), %r9
+# INTEL: imulzu r9, qword ptr [r8 + 4*rax + 291], 123456
+0x62,0x54,0xfc,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00
diff --git a/llvm/test/MC/RISCV/rv32zcmp-invalid.s b/llvm/test/MC/RISCV/rv32zcmp-invalid.s
index cb99bba0aaa1..1acea187585f 100644
--- a/llvm/test/MC/RISCV/rv32zcmp-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zcmp-invalid.s
@@ -15,3 +15,15 @@ cm.popretz {ra, s0-s10}, 112
# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
cm.popretz {ra, s0-s1}, 112
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, 16
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -32
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, -8
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -40
diff --git a/llvm/test/MC/RISCV/rv64zcmp-invalid.s b/llvm/test/MC/RISCV/rv64zcmp-invalid.s
index 103934583495..bf34554095ea 100644
--- a/llvm/test/MC/RISCV/rv64zcmp-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zcmp-invalid.s
@@ -15,3 +15,15 @@ cm.popretz {ra, s0-s10}, 112
# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
cm.popretz {ra, s0-s1}, 112
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, 16
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -32
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.push {ra}, -15
+
+# CHECK-ERROR: error: stack adjustment is invalid for this instruction and register list; refer to Zc spec for a detailed range of stack adjustment
+cm.pop {ra, s0-s1}, -33
diff --git a/llvm/test/MC/RISCV/rvv/zvkned-invalid.s b/llvm/test/MC/RISCV/rvv/zvkned-invalid.s
new file mode 100644
index 000000000000..9230bc08e3fa
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvkned-invalid.s
@@ -0,0 +1,23 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvkned %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vaesdf.vs v10, v10
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesdf.vs v10, v10
+
+vaesef.vs v11, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesef.vs v11, v11
+
+vaesdm.vs v12, v12
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesdm.vs v12, v12
+
+vaesem.vs v13, v13
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesem.vs v13, v13
+
+vaesz.vs v14, v14
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vaesz.vs v14, v14
+
diff --git a/llvm/test/MC/RISCV/rvv/zvknh-invalid.s b/llvm/test/MC/RISCV/rvv/zvknh-invalid.s
new file mode 100644
index 000000000000..d9902511c0e1
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvknh-invalid.s
@@ -0,0 +1,26 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvknha %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vsha2ms.vv v10, v10, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ms.vv v10, v10, v11
+
+vsha2ms.vv v11, v10, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ms.vv v11, v10, v11
+
+vsha2ch.vv v12, v12, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ch.vv v12, v12, v11
+
+vsha2ch.vv v11, v12, v11
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2ch.vv v11, v12, v11
+
+vsha2cl.vv v13, v13, v15
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2cl.vv v13, v13, v15
+
+vsha2cl.vv v15, v13, v15
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsha2cl.vv v15, v13, v15
diff --git a/llvm/test/MC/RISCV/rvv/zvksed-invalid.s b/llvm/test/MC/RISCV/rvv/zvksed-invalid.s
new file mode 100644
index 000000000000..41df8d3bc296
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvksed-invalid.s
@@ -0,0 +1,6 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvksed %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vsm4r.vs v10, v10
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsm4r.vs v10, v10
diff --git a/llvm/test/MC/RISCV/rvv/zvksh-invalid.s b/llvm/test/MC/RISCV/rvv/zvksh-invalid.s
new file mode 100644
index 000000000000..cccec44b8191
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvksh-invalid.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+zvksh %s 2>&1 \
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+
+vsm3me.vv v10, v10, v8
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsm3me.vv v10, v10, v8
+
+vsm3c.vi v9, v9, 7
+# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
+# CHECK-ERROR-LABEL: vsm3c.vi v9, v9, 7
diff --git a/llvm/test/MC/RISCV/rvv/zvksh.s b/llvm/test/MC/RISCV/rvv/zvksh.s
index ca6cb49d3079..06251ff6efe5 100644
--- a/llvm/test/MC/RISCV/rvv/zvksh.s
+++ b/llvm/test/MC/RISCV/rvv/zvksh.s
@@ -19,3 +19,10 @@ vsm3me.vv v10, v9, v8
# CHECK-ENCODING: [0x77,0x25,0x94,0x82]
# CHECK-ERROR: instruction requires the following: 'Zvksh' (SM3 Hash Function Instructions){{$}}
# CHECK-UNKNOWN: 77 25 94 82 <unknown>
+
+# vs1 is allowed to overlap, but not vs2.
+vsm3me.vv v10, v9, v10
+# CHECK-INST: vsm3me.vv v10, v9, v10
+# CHECK-ENCODING: [0x77,0x25,0x95,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvksh' (SM3 Hash Function Instructions){{$}}
+# CHECK-UNKNOWN: 77 25 95 82 <unknown>
diff --git a/llvm/test/MC/X86/apx/imulzu-att.s b/llvm/test/MC/X86/apx/imulzu-att.s
new file mode 100644
index 000000000000..f56bfa77e1ce
--- /dev/null
+++ b/llvm/test/MC/X86/apx/imulzu-att.s
@@ -0,0 +1,41 @@
+# RUN: llvm-mc -triple x86_64 -show-encoding %s | FileCheck %s
+# RUN: not llvm-mc -triple i386 -show-encoding %s 2>&1 | FileCheck %s --check-prefix=ERROR
+
+# ERROR-COUNT-12: error:
+# ERROR-NOT: error:
+# CHECK: imulzuw $123, %dx, %dx
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x6b,0xd2,0x7b]
+ imulzuw $123, %dx, %dx
+# CHECK: imulzul $123, %ecx, %ecx
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x6b,0xc9,0x7b]
+ imulzul $123, %ecx, %ecx
+# CHECK: imulzuq $123, %r9, %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0xc9,0x7b]
+ imulzuq $123, %r9, %r9
+# CHECK: imulzuw $123, 291(%r8,%rax,4), %dx
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x6b,0x94,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzuw $123, 291(%r8,%rax,4), %dx
+# CHECK: imulzul $123, 291(%r8,%rax,4), %ecx
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzul $123, 291(%r8,%rax,4), %ecx
+# CHECK: imulzuq $123, 291(%r8,%rax,4), %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzuq $123, 291(%r8,%rax,4), %r9
+# CHECK: imulzuw $1234, %dx, %dx
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x69,0xd2,0xd2,0x04]
+ imulzuw $1234, %dx, %dx
+# CHECK: imulzuw $1234, 291(%r8,%rax,4), %dx
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x69,0x94,0x80,0x23,0x01,0x00,0x00,0xd2,0x04]
+ imulzuw $1234, 291(%r8,%rax,4), %dx
+# CHECK: imulzul $123456, %ecx, %ecx
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzul $123456, %ecx, %ecx
+# CHECK: imulzuq $123456, %r9, %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzuq $123456, %r9, %r9
+# CHECK: imulzul $123456, 291(%r8,%rax,4), %ecx
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzul $123456, 291(%r8,%rax,4), %ecx
+# CHECK: imulzuq $123456, 291(%r8,%rax,4), %r9
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzuq $123456, 291(%r8,%rax,4), %r9
diff --git a/llvm/test/MC/X86/apx/imulzu-intel.s b/llvm/test/MC/X86/apx/imulzu-intel.s
new file mode 100644
index 000000000000..3a01fdca1489
--- /dev/null
+++ b/llvm/test/MC/X86/apx/imulzu-intel.s
@@ -0,0 +1,38 @@
+# RUN: llvm-mc -triple x86_64 -show-encoding -x86-asm-syntax=intel -output-asm-variant=1 %s | FileCheck %s
+
+# CHECK: imulzu dx, dx, 123
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x6b,0xd2,0x7b]
+ imulzu dx, dx, 123
+# CHECK: imulzu ecx, ecx, 123
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x6b,0xc9,0x7b]
+ imulzu ecx, ecx, 123
+# CHECK: imulzu r9, r9, 123
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0xc9,0x7b]
+ imulzu r9, r9, 123
+# CHECK: imulzu dx, word ptr [r8 + 4*rax + 291], 123
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x6b,0x94,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzu dx, word ptr [r8 + 4*rax + 291], 123
+# CHECK: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzu ecx, dword ptr [r8 + 4*rax + 291], 123
+# CHECK: imulzu r9, qword ptr [r8 + 4*rax + 291], 123
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x6b,0x8c,0x80,0x23,0x01,0x00,0x00,0x7b]
+ imulzu r9, qword ptr [r8 + 4*rax + 291], 123
+# CHECK: imulzu dx, dx, 1234
+# CHECK: encoding: [0x62,0xf4,0x7d,0x18,0x69,0xd2,0xd2,0x04]
+ imulzu dx, dx, 1234
+# CHECK: imulzu dx, word ptr [r8 + 4*rax + 291], 1234
+# CHECK: encoding: [0x62,0xd4,0x7d,0x18,0x69,0x94,0x80,0x23,0x01,0x00,0x00,0xd2,0x04]
+ imulzu dx, word ptr [r8 + 4*rax + 291], 1234
+# CHECK: imulzu ecx, ecx, 123456
+# CHECK: encoding: [0x62,0xf4,0x7c,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzu ecx, ecx, 123456
+# CHECK: imulzu r9, r9, 123456
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0xc9,0x40,0xe2,0x01,0x00]
+ imulzu r9, r9, 123456
+# CHECK: imulzu ecx, dword ptr [r8 + 4*rax + 291], 123456
+# CHECK: encoding: [0x62,0xd4,0x7c,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzu ecx, dword ptr [r8 + 4*rax + 291], 123456
+# CHECK: imulzu r9, qword ptr [r8 + 4*rax + 291], 123456
+# CHECK: encoding: [0x62,0x54,0xfc,0x18,0x69,0x8c,0x80,0x23,0x01,0x00,0x00,0x40,0xe2,0x01,0x00]
+ imulzu r9, qword ptr [r8 + 4*rax + 291], 123456
diff --git a/llvm/test/MachineVerifier/test_adjustsstack.mir b/llvm/test/MachineVerifier/test_adjustsstack.mir
new file mode 100644
index 000000000000..d333737e000c
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_adjustsstack.mir
@@ -0,0 +1,26 @@
+# RUN: not --crash llc -o - -start-before=twoaddressinstruction -verify-machineinstrs %s 2>&1 \
+# RUN: | FileCheck %s
+# REQUIRES: aarch64-registered-target
+--- |
+ target triple = "aarch64-unknown-linux"
+ declare i32 @bar(i32) nounwind
+ define i32 @foo() nounwind {
+ call i32 @bar(i32 0)
+ ret i32 0
+ }
+...
+---
+name: foo
+registers:
+ - { id: 0, class: gpr32 }
+body: |
+ bb.0 (%ir-block.0):
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ %0 = COPY $wzr
+ $w0 = COPY %0
+ BL @bar, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $w0, implicit-def $sp, implicit-def $w0
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ $w0 = COPY killed %0
+ RET_ReallyLR implicit $w0
+...
+# CHECK-LABEL: Bad machine code: AdjustsStack not set in presence of a frame pseudo instruction.
diff --git a/llvm/test/MachineVerifier/test_g_ubsantrap.mir b/llvm/test/MachineVerifier/test_g_ubsantrap.mir
new file mode 100644
index 000000000000..d2b219d8650a
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_ubsantrap.mir
@@ -0,0 +1,18 @@
+# RUN: not --crash llc -o - -mtriple=arm64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+
+---
+name: test_ubsantrap
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ ; CHECK: Crash kind must be 8 bit wide
+ G_UBSANTRAP 4096
+
+ ; CHECK: Crash kind must be an immediate
+ %5:_(s32) = IMPLICIT_DEF
+ G_UBSANTRAP %5
+
+...
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml
new file mode 100644
index 000000000000..09885bd529f0
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-amplification.yaml
@@ -0,0 +1,97 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 14
+ PayloadSizeInBytes: 4092
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: ASEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 14
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 14
+# CHECK-NEXT: PayloadSizeInBytes: 4092
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: ASEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml
new file mode 100644
index 000000000000..ee6fb112c772
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-compute.yaml
@@ -0,0 +1,95 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 5
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: CSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 5
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 5
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: CSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml
new file mode 100644
index 000000000000..dd367deae88e
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-domain.yaml
@@ -0,0 +1,105 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 4
+ InputControlPointCount: 1024
+ OutputPositionPresent: 1
+ TessellatorDomain: 2056
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigPatchConstOrPrimVectors: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 0, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: DSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ PatchOutputMap: []
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 4
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 4
+# CHECK-NEXT: InputControlPointCount: 1024
+# CHECK-NEXT: OutputPositionPresent: 1
+# CHECK-NEXT: TessellatorDomain: 2056
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigPatchConstOrPrimVectors: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 0, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: DSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: PatchOutputMap: [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml
new file mode 100644
index 000000000000..4c7680b63b02
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-geometry.yaml
@@ -0,0 +1,105 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 2
+ InputPrimitive: 1024
+ OutputTopology: 4096
+ OutputStreamMask: 2056
+ OutputPositionPresent: 1
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ MaxVertexCount: 4096
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: GSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 2
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 2
+# CHECK-NEXT: InputPrimitive: 1024
+# CHECK-NEXT: OutputTopology: 4096
+# CHECK-NEXT: OutputStreamMask: 2056
+# CHECK-NEXT: OutputPositionPresent: 1
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: MaxVertexCount: 4096
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: GSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml
new file mode 100644
index 000000000000..3bbad8a9b0ee
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-hull.yaml
@@ -0,0 +1,107 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 3
+ InputControlPointCount: 1024
+ OutputControlPointCount: 4096
+ TessellatorDomain: 2056
+ TessellatorOutputPrimitive: 8192
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigPatchConstOrPrimVectors: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 0, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: HSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ InputPatchMap: []
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 3
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 3
+# CHECK-NEXT: InputControlPointCount: 1024
+# CHECK-NEXT: OutputControlPointCount: 4096
+# CHECK-NEXT: TessellatorDomain: 2056
+# CHECK-NEXT: TessellatorOutputPrimitive: 8192
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigPatchConstOrPrimVectors: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 0, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: HSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: InputPatchMap: [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml
new file mode 100644
index 000000000000..c5ea1fcf0780
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-mesh.yaml
@@ -0,0 +1,109 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 13
+ GroupSharedBytesUsed: 1024
+ GroupSharedBytesDependentOnViewID: 2056
+ PayloadSizeInBytes: 4092
+ MaxOutputVertices: 8196
+ MaxOutputPrimitives: 4092
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigPrimVectors: 128
+ MeshOutputTopology: 16
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: MSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 13
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 13
+# CHECK-NEXT: GroupSharedBytesUsed: 1024
+# CHECK-NEXT: GroupSharedBytesDependentOnViewID: 2056
+# CHECK-NEXT: PayloadSizeInBytes: 4092
+# CHECK-NEXT: MaxOutputVertices: 8196
+# CHECK-NEXT: MaxOutputPrimitives: 4092
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigPrimVectors: 128
+# CHECK-NEXT: MeshOutputTopology: 16
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: MSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml
new file mode 100644
index 000000000000..b28d5ec8074d
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-pixel.yaml
@@ -0,0 +1,99 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 0
+ DepthOutput: 7
+ SampleFrequency: 96
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: PSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 0
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 0
+# CHECK-NEXT: DepthOutput: 7
+# CHECK-NEXT: SampleFrequency: 96
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: PSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml b/llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml
new file mode 100644
index 000000000000..d1fb55839931
--- /dev/null
+++ b/llvm/test/ObjectYAML/DXContainer/PSVv3-vertex.yaml
@@ -0,0 +1,97 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+
+--- !dxcontainer
+Header:
+ Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+ Version:
+ Major: 1
+ Minor: 0
+ PartCount: 2
+Parts:
+ - Name: PSV0
+ Size: 144
+ PSVInfo:
+ Version: 3
+ ShaderStage: 1
+ OutputPositionPresent: 1
+ MinimumWaveLaneCount: 0
+ MaximumWaveLaneCount: 4294967295
+ UsesViewID: 0
+ SigInputVectors: 0
+ SigOutputVectors: [ 8, 16, 32, 64 ]
+ NumThreadsX: 512
+ NumThreadsY: 1024
+ NumThreadsZ: 2048
+ EntryName: VSEntry
+ ResourceStride: 24
+ Resources:
+ - Type: 1
+ Space: 2
+ LowerBound: 3
+ UpperBound: 4
+ Kind: 5
+ Flags: 6
+ - Type: 128
+ Space: 32768
+ LowerBound: 8388608
+ UpperBound: 2147483648
+ Kind: 65535
+ Flags: 16776960
+ SigInputElements: []
+ SigOutputElements: []
+ SigPatchOrPrimElements: []
+ InputOutputMap:
+ - [ ]
+ - [ ]
+ - [ ]
+ - [ ]
+ - Name: DXIL
+ Size: 24
+ Program:
+ MajorVersion: 6
+ MinorVersion: 0
+ ShaderKind: 1
+ Size: 6
+ DXILMajorVersion: 0
+ DXILMinorVersion: 1
+ DXILSize: 0
+...
+
+# CHECK: Name: PSV0
+# CHECK: PSVInfo:
+# CHECK-NEXT: Version: 3
+# CHECK-NEXT: ShaderStage: 1
+# CHECK-NEXT: OutputPositionPresent: 1
+# CHECK-NEXT: MinimumWaveLaneCount: 0
+# CHECK-NEXT: MaximumWaveLaneCount: 4294967295
+# CHECK-NEXT: UsesViewID: 0
+# CHECK-NEXT: SigInputVectors: 0
+# CHECK-NEXT: SigOutputVectors: [ 8, 16, 32, 64 ]
+# CHECK-NEXT: NumThreadsX: 512
+# CHECK-NEXT: NumThreadsY: 1024
+# CHECK-NEXT: NumThreadsZ: 2048
+# CHECK-NEXT: EntryName: VSEntry
+# CHECK-NEXT: ResourceStride: 24
+# CHECK-NEXT: Resources:
+# CHECK-NEXT: - Type: 1
+# CHECK-NEXT: Space: 2
+# CHECK-NEXT: LowerBound: 3
+# CHECK-NEXT: UpperBound: 4
+# CHECK-NEXT: Kind: 5
+# CHECK-NEXT: Flags: 6
+# CHECK-NEXT: - Type: 128
+# CHECK-NEXT: Space: 32768
+# CHECK-NEXT: LowerBound: 8388608
+# CHECK-NEXT: UpperBound: 2147483648
+# CHECK-NEXT: Kind: 65535
+# CHECK-NEXT: Flags: 16776960
+# CHECK-NEXT: SigInputElements: []
+# CHECK-NEXT: SigOutputElements: []
+# CHECK-NEXT: SigPatchOrPrimElements: []
+# CHECK-NEXT: InputOutputMap:
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: - [ ]
+# CHECK-NEXT: Name
diff --git a/llvm/test/TableGen/ConcatenatedSubregs.td b/llvm/test/TableGen/ConcatenatedSubregs.td
index 5b354c94dca5..ea4e7f01a2e2 100644
--- a/llvm/test/TableGen/ConcatenatedSubregs.td
+++ b/llvm/test/TableGen/ConcatenatedSubregs.td
@@ -90,16 +90,19 @@ def TestTarget : Target;
// CHECK-LABEL: RegisterClass DRegs:
// CHECK-LABEL: SubRegIndex ssub1:
-// CHECK: Offset, Size: 16, 16
+// CHECK: Offset: { Default:16 }
+// CHECK: Size: { Default:16 }
// CHECK-LABEL: SubRegIndex sub0:
// CHECK-LABEL: SubRegIndex sub1:
// CHECK-LABEL: SubRegIndex sub2:
// Check inferred indexes:
// CHECK-LABEL: SubRegIndex ssub1_ssub2:
-// CHECK: Offset, Size: 16, 65535
+// CHECK: Offset: { Default:16 }
+// CHECK: Size: { Default:65535 }
// CHECK-LABEL: SubRegIndex ssub3_ssub4:
// CHECK-LABEL: SubRegIndex ssub0_ssub1_ssub2_ssub3:
-// CHECK: Offset, Size: 65535, 65535
+// CHECK: Offset: { Default:65535 }
+// CHECK: Size: { Default:65535 }
// CHECK-LABEL: SubRegIndex ssub1_ssub2_ssub3_ssub4:
// Check that all subregs are generated on some examples
diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
index 5cf4e044a0fb..0189d3d056fc 100644
--- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
+++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table.td
@@ -114,14 +114,17 @@ def MyCombiner: GICombiner<"GenMyCombiner", [
// CHECK-NEXT: void GenMyCombiner::runCustomAction(unsigned ApplyID, const MatcherState &State, NewMIVector &OutMIs) const {
// CHECK-NEXT: switch(ApplyID) {
// CHECK-NEXT: case GICXXCustomAction_CombineApplyGICombiner0:{
+// CHECK-NEXT: Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);
// CHECK-NEXT: APPLY
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-NEXT: case GICXXCustomAction_CombineApplyGICombiner1:{
+// CHECK-NEXT: Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);
// CHECK-NEXT: APPLY MatchInfos.MDInfo0, MatchInfos.MDInfo1
// CHECK-NEXT: return;
// CHECK-NEXT: }
// CHECK-NEXT: case GICXXCustomAction_CombineApplyGICombiner2:{
+// CHECK-NEXT: Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);
// CHECK-NEXT: APPLY State.MIs[1]->getOperand(1) State.MIs[0]->getOperand(1) OutMIs[0]
// CHECK-NEXT: return;
// CHECK-NEXT: }
diff --git a/llvm/test/TableGen/HwModeSubRegs.td b/llvm/test/TableGen/HwModeSubRegs.td
new file mode 100644
index 000000000000..2bf7a917979d
--- /dev/null
+++ b/llvm/test/TableGen/HwModeSubRegs.td
@@ -0,0 +1,75 @@
+// RUN: llvm-tblgen -gen-register-info -register-info-debug -I %p/../../include %s -o /dev/null 2>&1 | FileCheck %s
+include "llvm/Target/Target.td"
+
+def HasFeat : Predicate<"Subtarget->hasFeat()">;
+
+def TestMode : HwMode<"+feat1", [HasFeat]>;
+
+class MyReg<string n>
+ : Register<n> {
+ let Namespace = "Test";
+}
+class MyClass<int size, list<ValueType> types, dag registers>
+ : RegisterClass<"Test", types, size, registers> {
+ let Size = size;
+}
+
+def X0 : MyReg<"x0">;
+def X1 : MyReg<"x1">;
+def X2 : MyReg<"x2">;
+def X3 : MyReg<"x3">;
+def X4 : MyReg<"x4">;
+def X5 : MyReg<"x5">;
+def X6 : MyReg<"x6">;
+def X7 : MyReg<"x7">;
+def X8 : MyReg<"x8">;
+def X9 : MyReg<"x9">;
+def X10 : MyReg<"x10">;
+def X11 : MyReg<"x11">;
+def X12 : MyReg<"x12">;
+def X13 : MyReg<"x13">;
+def X14 : MyReg<"x14">;
+def X15 : MyReg<"x15">;
+
+def ModeVT : ValueTypeByHwMode<[DefaultMode, TestMode],
+ [i32, i64]>;
+let RegInfos = RegInfoByHwMode<[DefaultMode, TestMode],
+ [RegInfo<32,32,32>, RegInfo<64,64,64>]> in
+def XRegs : MyClass<32, [ModeVT], (sequence "X%u", 0, 15)>;
+
+def sub_even : SubRegIndex<32> {
+ let SubRegRanges = SubRegRangeByHwMode<[DefaultMode, TestMode],
+ [SubRegRange<32>, SubRegRange<64>]>;
+}
+def sub_odd : SubRegIndex<32, 32> {
+ let SubRegRanges = SubRegRangeByHwMode<[DefaultMode, TestMode],
+ [SubRegRange<32, 32>, SubRegRange<64, 64>]>;
+}
+
+def XPairs : RegisterTuples<[sub_even, sub_odd],
+ [(decimate (rotl XRegs, 0), 2),
+ (decimate (rotl XRegs, 1), 2)]>;
+
+let RegInfos = RegInfoByHwMode<[DefaultMode, TestMode],
+ [RegInfo<64,64,32>, RegInfo<128,128,64>]> in
+def XPairsClass : MyClass<64, [untyped], (add XPairs)>;
+
+def TestTarget : Target;
+
+// CHECK-LABEL: RegisterClass XRegs:
+// CHECK: SpillSize: { Default:32 TestMode:64 }
+// CHECK: SpillAlignment: { Default:32 TestMode:64 }
+// CHECK: Regs: X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
+
+// CHECK-LABEL: RegisterClass XPairsClass:
+// CHECK: SpillSize: { Default:64 TestMode:128 }
+// CHECK: SpillAlignment: { Default:32 TestMode:64 }
+// CHECK: CoveredBySubRegs: 1
+// CHECK: Regs: X0_X1 X2_X3 X4_X5 X6_X7 X8_X9 X10_X11 X12_X13 X14_X15
+
+// CHECK-LABEL: SubRegIndex sub_even:
+// CHECK: Offset: { Default:0 TestMode:0 }
+// CHECK: Size: { Default:32 TestMode:64 }
+// CHECK-LABEL: SubRegIndex sub_odd:
+// CHECK: Offset: { Default:32 TestMode:64 }
+// CHECK: Size: { Default:32 TestMode:64 }
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 7b65e483c39d..4ab5567f6287 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -756,6 +756,12 @@ static const X86FoldTableEntry Table1[] = {
{X86::IMUL64rri32_NF, X86::IMUL64rmi32_NF, 0},
{X86::IMUL64rri8, X86::IMUL64rmi8, 0},
{X86::IMUL64rri8_NF, X86::IMUL64rmi8_NF, 0},
+ {X86::IMULZU16rri, X86::IMULZU16rmi, 0},
+ {X86::IMULZU16rri8, X86::IMULZU16rmi8, 0},
+ {X86::IMULZU32rri, X86::IMULZU32rmi, 0},
+ {X86::IMULZU32rri8, X86::IMULZU32rmi8, 0},
+ {X86::IMULZU64rri32, X86::IMULZU64rmi32, 0},
+ {X86::IMULZU64rri8, X86::IMULZU64rmi8, 0},
{X86::INC16r_ND, X86::INC16m_ND, 0},
{X86::INC16r_NF_ND, X86::INC16m_NF_ND, 0},
{X86::INC32r_ND, X86::INC32m_ND, 0},
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index b6e6b2602495..a5d4c329446f 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -3028,7 +3028,7 @@ define bfloat @test_atomicrmw_fadd_bf16_global_system_align4(ptr addrspace(1) %p
define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bfloat %value) #2 {
; CI-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; CI-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; CI-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; CI-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; CI-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; CI-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3041,7 +3041,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; CI-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; CI-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; CI-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; CI-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; CI-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; CI-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; CI-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; CI-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3058,7 +3058,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; CI-NEXT: ret bfloat [[TMP7]]
;
; GFX9-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX9-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX9-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; GFX9-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX9-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX9-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3071,7 +3071,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX9-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX9-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX9-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX9-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX9-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX9-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX9-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX9-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3088,7 +3088,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX9-NEXT: ret bfloat [[TMP7]]
;
; GFX908-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX908-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX908-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; GFX908-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX908-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX908-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3101,7 +3101,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX908-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX908-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX908-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX908-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX908-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX908-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX908-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX908-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3118,7 +3118,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX908-NEXT: ret bfloat [[TMP7]]
;
; GFX90A-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX90A-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX90A-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6:[0-9]+]]
; GFX90A-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX90A-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX90A-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3131,7 +3131,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX90A-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX90A-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX90A-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX90A-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX90A-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX90A-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX90A-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX90A-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3148,7 +3148,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX90A-NEXT: ret bfloat [[TMP7]]
;
; GFX940-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX940-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX940-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6:[0-9]+]]
; GFX940-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX940-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX940-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3161,7 +3161,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX940-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX940-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX940-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX940-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX940-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX940-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX940-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX940-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
@@ -3178,7 +3178,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX940-NEXT: ret bfloat [[TMP7]]
;
; GFX11-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; GFX11-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
+; GFX11-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR6]]
; GFX11-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; GFX11-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; GFX11-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -3191,7 +3191,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; GFX11-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; GFX11-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; GFX11-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; GFX11-NEXT: [[NEW:%.*]] = fadd bfloat [[TMP4]], [[VALUE:%.*]]
+; GFX11-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR6]]
; GFX11-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; GFX11-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; GFX11-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
diff --git a/llvm/test/Transforms/Attributor/align.ll b/llvm/test/Transforms/Attributor/align.ll
index 5103b6f1f1e9..9880e53fd43a 100644
--- a/llvm/test/Transforms/Attributor/align.ll
+++ b/llvm/test/Transforms/Attributor/align.ll
@@ -11,10 +11,10 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; TEST 1
;;
;.
-; CHECK: @[[A1:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 8
-; CHECK: @[[A2:[a-zA-Z0-9_$"\\.-]+]] = common global i8 0, align 16
-; CHECK: @[[CND:[a-zA-Z0-9_$"\\.-]+]] = external global i1
-; CHECK: @[[G:[a-zA-Z0-9_$"\\.-]+]] = global i8 0, align 32
+; CHECK: @a1 = common global i8 0, align 8
+; CHECK: @a2 = common global i8 0, align 16
+; CHECK: @cnd = external global i1
+; CHECK: @G = global i8 0, align 32
;.
define ptr @test1(ptr align 8 %0) #0 {
; CHECK: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
@@ -158,18 +158,31 @@ define internal ptr @f1(ptr readnone %0) local_unnamed_addr #0 {
; Function Attrs: nounwind readnone ssp uwtable
define ptr @f2(ptr readnone %0) local_unnamed_addr #0 {
-; CHECK: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
-; CHECK-LABEL: define {{[^@]+}}@f2
-; CHECK-SAME: (ptr nofree readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
-; CHECK-NEXT: br i1 [[TMP2]], label [[TMP4:%.*]], label [[TMP3:%.*]]
-; CHECK: 3:
-; CHECK-NEXT: br label [[TMP5:%.*]]
-; CHECK: 4:
-; CHECK-NEXT: br label [[TMP5]]
-; CHECK: 5:
-; CHECK-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP0]], [[TMP3]] ], [ @a1, [[TMP4]] ]
-; CHECK-NEXT: ret ptr [[TMP6]]
+; TUNIT: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
+; TUNIT-LABEL: define {{[^@]+}}@f2
+; TUNIT-SAME: (ptr nofree readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; TUNIT-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
+; TUNIT-NEXT: br i1 [[TMP2]], label [[TMP4:%.*]], label [[TMP3:%.*]]
+; TUNIT: 3:
+; TUNIT-NEXT: br label [[TMP5:%.*]]
+; TUNIT: 4:
+; TUNIT-NEXT: br label [[TMP5]]
+; TUNIT: 5:
+; TUNIT-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP0]], [[TMP3]] ], [ @a1, [[TMP4]] ]
+; TUNIT-NEXT: ret ptr [[TMP6]]
+;
+; CGSCC: Function Attrs: mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable
+; CGSCC-LABEL: define {{[^@]+}}@f2
+; CGSCC-SAME: (ptr nofree readnone [[TMP0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CGSCC-NEXT: [[TMP2:%.*]] = icmp eq ptr [[TMP0]], null
+; CGSCC-NEXT: br i1 [[TMP2]], label [[TMP4:%.*]], label [[TMP3:%.*]]
+; CGSCC: 3:
+; CGSCC-NEXT: br label [[TMP5:%.*]]
+; CGSCC: 4:
+; CGSCC-NEXT: br label [[TMP5]]
+; CGSCC: 5:
+; CGSCC-NEXT: [[TMP6:%.*]] = phi ptr [ [[TMP0]], [[TMP3]] ], [ @a1, [[TMP4]] ]
+; CGSCC-NEXT: ret ptr [[TMP6]]
;
%2 = icmp eq ptr %0, null
br i1 %2, label %5, label %3
@@ -222,7 +235,7 @@ define align 4 ptr @test7() #0 {
; CGSCC: Function Attrs: mustprogress nofree noinline nosync nounwind willreturn memory(none) uwtable
; CGSCC-LABEL: define {{[^@]+}}@test7
; CGSCC-SAME: () #[[ATTR1:[0-9]+]] {
-; CGSCC-NEXT: [[C:%.*]] = tail call noundef nonnull align 8 dereferenceable(1) ptr @f1() #[[ATTR14:[0-9]+]]
+; CGSCC-NEXT: [[C:%.*]] = tail call noundef nonnull align 8 dereferenceable(1) ptr @f1() #[[ATTR15:[0-9]+]]
; CGSCC-NEXT: ret ptr [[C]]
;
%c = tail call ptr @f1(ptr align 8 dereferenceable(1) @a1)
@@ -933,7 +946,7 @@ define i32 @musttail_caller_1(ptr %p) {
; TUNIT-NEXT: [[C:%.*]] = load i1, ptr @cnd, align 1
; TUNIT-NEXT: br i1 [[C]], label [[MT:%.*]], label [[EXIT:%.*]]
; TUNIT: mt:
-; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef readonly [[P]]) #[[ATTR12:[0-9]+]]
+; TUNIT-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef readonly [[P]]) #[[ATTR13:[0-9]+]]
; TUNIT-NEXT: ret i32 [[V]]
; TUNIT: exit:
; TUNIT-NEXT: ret i32 0
@@ -944,7 +957,7 @@ define i32 @musttail_caller_1(ptr %p) {
; CGSCC-NEXT: [[C:%.*]] = load i1, ptr @cnd, align 1
; CGSCC-NEXT: br i1 [[C]], label [[MT:%.*]], label [[EXIT:%.*]]
; CGSCC: mt:
-; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P]]) #[[ATTR15:[0-9]+]]
+; CGSCC-NEXT: [[V:%.*]] = musttail call i32 @musttail_callee_1(ptr nocapture nofree noundef nonnull readonly dereferenceable(4) [[P]]) #[[ATTR16:[0-9]+]]
; CGSCC-NEXT: ret i32 [[V]]
; CGSCC: exit:
; CGSCC-NEXT: ret i32 0
@@ -1076,13 +1089,13 @@ define ptr @aligned_8_return_caller(ptr align(16) %a, i1 %c1, i1 %c2) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
; TUNIT-LABEL: define {{[^@]+}}@aligned_8_return_caller
; TUNIT-SAME: (ptr nofree readnone align 16 "no-capture-maybe-returned" [[A:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR10]] {
-; TUNIT-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR13:[0-9]+]]
+; TUNIT-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 "no-capture-maybe-returned" [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR14:[0-9]+]]
; TUNIT-NEXT: ret ptr [[R]]
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)
; CGSCC-LABEL: define {{[^@]+}}@aligned_8_return_caller
; CGSCC-SAME: (ptr nofree readnone align 16 [[A:%.*]], i1 noundef [[C1:%.*]], i1 [[C2:%.*]]) #[[ATTR13:[0-9]+]] {
-; CGSCC-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR14]]
+; CGSCC-NEXT: [[R:%.*]] = call align 8 ptr @aligned_8_return(ptr noalias nofree readnone align 16 [[A]], i1 noundef [[C1]], i1 [[C2]]) #[[ATTR15]]
; CGSCC-NEXT: ret ptr [[R]]
;
%r = call ptr @aligned_8_return(ptr %a, i1 %c1, i1 %c2)
@@ -1101,6 +1114,104 @@ entry:
ret i32 0
}
+define i64 @infer_align_atomicrmw(ptr align 4 %p) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_atomicrmw
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR12:[0-9]+]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[RET:%.*]] = atomicrmw add ptr [[ARRAYIDX1]], i64 4 seq_cst, align 16
+; TUNIT-NEXT: ret i64 [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_atomicrmw
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR14:[0-9]+]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[RET:%.*]] = atomicrmw add ptr [[ARRAYIDX1]], i64 4 seq_cst, align 16
+; CGSCC-NEXT: ret i64 [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %ret = atomicrmw add ptr %arrayidx1, i64 4 seq_cst, align 16
+ ret i64 %ret
+}
+
+define ptr @infer_align_atomicrmw_ptr(ptr align 4 %p, ptr %val) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_atomicrmw_ptr
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[VAL:%.*]]) #[[ATTR12]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[RET:%.*]] = atomicrmw xchg ptr [[ARRAYIDX1]], ptr [[VAL]] seq_cst, align 16
+; TUNIT-NEXT: ret ptr [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_atomicrmw_ptr
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[VAL:%.*]]) #[[ATTR14]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[RET:%.*]] = atomicrmw xchg ptr [[ARRAYIDX1]], ptr [[VAL]] seq_cst, align 16
+; CGSCC-NEXT: ret ptr [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %ret = atomicrmw xchg ptr %arrayidx1, ptr %val seq_cst, align 16
+ ret ptr %ret
+}
+
+define i64 @infer_align_cmpxchg(ptr align 4 %p) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_cmpxchg
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR12]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], i64 4, i64 1 seq_cst seq_cst, align 16
+; TUNIT-NEXT: [[RET:%.*]] = extractvalue { i64, i1 } [[CMPX]], 0
+; TUNIT-NEXT: ret i64 [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_cmpxchg
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]]) #[[ATTR14]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], i64 4, i64 1 seq_cst seq_cst, align 16
+; CGSCC-NEXT: [[RET:%.*]] = extractvalue { i64, i1 } [[CMPX]], 0
+; CGSCC-NEXT: ret i64 [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %cmpx = cmpxchg ptr %arrayidx1, i64 4, i64 1 seq_cst seq_cst, align 16
+ %ret = extractvalue { i64, i1 } %cmpx, 0
+ ret i64 %ret
+}
+
+define ptr @infer_align_cmpxchg_ptr(ptr align 4 %p, ptr %cmp0, ptr %cmp1) {
+; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; TUNIT-LABEL: define {{[^@]+}}@infer_align_cmpxchg_ptr
+; TUNIT-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[CMP0:%.*]], ptr nofree [[CMP1:%.*]]) #[[ATTR12]] {
+; TUNIT-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; TUNIT-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; TUNIT-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], ptr [[CMP0]], ptr [[CMP1]] seq_cst seq_cst, align 16
+; TUNIT-NEXT: [[RET:%.*]] = extractvalue { ptr, i1 } [[CMPX]], 0
+; TUNIT-NEXT: ret ptr [[RET]]
+;
+; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
+; CGSCC-LABEL: define {{[^@]+}}@infer_align_cmpxchg_ptr
+; CGSCC-SAME: (ptr nocapture nofree align 16 [[P:%.*]], ptr nofree [[CMP0:%.*]], ptr nofree [[CMP1:%.*]]) #[[ATTR14]] {
+; CGSCC-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i64, ptr [[P]], i64 1
+; CGSCC-NEXT: [[ARRAYIDX1:%.*]] = getelementptr i64, ptr [[ARRAYIDX0]], i64 3
+; CGSCC-NEXT: [[CMPX:%.*]] = cmpxchg ptr [[ARRAYIDX1]], ptr [[CMP0]], ptr [[CMP1]] seq_cst seq_cst, align 16
+; CGSCC-NEXT: [[RET:%.*]] = extractvalue { ptr, i1 } [[CMPX]], 0
+; CGSCC-NEXT: ret ptr [[RET]]
+;
+ %arrayidx0 = getelementptr i64, ptr %p, i64 1
+ %arrayidx1 = getelementptr i64, ptr %arrayidx0, i64 3
+ %cmpx = cmpxchg ptr %arrayidx1, ptr %cmp0, ptr %cmp1 seq_cst seq_cst, align 16
+ %ret = extractvalue { ptr, i1 } %cmpx, 0
+ ret ptr %ret
+}
+
declare void @implicit_cast_callee(i64)
attributes #0 = { nounwind uwtable noinline }
@@ -1119,8 +1230,9 @@ attributes #2 = { null_pointer_is_valid }
; TUNIT: attributes #[[ATTR9]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(write) }
; TUNIT: attributes #[[ATTR10]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
; TUNIT: attributes #[[ATTR11]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(read) }
-; TUNIT: attributes #[[ATTR12]] = { nofree nosync nounwind willreturn memory(read) }
-; TUNIT: attributes #[[ATTR13]] = { nofree nosync nounwind willreturn }
+; TUNIT: attributes #[[ATTR12]] = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) }
+; TUNIT: attributes #[[ATTR13]] = { nofree nosync nounwind willreturn memory(read) }
+; TUNIT: attributes #[[ATTR14]] = { nofree nosync nounwind willreturn }
;.
; CGSCC: attributes #[[ATTR0]] = { mustprogress nofree noinline norecurse nosync nounwind willreturn memory(none) uwtable }
; CGSCC: attributes #[[ATTR1]] = { mustprogress nofree noinline nosync nounwind willreturn memory(none) uwtable }
@@ -1136,6 +1248,7 @@ attributes #2 = { null_pointer_is_valid }
; CGSCC: attributes #[[ATTR11]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
; CGSCC: attributes #[[ATTR12]] = { mustprogress nofree nosync nounwind willreturn memory(read) }
; CGSCC: attributes #[[ATTR13]] = { mustprogress nofree nosync nounwind willreturn memory(none) }
-; CGSCC: attributes #[[ATTR14]] = { nofree nosync willreturn }
-; CGSCC: attributes #[[ATTR15]] = { nofree willreturn memory(read) }
+; CGSCC: attributes #[[ATTR14]] = { mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite) }
+; CGSCC: attributes #[[ATTR15]] = { nofree nosync willreturn }
+; CGSCC: attributes #[[ATTR16]] = { nofree willreturn memory(read) }
;.
diff --git a/llvm/test/Transforms/Attributor/nocapture-1.ll b/llvm/test/Transforms/Attributor/nocapture-1.ll
index 7d2f0a1351a4..f61388f71c46 100644
--- a/llvm/test/Transforms/Attributor/nocapture-1.ll
+++ b/llvm/test/Transforms/Attributor/nocapture-1.ll
@@ -524,13 +524,13 @@ define void @test6_2(ptr %x6_2, ptr %y6_2, ptr %z6_2) {
define void @test_cmpxchg(ptr %p) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test_cmpxchg
-; TUNIT-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR8:[0-9]+]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR8:[0-9]+]] {
; TUNIT-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], i32 0, i32 1 acquire monotonic, align 4
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test_cmpxchg
-; CGSCC-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR11:[0-9]+]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR11:[0-9]+]] {
; CGSCC-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], i32 0, i32 1 acquire monotonic, align 4
; CGSCC-NEXT: ret void
;
@@ -541,13 +541,13 @@ define void @test_cmpxchg(ptr %p) {
define void @test_cmpxchg_ptr(ptr %p, ptr %q) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test_cmpxchg_ptr
-; TUNIT-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR8]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull align 8 dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR8]] {
; TUNIT-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], ptr null, ptr [[Q]] acquire monotonic, align 8
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test_cmpxchg_ptr
-; CGSCC-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR11]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull align 8 dereferenceable(8) [[P:%.*]], ptr nofree [[Q:%.*]]) #[[ATTR11]] {
; CGSCC-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[P]], ptr null, ptr [[Q]] acquire monotonic, align 8
; CGSCC-NEXT: ret void
;
@@ -558,13 +558,13 @@ define void @test_cmpxchg_ptr(ptr %p, ptr %q) {
define void @test_atomicrmw(ptr %p) {
; TUNIT: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; TUNIT-LABEL: define {{[^@]+}}@test_atomicrmw
-; TUNIT-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR8]] {
+; TUNIT-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR8]] {
; TUNIT-NEXT: [[TMP1:%.*]] = atomicrmw add ptr [[P]], i32 1 seq_cst, align 4
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CGSCC-LABEL: define {{[^@]+}}@test_atomicrmw
-; CGSCC-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[P:%.*]]) #[[ATTR11]] {
+; CGSCC-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[P:%.*]]) #[[ATTR11]] {
; CGSCC-NEXT: [[TMP1:%.*]] = atomicrmw add ptr [[P]], i32 1 seq_cst, align 4
; CGSCC-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Attributor/nofpclass.ll b/llvm/test/Transforms/Attributor/nofpclass.ll
index 442464cde389..4df647cf3bb5 100644
--- a/llvm/test/Transforms/Attributor/nofpclass.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass.ll
@@ -1813,7 +1813,7 @@ define double @fpext(float nofpclass(inf nan) %arg) {
define float @atomicrmw_fadd(ptr %ptr, float nofpclass(inf nan) %val) {
; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn memory(argmem: readwrite)
; CHECK-LABEL: define float @atomicrmw_fadd
-; CHECK-SAME: (ptr nocapture nofree noundef nonnull dereferenceable(4) [[PTR:%.*]], float nofpclass(nan inf) [[VAL:%.*]]) #[[ATTR6:[0-9]+]] {
+; CHECK-SAME: (ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[PTR:%.*]], float nofpclass(nan inf) [[VAL:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr [[PTR]], float [[VAL]] seq_cst, align 4
; CHECK-NEXT: ret float [[RESULT]]
;
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
index 8dce9ef9fa43..701d867416a1 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
@@ -870,6 +870,33 @@ out:
ret i1 false
}
+define i1 @clamp_high1_or(i32 noundef %a) {
+; CHECK-LABEL: @clamp_high1_or(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[A:%.*]], 5
+; CHECK-NEXT: br i1 [[CMP]], label [[A_GUARD:%.*]], label [[OUT:%.*]]
+; CHECK: a_guard:
+; CHECK-NEXT: [[SEL_CMP:%.*]] = icmp eq i32 [[A]], 5
+; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[A]], 1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[SEL_CMP]], i32 5, i32 [[ADD]]
+; CHECK-NEXT: ret i1 false
+; CHECK: out:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = icmp sle i32 %a, 5
+ br i1 %cmp, label %a_guard, label %out
+
+a_guard:
+ %sel_cmp = icmp eq i32 %a, 5
+ %add = or disjoint i32 %a, 1
+ %sel = select i1 %sel_cmp, i32 5, i32 %add
+ %res = icmp eq i32 %sel, 6
+ ret i1 %res
+out:
+ ret i1 false
+}
+
define i1 @clamp_high2(i32 noundef %a) {
; CHECK-LABEL: @clamp_high2(
; CHECK-NEXT: entry:
@@ -897,6 +924,35 @@ out:
ret i1 false
}
+
+define i1 @clamp_high2_or_disjoint(i32 noundef %a) {
+; CHECK-LABEL: @clamp_high2_or_disjoint(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[A:%.*]], 5
+; CHECK-NEXT: br i1 [[CMP]], label [[A_GUARD:%.*]], label [[OUT:%.*]]
+; CHECK: a_guard:
+; CHECK-NEXT: [[SEL_CMP:%.*]] = icmp ne i32 [[A]], 5
+; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[A]], 1
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[SEL_CMP]], i32 [[ADD]], i32 5
+; CHECK-NEXT: ret i1 false
+; CHECK: out:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = icmp sle i32 %a, 5
+ br i1 %cmp, label %a_guard, label %out
+
+a_guard:
+ %sel_cmp = icmp ne i32 %a, 5
+ %add = or disjoint i32 %a, 1
+ %sel = select i1 %sel_cmp, i32 %add, i32 5
+ %res = icmp eq i32 %sel, 6
+ ret i1 %res
+out:
+ ret i1 false
+}
+
+
define i1 @clamp_high3(i32 noundef %a) {
; CHECK-LABEL: @clamp_high3(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
index 101820a4c65f..b5337b9ddc24 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll
@@ -587,6 +587,26 @@ define i1 @test_assume_cmp_with_offset(i64 %idx) {
ret i1 %cmp2
}
+define i1 @test_assume_cmp_with_offset_or(i64 %idx, i1 %other) {
+; CHECK-LABEL: @test_assume_cmp_with_offset_or(
+; CHECK-NEXT: [[IDX_OFF1:%.*]] = or disjoint i64 [[IDX:%.*]], 5
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i64 [[IDX_OFF1]], 10
+; CHECK-NEXT: br i1 [[CMP1]], label [[T:%.*]], label [[F:%.*]]
+; CHECK: T:
+; CHECK-NEXT: ret i1 true
+; CHECK: F:
+; CHECK-NEXT: ret i1 [[CMP2:%.*]]
+;
+ %idx.off1 = or disjoint i64 %idx, 5
+ %cmp1 = icmp ugt i64 %idx.off1, 10
+ br i1 %cmp1, label %T, label %F
+T:
+ %cmp2 = icmp ugt i64 %idx, 2
+ ret i1 %cmp2
+F:
+ ret i1 %other
+}
+
define void @test_cmp_phi(i8 %a) {
; CHECK-LABEL: @test_cmp_phi(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
index 7a4908d550a9..4d9a767e08d4 100644
--- a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
@@ -487,20 +487,3 @@ define void @dse_strncpy_test6(ptr noalias %out1, ptr noalias %out2, ptr noalias
%call = tail call ptr @strncpy(ptr %out2, ptr %in, i64 100)
ret void
}
-
-define i32 @test_strcat_with_allow_check(ptr %src) {
-; CHECK-LABEL: @test_strcat_with_allow_check(
-; CHECK-NEXT: [[ALLOW1:%.*]] = call i1 @llvm.allow.runtime.check(metadata !"test_check")
-; CHECK-NEXT: [[ALLOW2:%.*]] = call i1 @llvm.allow.ubsan.check(i8 7)
-; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr [[B:%.*]], i8 44, i64 16, i1 false)
-; CHECK-NEXT: [[RET:%.*]] = load i32, ptr [[B]], align 4
-; CHECK-NEXT: ret i32 [[RET]]
-;
- tail call void @llvm.memset.p0.i64(ptr %src, i8 42, i64 16, i1 false)
- %allow1 = call i1 @llvm.allow.runtime.check(metadata !"test_check")
- tail call void @llvm.memset.p0.i64(ptr %src, i8 43, i64 16, i1 false)
- %allow2 = call i1 @llvm.allow.ubsan.check(i8 7)
- tail call void @llvm.memset.p0.i64(ptr %src, i8 44, i64 16, i1 false)
- %ret = load i32, ptr %src, align 4
- ret i32 %ret
-}
diff --git a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll
index 77bbd5f0bb42..75130c27f473 100644
--- a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll
+++ b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptosi129.ll
@@ -27,7 +27,7 @@ define i129 @floattosi129(float %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -256
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -41,7 +41,7 @@ define i129 @floattosi129(float %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294967146
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -150
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -68,7 +68,7 @@ define i129 @doubletosi129(double %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -1152
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -82,7 +82,7 @@ define i129 @doubletosi129(double %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294966221
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -1075
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -110,7 +110,7 @@ define i129 @x86_fp80tosi129(x86_fp80 %a) {
; CHECK-NEXT: br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP10:%.*]] = add i129 [[TMP6]], -16512
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], 4294967167
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
; CHECK-NEXT: br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -124,7 +124,7 @@ define i129 @x86_fp80tosi129(x86_fp80 %a) {
; CHECK-NEXT: [[TMP16:%.*]] = mul i129 [[TMP15]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], 4294950801
+; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], -16495
; CHECK-NEXT: [[TMP18:%.*]] = shl i129 [[TMP8]], [[TMP17]]
; CHECK-NEXT: [[TMP19:%.*]] = mul i129 [[TMP18]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -151,7 +151,7 @@ define i129 @fp128tosi129(fp128 %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -16512
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -165,7 +165,7 @@ define i129 @fp128tosi129(fp128 %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294950801
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -16495
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
diff --git a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll
index 67d9eb533a3e..ed630d7934c3 100644
--- a/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll
+++ b/llvm/test/Transforms/ExpandLargeFpConvert/X86/expand-large-fp-convert-fptoui129.ll
@@ -27,7 +27,7 @@ define i129 @floattoui129(float %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -256
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -41,7 +41,7 @@ define i129 @floattoui129(float %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294967146
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -150
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -68,7 +68,7 @@ define i129 @doubletoui129(double %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -1152
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -82,7 +82,7 @@ define i129 @doubletoui129(double %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294966221
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -1075
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -110,7 +110,7 @@ define i129 @x86_fp80toui129(x86_fp80 %a) {
; CHECK-NEXT: br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP10:%.*]] = add i129 [[TMP6]], -16512
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], 4294967167
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
; CHECK-NEXT: br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -124,7 +124,7 @@ define i129 @x86_fp80toui129(x86_fp80 %a) {
; CHECK-NEXT: [[TMP16:%.*]] = mul i129 [[TMP15]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], 4294950801
+; CHECK-NEXT: [[TMP17:%.*]] = add i129 [[TMP6]], -16495
; CHECK-NEXT: [[TMP18:%.*]] = shl i129 [[TMP8]], [[TMP17]]
; CHECK-NEXT: [[TMP19:%.*]] = mul i129 [[TMP18]], [[TMP4]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
@@ -151,7 +151,7 @@ define i129 @fp128toui129(fp128 %a) {
; CHECK-NEXT: br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
; CHECK: fp-to-i-if-end:
; CHECK-NEXT: [[TMP9:%.*]] = add i129 [[TMP5]], -16512
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], 4294967167
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
; CHECK-NEXT: br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
; CHECK: fp-to-i-if-then5:
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
@@ -165,7 +165,7 @@ define i129 @fp128toui129(fp128 %a) {
; CHECK-NEXT: [[TMP15:%.*]] = mul i129 [[TMP14]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
; CHECK: fp-to-i-if-else:
-; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], 4294950801
+; CHECK-NEXT: [[TMP16:%.*]] = add i129 [[TMP5]], -16495
; CHECK-NEXT: [[TMP17:%.*]] = shl i129 [[TMP7]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i129 [[TMP17]], [[TMP3]]
; CHECK-NEXT: br label [[FP_TO_I_CLEANUP]]
diff --git a/llvm/test/Transforms/Float2Int/basic.ll b/llvm/test/Transforms/Float2Int/basic.ll
index 2854a83179b7..32f5ca2a053c 100644
--- a/llvm/test/Transforms/Float2Int/basic.ll
+++ b/llvm/test/Transforms/Float2Int/basic.ll
@@ -1,16 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes='float2int' -S | FileCheck %s
+; RUN: opt < %s -passes='float2int' -S | FileCheck %s -check-prefixes=CHECK,NONE
+; RUN: opt < %s -passes='float2int' -S --data-layout="n64" | FileCheck %s -check-prefixes=CHECK,ONLY64
+; RUN: opt < %s -passes='float2int' -S --data-layout="n8:16:32:64"| FileCheck %s -check-prefixes=CHECK,MULTIPLE
+; RUN: opt < %s -passes=float2int -S --data-layout="e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"| FileCheck %s -check-prefixes=CHECK,PR-79158
;
; Positive tests
;
define i16 @simple1(i8 %a) {
-; CHECK-LABEL: @simple1(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
-; CHECK-NEXT: ret i16 [[TMP2]]
+; NONE-LABEL: @simple1(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 1
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
+; NONE-NEXT: ret i16 [[TMP2]]
+;
+; ONLY64-LABEL: @simple1(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = add i64 [[TMP1]], 1
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i16
+; ONLY64-NEXT: ret i16 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple1(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 1
+; MULTIPLE-NEXT: ret i16 [[T21]]
+;
+; PR-79158-LABEL: @simple1(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 1
+; PR-79158-NEXT: ret i16 [[T21]]
;
%t1 = uitofp i8 %a to float
%t2 = fadd float %t1, 1.0
@@ -19,11 +38,29 @@ define i16 @simple1(i8 %a) {
}
define i8 @simple2(i8 %a) {
-; CHECK-LABEL: @simple2(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i8
-; CHECK-NEXT: ret i8 [[TMP2]]
+; NONE-LABEL: @simple2(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i8
+; NONE-NEXT: ret i8 [[TMP2]]
+;
+; ONLY64-LABEL: @simple2(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = sub i64 [[TMP1]], 1
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i8
+; ONLY64-NEXT: ret i8 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple2(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; MULTIPLE-NEXT: [[TMP2:%.*]] = trunc i16 [[T21]] to i8
+; MULTIPLE-NEXT: ret i8 [[TMP2]]
+;
+; PR-79158-LABEL: @simple2(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; PR-79158-NEXT: [[TMP2:%.*]] = trunc i16 [[T21]] to i8
+; PR-79158-NEXT: ret i8 [[TMP2]]
;
%t1 = uitofp i8 %a to float
%t2 = fsub float %t1, 1.0
@@ -32,10 +69,28 @@ define i8 @simple2(i8 %a) {
}
define i32 @simple3(i8 %a) {
-; CHECK-LABEL: @simple3(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
-; CHECK-NEXT: ret i32 [[T21]]
+; NONE-LABEL: @simple3(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = sub i32 [[TMP1]], 1
+; NONE-NEXT: ret i32 [[T21]]
+;
+; ONLY64-LABEL: @simple3(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = sub i64 [[TMP1]], 1
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i32
+; ONLY64-NEXT: ret i32 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple3(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i16 [[T21]] to i32
+; MULTIPLE-NEXT: ret i32 [[TMP2]]
+;
+; PR-79158-LABEL: @simple3(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = sub i16 [[TMP1]], 1
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i16 [[T21]] to i32
+; PR-79158-NEXT: ret i32 [[TMP2]]
;
%t1 = uitofp i8 %a to float
%t2 = fsub float %t1, 1.0
@@ -44,11 +99,29 @@ define i32 @simple3(i8 %a) {
}
define i1 @cmp(i8 %a, i8 %b) {
-; CHECK-LABEL: @cmp(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[T31:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: ret i1 [[T31]]
+; NONE-LABEL: @cmp(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[T31:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]]
+; NONE-NEXT: ret i1 [[T31]]
+;
+; ONLY64-LABEL: @cmp(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[T31:%.*]] = icmp slt i64 [[TMP1]], [[TMP2]]
+; ONLY64-NEXT: ret i1 [[T31]]
+;
+; MULTIPLE-LABEL: @cmp(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; MULTIPLE-NEXT: [[T31:%.*]] = icmp slt i16 [[TMP1]], [[TMP2]]
+; MULTIPLE-NEXT: ret i1 [[T31]]
+;
+; PR-79158-LABEL: @cmp(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; PR-79158-NEXT: [[T31:%.*]] = icmp slt i16 [[TMP1]], [[TMP2]]
+; PR-79158-NEXT: ret i1 [[T31]]
;
%t1 = uitofp i8 %a to float
%t2 = uitofp i8 %b to float
@@ -70,12 +143,34 @@ define i32 @simple4(i32 %a) {
}
define i32 @simple5(i8 %a, i8 %b) {
-; CHECK-LABEL: @simple5(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
-; CHECK-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
-; CHECK-NEXT: ret i32 [[T42]]
+; NONE-LABEL: @simple5(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
+; NONE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; NONE-NEXT: ret i32 [[T42]]
+;
+; ONLY64-LABEL: @simple5(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[T31:%.*]] = add i64 [[TMP1]], 1
+; ONLY64-NEXT: [[T42:%.*]] = mul i64 [[T31]], [[TMP2]]
+; ONLY64-NEXT: [[TMP3:%.*]] = trunc i64 [[T42]] to i32
+; ONLY64-NEXT: ret i32 [[TMP3]]
+;
+; MULTIPLE-LABEL: @simple5(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; MULTIPLE-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
+; MULTIPLE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; MULTIPLE-NEXT: ret i32 [[T42]]
+;
+; PR-79158-LABEL: @simple5(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; PR-79158-NEXT: [[T31:%.*]] = add i32 [[TMP1]], 1
+; PR-79158-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; PR-79158-NEXT: ret i32 [[T42]]
;
%t1 = uitofp i8 %a to float
%t2 = uitofp i8 %b to float
@@ -86,12 +181,34 @@ define i32 @simple5(i8 %a, i8 %b) {
}
define i32 @simple6(i8 %a, i8 %b) {
-; CHECK-LABEL: @simple6(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
-; CHECK-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
-; CHECK-NEXT: ret i32 [[T42]]
+; NONE-LABEL: @simple6(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
+; NONE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; NONE-NEXT: ret i32 [[T42]]
+;
+; ONLY64-LABEL: @simple6(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[T31:%.*]] = sub i64 0, [[TMP1]]
+; ONLY64-NEXT: [[T42:%.*]] = mul i64 [[T31]], [[TMP2]]
+; ONLY64-NEXT: [[TMP3:%.*]] = trunc i64 [[T42]] to i32
+; ONLY64-NEXT: ret i32 [[TMP3]]
+;
+; MULTIPLE-LABEL: @simple6(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; MULTIPLE-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
+; MULTIPLE-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; MULTIPLE-NEXT: ret i32 [[T42]]
+;
+; PR-79158-LABEL: @simple6(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; PR-79158-NEXT: [[T31:%.*]] = sub i32 0, [[TMP1]]
+; PR-79158-NEXT: [[T42:%.*]] = mul i32 [[T31]], [[TMP2]]
+; PR-79158-NEXT: ret i32 [[T42]]
;
%t1 = uitofp i8 %a to float
%t2 = uitofp i8 %b to float
@@ -105,15 +222,48 @@ define i32 @simple6(i8 %a, i8 %b) {
; cause failure of the other.
define i32 @multi1(i8 %a, i8 %b, i8 %c, float %d) {
-; CHECK-LABEL: @multi1(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
-; CHECK-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
-; CHECK-NEXT: [[X1:%.*]] = add i32 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
-; CHECK-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
-; CHECK-NEXT: [[R:%.*]] = add i32 [[X1]], [[W]]
-; CHECK-NEXT: ret i32 [[R]]
+; NONE-LABEL: @multi1(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i32
+; NONE-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; NONE-NEXT: [[X1:%.*]] = add i32 [[TMP1]], [[TMP2]]
+; NONE-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; NONE-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; NONE-NEXT: [[R:%.*]] = add i32 [[X1]], [[W]]
+; NONE-NEXT: ret i32 [[R]]
+;
+; ONLY64-LABEL: @multi1(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i64
+; ONLY64-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; ONLY64-NEXT: [[X1:%.*]] = add i64 [[TMP1]], [[TMP2]]
+; ONLY64-NEXT: [[TMP3:%.*]] = trunc i64 [[X1]] to i32
+; ONLY64-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; ONLY64-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; ONLY64-NEXT: [[R:%.*]] = add i32 [[TMP3]], [[W]]
+; ONLY64-NEXT: ret i32 [[R]]
+;
+; MULTIPLE-LABEL: @multi1(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; MULTIPLE-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; MULTIPLE-NEXT: [[X1:%.*]] = add i16 [[TMP1]], [[TMP2]]
+; MULTIPLE-NEXT: [[TMP3:%.*]] = zext i16 [[X1]] to i32
+; MULTIPLE-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; MULTIPLE-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; MULTIPLE-NEXT: [[R:%.*]] = add i32 [[TMP3]], [[W]]
+; MULTIPLE-NEXT: ret i32 [[R]]
+;
+; PR-79158-LABEL: @multi1(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16
+; PR-79158-NEXT: [[FC:%.*]] = uitofp i8 [[C:%.*]] to float
+; PR-79158-NEXT: [[X1:%.*]] = add i16 [[TMP1]], [[TMP2]]
+; PR-79158-NEXT: [[TMP3:%.*]] = zext i16 [[X1]] to i32
+; PR-79158-NEXT: [[Z:%.*]] = fadd float [[FC]], [[D:%.*]]
+; PR-79158-NEXT: [[W:%.*]] = fptoui float [[Z]] to i32
+; PR-79158-NEXT: [[R:%.*]] = add i32 [[TMP3]], [[W]]
+; PR-79158-NEXT: ret i32 [[R]]
;
%fa = uitofp i8 %a to float
%fb = uitofp i8 %b to float
@@ -127,11 +277,27 @@ define i32 @multi1(i8 %a, i8 %b, i8 %c, float %d) {
}
define i16 @simple_negzero(i8 %a) {
-; CHECK-LABEL: @simple_negzero(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
-; CHECK-NEXT: ret i16 [[TMP2]]
+; NONE-LABEL: @simple_negzero(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = add i32 [[TMP1]], 0
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
+; NONE-NEXT: ret i16 [[TMP2]]
+;
+; ONLY64-LABEL: @simple_negzero(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = add i64 [[TMP1]], 0
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i16
+; ONLY64-NEXT: ret i16 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple_negzero(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 0
+; MULTIPLE-NEXT: ret i16 [[T21]]
+;
+; PR-79158-LABEL: @simple_negzero(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = add i16 [[TMP1]], 0
+; PR-79158-NEXT: ret i16 [[T21]]
;
%t1 = uitofp i8 %a to float
%t2 = fadd fast float %t1, -0.0
@@ -140,12 +306,33 @@ define i16 @simple_negzero(i8 %a) {
}
define i32 @simple_negative(i8 %call) {
-; CHECK-LABEL: @simple_negative(
-; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i32
-; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[TMP1]], -3
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[MUL1]] to i8
-; CHECK-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
-; CHECK-NEXT: ret i32 [[CONV3]]
+; NONE-LABEL: @simple_negative(
+; NONE-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i32
+; NONE-NEXT: [[MUL1:%.*]] = mul i32 [[TMP1]], -3
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[MUL1]] to i8
+; NONE-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; NONE-NEXT: ret i32 [[CONV3]]
+;
+; ONLY64-LABEL: @simple_negative(
+; ONLY64-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i64
+; ONLY64-NEXT: [[MUL1:%.*]] = mul i64 [[TMP1]], -3
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[MUL1]] to i8
+; ONLY64-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; ONLY64-NEXT: ret i32 [[CONV3]]
+;
+; MULTIPLE-LABEL: @simple_negative(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i16
+; MULTIPLE-NEXT: [[MUL1:%.*]] = mul i16 [[TMP1]], -3
+; MULTIPLE-NEXT: [[TMP2:%.*]] = trunc i16 [[MUL1]] to i8
+; MULTIPLE-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; MULTIPLE-NEXT: ret i32 [[CONV3]]
+;
+; PR-79158-LABEL: @simple_negative(
+; PR-79158-NEXT: [[TMP1:%.*]] = sext i8 [[CALL:%.*]] to i16
+; PR-79158-NEXT: [[MUL1:%.*]] = mul i16 [[TMP1]], -3
+; PR-79158-NEXT: [[TMP2:%.*]] = trunc i16 [[MUL1]] to i8
+; PR-79158-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32
+; PR-79158-NEXT: ret i32 [[CONV3]]
;
%conv1 = sitofp i8 %call to float
%mul = fmul float %conv1, -3.000000e+00
@@ -155,11 +342,27 @@ define i32 @simple_negative(i8 %call) {
}
define i16 @simple_fneg(i8 %a) {
-; CHECK-LABEL: @simple_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
-; CHECK-NEXT: [[T21:%.*]] = sub i32 0, [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
-; CHECK-NEXT: ret i16 [[TMP2]]
+; NONE-LABEL: @simple_fneg(
+; NONE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; NONE-NEXT: [[T21:%.*]] = sub i32 0, [[TMP1]]
+; NONE-NEXT: [[TMP2:%.*]] = trunc i32 [[T21]] to i16
+; NONE-NEXT: ret i16 [[TMP2]]
+;
+; ONLY64-LABEL: @simple_fneg(
+; ONLY64-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i64
+; ONLY64-NEXT: [[T21:%.*]] = sub i64 0, [[TMP1]]
+; ONLY64-NEXT: [[TMP2:%.*]] = trunc i64 [[T21]] to i16
+; ONLY64-NEXT: ret i16 [[TMP2]]
+;
+; MULTIPLE-LABEL: @simple_fneg(
+; MULTIPLE-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; MULTIPLE-NEXT: [[T21:%.*]] = sub i16 0, [[TMP1]]
+; MULTIPLE-NEXT: ret i16 [[T21]]
+;
+; PR-79158-LABEL: @simple_fneg(
+; PR-79158-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16
+; PR-79158-NEXT: [[T21:%.*]] = sub i16 0, [[TMP1]]
+; PR-79158-NEXT: ret i16 [[T21]]
;
%t1 = uitofp i8 %a to float
%t2 = fneg fast float %t1
diff --git a/llvm/test/Transforms/Float2Int/pr79158.ll b/llvm/test/Transforms/Float2Int/pr79158.ll
new file mode 100644
index 000000000000..639a8ac9934f
--- /dev/null
+++ b/llvm/test/Transforms/Float2Int/pr79158.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=float2int -S | FileCheck %s -check-prefixes=CHECK,NONE
+; RUN: opt < %s -passes=float2int -S --data-layout="n64" | FileCheck %s -check-prefixes=CHECK,ONLY64
+; RUN: opt < %s -passes=float2int -S --data-layout="n8:16:32:64"| FileCheck %s -check-prefixes=CHECK,MULTIPLE
+; RUN: opt < %s -passes=float2int -S --data-layout="e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"| FileCheck %s -check-prefixes=CHECK,PR-79158
+
+define i32 @pr79158(i32 %x) {
+; CHECK-LABEL: define i32 @pr79158(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i64
+; CHECK-NEXT: [[MUL1:%.*]] = mul i64 [[TMP0]], 4294967295
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[MUL1]] to i32
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+entry:
+ %cmp = icmp sgt i32 %x, 0
+ %conv = uitofp i1 %cmp to double
+ %mul = fmul double %conv, 0x41EFFFFFFFE00000
+ %conv1 = fptoui double %mul to i32
+ ret i32 %conv1
+}
+
+define i32 @pr79158_2(i32 %x) {
+; NONE-LABEL: define i32 @pr79158_2(
+; NONE-SAME: i32 [[X:%.*]]) {
+; NONE-NEXT: entry:
+; NONE-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; NONE-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i32
+; NONE-NEXT: [[MUL1:%.*]] = mul i32 [[TMP0]], 255
+; NONE-NEXT: [[TMP1:%.*]] = trunc i32 [[MUL1]] to i8
+; NONE-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; NONE-NEXT: ret i32 [[CONV2]]
+;
+; ONLY64-LABEL: define i32 @pr79158_2(
+; ONLY64-SAME: i32 [[X:%.*]]) {
+; ONLY64-NEXT: entry:
+; ONLY64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; ONLY64-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i64
+; ONLY64-NEXT: [[MUL1:%.*]] = mul i64 [[TMP0]], 255
+; ONLY64-NEXT: [[TMP1:%.*]] = trunc i64 [[MUL1]] to i8
+; ONLY64-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; ONLY64-NEXT: ret i32 [[CONV2]]
+;
+; MULTIPLE-LABEL: define i32 @pr79158_2(
+; MULTIPLE-SAME: i32 [[X:%.*]]) {
+; MULTIPLE-NEXT: entry:
+; MULTIPLE-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; MULTIPLE-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i16
+; MULTIPLE-NEXT: [[MUL1:%.*]] = mul i16 [[TMP0]], 255
+; MULTIPLE-NEXT: [[TMP1:%.*]] = trunc i16 [[MUL1]] to i8
+; MULTIPLE-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; MULTIPLE-NEXT: ret i32 [[CONV2]]
+;
+; PR-79158-LABEL: define i32 @pr79158_2(
+; PR-79158-SAME: i32 [[X:%.*]]) {
+; PR-79158-NEXT: entry:
+; PR-79158-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; PR-79158-NEXT: [[TMP0:%.*]] = zext i1 [[CMP]] to i16
+; PR-79158-NEXT: [[MUL1:%.*]] = mul i16 [[TMP0]], 255
+; PR-79158-NEXT: [[TMP1:%.*]] = trunc i16 [[MUL1]] to i8
+; PR-79158-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; PR-79158-NEXT: ret i32 [[CONV2]]
+;
+entry:
+ %cmp = icmp sgt i32 %x, 0
+ %conv = uitofp i1 %cmp to float
+ %mul = fmul float %conv, 2.550000e+02
+ %conv1 = fptoui float %mul to i8
+ %conv2 = zext i8 %conv1 to i32
+ ret i32 %conv2
+}
diff --git a/llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll b/llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll
new file mode 100644
index 000000000000..b3b5916c7f26
--- /dev/null
+++ b/llvm/test/Transforms/GVNHoist/hoist-merge-geps.ll
@@ -0,0 +1,63 @@
+; RUN: opt -S -passes=gvn-hoist < %s | FileCheck %s
+
+define dso_local void @func(i32 noundef %a, ptr noundef %b) !dbg !10 {
+; Check the merged debug location of hoisted GEP
+; CHECK: entry
+; CHECK: %{{[a-zA-Z0-9_]*}} = getelementptr {{.*}} !dbg [[MERGED_DL:![0-9]+]]
+; CHECK: [[MERGED_DL]] = !DILocation(line: 0, scope: !{{[0-9]+}})
+entry:
+ tail call void @llvm.dbg.value(metadata i32 %a, metadata !16, metadata !DIExpression()), !dbg !17
+ tail call void @llvm.dbg.value(metadata ptr %b, metadata !18, metadata !DIExpression()), !dbg !17
+ %tobool = icmp ne i32 %a, 0, !dbg !19
+ br i1 %tobool, label %if.then, label %if.else, !dbg !21
+
+if.then: ; preds = %entry
+ %arrayidx = getelementptr inbounds i32, ptr %b, i64 1, !dbg !22
+ store i32 1, ptr %arrayidx, align 4, !dbg !24
+ br label %if.end, !dbg !25
+
+if.else: ; preds = %entry
+ %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 1, !dbg !26
+ store i32 1, ptr %arrayidx1, align 4, !dbg !28
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ ret void, !dbg !29
+}
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5, !6, !7, !8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "main.c", directory: "/root/llvm-test/GVNHoist")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 8, !"PIC Level", i32 2}
+!6 = !{i32 7, !"PIE Level", i32 2}
+!7 = !{i32 7, !"uwtable", i32 2}
+!8 = !{i32 7, !"frame-pointer", i32 2}
+!10 = distinct !DISubprogram(name: "func", scope: !1, file: !1, line: 1, type: !11, scopeLine: 1, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !15)
+!11 = !DISubroutineType(types: !12)
+!12 = !{null, !13, !14}
+!13 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!14 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !13, size: 64)
+!15 = !{}
+!16 = !DILocalVariable(name: "a", arg: 1, scope: !10, file: !1, line: 1, type: !13)
+!17 = !DILocation(line: 0, scope: !10)
+!18 = !DILocalVariable(name: "b", arg: 2, scope: !10, file: !1, line: 1, type: !14)
+!19 = !DILocation(line: 2, column: 9, scope: !20)
+!20 = distinct !DILexicalBlock(scope: !10, file: !1, line: 2, column: 9)
+!21 = !DILocation(line: 2, column: 9, scope: !10)
+!22 = !DILocation(line: 3, column: 9, scope: !23)
+!23 = distinct !DILexicalBlock(scope: !20, file: !1, line: 2, column: 12)
+!24 = !DILocation(line: 3, column: 14, scope: !23)
+!25 = !DILocation(line: 4, column: 5, scope: !23)
+!26 = !DILocation(line: 5, column: 9, scope: !27)
+!27 = distinct !DILexicalBlock(scope: !20, file: !1, line: 4, column: 12)
+!28 = !DILocation(line: 5, column: 14, scope: !27)
+!29 = !DILocation(line: 7, column: 1, scope: !10)
diff --git a/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll b/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll
index 188210782edd..4c5a448d12c4 100644
--- a/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll
+++ b/llvm/test/Transforms/GlobalOpt/inalloca-varargs.ll
@@ -23,7 +23,7 @@ define internal i32 @i(ptr inalloca(ptr) %a, ...) {
; CHECK-LABEL: define {{[^@]+}}@i
; CHECK-SAME: (ptr inalloca(ptr) [[A:%.*]], ...) unnamed_addr {
; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[AP]], align 4
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
; CHECK-NEXT: ret i32 [[L]]
diff --git a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
index ef365d6eaddb..38dfd25e039e 100644
--- a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
@@ -17,10 +17,10 @@ define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.va_copy(ptr [[V:%.*]], ptr [[AP]])
-; CHECK-NEXT: call void @llvm.va_end(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
@@ -52,10 +52,10 @@ define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.va_copy(ptr [[V:%.*]], ptr [[AP]])
-; CHECK-NEXT: call void @llvm.va_end(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
diff --git a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
index 9f565de96057..2d526086fae4 100644
--- a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
+++ b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
@@ -51,7 +51,7 @@ entry:
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
@@ -70,7 +70,7 @@ entry:
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
-; CHECK-NEXT: call void @llvm.va_start(ptr [[AP]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
@@ -84,8 +84,8 @@ entry:
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
-; CHECK-NEXT: call void @llvm.va_copy(ptr [[TMP0]], ptr [[TMP1]])
-; CHECK-NEXT: call void @llvm.va_end(ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[TMP0]], ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
diff --git a/llvm/test/Transforms/Inline/RISCV/inline-target-features.ll b/llvm/test/Transforms/Inline/RISCV/inline-target-features.ll
new file mode 100644
index 000000000000..b626a229a737
--- /dev/null
+++ b/llvm/test/Transforms/Inline/RISCV/inline-target-features.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -mtriple=riscv64-unknown-linux-gnu -S -passes=inline | FileCheck %s
+; RUN: opt < %s -mtriple=riscv64-unknown-linux-gnu -S -passes='cgscc(inline)' | FileCheck %s
+; Check that we only inline when we have compatible target attributes.
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+define i32 @foo() #0 {
+entry:
+ %call = call i32 (...) @baz()
+ ret i32 %call
+; CHECK-LABEL: foo
+; CHECK: call i32 (...) @baz()
+}
+declare i32 @baz(...) #0
+
+define i32 @bar() #1 {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+; CHECK-LABEL: bar
+; CHECK: call i32 (...) @baz()
+}
+
+define i32 @qux() #0 {
+entry:
+ %call = call i32 @bar()
+ ret i32 %call
+; CHECK-LABEL: qux
+; CHECK: call i32 @bar()
+}
+
+attributes #0 = { "target-cpu"="generic-rv64" "target-features"="+f,+d" }
+attributes #1 = { "target-cpu"="generic-rv64" "target-features"="+f,+d,+m,+v" }
diff --git a/llvm/test/Transforms/Inline/RISCV/lit.local.cfg b/llvm/test/Transforms/Inline/RISCV/lit.local.cfg
new file mode 100644
index 000000000000..17351748513d
--- /dev/null
+++ b/llvm/test/Transforms/Inline/RISCV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "RISCV" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/Inline/update_invoke_prof.ll b/llvm/test/Transforms/Inline/update_invoke_prof.ll
new file mode 100644
index 000000000000..5f09c7cf8fe0
--- /dev/null
+++ b/llvm/test/Transforms/Inline/update_invoke_prof.ll
@@ -0,0 +1,64 @@
+; A pre-commit test to show that branch weights and value profiles associated with invoke are not updated.
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -S | FileCheck %s
+
+declare i32 @__gxx_personality_v0(...)
+
+define void @caller(ptr %func) personality ptr @__gxx_personality_v0 !prof !15 {
+ call void @callee(ptr %func), !prof !16
+ ret void
+}
+
+declare void @inner_callee(ptr %func)
+
+define void @callee(ptr %func) personality ptr @__gxx_personality_v0 !prof !17 {
+ invoke void %func()
+ to label %next unwind label %lpad, !prof !18
+
+next:
+ invoke void @inner_callee(ptr %func)
+ to label %ret unwind label %lpad, !prof !19
+
+lpad:
+ %exn = landingpad {ptr, i32}
+ cleanup
+ unreachable
+
+ret:
+ ret void
+}
+
+!llvm.module.flags = !{!1}
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"SampleProfile"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 10}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 2000}
+!8 = !{!"NumCounts", i64 2}
+!9 = !{!"NumFunctions", i64 2}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!15 = !{!"function_entry_count", i64 1000}
+!16 = !{!"branch_weights", i64 1000}
+!17 = !{!"function_entry_count", i32 1500}
+!18 = !{!"VP", i32 0, i64 1500, i64 123, i64 900, i64 456, i64 600}
+!19 = !{!"branch_weights", i32 1500}
+
+; CHECK-LABEL: @caller(
+; CHECK: invoke void %func(
+; CHECK-NEXT: {{.*}} !prof ![[PROF1:[0-9]+]]
+; CHECK: invoke void @inner_callee(
+; CHECK-NEXT: {{.*}} !prof ![[PROF2:[0-9]+]]
+
+; CHECK-LABEL: @callee(
+; CHECK: invoke void %func(
+; CHECK-NEXT: {{.*}} !prof ![[PROF1]]
+; CHECK: invoke void @inner_callee(
+; CHECK-NEXT: {{.*}} !prof ![[PROF2]]
+
+; CHECK: ![[PROF1]] = !{!"VP", i32 0, i64 1500, i64 123, i64 900, i64 456, i64 600}
+; CHECK: ![[PROF2]] = !{!"branch_weights", i32 1500}
diff --git a/llvm/test/Transforms/Inline/update_value_profile.ll b/llvm/test/Transforms/Inline/update_value_profile.ll
new file mode 100644
index 000000000000..daa95e93b68e
--- /dev/null
+++ b/llvm/test/Transforms/Inline/update_value_profile.ll
@@ -0,0 +1,81 @@
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=100 -S | FileCheck %s
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; When 'callee' is inlined into caller1 and caller2, the indirect call value
+; profiles of the inlined copy should be scaled based on callers' profiles,
+; and the indirect call value profiles in 'callee' should be updated.
+define i32 @callee(ptr %0, i32 %1) !prof !20 {
+; CHECK-LABEL: define i32 @callee(
+; CHECK-SAME: ptr [[TMP0:%.*]], i32 [[TMP1:%.*]]) !prof [[PROF0:![0-9]+]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 8
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 [[TMP5]](ptr [[TMP0]], i32 [[TMP1]]), !prof [[PROF1:![0-9]+]]
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+ %3 = load ptr, ptr %0
+ %5 = getelementptr inbounds i8, ptr %3, i64 8
+ %6 = load ptr, ptr %5
+ %7 = tail call i32 %6(ptr %0, i32 %1), !prof !17
+ ret i32 %7
+}
+
+define i32 @caller1(i32 %0) !prof !18 {
+; CHECK-LABEL: define i32 @caller1(
+; CHECK-SAME: i32 [[TMP0:%.*]]) !prof [[PROF2:![0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = tail call ptr @_Z10createTypei(i32 [[TMP0]])
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 8
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 [[TMP5]](ptr [[TMP2]], i32 [[TMP0]]), !prof [[PROF3:![0-9]+]]
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+ %2 = tail call ptr @_Z10createTypei(i32 %0)
+ %3 = tail call i32 @callee(ptr %2, i32 %0)
+ ret i32 %3
+}
+
+define i32 @caller2(i32 %0) !prof !19 {
+; CHECK-LABEL: define i32 @caller2(
+; CHECK-SAME: i32 [[TMP0:%.*]]) !prof [[PROF4:![0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = tail call ptr @_Z10createTypei(i32 [[TMP0]])
+; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 8
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 [[TMP5]](ptr [[TMP2]], i32 [[TMP0]]), !prof [[PROF5:![0-9]+]]
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+ %2 = tail call ptr @_Z10createTypei(i32 %0)
+ %3 = tail call i32 @callee(ptr %2, i32 %0)
+ ret i32 %3
+}
+
+declare ptr @_Z10createTypei(i32)
+
+!llvm.module.flags = !{!1}
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 10000}
+!5 = !{!"MaxCount", i64 10}
+!6 = !{!"MaxInternalCount", i64 1}
+!7 = !{!"MaxFunctionCount", i64 1000}
+!8 = !{!"NumCounts", i64 3}
+!9 = !{!"NumFunctions", i64 3}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13, !14}
+!12 = !{i32 10000, i64 100, i32 1}
+!13 = !{i32 999000, i64 100, i32 1}
+!14 = !{i32 999999, i64 1, i32 2}
+!17 = !{!"VP", i32 0, i64 1600, i64 123, i64 1000, i64 456, i64 600}
+!18 = !{!"function_entry_count", i64 1000}
+!19 = !{!"function_entry_count", i64 600}
+!20 = !{!"function_entry_count", i64 1700}
+;.
+; CHECK: [[PROF0]] = !{!"function_entry_count", i64 100}
+; CHECK: [[PROF1]] = !{!"VP", i32 0, i64 94, i64 123, i64 58, i64 456, i64 35}
+; CHECK: [[PROF2]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF3]] = !{!"VP", i32 0, i64 941, i64 123, i64 588, i64 456, i64 352}
+; CHECK: [[PROF4]] = !{!"function_entry_count", i64 600}
+; CHECK: [[PROF5]] = !{!"VP", i32 0, i64 564, i64 123, i64 352, i64 456, i64 211}
+;.
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
index 9b990480709c..80d8e1b16ed2 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
@@ -39,10 +39,9 @@ define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -117,10 +116,9 @@ define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -191,10 +189,9 @@ define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -269,10 +266,9 @@ define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -343,10 +339,9 @@ define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -421,10 +416,9 @@ define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -495,10 +489,9 @@ define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -573,10 +566,9 @@ define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -981,9 +973,8 @@ define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x flo
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1011,9 +1002,8 @@ define float @test_mask_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1060,9 +1050,8 @@ define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1086,9 +1075,8 @@ define double @test_mask_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x doub
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1129,9 +1117,8 @@ define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1159,9 +1146,8 @@ define float @test_maskz_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1206,9 +1192,8 @@ define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1232,9 +1217,8 @@ define double @test_maskz_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1275,9 +1259,8 @@ define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[C]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1305,9 +1288,8 @@ define float @test_mask3_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1352,9 +1334,8 @@ define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[C]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1378,9 +1359,8 @@ define double @test_mask3_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1423,9 +1403,8 @@ define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x float> [[C]], float [[TMP8]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP9]]
;
@@ -1457,9 +1436,8 @@ define float @test_mask3_vfmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: ret float [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1532,9 +1510,8 @@ define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> [[C]], double [[TMP8]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP9]]
;
@@ -1562,9 +1539,8 @@ define double @test_mask3_vfmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: ret double [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1632,9 +1608,8 @@ define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x f
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[C]], float [[TMP9]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP10]]
;
@@ -1668,9 +1643,8 @@ define float @test_mask3_vfnmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: ret float [[TMP9]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1747,9 +1721,8 @@ define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> [[C]], double [[TMP9]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP10]]
;
@@ -1779,9 +1752,8 @@ define double @test_mask3_vfnmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x do
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: ret double [[TMP9]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll b/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll
index c10c922f6643..906e84b60748 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-avx512.ll
@@ -39,10 +39,9 @@ define <4 x float> @test_add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -117,10 +116,9 @@ define <2 x double> @test_add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fadd double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -191,10 +189,9 @@ define <4 x float> @test_sub_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -269,10 +266,9 @@ define <2 x double> @test_sub_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fsub double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -343,10 +339,9 @@ define <4 x float> @test_mul_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -421,10 +416,9 @@ define <2 x double> @test_mul_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fmul double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -495,10 +489,9 @@ define <4 x float> @test_div_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv float [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP5]], float [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], float [[TMP3]], float [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -573,10 +566,9 @@ define <2 x double> @test_div_sd_mask(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[A:%.*]], i64 0
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = fdiv double [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP4]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK:%.*]] to i1
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP5]], double [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP4]], double [[TMP3]], double [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -981,9 +973,8 @@ define <4 x float> @test_mask_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x flo
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1011,9 +1002,8 @@ define float @test_mask_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP1]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP1]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1060,9 +1050,8 @@ define <2 x double> @test_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1086,9 +1075,8 @@ define double @test_mask_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x doub
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP1]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP1]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1129,9 +1117,8 @@ define <4 x float> @test_maskz_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[A]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1159,9 +1146,8 @@ define float @test_maskz_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float 0.000000e+00, float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float 0.000000e+00
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %a, float 1.000000e+00, i32 1
@@ -1206,9 +1192,8 @@ define <2 x double> @test_maskz_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[A]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1232,9 +1217,8 @@ define double @test_maskz_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double 0.000000e+00, double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double 0.000000e+00
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %a, double 1.000000e+00, i32 1
@@ -1275,9 +1259,8 @@ define <4 x float> @test_mask3_vfmadd_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x float> [[C]], float [[TMP6]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP7]]
;
@@ -1305,9 +1288,8 @@ define float @test_mask3_vfmadd_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], float [[TMP3]], float [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], float [[TMP4]], float [[TMP3]]
; CHECK-NEXT: ret float [[TMP6]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1352,9 +1334,8 @@ define <2 x double> @test_mask3_vfmadd_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x double> [[C]], double [[TMP6]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP7]]
;
@@ -1378,9 +1359,8 @@ define double @test_mask3_vfmadd_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[B:%.*]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[C:%.*]], i64 0
; CHECK-NEXT: [[TMP4:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[DOTNOT]], double [[TMP3]], double [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], double [[TMP4]], double [[TMP3]]
; CHECK-NEXT: ret double [[TMP6]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1423,9 +1403,8 @@ define <4 x float> @test_mask3_vfmsub_ss(<4 x float> %a, <4 x float> %b, <4 x fl
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x float> [[C]], float [[TMP8]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP9]]
;
@@ -1457,9 +1436,8 @@ define float @test_mask3_vfmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float>
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fma.f32(float [[TMP1]], float [[TMP2]], float [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], float [[TMP6]], float [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], float [[TMP5]], float [[TMP6]]
; CHECK-NEXT: ret float [[TMP8]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1532,9 +1510,8 @@ define <2 x double> @test_mask3_vfmsub_sd(<2 x double> %a, <2 x double> %b, <2 x
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x double> [[C]], double [[TMP8]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP9]]
;
@@ -1562,9 +1539,8 @@ define double @test_mask3_vfmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x dou
; CHECK-NEXT: [[TMP4:%.*]] = fneg double [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call double @llvm.fma.f64(double [[TMP1]], double [[TMP2]], double [[TMP4]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP7]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[DOTNOT]], double [[TMP6]], double [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], double [[TMP5]], double [[TMP6]]
; CHECK-NEXT: ret double [[TMP8]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
@@ -1632,9 +1608,8 @@ define <4 x float> @test_mask3_vfnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x f
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[C]], float [[TMP9]], i64 0
; CHECK-NEXT: ret <4 x float> [[TMP10]]
;
@@ -1668,9 +1643,8 @@ define float @test_mask3_vfnmsub_ss_0(<4 x float> %a, <4 x float> %b, <4 x float
; CHECK-NEXT: [[TMP5:%.*]] = fneg float [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.fma.f32(float [[TMP2]], float [[TMP3]], float [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x float> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], float [[TMP7]], float [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], float [[TMP6]], float [[TMP7]]
; CHECK-NEXT: ret float [[TMP9]]
;
%1 = insertelement <4 x float> %c, float 1.000000e+00, i32 1
@@ -1747,9 +1721,8 @@ define <2 x double> @test_mask3_vfnmsub_sd(<2 x double> %a, <2 x double> %b, <2
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x double> [[C]], double [[TMP9]], i64 0
; CHECK-NEXT: ret <2 x double> [[TMP10]]
;
@@ -1779,9 +1752,8 @@ define double @test_mask3_vfnmsub_sd_0(<2 x double> %a, <2 x double> %b, <2 x do
; CHECK-NEXT: [[TMP5:%.*]] = fneg double [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call double @llvm.fma.f64(double [[TMP2]], double [[TMP3]], double [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[C]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = and i8 [[MASK:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP8]], 0
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT]], double [[TMP7]], double [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i8 [[MASK:%.*]] to i1
+; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], double [[TMP6]], double [[TMP7]]
; CHECK-NEXT: ret double [[TMP9]]
;
%1 = insertelement <2 x double> %c, double 1.000000e+00, i32 1
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 522dcf8db27f..ec3aca26514c 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -3986,5 +3986,81 @@ define i32 @add_reduce_sqr_sum_varC_invalid2(i32 %a, i32 %b) {
ret i32 %ab2
}
+define i32 @fold_sext_addition_or_disjoint(i8 %x) {
+; CHECK-LABEL: @fold_sext_addition_or_disjoint(
+; CHECK-NEXT: [[SE:%.*]] = sext i8 [[XX:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[SE]], 1246
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or disjoint i8 %x, 12
+ %se = sext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_sext_addition_fail(i8 %x) {
+; CHECK-LABEL: @fold_sext_addition_fail(
+; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 12
+; CHECK-NEXT: [[SE:%.*]] = sext i8 [[XX]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[SE]], 1234
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or i8 %x, 12
+ %se = sext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_or_disjoint(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_or_disjoint(
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SE]], 1246
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or disjoint i8 %x, 12
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_or_disjoint2(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_or_disjoint2(
+; CHECK-NEXT: [[XX:%.*]] = add nuw i8 [[X:%.*]], 4
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX]] to i32
+; CHECK-NEXT: ret i32 [[SE]]
+;
+ %xx = or disjoint i8 %x, 18
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, -14
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_fail(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_fail(
+; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 12
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SE]], 1234
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or i8 %x, 12
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, 1234
+ ret i32 %r
+}
+
+define i32 @fold_zext_addition_fail2(i8 %x) {
+; CHECK-LABEL: @fold_zext_addition_fail2(
+; CHECK-NEXT: [[XX:%.*]] = or i8 [[X:%.*]], 18
+; CHECK-NEXT: [[SE:%.*]] = zext i8 [[XX]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[SE]], -14
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %xx = or i8 %x, 18
+ %se = zext i8 %xx to i32
+ %r = add i32 %se, -14
+ ret i32 %r
+}
+
+
declare void @llvm.assume(i1)
declare void @fake_func(i32)
diff --git a/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll b/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll
index 2d72a4ff8c0d..e2346987737a 100644
--- a/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/apint-shl-trunc.ll
@@ -3,9 +3,8 @@
define i1 @test0(i39 %X, i39 %A) {
; CHECK-LABEL: @test0(
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i39 1, [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i39 [[TMP1]], [[X:%.*]]
-; CHECK-NEXT: [[D:%.*]] = icmp ne i39 [[TMP2]], 0
+; CHECK-NEXT: [[B:%.*]] = lshr i39 [[X:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = trunc i39 [[B]] to i1
; CHECK-NEXT: ret i1 [[D]]
;
%B = lshr i39 %X, %A
@@ -15,9 +14,8 @@ define i1 @test0(i39 %X, i39 %A) {
define i1 @test1(i799 %X, i799 %A) {
; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i799 1, [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i799 [[TMP1]], [[X:%.*]]
-; CHECK-NEXT: [[D:%.*]] = icmp ne i799 [[TMP2]], 0
+; CHECK-NEXT: [[B:%.*]] = lshr i799 [[X:%.*]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = trunc i799 [[B]] to i1
; CHECK-NEXT: ret i1 [[D]]
;
%B = lshr i799 %X, %A
diff --git a/llvm/test/Transforms/InstCombine/binop-itofp.ll b/llvm/test/Transforms/InstCombine/binop-itofp.ll
index 82cdb3ce6bee..cd9ec1e59203 100644
--- a/llvm/test/Transforms/InstCombine/binop-itofp.ll
+++ b/llvm/test/Transforms/InstCombine/binop-itofp.ll
@@ -1012,7 +1012,7 @@ define float @missed_nonzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
; CHECK-NEXT: [[CONV_I:%.*]] = trunc i32 [[SEL]] to i16
; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp i16 [[CONV_I]] to float
-; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul float [[CONV1_I]], 0.000000e+00
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[CONV1_I]])
; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
; CHECK-NEXT: ret float [[MUL3_I_I]]
;
@@ -1031,7 +1031,7 @@ define <2 x float> @missed_nonzero_check_on_constant_for_si_fmul_vec(i1 %c, i1 %
; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
-; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul <2 x float> [[CONV1_I]], zeroinitializer
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[CONV1_I]])
; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
;
@@ -1050,7 +1050,8 @@ define float @negzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345)
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
; CHECK-NEXT: [[CONV_I:%.*]] = trunc i32 [[SEL]] to i16
; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp i16 [[CONV_I]] to float
-; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul float [[CONV1_I]], -0.000000e+00
+; CHECK-NEXT: [[TMP1:%.*]] = fneg float [[CONV1_I]]
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
; CHECK-NEXT: ret float [[MUL3_I_I]]
;
@@ -1069,7 +1070,7 @@ define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_undef(i1 %c, i1
; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
-; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul <2 x float> [[CONV1_I]], <float undef, float 0.000000e+00>
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[CONV1_I]])
; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
;
@@ -1111,7 +1112,8 @@ define <2 x float> @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(i1 %c
; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
-; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul <2 x float> [[CONV1_I]], <float undef, float -0.000000e+00>
+; CHECK-NEXT: [[TMP1:%.*]] = fneg <2 x float> [[CONV1_I]]
+; CHECK-NEXT: [[MUL3_I_I:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[TMP1]])
; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
;
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 85433a99f2ca..97554e946204 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -1399,8 +1399,7 @@ define float @sitofp_zext(i16 %a) {
define i1 @PR23309(i32 %A, i32 %B) {
; ALL-LABEL: @PR23309(
; ALL-NEXT: [[SUB:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
-; ALL-NEXT: [[TMP1:%.*]] = and i32 [[SUB]], 1
-; ALL-NEXT: [[TRUNC:%.*]] = icmp ne i32 [[TMP1]], 0
+; ALL-NEXT: [[TRUNC:%.*]] = trunc i32 [[SUB]] to i1
; ALL-NEXT: ret i1 [[TRUNC]]
;
%add = add i32 %A, -4
@@ -1412,8 +1411,7 @@ define i1 @PR23309(i32 %A, i32 %B) {
define i1 @PR23309v2(i32 %A, i32 %B) {
; ALL-LABEL: @PR23309v2(
; ALL-NEXT: [[SUB:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
-; ALL-NEXT: [[TMP1:%.*]] = and i32 [[SUB]], 1
-; ALL-NEXT: [[TRUNC:%.*]] = icmp ne i32 [[TMP1]], 0
+; ALL-NEXT: [[TRUNC:%.*]] = trunc i32 [[SUB]] to i1
; ALL-NEXT: ret i1 [[TRUNC]]
;
%add = add i32 %A, -4
diff --git a/llvm/test/Transforms/InstCombine/catchswitch-phi.ll b/llvm/test/Transforms/InstCombine/catchswitch-phi.ll
index 038847609b0f..cb87ee67a451 100644
--- a/llvm/test/Transforms/InstCombine/catchswitch-phi.ll
+++ b/llvm/test/Transforms/InstCombine/catchswitch-phi.ll
@@ -24,11 +24,11 @@ define void @test0(i1 %c1) personality ptr @__gxx_wasm_personality_v0 {
; CHECK: bb1:
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i32 4
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[BB3:%.*]] unwind label [[BB4:%.*]]
+; CHECK-NEXT: to label [[BB3:%.*]] unwind label [[BB4:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i32 4
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[BB3]] unwind label [[BB4]]
+; CHECK-NEXT: to label [[BB3]] unwind label [[BB4]]
; CHECK: bb3:
; CHECK-NEXT: unreachable
; CHECK: bb4:
@@ -37,7 +37,7 @@ define void @test0(i1 %c1) personality ptr @__gxx_wasm_personality_v0 {
; CHECK: bb5:
; CHECK-NEXT: [[TMP5:%.*]] = catchpad within [[TMP4]] [ptr null]
; CHECK-NEXT: invoke void @foo() [ "funclet"(token [[TMP5]]) ]
-; CHECK-NEXT: to label [[BB6:%.*]] unwind label [[BB7]]
+; CHECK-NEXT: to label [[BB6:%.*]] unwind label [[BB7]]
; CHECK: bb6:
; CHECK-NEXT: unreachable
; CHECK: bb7:
@@ -89,10 +89,10 @@ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH1:%.*]]
+; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[CATCH_DISPATCH1:%.*]]
; CHECK: invoke.cont:
; CHECK-NEXT: [[CALL:%.*]] = invoke i32 @baz()
-; CHECK-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: invoke.cont1:
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
@@ -101,7 +101,7 @@ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
; CHECK: if.end:
; CHECK-NEXT: [[AP_0:%.*]] = phi i8 [ 1, [[IF_THEN]] ], [ 0, [[INVOKE_CONT1]] ]
; CHECK-NEXT: invoke void @foo()
-; CHECK-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[CATCH_DISPATCH]]
+; CHECK-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[CATCH_DISPATCH]]
; CHECK: invoke.cont2:
; CHECK-NEXT: br label [[TRY_CONT:%.*]]
; CHECK: catch.dispatch:
@@ -114,17 +114,16 @@ define void @test1() personality ptr @__gxx_wasm_personality_v0 {
; CHECK-NEXT: catchret from [[TMP1]] to label [[TRY_CONT]]
; CHECK: rethrow:
; CHECK-NEXT: invoke void @llvm.wasm.rethrow() #[[ATTR0:[0-9]+]] [ "funclet"(token [[TMP1]]) ]
-; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH1]]
+; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH1]]
; CHECK: catch.dispatch1:
; CHECK-NEXT: [[AP_2:%.*]] = phi i8 [ [[AP_1]], [[CATCH_DISPATCH]] ], [ [[AP_1]], [[RETHROW]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = catchswitch within none [label %catch.start1] unwind to caller
; CHECK: catch.start1:
; CHECK-NEXT: [[TMP3:%.*]] = catchpad within [[TMP2]] [ptr null]
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[AP_2]], 1
-; CHECK-NEXT: [[TOBOOL1_NOT:%.*]] = icmp eq i8 [[TMP0]], 0
+; CHECK-NEXT: [[TOBOOL1_NOT:%.*]] = trunc i8 [[AP_2]] to i1
; CHECK-NEXT: br i1 [[TOBOOL1_NOT]], label [[IF_END1:%.*]], label [[IF_THEN1:%.*]]
; CHECK: if.then1:
-; CHECK-NEXT: br label [[IF_END1]]
+; CHECK-NEXT: br label [[IF_THEN1]]
; CHECK: if.end1:
; CHECK-NEXT: catchret from [[TMP3]] to label [[TRY_CONT]]
; CHECK: try.cont:
diff --git a/llvm/test/Transforms/InstCombine/div.ll b/llvm/test/Transforms/InstCombine/div.ll
index 1309dee817cf..e8a25ff44d02 100644
--- a/llvm/test/Transforms/InstCombine/div.ll
+++ b/llvm/test/Transforms/InstCombine/div.ll
@@ -1810,3 +1810,25 @@ define i6 @udiv_distribute_mul_nsw_add_nuw(i6 %x) {
%div = udiv i6 %add, 3
ret i6 %div
}
+
+define i32 @fold_disjoint_or_over_sdiv(i32 %x) {
+; CHECK-LABEL: @fold_disjoint_or_over_sdiv(
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[X:%.*]], 9
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %mul = mul nsw i32 %x, 9
+ %or = or disjoint i32 %mul, 81
+ %r = sdiv i32 %or, 9
+ ret i32 %r
+}
+
+define i32 @fold_disjoint_or_over_udiv(i32 %x) {
+; CHECK-LABEL: @fold_disjoint_or_over_udiv(
+; CHECK-NEXT: [[R:%.*]] = add nuw i32 [[X:%.*]], 9
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %mul = mul nuw i32 %x, 9
+ %or = or disjoint i32 %mul, 81
+ %r = udiv i32 %or, 9
+ ret i32 %r
+}
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 96e57939d285..f6435f003289 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1250,7 +1250,7 @@ define half @mul_zero_nnan(half %x) {
define <2 x float> @mul_zero_nnan_vec_poison(<2 x float> %x) {
; CHECK-LABEL: @mul_zero_nnan_vec_poison(
-; CHECK-NEXT: [[R:%.*]] = call nnan <2 x float> @llvm.copysign.v2f32(<2 x float> <float 0.000000e+00, float poison>, <2 x float> [[X:%.*]])
+; CHECK-NEXT: [[R:%.*]] = call nnan <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[X:%.*]])
; CHECK-NEXT: ret <2 x float> [[R]]
;
%r = fmul nnan <2 x float> %x, <float 0.0, float poison>
@@ -1268,13 +1268,104 @@ define half @mul_zero(half %x) {
ret half %r
}
-; TODO: This could be fneg+copysign.
-
define half @mul_negzero_nnan(half %x) {
; CHECK-LABEL: @mul_negzero_nnan(
-; CHECK-NEXT: [[R:%.*]] = fmul nnan half [[X:%.*]], 0xH8000
+; CHECK-NEXT: [[TMP1:%.*]] = fneg nnan half [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = call nnan half @llvm.copysign.f16(half 0xH0000, half [[TMP1]])
; CHECK-NEXT: ret half [[R]]
;
%r = fmul nnan half %x, -0.0
ret half %r
}
+
+define float @mul_pos_zero_nnan_ninf(float nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_pos_zero_nnan_ninf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[A:%.*]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul float %a, 0.000000e+00
+ ret float %ret
+}
+
+define float @mul_pos_zero_nnan(float nofpclass(nan) %a) {
+; CHECK-LABEL: @mul_pos_zero_nnan(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = fmul float [[A:%.*]], 0.000000e+00
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul float %a, 0.000000e+00
+ ret float %ret
+}
+
+define float @mul_pos_zero_nnan_ninf_fmf(float nofpclass(nan) %a) {
+; CHECK-LABEL: @mul_pos_zero_nnan_ninf_fmf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = call ninf float @llvm.copysign.f32(float 0.000000e+00, float [[A:%.*]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul ninf float %a, 0.000000e+00
+ ret float %ret
+}
+
+define float @mul_neg_zero_nnan_ninf(float nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_ninf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg float [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP0]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul float %a, -0.000000e+00
+ ret float %ret
+}
+
+define float @mul_neg_zero_nnan_fmf(float %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_fmf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg nnan float [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[TMP0]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul nnan float %a, -0.000000e+00
+ ret float %ret
+}
+
+define float @mul_neg_zero_nnan_ninf_fmf(float nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_ninf_fmf(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg nnan ninf float [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call nnan ninf float @llvm.copysign.f32(float 0.000000e+00, float [[TMP0]])
+; CHECK-NEXT: ret float [[RET]]
+;
+entry:
+ %ret = fmul nnan ninf float %a, -0.000000e+00
+ ret float %ret
+}
+
+define <3 x float> @mul_neg_zero_nnan_ninf_vec(<3 x float> nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_neg_zero_nnan_ninf_vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = fneg <3 x float> [[A:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = call <3 x float> @llvm.copysign.v3f32(<3 x float> zeroinitializer, <3 x float> [[TMP0]])
+; CHECK-NEXT: ret <3 x float> [[RET]]
+;
+entry:
+ %ret = fmul <3 x float> %a, <float -0.0, float undef, float poison>
+ ret <3 x float> %ret
+}
+
+define <3 x float> @mul_mixed_zero_nnan_ninf_vec(<3 x float> nofpclass(inf nan) %a) {
+; CHECK-LABEL: @mul_mixed_zero_nnan_ninf_vec(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[RET:%.*]] = fmul <3 x float> [[A:%.*]], <float -0.000000e+00, float 0.000000e+00, float poison>
+; CHECK-NEXT: ret <3 x float> [[RET]]
+;
+entry:
+ %ret = fmul <3 x float> %a, <float -0.0, float 0.0, float poison>
+ ret <3 x float> %ret
+}
diff --git a/llvm/test/Transforms/InstCombine/fpcast.ll b/llvm/test/Transforms/InstCombine/fpcast.ll
index 32bfdb52bb5f..ac4b88fcddd7 100644
--- a/llvm/test/Transforms/InstCombine/fpcast.ll
+++ b/llvm/test/Transforms/InstCombine/fpcast.ll
@@ -424,10 +424,7 @@ define i32 @fptosi_select(i1 %cond) {
define i32 @mul_pos_zero_convert(i32 %a) {
; CHECK-LABEL: @mul_pos_zero_convert(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[FP:%.*]] = sitofp i32 [[A:%.*]] to float
-; CHECK-NEXT: [[RET:%.*]] = fmul float [[FP]], 0.000000e+00
-; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[RET]] to i32
-; CHECK-NEXT: ret i32 [[CONV]]
+; CHECK-NEXT: ret i32 0
;
entry:
%fp = sitofp i32 %a to float
diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll
index da59101d5710..e8105b6287d0 100644
--- a/llvm/test/Transforms/InstCombine/freeze.ll
+++ b/llvm/test/Transforms/InstCombine/freeze.ll
@@ -1049,7 +1049,7 @@ exit:
define ptr @freeze_load_noundef(ptr %ptr) {
; CHECK-LABEL: @freeze_load_noundef(
-; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !noundef !0
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !noundef [[META0:![0-9]+]]
; CHECK-NEXT: ret ptr [[P]]
;
%p = load ptr, ptr %ptr, !noundef !0
@@ -1059,7 +1059,7 @@ define ptr @freeze_load_noundef(ptr %ptr) {
define ptr @freeze_load_dereferenceable(ptr %ptr) {
; CHECK-LABEL: @freeze_load_dereferenceable(
-; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable !1
+; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[PTR:%.*]], align 8, !dereferenceable [[META1:![0-9]+]]
; CHECK-NEXT: ret ptr [[P]]
;
%p = load ptr, ptr %ptr, !dereferenceable !1
@@ -1138,6 +1138,17 @@ define i32 @propagate_drop_flags_or(i32 %arg) {
ret i32 %v1.fr
}
+define i32 @propagate_drop_flags_trunc(i64 %arg) {
+; CHECK-LABEL: @propagate_drop_flags_trunc(
+; CHECK-NEXT: [[ARG_FR:%.*]] = freeze i64 [[ARG:%.*]]
+; CHECK-NEXT: [[V1:%.*]] = trunc i64 [[ARG_FR]] to i32
+; CHECK-NEXT: ret i32 [[V1]]
+;
+ %v1 = trunc nsw nuw i64 %arg to i32
+ %v1.fr = freeze i32 %v1
+ ret i32 %v1.fr
+}
+
!0 = !{}
!1 = !{i64 4}
!2 = !{i32 0, i32 100}
@@ -1145,8 +1156,8 @@ define i32 @propagate_drop_flags_or(i32 %arg) {
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; CHECK: attributes #[[ATTR1]] = { nounwind }
;.
-; CHECK: [[META0:![0-9]+]] = !{}
-; CHECK: [[META1:![0-9]+]] = !{i64 4}
+; CHECK: [[META0]] = !{}
+; CHECK: [[META1]] = !{i64 4}
; CHECK: [[RNG2]] = !{i32 0, i32 100}
; CHECK: [[RNG3]] = !{i32 0, i32 33}
;.
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-and.ll b/llvm/test/Transforms/InstCombine/icmp-mul-and.ll
index d5f5641392c0..7e7f087ca711 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-and.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-and.ll
@@ -267,10 +267,10 @@ define i1 @pr51551_neg1(i32 %x, i32 %y) {
define i1 @pr51551_neg2(i32 %x, i32 %y) {
; CHECK-LABEL: @pr51551_neg2(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Y:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[TMP1]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y:%.*]] to i1
; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], 7
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT: [[DOTNOT:%.*]] = xor i1 [[TMP1]], true
; CHECK-NEXT: [[CMP:%.*]] = select i1 [[DOTNOT]], i1 true, i1 [[CMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
index adf78723b130..d858c91becb5 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -128,12 +128,12 @@ define i1 @PR46561(i1 %a, i1 %x, i1 %y, i8 %z) {
; CHECK-NEXT: br i1 [[A:%.*]], label [[COND_TRUE:%.*]], label [[END:%.*]]
; CHECK: cond.true:
; CHECK-NEXT: [[MULBOOL:%.*]] = and i1 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[Z:%.*]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[Z:%.*]] to i1
; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[MULBOOL]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP2]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP3]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i1 [[P]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll
index 58c283815cf9..5305c78f6912 100644
--- a/llvm/test/Transforms/InstCombine/known-bits.ll
+++ b/llvm/test/Transforms/InstCombine/known-bits.ll
@@ -483,5 +483,56 @@ if.else:
ret i64 13
}
+define i1 @test_icmp_or_distjoint(i8 %n, i1 %other) {
+; CHECK-LABEL: @test_icmp_or_distjoint(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[N_OR:%.*]] = or disjoint i8 [[N:%.*]], 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[N_OR]], -111
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i1 true
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 [[OTHER:%.*]]
+;
+entry:
+ %n_or = or disjoint i8 %n, 16
+ %cmp = icmp ugt i8 %n_or, 145
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %r = icmp slt i8 %n, 0
+ ret i1 %r
+
+if.else:
+ ret i1 %other
+}
+
+define i1 @test_icmp_or_fail_missing_disjoint(i8 %n, i1 %other) {
+; CHECK-LABEL: @test_icmp_or_fail_missing_disjoint(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[N_OR:%.*]] = or i8 [[N:%.*]], 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[N_OR]], -111
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[N]], 0
+; CHECK-NEXT: ret i1 [[R]]
+; CHECK: if.else:
+; CHECK-NEXT: ret i1 [[OTHER:%.*]]
+;
+entry:
+ %n_or = or i8 %n, 16
+ %cmp = icmp ugt i8 %n_or, 145
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %r = icmp slt i8 %n, 0
+ ret i1 %r
+
+if.else:
+ ret i1 %other
+}
+
+
+
declare void @use(i1)
declare void @sink(i8)
diff --git a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
index da7cc2db0978..e940ae3fec16 100644
--- a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
+++ b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll
@@ -214,9 +214,8 @@ define i64 @scalar_mul_bit_x0_y0_uses(i64 %x, i64 %y) {
define i64 @scalar_mul_bit_x0_y1(i64 %x, i64 %y) {
; CHECK-LABEL: @scalar_mul_bit_x0_y1(
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], 2
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[TMP1]], 0
-; CHECK-NEXT: [[MUL:%.*]] = select i1 [[DOTNOT]], i64 0, i64 [[AND2]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i1
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[TMP1]], i64 [[AND2]], i64 0
; CHECK-NEXT: ret i64 [[MUL]]
;
%and1 = and i64 %x, 1
@@ -228,9 +227,8 @@ define i64 @scalar_mul_bit_x0_y1(i64 %x, i64 %y) {
define i64 @scalar_mul_bit_x0_yC(i64 %x, i64 %y, i64 %c) {
; CHECK-LABEL: @scalar_mul_bit_x0_yC(
; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[TMP1]], 0
-; CHECK-NEXT: [[MUL:%.*]] = select i1 [[DOTNOT]], i64 0, i64 [[AND2]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i1
+; CHECK-NEXT: [[MUL:%.*]] = select i1 [[TMP1]], i64 [[AND2]], i64 0
; CHECK-NEXT: ret i64 [[MUL]]
;
%and1 = and i64 %x, 1
diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index a176d16f2cdf..d4a689c60786 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -684,9 +684,8 @@ define <2 x i32> @signbit_mul_vec_commute(<2 x i32> %a, <2 x i32> %b) {
define i32 @lowbit_mul(i32 %a, i32 %b) {
; CHECK-LABEL: @lowbit_mul(
-; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 1
-; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT: [[E:%.*]] = select i1 [[DOTNOT]], i32 0, i32 [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[A:%.*]] to i1
+; CHECK-NEXT: [[E:%.*]] = select i1 [[TMP1]], i32 [[B:%.*]], i32 0
; CHECK-NEXT: ret i32 [[E]]
;
%d = and i32 %a, 1
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index e1ae6c1ea475..7eb508ebb553 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -116,8 +116,8 @@ define i32 @test6(i16 %A, i1 %b) {
; CHECK: BB1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: BB2:
-; CHECK-NEXT: [[B:%.*]] = zext i16 [[A:%.*]] to i32
-; CHECK-NEXT: ret i32 [[B]]
+; CHECK-NEXT: [[C:%.*]] = zext i16 [[A:%.*]] to i32
+; CHECK-NEXT: ret i32 [[C]]
;
BB0:
%X = zext i16 %A to i32
@@ -129,8 +129,8 @@ BB1:
BB2:
;; Suck casts into phi
- %B = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
- ret i32 %B
+ %c = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32 %c
}
define i32 @test_dead_cycle(i32 %A, i1 %cond) {
@@ -232,8 +232,8 @@ define ptr @test8(ptr %A, i1 %b) {
; CHECK: BB1:
; CHECK-NEXT: br label [[BB2]]
; CHECK: BB2:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: ret ptr [[B]]
+; CHECK-NEXT: [[C:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
+; CHECK-NEXT: ret ptr [[C]]
;
BB0:
%X = getelementptr inbounds { i32, i32 }, ptr %A, i32 0, i32 1
@@ -245,8 +245,8 @@ BB1:
BB2:
;; Suck GEPs into phi
- %B = phi ptr [ %X, %BB0 ], [ %Y, %BB1 ]
- ret ptr %B
+ %c = phi ptr [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret ptr %c
}
define i32 @test9(ptr %A, ptr %B) {
@@ -489,9 +489,8 @@ define i64 @test15b(i64 %A, i1 %b) {
; CHECK-NEXT: [[Y_OFF0:%.*]] = phi i64 [ [[A]], [[ENTRY]] ], [ [[C]], [[ONE]] ]
; CHECK-NEXT: [[Y_OFF64]] = phi i64 [ [[A]], [[ENTRY]] ], [ 0, [[ONE]] ]
; CHECK-NEXT: [[D:%.*]] = call i64 @test15a(i64 [[Y_OFF64]])
-; CHECK-NEXT: [[TMP0:%.*]] = and i64 [[D]], 1
-; CHECK-NEXT: [[D1_NOT:%.*]] = icmp eq i64 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[D1_NOT]], label [[END:%.*]], label [[ONE]]
+; CHECK-NEXT: [[D1:%.*]] = trunc i64 [[D]] to i1
+; CHECK-NEXT: br i1 [[D1]], label [[ONE]], label [[END:%.*]]
; CHECK: end:
; CHECK-NEXT: ret i64 [[Y_OFF0]]
;
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
index 43e34c889106..6c0575e8b719 100644
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -313,7 +313,7 @@ define double @fdiv_pow_powi(double %x) {
; CHECK-NEXT: [[DIV:%.*]] = fmul reassoc nnan double [[X:%.*]], [[X]]
; CHECK-NEXT: ret double [[DIV]]
;
- %p1 = call double @llvm.powi.f64.i32(double %x, i32 3)
+ %p1 = call reassoc double @llvm.powi.f64.i32(double %x, i32 3)
%div = fdiv reassoc nnan double %p1, %x
ret double %div
}
@@ -323,7 +323,7 @@ define float @fdiv_powf_powi(float %x) {
; CHECK-NEXT: [[DIV:%.*]] = call reassoc nnan float @llvm.powi.f32.i32(float [[X:%.*]], i32 99)
; CHECK-NEXT: ret float [[DIV]]
;
- %p1 = call float @llvm.powi.f32.i32(float %x, i32 100)
+ %p1 = call reassoc float @llvm.powi.f32.i32(float %x, i32 100)
%div = fdiv reassoc nnan float %p1, %x
ret float %div
}
@@ -347,10 +347,21 @@ define double @fdiv_pow_powi_multi_use(double %x) {
define float @fdiv_powf_powi_missing_reassoc(float %x) {
; CHECK-LABEL: @fdiv_powf_powi_missing_reassoc(
; CHECK-NEXT: [[P1:%.*]] = call float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
-; CHECK-NEXT: [[DIV:%.*]] = fdiv nnan float [[P1]], [[X]]
+; CHECK-NEXT: [[DIV:%.*]] = fdiv reassoc nnan float [[P1]], [[X]]
; CHECK-NEXT: ret float [[DIV]]
;
%p1 = call float @llvm.powi.f32.i32(float %x, i32 100)
+ %div = fdiv reassoc nnan float %p1, %x
+ ret float %div
+}
+
+define float @fdiv_powf_powi_missing_reassoc1(float %x) {
+; CHECK-LABEL: @fdiv_powf_powi_missing_reassoc1(
+; CHECK-NEXT: [[P1:%.*]] = call reassoc float @llvm.powi.f32.i32(float [[X:%.*]], i32 100)
+; CHECK-NEXT: [[DIV:%.*]] = fdiv nnan float [[P1]], [[X]]
+; CHECK-NEXT: ret float [[DIV]]
+;
+ %p1 = call reassoc float @llvm.powi.f32.i32(float %x, i32 100)
%div = fdiv nnan float %p1, %x
ret float %div
}
diff --git a/llvm/test/Transforms/InstCombine/ptr-int-cast.ll b/llvm/test/Transforms/InstCombine/ptr-int-cast.ll
index 6f5814e1a282..69b8f6953d61 100644
--- a/llvm/test/Transforms/InstCombine/ptr-int-cast.ll
+++ b/llvm/test/Transforms/InstCombine/ptr-int-cast.ll
@@ -6,8 +6,7 @@ define i1 @test1(ptr %x) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[X:%.*]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i1
; CHECK-NEXT: ret i1 [[TMP2]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
index bbb8d848be6f..ad55b506a108 100644
--- a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
@@ -5,8 +5,7 @@ define i1 @reduce_add_self(<8 x i1> %x) {
; CHECK-LABEL: @reduce_add_self(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], 1
-; CHECK-NEXT: [[RES:%.*]] = icmp ne i8 [[TMP3]], 0
+; CHECK-NEXT: [[RES:%.*]] = trunc i8 [[TMP2]] to i1
; CHECK-NEXT: ret i1 [[RES]]
;
%res = call i1 @llvm.vector.reduce.add.v8i32(<8 x i1> %x)
diff --git a/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
index 97b6f7b6d96c..84ac9369b5ff 100644
--- a/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
@@ -5,8 +5,7 @@ define i1 @reduce_xor_self(<8 x i1> %x) {
; CHECK-LABEL: @reduce_xor_self(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], 1
-; CHECK-NEXT: [[RES:%.*]] = icmp ne i8 [[TMP3]], 0
+; CHECK-NEXT: [[RES:%.*]] = trunc i8 [[TMP2]] to i1
; CHECK-NEXT: ret i1 [[RES]]
;
%res = call i1 @llvm.vector.reduce.xor.v8i32(<8 x i1> %x)
@@ -17,9 +16,8 @@ define i32 @reduce_xor_sext(<4 x i1> %x) {
; CHECK-LABEL: @reduce_xor_sext(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i1> [[X:%.*]] to i4
; CHECK-NEXT: [[TMP2:%.*]] = call i4 @llvm.ctpop.i4(i4 [[TMP1]]), !range [[RNG1:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = and i4 [[TMP2]], 1
-; CHECK-NEXT: [[SEXT:%.*]] = sub nsw i4 0, [[TMP3]]
-; CHECK-NEXT: [[RES:%.*]] = sext i4 [[SEXT]] to i32
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i4 [[TMP2]] to i1
+; CHECK-NEXT: [[RES:%.*]] = sext i1 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[RES]]
;
%sext = sext <4 x i1> %x to <4 x i32>
@@ -57,9 +55,8 @@ define i8 @reduce_xor_zext_long(<128 x i1> %x) {
; CHECK-LABEL: @reduce_xor_zext_long(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
; CHECK-NEXT: [[TMP2:%.*]] = call i128 @llvm.ctpop.i128(i128 [[TMP1]]), !range [[RNG3:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i8
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP3]], 1
-; CHECK-NEXT: [[RES:%.*]] = sub nsw i8 0, [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i1
+; CHECK-NEXT: [[RES:%.*]] = sext i1 [[TMP3]] to i8
; CHECK-NEXT: ret i8 [[RES]]
;
%sext = sext <128 x i1> %x to <128 x i8>
@@ -72,9 +69,8 @@ define i8 @reduce_xor_zext_long_external_use(<128 x i1> %x) {
; CHECK-LABEL: @reduce_xor_zext_long_external_use(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
; CHECK-NEXT: [[TMP2:%.*]] = call i128 @llvm.ctpop.i128(i128 [[TMP1]]), !range [[RNG3]]
-; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i8
-; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP3]], 1
-; CHECK-NEXT: [[RES:%.*]] = sub nsw i8 0, [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i128 [[TMP2]] to i1
+; CHECK-NEXT: [[RES:%.*]] = sext i1 [[TMP3]] to i8
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <128 x i1> [[X]], i64 0
; CHECK-NEXT: [[EXT:%.*]] = sext i1 [[TMP5]] to i8
; CHECK-NEXT: store i8 [[EXT]], ptr @glob, align 1
diff --git a/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll b/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
index 4b37ccbe3370..729ca03ddfd1 100644
--- a/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
+++ b/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
@@ -122,3 +122,35 @@ define { i32, i1 } @fold_sub_simple(i32 %x) {
%b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 30)
ret { i32, i1 } %b
}
+
+define { i32, i1 } @fold_with_distjoin_or(i32 %x) {
+; CHECK-LABEL: @fold_with_distjoin_or(
+; CHECK-NEXT: [[B:%.*]] = add i32 [[X:%.*]], 6
+; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[B]], 0
+; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
+;
+ %a = or disjoint i32 %x, 13
+ %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 -7)
+ ret { i32, i1 } %b
+}
+
+define { i32, i1 } @fold_with_disjoint_or2(i32 %x) {
+; CHECK-LABEL: @fold_with_disjoint_or2(
+; CHECK-NEXT: [[B:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 127)
+; CHECK-NEXT: ret { i32, i1 } [[B]]
+;
+ %a = or disjoint i32 %x, 100
+ %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 27)
+ ret { i32, i1 } %b
+}
+
+define { i32, i1 } @fold_with_or_fail(i32 %x) {
+; CHECK-LABEL: @fold_with_or_fail(
+; CHECK-NEXT: [[A:%.*]] = or i32 [[X:%.*]], 100
+; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A]], i32 27)
+; CHECK-NEXT: ret { i32, i1 } [[B]]
+;
+ %a = or i32 %x, 100
+ %b = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 27)
+ ret { i32, i1 } %b
+}
diff --git a/llvm/test/Transforms/InstCombine/scalarization.ll b/llvm/test/Transforms/InstCombine/scalarization.ll
index fe6dc526bd50..7e645ef7e883 100644
--- a/llvm/test/Transforms/InstCombine/scalarization.ll
+++ b/llvm/test/Transforms/InstCombine/scalarization.ll
@@ -341,6 +341,17 @@ define i1 @extractelt_vector_fcmp_constrhs_dynidx(<2 x float> %arg, i32 %idx) {
ret i1 %ext
}
+define i1 @extractelt_vector_fcmp_copy_flags(<4 x float> %x) {
+; CHECK-LABEL: @extractelt_vector_fcmp_copy_flags(
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp nsz arcp oeq float [[TMP1]], 0.000000e+00
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %cmp = fcmp nsz arcp oeq <4 x float> %x, zeroinitializer
+ %r = extractelement <4 x i1> %cmp, i32 2
+ ret i1 %r
+}
+
define i1 @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(<2 x float> %arg0, <2 x float> %arg1, <2 x float> %arg2, i32 %idx) {
;
; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index 1b2567505993..aa3a238e0949 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -775,3 +775,32 @@ define <3 x i32> @add3_i96(<3 x i32> %0, <3 x i32> %1) {
%25 = insertelement <3 x i32> %24, i32 %20, i32 2
ret <3 x i32> %25
}
+
+define i8 @shl_fold_or_disjoint_cnt(i8 %x) {
+; CHECK-LABEL: @shl_fold_or_disjoint_cnt(
+; CHECK-NEXT: [[R:%.*]] = shl i8 16, [[X:%.*]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %a = or disjoint i8 %x, 3
+ %r = shl i8 2, %a
+ ret i8 %r
+}
+
+define <2 x i8> @ashr_fold_or_disjoint_cnt(<2 x i8> %x) {
+; CHECK-LABEL: @ashr_fold_or_disjoint_cnt(
+; CHECK-NEXT: [[R:%.*]] = lshr <2 x i8> <i8 0, i8 1>, [[X:%.*]]
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %a = or disjoint <2 x i8> %x, <i8 3, i8 1>
+ %r = ashr <2 x i8> <i8 2, i8 3>, %a
+ ret <2 x i8> %r
+}
+
+define <2 x i8> @lshr_fold_or_disjoint_cnt_out_of_bounds(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_fold_or_disjoint_cnt_out_of_bounds(
+; CHECK-NEXT: ret <2 x i8> zeroinitializer
+;
+ %a = or disjoint <2 x i8> %x, <i8 3, i8 8>
+ %r = lshr <2 x i8> <i8 2, i8 3>, %a
+ ret <2 x i8> %r
+}
diff --git a/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll b/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll
index 44ec77e471bb..f573ff36d2ce 100644
--- a/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/shuffle_select-inseltpoison.ll
@@ -336,7 +336,18 @@ define <4 x i32> @srem(<4 x i32> %v) {
; Try FP ops/types.
-define <4 x float> @fadd(<4 x float> %v) {
+define <4 x float> @fadd_maybe_nan(<4 x float> %v) {
+; CHECK-LABEL: @fadd_maybe_nan(
+; CHECK-NEXT: [[B:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float poison, float poison>
+; CHECK-NEXT: [[S:%.*]] = shufflevector <4 x float> [[B]], <4 x float> [[V]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT: ret <4 x float> [[S]]
+;
+ %b = fadd <4 x float> %v, <float 41.0, float 42.0, float 43.0, float 44.0>
+ %s = shufflevector <4 x float> %b, <4 x float> %v, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x float> %s
+}
+
+define <4 x float> @fadd(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fadd(
; CHECK-NEXT: [[S:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float -0.000000e+00, float -0.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -359,7 +370,7 @@ define <4 x double> @fsub(<4 x double> %v) {
; Propagate any FMF.
-define <4 x float> @fmul(<4 x float> %v) {
+define <4 x float> @fmul(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fmul(
; CHECK-NEXT: [[S:%.*]] = fmul nnan ninf <4 x float> [[V:%.*]], <float 4.100000e+01, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -380,7 +391,7 @@ define <4 x double> @fdiv_constant_op0(<4 x double> %v) {
ret <4 x double> %s
}
-define <4 x double> @fdiv_constant_op1(<4 x double> %v) {
+define <4 x double> @fdiv_constant_op1(<4 x double> nofpclass(nan) %v) {
; CHECK-LABEL: @fdiv_constant_op1(
; CHECK-NEXT: [[S:%.*]] = fdiv reassoc <4 x double> [[V:%.*]], <double undef, double 1.000000e+00, double 4.300000e+01, double 4.400000e+01>
; CHECK-NEXT: ret <4 x double> [[S]]
diff --git a/llvm/test/Transforms/InstCombine/shuffle_select.ll b/llvm/test/Transforms/InstCombine/shuffle_select.ll
index a1b0d782b554..efadb5c3c109 100644
--- a/llvm/test/Transforms/InstCombine/shuffle_select.ll
+++ b/llvm/test/Transforms/InstCombine/shuffle_select.ll
@@ -336,7 +336,18 @@ define <4 x i32> @srem(<4 x i32> %v) {
; Try FP ops/types.
-define <4 x float> @fadd(<4 x float> %v) {
+define <4 x float> @fadd_maybe_nan(<4 x float> %v) {
+; CHECK-LABEL: @fadd_maybe_nan(
+; CHECK-NEXT: [[B:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float poison, float poison>
+; CHECK-NEXT: [[S:%.*]] = shufflevector <4 x float> [[B]], <4 x float> [[V]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+; CHECK-NEXT: ret <4 x float> [[S]]
+;
+ %b = fadd <4 x float> %v, <float 41.0, float 42.0, float 43.0, float 44.0>
+ %s = shufflevector <4 x float> %b, <4 x float> %v, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x float> %s
+}
+
+define <4 x float> @fadd(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fadd(
; CHECK-NEXT: [[S:%.*]] = fadd <4 x float> [[V:%.*]], <float 4.100000e+01, float 4.200000e+01, float -0.000000e+00, float -0.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -359,7 +370,7 @@ define <4 x double> @fsub(<4 x double> %v) {
; Propagate any FMF.
-define <4 x float> @fmul(<4 x float> %v) {
+define <4 x float> @fmul(<4 x float> nofpclass(nan) %v) {
; CHECK-LABEL: @fmul(
; CHECK-NEXT: [[S:%.*]] = fmul nnan ninf <4 x float> [[V:%.*]], <float 4.100000e+01, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT: ret <4 x float> [[S]]
@@ -380,7 +391,7 @@ define <4 x double> @fdiv_constant_op0(<4 x double> %v) {
ret <4 x double> %s
}
-define <4 x double> @fdiv_constant_op1(<4 x double> %v) {
+define <4 x double> @fdiv_constant_op1(<4 x double> nofpclass(nan) %v) {
; CHECK-LABEL: @fdiv_constant_op1(
; CHECK-NEXT: [[S:%.*]] = fdiv reassoc <4 x double> [[V:%.*]], <double undef, double 1.000000e+00, double 4.300000e+01, double 4.400000e+01>
; CHECK-NEXT: ret <4 x double> [[S]]
diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll
index c6bc06d666d0..760825d6b1da 100644
--- a/llvm/test/Transforms/InstCombine/trunc.ll
+++ b/llvm/test/Transforms/InstCombine/trunc.ll
@@ -1021,3 +1021,40 @@ define i16 @PR44545(i32 %t0, i32 %data) {
%sub = add nsw i16 %cast, -1
ret i16 %sub
}
+
+; Make sure that SimplifyDemandedBits drops the nowrap flags
+define i8 @drop_nsw_trunc(i16 %x, i16 %y) {
+; CHECK-LABEL: @drop_nsw_trunc(
+; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %and = and i16 %x, 255
+ %and2 = and i16 %and, %y
+ %res = trunc nsw i16 %and2 to i8
+ ret i8 %res
+}
+
+define i8 @drop_nuw_trunc(i16 %x, i16 %y) {
+; CHECK-LABEL: @drop_nuw_trunc(
+; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[B:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[B]]
+;
+ %and = and i16 %x, 255
+ %and2 = and i16 %and, %y
+ %res = trunc nuw i16 %and2 to i8
+ ret i8 %res
+}
+
+define i8 @drop_both_trunc(i16 %x, i16 %y) {
+; CHECK-LABEL: @drop_both_trunc(
+; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %and = and i16 %x, 255
+ %and2 = and i16 %and, %y
+ %res = trunc nuw nsw i16 %and2 to i8
+ ret i8 %res
+}
diff --git a/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll b/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll
index 28d309baaa41..fd5d38bb38dd 100644
--- a/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll
+++ b/llvm/test/Transforms/InstCombine/uadd-with-overflow.ll
@@ -124,3 +124,26 @@ define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
%b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 30, i32 %a)
ret { i32, i1 } %b
}
+
+
+define { <2 x i32>, <2 x i1> } @fold_simple_splat_with_disjoint_or_constant(<2 x i32> %x) {
+; CHECK-LABEL: @fold_simple_splat_with_disjoint_or_constant(
+; CHECK-NEXT: [[B:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
+;
+ %a = or disjoint <2 x i32> %x, <i32 12, i32 12>
+ %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
+ ret { <2 x i32>, <2 x i1> } %b
+}
+
+
+define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant_with_or_fail(<2 x i32> %x) {
+; CHECK-LABEL: @fold_simple_splat_constant_with_or_fail(
+; CHECK-NEXT: [[A:%.*]] = or <2 x i32> [[X:%.*]], <i32 12, i32 12>
+; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
+;
+ %a = or <2 x i32> %x, <i32 12, i32 12>
+ %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
+ ret { <2 x i32>, <2 x i1> } %b
+}
diff --git a/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll b/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll
index 92ff099afb1c..daa64f2e2ea7 100644
--- a/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll
+++ b/llvm/test/Transforms/LoopIdiom/AArch64/byte-compare-index.ll
@@ -2,6 +2,9 @@
; RUN: opt -aarch64-lit -aarch64-lit-verify -verify-dom-info -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s
; RUN: opt -aarch64-lit -simplifycfg -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=LOOP-DEL
; RUN: opt -aarch64-lit -mtriple aarch64-unknown-linux-gnu -S < %s | FileCheck %s --check-prefix=NO-TRANSFORM
+; RUN: opt -p aarch64-lit -aarch64-lit-verify -verify-dom-info -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s
+; RUN: opt -passes='function(loop(aarch64-lit)),simplifycfg' -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=LOOP-DEL
+; RUN: opt -p aarch64-lit -mtriple aarch64-unknown-linux-gnu -S < %s | FileCheck %s --check-prefix=NO-TRANSFORM
define i32 @compare_bytes_simple(ptr %a, ptr %b, i32 %len, i32 %extra, i32 %n) {
; CHECK-LABEL: define i32 @compare_bytes_simple(
diff --git a/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll b/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll
index 4b0bc908eddf..63470a943515 100644
--- a/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll
+++ b/llvm/test/Transforms/LoopLoadElim/versioning-scev-invalidation.ll
@@ -63,8 +63,8 @@ define void @g(ptr %dst.1, ptr %start, i64 %N) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 8
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[LCSSA_PTR_IV_1]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP5]], align 8
diff --git a/llvm/test/Transforms/LoopRotate/update-branch-weights.ll b/llvm/test/Transforms/LoopRotate/update-branch-weights.ll
index 5d742b64e0ad..9a1f36ec5ff2 100644
--- a/llvm/test/Transforms/LoopRotate/update-branch-weights.ll
+++ b/llvm/test/Transforms/LoopRotate/update-branch-weights.ll
@@ -232,6 +232,46 @@ loop_exit:
ret void
}
+; BFI_BEFORE-LABEL: block-frequency-info: func6_inaccurate_branch_weight
+; BFI_BEFORE: - entry: {{.*}} count = 1024
+; BFI_BEFORE: - loop_header: {{.*}} count = 2047
+; BFI_BEFORE: - loop_body: {{.*}} count = 1023
+; BFI_BEFORE: - loop_exit: {{.*}} count = 1024
+
+; BFI_AFTER-LABEL: block-frequency-info: func6_inaccurate_branch_weight
+; BFI_AFTER: - entry: {{.*}} count = 1024
+; BFI_AFTER: - loop_body: {{.*}} count = 1024
+; BFI_AFTER: - loop_exit: {{.*}} count = 1024
+
+; IR-LABEL: define void @func6_inaccurate_branch_weight(
+; IR: entry:
+; IR: br label %loop_body
+; IR: loop_body:
+; IR: br i1 %cmp, label %loop_body, label %loop_exit, !prof [[PROF_FUNC6_0:![0-9]+]]
+; IR: loop_exit:
+; IR: ret void
+
+; Branch weight from sample-based PGO may be inaccurate due to sampling.
+; Count for loop_body in following case should be not less than loop_exit.
+; However this may not hold for Sample-based PGO.
+define void @func6_inaccurate_branch_weight() !prof !3 {
+entry:
+ br label %loop_header
+
+loop_header:
+ %i = phi i32 [0, %entry], [%i_inc, %loop_body]
+ %cmp = icmp slt i32 %i, 2
+ br i1 %cmp, label %loop_body, label %loop_exit, !prof !9
+
+loop_body:
+ store volatile i32 %i, ptr @g, align 4
+ %i_inc = add i32 %i, 1
+ br label %loop_header
+
+loop_exit:
+ ret void
+}
+
!0 = !{!"function_entry_count", i64 1}
!1 = !{!"branch_weights", i32 1000, i32 1}
!2 = !{!"branch_weights", i32 3000, i32 1000}
@@ -241,6 +281,7 @@ loop_exit:
!6 = !{!"branch_weights", i32 0, i32 1}
!7 = !{!"branch_weights", i32 1, i32 0}
!8 = !{!"branch_weights", i32 0, i32 0}
+!9 = !{!"branch_weights", i32 1023, i32 1024}
; IR: [[PROF_FUNC0_0]] = !{!"branch_weights", i32 2000, i32 1000}
; IR: [[PROF_FUNC0_1]] = !{!"branch_weights", i32 999, i32 1}
@@ -251,3 +292,4 @@ loop_exit:
; IR: [[PROF_FUNC3_0]] = !{!"branch_weights", i32 0, i32 1}
; IR: [[PROF_FUNC4_0]] = !{!"branch_weights", i32 1, i32 0}
; IR: [[PROF_FUNC5_0]] = !{!"branch_weights", i32 0, i32 0}
+; IR: [[PROF_FUNC6_0]] = !{!"branch_weights", i32 0, i32 1024}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll
index 24c59fdb47b6..00ec396107dc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll
@@ -11,76 +11,74 @@ define void @test_widen_ptr_induction(ptr %ptr.start.1) {
; CHECK: vector.main.loop.iter.check:
; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START_1:%.*]], i64 10000
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x ptr> [[TMP2]], ptr [[NEXT_GEP1]], i32 1
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP2]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[NEXT_GEP3]], i32 1
-; CHECK-NEXT: [[TMP8:%.*]] = icmp ne <2 x ptr> [[TMP3]], zeroinitializer
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <2 x ptr> [[TMP7]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP10]])
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP11]])
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP9]], i32 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP12]])
-; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP9]], i32 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 2
-; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP14]], align 1
-; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP15]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[PTR_START_1:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x ptr> [[TMP6]], ptr [[TMP5]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x ptr> [[TMP10]], ptr [[TMP9]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <2 x ptr> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <2 x ptr> [[TMP11]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP12]], i32 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]])
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP12]], i32 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]])
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP13]], i32 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP13]], i32 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP17]])
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP4]], i32 2
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP18]], align 1
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP19]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[IND_END6:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
+; CHECK-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
; CHECK-NEXT: br i1 true, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START_1]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[IND_END5:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 10000
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX8:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT11:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX8]], 0
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP17]]
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX8]], 1
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP9]], i32 0
-; CHECK-NEXT: [[TMP20:%.*]] = insertelement <2 x ptr> [[TMP19]], ptr [[NEXT_GEP10]], i32 1
-; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <2 x ptr> [[TMP20]], zeroinitializer
-; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i1> [[TMP21]], i32 0
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP22]])
-; CHECK-NEXT: [[TMP23:%.*]] = extractelement <2 x i1> [[TMP21]], i32 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP23]])
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[NEXT_GEP9]], i32 0
-; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP24]], align 1
-; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[INDEX8]], 2
-; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT11]], 10000
-; CHECK-NEXT: br i1 [[TMP25]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
+; CHECK-NEXT: [[INDEX3:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX3]], 0
+; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX3]], 1
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[PTR_START_1]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP25:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP23]], i32 0
+; CHECK-NEXT: [[TMP26:%.*]] = insertelement <2 x ptr> [[TMP25]], ptr [[TMP24]], i32 1
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <2 x ptr> [[TMP26]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP27]], i32 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP28]])
+; CHECK-NEXT: [[TMP29:%.*]] = extractelement <2 x i1> [[TMP27]], i32 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP29]])
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i8, ptr [[TMP23]], i32 0
+; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX3]], 2
+; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 10000
+; CHECK-NEXT: br i1 [[TMP31]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], {{!llvm.loop ![0-9]+}}
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 10000, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL7:%.*]] = phi ptr [ [[IND_END5]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END6]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START_1]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 10000, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END1]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR_START_1]], [[ITER_CHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL4]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL7]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[CMP_I_I_I_I:%.*]] = icmp ne ptr [[PTR_IV]], null
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP_I_I_I_I]])
; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index 1e79c3e1e8dc..b91579106261 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -99,7 +99,7 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]]
@@ -117,7 +117,6 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP13]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP14]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT4]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2
; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX_NEXT]], [[TMP20]]
@@ -254,7 +253,7 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT6:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
@@ -284,7 +283,6 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP25]], i32 8, <vscale x 2 x i1> [[TMP23]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP28]], i32 8, <vscale x 2 x i1> [[TMP24]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT5]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 2
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = add i64 [[INDEX_NEXT]], [[TMP30]]
@@ -437,7 +435,7 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT6:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
@@ -469,7 +467,6 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[TMP25]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[TMP26]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT5]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = mul i64 [[TMP31]], 2
; TFA_INTERLEAVE-NEXT: [[TMP33:%.*]] = add i64 [[INDEX_NEXT]], [[TMP32]]
@@ -771,7 +768,7 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]]
@@ -789,7 +786,6 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP13]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP14]], ptr [[TMP18]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT4]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2
; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX_NEXT]], [[TMP20]]
@@ -970,7 +966,7 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; TFA_INTERLEAVE: vector.body:
-; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
@@ -997,7 +993,6 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x double> [[TMP14]], <vscale x 2 x double> shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double -0.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
; TFA_INTERLEAVE-NEXT: [[TMP26]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[TMP24]], <vscale x 2 x double> [[TMP25]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT4]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 2
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = add i64 [[INDEX_NEXT]], [[TMP28]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
index 1970ac966535..809d2e8f7ea1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
@@ -20,7 +20,7 @@ define i32 @pr70988() {
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ult i64 1, [[UMAX]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT6:%.*]], [[PRED_LOAD_CONTINUE5:%.*]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE5:%.*]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY1]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT7:%.*]], [[PRED_LOAD_CONTINUE5]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[PRED_LOAD_CONTINUE5]] ]
@@ -50,7 +50,6 @@ define i32 @pr70988() {
; CHECK-NEXT: [[TMP17]] = select i1 [[ACTIVE_LANE_MASK]], i32 [[TMP15]], i32 [[VEC_PHI]]
; CHECK-NEXT: [[TMP18]] = select i1 [[ACTIVE_LANE_MASK2]], i32 [[TMP16]], i32 [[VEC_PHI3]]
; CHECK-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[INDEX_NEXT6]] = add i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX_NEXT]], 1
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = icmp ult i64 [[INDEX_NEXT]], [[UMAX]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT7]] = icmp ult i64 [[TMP19]], [[UMAX]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index fc67fb5aded6..ad6e8534f318 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -403,21 +403,6 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = sub i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -492,9 +477,9 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP78:%.*]] = mul i64 [[TMP77]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP79:%.*]] = add i64 [[INDEX]], [[TMP78]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP73]], i64 [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP76]], i64 [[TMP19]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP79]], i64 [[TMP24]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP73]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP76]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP79]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP80:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP81:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT12]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP82:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT13]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
@@ -1715,21 +1700,6 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = sub i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -1826,9 +1796,9 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP96:%.*]] = mul i64 [[TMP95]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP97:%.*]] = add i64 [[INDEX]], [[TMP96]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP19]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP24]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP98:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP99:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT16]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP100:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT17]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
@@ -2129,21 +2099,6 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP7:%.*]] = sub i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[N]], [[TMP6]]
; CHECK-ORDERED-TF-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = sub i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
-; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[N]], [[TMP16]]
-; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-ORDERED-TF-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 32
-; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = sub i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[N]], [[TMP21]]
-; CHECK-ORDERED-TF-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-ORDERED-TF-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-ORDERED-TF-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-ORDERED-TF-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -2240,9 +2195,9 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[TMP96:%.*]] = mul i64 [[TMP95]], 24
; CHECK-ORDERED-TF-NEXT: [[TMP97:%.*]] = add i64 [[INDEX]], [[TMP96]]
; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP9]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP14]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP19]])
-; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP24]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP91]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT17]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP94]], i64 [[TMP9]])
+; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP97]], i64 [[TMP9]])
; CHECK-ORDERED-TF-NEXT: [[TMP98:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP99:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT16]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-ORDERED-TF-NEXT: [[TMP100:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT17]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
index 24d2127ee171..12889c2acc8e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll
@@ -146,13 +146,13 @@ define void @main_vf_vscale_x_16(ptr %A) #0 {
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK-VF8: vec.epilog.vector.body:
-; CHECK-VF8-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-VF8-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX1]], 0
; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP19]]
; CHECK-VF8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TMP20]], i32 0
; CHECK-VF8-NEXT: store <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, ptr [[TMP21]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8
-; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
+; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
+; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -246,13 +246,13 @@ define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) {
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX1]], 0
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP19]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i32 0
; CHECK-NEXT: store <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, ptr [[TMP21]], align 1
-; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
+; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
; CHECK-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -315,13 +315,13 @@ define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) {
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK-VF8: vec.epilog.vector.body:
-; CHECK-VF8-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-VF8-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX1]], 0
; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP19]]
; CHECK-VF8-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i32 0
; CHECK-VF8-NEXT: store <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>, ptr [[TMP21]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX2]], 8
-; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024
+; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8
+; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024
; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
@@ -374,66 +374,65 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 32
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 16
; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], [[TMP11]]
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 16
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP15]]
-; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[TMP14]], i32 0
+; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 16
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP16]], align 1
+; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP19]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
+; CHECK-NEXT: [[IND_END4:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 10000, [[N_VEC]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP19]]
+; CHECK-NEXT: [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], 8
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP22]]
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 8
-; CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 10000, [[TMP21]]
-; CHECK-NEXT: [[N_VEC4:%.*]] = sub i64 10000, [[N_MOD_VF3]]
-; CHECK-NEXT: [[IND_END6:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC4]]
-; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 8
+; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 8
+; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 10000, [[TMP24]]
+; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 10000, [[N_MOD_VF2]]
+; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC3]]
+; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 8
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX10:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[INDEX10]], 0
-; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[NEXT_GEP11]], i32 0
-; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP25]], align 1
-; CHECK-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX10]], [[TMP23]]
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT12]], [[N_VEC4]]
-; CHECK-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX7]], 0
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP27]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[TMP28]], i32 0
+; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP29]], align 1
+; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[TMP30]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: vec.epilog.middle.block:
-; CHECK-NEXT: [[CMP_N9:%.*]] = icmp eq i64 10000, [[N_VEC4]]
-; CHECK-NEXT: br i1 [[CMP_N9]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
+; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 10000, [[N_VEC3]]
+; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i64 [ [[N_VEC4]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END6]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END7]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL5]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL8]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL5]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: store i8 0, ptr [[PTR_IV]], align 1
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV]], i64 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -457,57 +456,56 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 {
; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 10000, [[N_MOD_VF]]
; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 32
-; CHECK-VF8-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[N_VEC]]
; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-VF8: vector.body:
; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-VF8-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0
-; CHECK-VF8-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
; CHECK-VF8-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-VF8-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
; CHECK-VF8-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 0
-; CHECK-VF8-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], [[TMP9]]
-; CHECK-VF8-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP10]]
-; CHECK-VF8-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-VF8-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-VF8-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 16
-; CHECK-VF8-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP13]]
-; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP11]], align 1
+; CHECK-VF8-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 1
+; CHECK-VF8-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], [[TMP10]]
+; CHECK-VF8-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[START:%.*]], i64 [[TMP6]]
+; CHECK-VF8-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP11]]
+; CHECK-VF8-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[TMP12]], i32 0
+; CHECK-VF8-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-VF8-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 16
+; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP12]], i64 [[TMP16]]
; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP14]], align 1
+; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP17]], align 1
; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-VF8: middle.block:
; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]]
; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK-VF8: vec.epilog.iter.check:
-; CHECK-VF8-NEXT: [[IND_END4:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
+; CHECK-VF8-NEXT: [[IND_END1:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 10000, [[N_VEC]]
; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8
; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK-VF8: vec.epilog.ph:
-; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; CHECK-VF8-NEXT: [[IND_END3:%.*]] = getelementptr i8, ptr [[START]], i64 10000
+; CHECK-VF8-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 10000
; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK-VF8: vec.epilog.vector.body:
-; CHECK-VF8-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; CHECK-VF8-NEXT: [[TMP16:%.*]] = add i64 [[INDEX7]], 0
-; CHECK-VF8-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP16]]
-; CHECK-VF8-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP8]], i32 0
-; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP17]], align 1
-; CHECK-VF8-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX7]], 8
-; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT9]], 10000
-; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-VF8-NEXT: [[INDEX3:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-VF8-NEXT: [[TMP19:%.*]] = add i64 [[INDEX3]], 0
+; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP19]]
+; CHECK-VF8-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i32 0
+; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP21]], align 1
+; CHECK-VF8-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX3]], 8
+; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 10000
+; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-VF8: vec.epilog.middle.block:
; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK-VF8: vec.epilog.scalar.ph:
-; CHECK-VF8-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-VF8-NEXT: [[BC_RESUME_VAL5:%.*]] = phi ptr [ [[IND_END3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
+; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 10000, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-VF8-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END1]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START]], [[ITER_CHECK]] ]
; CHECK-VF8-NEXT: br label [[LOOP:%.*]]
; CHECK-VF8: loop:
-; CHECK-VF8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL2]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-VF8-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL5]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-VF8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-VF8-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-VF8-NEXT: store i8 0, ptr [[PTR_IV]], align 1
; CHECK-VF8-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV]], i64 1
; CHECK-VF8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
index cfb0f9e59ecb..8b64d7a08366 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-live-out-pointer-induction.ll
@@ -23,54 +23,54 @@ define ptr @test(ptr %start.1, ptr %start.2, ptr %end) {
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START_1:%.*]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT: [[IND_END3:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP34:%.*]] = mul i64 [[TMP33]], 4
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_1]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP11]], 2
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 8, [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP11]], 0
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP14]], i64 0
+; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 2
+; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 2
+; CHECK-NEXT: [[TMP15:%.*]] = mul i64 8, [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP13]], 0
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP16]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP15]]
-; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP16]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP11]], 1
-; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP18]], i64 0
+; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP17]]
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP18]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP13]], 1
+; CHECK-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP20]], i64 0
; CHECK-NEXT: [[DOTSPLAT6:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT6]], [[TMP19]]
-; CHECK-NEXT: [[VECTOR_GEP7:%.*]] = mul <vscale x 2 x i64> [[TMP20]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP7]]
-; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 8
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP23]]
-; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 2
-; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[TMP25]], 0
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], [[TMP26]]
+; CHECK-NEXT: [[TMP21:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT6]], [[TMP21]]
+; CHECK-NEXT: [[VECTOR_GEP7:%.*]] = mul <vscale x 2 x i64> [[TMP22]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 8, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP7]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 2
+; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 0
; CHECK-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 8
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP31:%.*]] = mul i64 [[TMP30]], 2
-; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i64 [[TMP31]]
-; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP29]], align 8
+; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[OFFSET_IDX]], [[TMP28]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[START_2]], i64 [[TMP29]]
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i64, ptr [[TMP30]], i32 0
+; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP34:%.*]] = mul i64 [[TMP33]], 2
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr i64, ptr [[TMP30]], i64 [[TMP34]]
; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP32]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP34]]
-; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP13]]
-; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: store <vscale x 2 x i64> zeroinitializer, ptr [[TMP35]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: [[CMO:%.*]] = sub i64 [[N_VEC]], 1
-; CHECK-NEXT: [[TMP36:%.*]] = mul i64 [[CMO]], 8
-; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP36]]
+; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[CMO]], 8
+; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP37]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START_1]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
index 1a6e83a61ce7..2acc1ddbffea 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll
@@ -25,21 +25,6 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 16
-; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 16
-; CHECK-NEXT: [[TMP22:%.*]] = sub i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 4
; CHECK-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -107,9 +92,9 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[TMP70:%.*]] = mul i64 [[TMP69]], 12
; CHECK-NEXT: [[TMP71:%.*]] = add i64 [[INDEX6]], [[TMP70]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX6]], i64 [[TMP9]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT11]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP65]], i64 [[TMP14]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP68]], i64 [[TMP19]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP71]], i64 [[TMP24]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT11]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP65]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT12]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP68]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT13]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP71]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP72:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP73:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT11]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP74:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT12]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
@@ -167,21 +152,6 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[UMAX]], [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[UMAX]], [[TMP11]]
-; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 16
-; CHECK-NEXT: [[TMP17:%.*]] = sub i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp ugt i64 [[UMAX]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[TMP18]], i64 [[TMP17]], i64 0
-; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 16
-; CHECK-NEXT: [[TMP22:%.*]] = sub i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP23:%.*]] = icmp ugt i64 [[UMAX]], [[TMP21]]
-; CHECK-NEXT: [[TMP24:%.*]] = select i1 [[TMP23]], i64 [[TMP22]], i64 0
; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 4
; CHECK-NEXT: [[INDEX_PART_NEXT:%.*]] = add i64 0, [[TMP26]]
@@ -275,9 +245,9 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias %
; CHECK-NEXT: [[TMP92:%.*]] = mul i64 [[TMP91]], 12
; CHECK-NEXT: [[TMP93:%.*]] = add i64 [[INDEX6]], [[TMP92]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX6]], i64 [[TMP9]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP87]], i64 [[TMP14]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT15]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP90]], i64 [[TMP19]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP24]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP87]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT15]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP90]], i64 [[TMP9]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP94:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP95:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT14]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP96:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT15]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
index 899fcce5c02a..3bab341e1c24 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -19,10 +19,12 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
-; CHECK-NEXT: EMIT ir<%ptr.iv.1> = WIDEN-POINTER-INDUCTION ir<%start.1>, 8
; CHECK-NEXT: EMIT ir<%ptr.iv.2> = WIDEN-POINTER-INDUCTION ir<%start.2>, 1
+; CHECK-NEXT: vp<[[PTR_IDX:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * ir<8>
+; CHECK-NEXT: vp<[[PTR_IDX_STEPS:%.+]]> = SCALAR-STEPS vp<[[PTR_IDX]]>, ir<8>
+; CHECK-NEXT: EMIT vp<[[PTR_IV_1:%.+]]> = ptradd ir<%start.1>, vp<[[PTR_IDX_STEPS]]>
; CHECK-NEXT: WIDEN-GEP Var[Inv] ir<%ptr.iv.2.next> = getelementptr inbounds ir<%ptr.iv.2>, ir<1>
-; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer ir<%ptr.iv.1>
+; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer vp<[[PTR_IV_1]]>
; CHECK-NEXT: WIDEN store vp<[[VEC_PTR]]>, ir<%ptr.iv.2.next>
; CHECK-NEXT: vp<[[VEC_PTR2:%.+]]> = vector-pointer ir<%ptr.iv.2>
; CHECK-NEXT: WIDEN ir<%lv> = load vp<[[VEC_PTR2]]>
@@ -59,9 +61,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 1
@@ -73,6 +72,9 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP12]]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP13]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, <vscale x 2 x ptr> [[TMP14]], i64 1
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP15]], ptr [[TMP16]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 7226048c478d..126ceac7325a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -32,21 +32,20 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[C]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 5
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[C]], i64 [[TMP8]]
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[TMP10]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[WIDE_VEC3:%.*]] = load <vscale x 8 x i32>, ptr [[NEXT_GEP2]], align 4
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 5
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP9]], i64 [[TMP7]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <vscale x 8 x i32>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC3]])
-; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 0
-; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC4]], 1
+; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.experimental.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC2]])
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC3]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC3]], 1
; CHECK-NEXT: [[TMP15:%.*]] = add nsw <vscale x 4 x i32> [[TMP11]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[TMP13]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
@@ -148,21 +147,21 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP8]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl nuw nsw i64 [[TMP9]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i64 [[TMP10]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 4 x i32>, ptr [[TMP11]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP7]], i64 [[TMP10]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP13:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD7]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP13:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD5]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP14]], 2
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[NEXT_GEP5]], i64 [[TMP15]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[NEXT_GEP5]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP15]]
+; CHECK-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP8]], align 4
; CHECK-NEXT: store <vscale x 4 x i32> [[TMP13]], ptr [[TMP16]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -246,12 +245,12 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = shl <vscale x 2 x i64> [[TMP9]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 2, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP11]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 2 x ptr> [[TMP10]], i64 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP13]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP10]], ptr [[NEXT_GEP]], align 8
+; CHECK-NEXT: store <vscale x 2 x ptr> [[TMP10]], ptr [[TMP11]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll b/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
index 4957bbeda671..d8f14f30295b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll
@@ -40,9 +40,6 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; INTERLEAVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]])
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 2
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP6]])
; INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP8]], 1
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]])
@@ -71,7 +68,7 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
; INTERLEAVE-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 1
; INTERLEAVE-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], [[TMP21]]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]])
-; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP7]])
+; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP4]])
; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; INTERLEAVE-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; INTERLEAVE: for.cond.cleanup:
@@ -129,9 +126,6 @@ define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i3
; INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; INTERLEAVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]])
-; INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; INTERLEAVE-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 2
-; INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP6]])
; INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; INTERLEAVE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP8]], 1
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]])
@@ -160,7 +154,7 @@ define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i3
; INTERLEAVE-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 1
; INTERLEAVE-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], [[TMP21]]
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]])
-; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP7]])
+; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP4]])
; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; INTERLEAVE-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
; INTERLEAVE: for.cond.cleanup:
@@ -207,7 +201,6 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; INTERLEAVE-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; INTERLEAVE-NEXT: entry:
; INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 2)
-; INTERLEAVE-NEXT: [[TMP1:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 2)
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = icmp ne i64 [[N]], 0
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ugt i64 [[N]], 1
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -237,7 +230,7 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
; INTERLEAVE-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1
; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = icmp ult i64 [[INDEX]], [[TMP0]]
-; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = icmp ult i64 [[TMP11]], [[TMP1]]
+; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = icmp ult i64 [[TMP11]], [[TMP0]]
; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK_NEXT]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP4:![0-9]+]]
; INTERLEAVE: for.cond.cleanup:
; INTERLEAVE-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
index e9541c1ee035..6516b05ab4ed 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll
@@ -639,87 +639,84 @@ define void @test_gather_not_profitable_pr48429(i32 %d, ptr readonly noalias %pt
; AVX512: vector.ph:
; AVX512-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 16
; AVX512-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; AVX512-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 4
-; AVX512-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP13]]
-; AVX512-NEXT: [[TMP14:%.*]] = mul i64 [[N_VEC]], 64
-; AVX512-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP14]]
+; AVX512-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 64
+; AVX512-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP13]]
; AVX512-NEXT: br label [[VECTOR_BODY:%.*]]
; AVX512: vector.body:
; AVX512-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[DEST]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; AVX512-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; AVX512-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
-; AVX512-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
-; AVX512-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP16]]
-; AVX512-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <16 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448, i64 512, i64 576, i64 640, i64 704, i64 768, i64 832, i64 896, i64 960>
-; AVX512-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; AVX512-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP19]], align 4, !alias.scope !8
-; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD]], <16 x ptr> [[TMP17]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !11, !noalias !13
-; AVX512-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[NEXT_GEP]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP20]], align 4, !alias.scope !15
-; AVX512-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, <16 x ptr> [[TMP17]], i64 1
-; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD8]], <16 x ptr> [[TMP21]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !11, !noalias !13
+; AVX512-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <16 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448, i64 512, i64 576, i64 640, i64 704, i64 768, i64 832, i64 896, i64 960>
+; AVX512-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
+; AVX512-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0
+; AVX512-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP15]]
+; AVX512-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM]]
+; AVX512-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD:%.*]] = load <16 x float>, ptr [[TMP18]], align 4, !alias.scope [[META8:![0-9]+]]
+; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD]], <16 x ptr> [[TMP14]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]]
+; AVX512-NEXT: [[TMP19:%.*]] = getelementptr float, ptr [[TMP16]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x float>, ptr [[TMP19]], align 4, !alias.scope [[META15:![0-9]+]]
+; AVX512-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, <16 x ptr> [[TMP14]], i64 1
+; AVX512-NEXT: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> [[WIDE_LOAD8]], <16 x ptr> [[TMP20]], i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META11]], !noalias [[META13]]
; AVX512-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; AVX512-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 1024
-; AVX512-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; AVX512-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; AVX512-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; AVX512-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; AVX512: middle.block:
; AVX512-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; AVX512-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; AVX512: vec.epilog.iter.check:
-; AVX512-NEXT: [[TMP23:%.*]] = mul i64 [[N_VEC]], 64
-; AVX512-NEXT: [[IND_END17:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP23]]
-; AVX512-NEXT: [[TMP24:%.*]] = mul i64 [[N_VEC]], 4
-; AVX512-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP24]]
+; AVX512-NEXT: [[TMP22:%.*]] = mul i64 [[N_VEC]], 64
+; AVX512-NEXT: [[IND_END15:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP22]]
+; AVX512-NEXT: [[TMP23:%.*]] = mul i64 [[N_VEC]], 4
+; AVX512-NEXT: [[IND_END12:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP23]]
; AVX512-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8
; AVX512-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; AVX512: vec.epilog.ph:
-; AVX512-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; AVX512-NEXT: [[BC_RESUME_VAL10:%.*]] = phi ptr [ [[IND_END9]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; AVX512-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; AVX512-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
-; AVX512-NEXT: [[N_MOD_VF11:%.*]] = urem i64 [[TMP3]], 8
-; AVX512-NEXT: [[N_VEC12:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF11]]
-; AVX512-NEXT: [[TMP25:%.*]] = mul i64 [[N_VEC12]], 4
-; AVX512-NEXT: [[IND_END13:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP25]]
-; AVX512-NEXT: [[TMP26:%.*]] = mul i64 [[N_VEC12]], 64
-; AVX512-NEXT: [[IND_END16:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP26]]
+; AVX512-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[TMP3]], 8
+; AVX512-NEXT: [[N_VEC10:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF9]]
+; AVX512-NEXT: [[TMP24:%.*]] = mul i64 [[N_VEC10]], 4
+; AVX512-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP24]]
+; AVX512-NEXT: [[TMP25:%.*]] = mul i64 [[N_VEC10]], 64
+; AVX512-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP25]]
; AVX512-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; AVX512: vec.epilog.vector.body:
-; AVX512-NEXT: [[POINTER_PHI22:%.*]] = phi ptr [ [[BC_RESUME_VAL10]], [[VEC_EPILOG_PH]] ], [ [[PTR_IND23:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; AVX512-NEXT: [[INDEX20:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT26:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; AVX512-NEXT: [[TMP27:%.*]] = add i64 [[INDEX20]], 0
-; AVX512-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 4
-; AVX512-NEXT: [[NEXT_GEP21:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP28]]
-; AVX512-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[POINTER_PHI22]], <8 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448>
-; AVX512-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP21]], i64 [[IDXPROM]]
-; AVX512-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, ptr [[TMP30]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD24:%.*]] = load <8 x float>, ptr [[TMP31]], align 4, !alias.scope !17
-; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD24]], <8 x ptr> [[TMP29]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !20, !noalias !22
-; AVX512-NEXT: [[TMP32:%.*]] = getelementptr float, ptr [[NEXT_GEP21]], i32 0
-; AVX512-NEXT: [[WIDE_LOAD25:%.*]] = load <8 x float>, ptr [[TMP32]], align 4, !alias.scope !24
-; AVX512-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, <8 x ptr> [[TMP29]], i64 1
-; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD25]], <8 x ptr> [[TMP33]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope !20, !noalias !22
-; AVX512-NEXT: [[INDEX_NEXT26]] = add nuw i64 [[INDEX20]], 8
-; AVX512-NEXT: [[PTR_IND23]] = getelementptr i8, ptr [[POINTER_PHI22]], i64 512
-; AVX512-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT26]], [[N_VEC12]]
-; AVX512-NEXT: br i1 [[TMP34]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; AVX512-NEXT: [[POINTER_PHI19:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[PTR_IND20:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; AVX512-NEXT: [[INDEX18:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT24:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; AVX512-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[POINTER_PHI19]], <8 x i64> <i64 0, i64 64, i64 128, i64 192, i64 256, i64 320, i64 384, i64 448>
+; AVX512-NEXT: [[OFFSET_IDX21:%.*]] = mul i64 [[INDEX18]], 4
+; AVX512-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX21]], 0
+; AVX512-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP27]]
+; AVX512-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[IDXPROM]]
+; AVX512-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[TMP29]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD22:%.*]] = load <8 x float>, ptr [[TMP30]], align 4, !alias.scope [[META17:![0-9]+]]
+; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD22]], <8 x ptr> [[TMP26]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META20:![0-9]+]], !noalias [[META22:![0-9]+]]
+; AVX512-NEXT: [[TMP31:%.*]] = getelementptr float, ptr [[TMP28]], i32 0
+; AVX512-NEXT: [[WIDE_LOAD23:%.*]] = load <8 x float>, ptr [[TMP31]], align 4, !alias.scope [[META24:![0-9]+]]
+; AVX512-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, <8 x ptr> [[TMP26]], i64 1
+; AVX512-NEXT: call void @llvm.masked.scatter.v8f32.v8p0(<8 x float> [[WIDE_LOAD23]], <8 x ptr> [[TMP32]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>), !alias.scope [[META20]], !noalias [[META22]]
+; AVX512-NEXT: [[INDEX_NEXT24]] = add nuw i64 [[INDEX18]], 8
+; AVX512-NEXT: [[PTR_IND20]] = getelementptr i8, ptr [[POINTER_PHI19]], i64 512
+; AVX512-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT24]], [[N_VEC10]]
+; AVX512-NEXT: br i1 [[TMP33]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; AVX512: vec.epilog.middle.block:
-; AVX512-NEXT: [[CMP_N19:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC12]]
-; AVX512-NEXT: br i1 [[CMP_N19]], label [[FOR_END]], label [[VEC_EPILOG_SCALAR_PH]]
+; AVX512-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC10]]
+; AVX512-NEXT: br i1 [[CMP_N17]], label [[FOR_END]], label [[VEC_EPILOG_SCALAR_PH]]
; AVX512: vec.epilog.scalar.ph:
-; AVX512-NEXT: [[BC_RESUME_VAL15:%.*]] = phi ptr [ [[IND_END13]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END14]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR]], [[VECTOR_MEMCHECK]] ], [ [[PTR]], [[ITER_CHECK]] ]
-; AVX512-NEXT: [[BC_RESUME_VAL18:%.*]] = phi ptr [ [[IND_END16]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END17]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MEMCHECK]] ], [ [[DEST]], [[ITER_CHECK]] ]
+; AVX512-NEXT: [[BC_RESUME_VAL13:%.*]] = phi ptr [ [[IND_END11]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END12]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[PTR]], [[VECTOR_MEMCHECK]] ], [ [[PTR]], [[ITER_CHECK]] ]
+; AVX512-NEXT: [[BC_RESUME_VAL16:%.*]] = phi ptr [ [[IND_END14]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END15]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[DEST]], [[VECTOR_MEMCHECK]] ], [ [[DEST]], [[ITER_CHECK]] ]
; AVX512-NEXT: br label [[FOR_BODY:%.*]]
; AVX512: for.body:
-; AVX512-NEXT: [[PTR_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL15]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
-; AVX512-NEXT: [[DEST_ADDR_011:%.*]] = phi ptr [ [[BC_RESUME_VAL18]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD_PTR6:%.*]], [[FOR_BODY]] ]
+; AVX512-NEXT: [[PTR_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL13]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
+; AVX512-NEXT: [[DEST_ADDR_011:%.*]] = phi ptr [ [[BC_RESUME_VAL16]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD_PTR6:%.*]], [[FOR_BODY]] ]
; AVX512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[PTR_ADDR_012]], i64 [[IDXPROM]]
-; AVX512-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; AVX512-NEXT: store float [[TMP35]], ptr [[DEST_ADDR_011]], align 4
-; AVX512-NEXT: [[TMP36:%.*]] = load float, ptr [[PTR_ADDR_012]], align 4
+; AVX512-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; AVX512-NEXT: store float [[TMP34]], ptr [[DEST_ADDR_011]], align 4
+; AVX512-NEXT: [[TMP35:%.*]] = load float, ptr [[PTR_ADDR_012]], align 4
; AVX512-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[DEST_ADDR_011]], i64 1
-; AVX512-NEXT: store float [[TMP36]], ptr [[ARRAYIDX5]], align 4
+; AVX512-NEXT: store float [[TMP35]], ptr [[ARRAYIDX5]], align 4
; AVX512-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[PTR_ADDR_012]], i64 1
; AVX512-NEXT: [[ADD_PTR6]] = getelementptr inbounds float, ptr [[DEST_ADDR_011]], i64 16
; AVX512-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[ADD_PTR]]
@@ -774,30 +771,29 @@ define void @test_gather_not_profitable_pr48429(i32 %d, ptr readonly noalias %pt
; FVW2-NEXT: br label [[VECTOR_BODY:%.*]]
; FVW2: vector.body:
; FVW2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; FVW2-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
-; FVW2-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
-; FVW2-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP16]]
-; FVW2-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 0
-; FVW2-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 64
-; FVW2-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP18]]
-; FVW2-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 1
-; FVW2-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 64
-; FVW2-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP20]]
-; FVW2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; FVW2-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
+; FVW2-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0
+; FVW2-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP15]]
+; FVW2-NEXT: [[OFFSET_IDX9:%.*]] = mul i64 [[INDEX]], 64
+; FVW2-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX9]], 0
+; FVW2-NEXT: [[TMP18:%.*]] = add i64 [[OFFSET_IDX9]], 64
+; FVW2-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP17]]
+; FVW2-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP18]]
+; FVW2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM]]
; FVW2-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 0
-; FVW2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP22]], align 4, !alias.scope !8
+; FVW2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP22]], align 4, !alias.scope [[META8:![0-9]+]]
; FVW2-NEXT: [[TMP23:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 0
-; FVW2-NEXT: store float [[TMP23]], ptr [[NEXT_GEP9]], align 4, !alias.scope !11, !noalias !13
+; FVW2-NEXT: store float [[TMP23]], ptr [[TMP19]], align 4, !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]]
; FVW2-NEXT: [[TMP24:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 1
-; FVW2-NEXT: store float [[TMP24]], ptr [[NEXT_GEP10]], align 4, !alias.scope !11, !noalias !13
-; FVW2-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[NEXT_GEP]], i32 0
-; FVW2-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x float>, ptr [[TMP25]], align 4, !alias.scope !15
-; FVW2-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP9]], i64 1
-; FVW2-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[NEXT_GEP10]], i64 1
-; FVW2-NEXT: [[TMP28:%.*]] = extractelement <2 x float> [[WIDE_LOAD11]], i32 0
-; FVW2-NEXT: store float [[TMP28]], ptr [[TMP26]], align 4, !alias.scope !11, !noalias !13
-; FVW2-NEXT: [[TMP29:%.*]] = extractelement <2 x float> [[WIDE_LOAD11]], i32 1
-; FVW2-NEXT: store float [[TMP29]], ptr [[TMP27]], align 4, !alias.scope !11, !noalias !13
+; FVW2-NEXT: store float [[TMP24]], ptr [[TMP20]], align 4, !alias.scope [[META11]], !noalias [[META13]]
+; FVW2-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[TMP16]], i32 0
+; FVW2-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x float>, ptr [[TMP25]], align 4, !alias.scope [[META15:![0-9]+]]
+; FVW2-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 1
+; FVW2-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 1
+; FVW2-NEXT: [[TMP28:%.*]] = extractelement <2 x float> [[WIDE_LOAD10]], i32 0
+; FVW2-NEXT: store float [[TMP28]], ptr [[TMP26]], align 4, !alias.scope [[META11]], !noalias [[META13]]
+; FVW2-NEXT: [[TMP29:%.*]] = extractelement <2 x float> [[WIDE_LOAD10]], i32 1
+; FVW2-NEXT: store float [[TMP29]], ptr [[TMP27]], align 4, !alias.scope [[META11]], !noalias [[META13]]
; FVW2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; FVW2-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; FVW2-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll
index 022912f3b855..1b0118e137e7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-opaque-pointers.ll
@@ -25,15 +25,14 @@ define void @test_pr55375_interleave_opaque_ptr(ptr %start, ptr %end) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 16
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x ptr> poison, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> [[TMP9]], ptr [[NEXT_GEP3]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 16
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x ptr> [[TMP9]], ptr [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr ptr, ptr [[TMP7]], i32 0
; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x ptr> zeroinitializer, <2 x ptr> [[TMP10]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x ptr> [[TMP12]], <4 x ptr> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
; CHECK-NEXT: store <4 x ptr> [[INTERLEAVED_VEC]], ptr [[TMP11]], align 8
@@ -53,7 +52,7 @@ define void @test_pr55375_interleave_opaque_ptr(ptr %start, ptr %end) {
; CHECK-NEXT: store ptr null, ptr [[IV]], align 8
; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds [[PAIR]], ptr [[IV]], i64 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
index 14acb6f57aa0..3f38abc75a58 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
@@ -29,7 +29,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i64> [[VEC_IND]], <i64 1, i64 1, i64 1, i64 1>
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[ARR]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP6]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i32 -3
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index 51d264820503..dc474fbf67ce 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -273,64 +273,58 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE19:%.*]] ]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT12:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT13:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT12]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT13]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE20:%.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[OFFSET_IDX8:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT13:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT14:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT13]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT14]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[NEXT_GEP8]], align 16
-; CHECK-NEXT: store i32 [[TMP7]], ptr [[NEXT_GEP]], align 16
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[Q:%.*]], i64 [[OFFSET_IDX8]]
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[NEXT_GEP9]], align 16
+; CHECK-NEXT: store i32 [[TMP5]], ptr [[NEXT_GEP]], align 16
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
-; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
-; CHECK: pred.store.if14:
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[TMP9]], 4
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[TMP11]], 4
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[NEXT_GEP9]], align 16
-; CHECK-NEXT: store i32 [[TMP13]], ptr [[NEXT_GEP5]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; CHECK: pred.store.continue15:
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
-; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17:%.*]]
-; CHECK: pred.store.if16:
-; CHECK-NEXT: [[TMP15:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP16:%.*]] = or disjoint i64 [[TMP15]], 8
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP16]]
-; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP18:%.*]] = or disjoint i64 [[TMP17]], 8
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[NEXT_GEP10]], align 16
-; CHECK-NEXT: store i32 [[TMP19]], ptr [[NEXT_GEP6]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE17]]
-; CHECK: pred.store.continue17:
-; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
-; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF18:%.*]], label [[PRED_STORE_CONTINUE19]]
-; CHECK: pred.store.if18:
-; CHECK-NEXT: [[TMP21:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = or disjoint i64 [[TMP21]], 12
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP22]]
-; CHECK-NEXT: [[TMP23:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP24:%.*]] = or disjoint i64 [[TMP23]], 12
-; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP24]]
-; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[NEXT_GEP11]], align 16
-; CHECK-NEXT: store i32 [[TMP25]], ptr [[NEXT_GEP7]], align 16
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE19]]
-; CHECK: pred.store.continue19:
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP3]], i64 1
+; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16:%.*]]
+; CHECK: pred.store.if15:
+; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[OFFSET_IDX]], 4
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = or disjoint i64 [[OFFSET_IDX8]], 4
+; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[NEXT_GEP10]], align 16
+; CHECK-NEXT: store i32 [[TMP9]], ptr [[NEXT_GEP5]], align 16
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE16]]
+; CHECK: pred.store.continue16:
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP3]], i64 2
+; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF17:%.*]], label [[PRED_STORE_CONTINUE18:%.*]]
+; CHECK: pred.store.if17:
+; CHECK-NEXT: [[TMP11:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[OFFSET_IDX8]], 8
+; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[NEXT_GEP11]], align 16
+; CHECK-NEXT: store i32 [[TMP13]], ptr [[NEXT_GEP6]], align 16
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE18]]
+; CHECK: pred.store.continue18:
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i64 3
+; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20]]
+; CHECK: pred.store.if19:
+; CHECK-NEXT: [[TMP15:%.*]] = or disjoint i64 [[OFFSET_IDX]], 12
+; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP16:%.*]] = or disjoint i64 [[OFFSET_IDX8]], 12
+; CHECK-NEXT: [[NEXT_GEP12:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[NEXT_GEP12]], align 16
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[NEXT_GEP7]], align 16
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
+; CHECK: pred.store.continue20:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -410,24 +404,24 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw <4 x i32> [[TMP3]], <i32 7, i32 7, i32 7, i32 7>
-; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw <4 x i32> [[TMP1]], <i32 7, i32 7, i32 7, i32 7>
+; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[TMP7:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br i1 true, label [[TMP5:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[TMP6:%.*]]
-; CHECK: 6:
-; CHECK-NEXT: br i1 poison, label [[TMP7]], label [[TMP6]], !llvm.loop [[LOOP11:![0-9]+]]
-; CHECK: 7:
+; CHECK-NEXT: br label [[TMP4:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: 5:
; CHECK-NEXT: ret void
;
br label %1
@@ -457,7 +451,9 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE16:%.*]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE17:%.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = shl i64 [[INDEX]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[VEC_IV:%.*]] = or disjoint <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
@@ -465,71 +461,63 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst)
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i64 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP4]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = shl nuw nsw i32 [[TMP6]], 7
-; CHECK-NEXT: store i32 [[TMP7]], ptr [[NEXT_GEP7]], align 4
+; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST:%.*]], i64 [[OFFSET_IDX7]]
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[NEXT_GEP]], align 2
+; CHECK-NEXT: [[TMP4:%.*]] = zext i16 [[TMP3]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i32 [[TMP4]], 7
+; CHECK-NEXT: store i32 [[TMP5]], ptr [[NEXT_GEP8]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
-; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12:%.*]]
-; CHECK: pred.store.if11:
-; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[TMP9]], 4
-; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = or disjoint i64 [[TMP11]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP13:%.*]] = load i16, ptr [[NEXT_GEP4]], align 2
-; CHECK-NEXT: [[TMP14:%.*]] = zext i16 [[TMP13]] to i32
-; CHECK-NEXT: [[TMP15:%.*]] = shl nuw nsw i32 [[TMP14]], 7
-; CHECK-NEXT: store i32 [[TMP15]], ptr [[NEXT_GEP8]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
-; CHECK: pred.store.continue12:
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
-; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF13:%.*]], label [[PRED_STORE_CONTINUE14:%.*]]
-; CHECK: pred.store.if13:
-; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP18:%.*]] = or disjoint i64 [[TMP17]], 8
-; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP20:%.*]] = or disjoint i64 [[TMP19]], 4
-; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP20]]
-; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP5]], align 2
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP1]], i64 1
+; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
+; CHECK: pred.store.if12:
+; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[OFFSET_IDX7]], 4
+; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = or disjoint i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[NEXT_GEP4]], align 2
+; CHECK-NEXT: [[TMP10:%.*]] = zext i16 [[TMP9]] to i32
+; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i32 [[TMP10]], 7
+; CHECK-NEXT: store i32 [[TMP11]], ptr [[NEXT_GEP9]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE13]]
+; CHECK: pred.store.continue13:
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP1]], i64 2
+; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
+; CHECK: pred.store.if14:
+; CHECK-NEXT: [[TMP13:%.*]] = or disjoint i64 [[OFFSET_IDX7]], 8
+; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = or disjoint i64 [[OFFSET_IDX]], 4
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i16, ptr [[NEXT_GEP5]], align 2
+; CHECK-NEXT: [[TMP16:%.*]] = zext i16 [[TMP15]] to i32
+; CHECK-NEXT: [[TMP17:%.*]] = shl nuw nsw i32 [[TMP16]], 7
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[NEXT_GEP10]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE15]]
+; CHECK: pred.store.continue15:
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
+; CHECK-NEXT: br i1 [[TMP18]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17]]
+; CHECK: pred.store.if16:
+; CHECK-NEXT: [[TMP19:%.*]] = or disjoint i64 [[OFFSET_IDX7]], 12
+; CHECK-NEXT: [[NEXT_GEP11:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = or disjoint i64 [[OFFSET_IDX]], 6
+; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[NEXT_GEP6]], align 2
; CHECK-NEXT: [[TMP22:%.*]] = zext i16 [[TMP21]] to i32
; CHECK-NEXT: [[TMP23:%.*]] = shl nuw nsw i32 [[TMP22]], 7
-; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP9]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE14]]
-; CHECK: pred.store.continue14:
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i1> [[TMP1]], i64 3
-; CHECK-NEXT: br i1 [[TMP24]], label [[PRED_STORE_IF15:%.*]], label [[PRED_STORE_CONTINUE16]]
-; CHECK: pred.store.if15:
-; CHECK-NEXT: [[TMP25:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP26:%.*]] = or disjoint i64 [[TMP25]], 12
-; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP26]]
-; CHECK-NEXT: [[TMP27:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP28:%.*]] = or disjoint i64 [[TMP27]], 6
-; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP28]]
-; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[NEXT_GEP6]], align 2
-; CHECK-NEXT: [[TMP30:%.*]] = zext i16 [[TMP29]] to i32
-; CHECK-NEXT: [[TMP31:%.*]] = shl nuw nsw i32 [[TMP30]], 7
-; CHECK-NEXT: store i32 [[TMP31]], ptr [[NEXT_GEP10]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE16]]
-; CHECK: pred.store.continue16:
+; CHECK-NEXT: store i32 [[TMP23]], ptr [[NEXT_GEP11]], align 4
+; CHECK-NEXT: br label [[PRED_STORE_CONTINUE17]]
+; CHECK: pred.store.continue17:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
-; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
+; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[TMP34:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br i1 true, label [[TMP26:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: br label [[TMP33:%.*]]
-; CHECK: 33:
-; CHECK-NEXT: br i1 poison, label [[TMP34]], label [[TMP33]], !llvm.loop [[LOOP13:![0-9]+]]
-; CHECK: 34:
+; CHECK-NEXT: br label [[TMP25:%.*]]
+; CHECK: 25:
+; CHECK-NEXT: br i1 poison, label [[TMP26]], label [[TMP25]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: 26:
; CHECK-NEXT: ret void
;
br label %1
diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
index cc7c1d8a6188..1e23f02ee2b1 100644
--- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -311,15 +311,12 @@ for.end:
; INTER: vector.body
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER: %[[I0:.+]] = shl i64 %index, 4
+; INTER: %[[I1:.+]] = or disjoint i64 %[[I0]], 16
+; INTER: %[[I2:.+]] = or disjoint i64 %[[I0]], 32
+; INTER: %[[I3:.+]] = or disjoint i64 %[[I0]], 48
; INTER: %next.gep = getelementptr i8, ptr %a, i64 %[[I0]]
-; INTER: %[[S1:.+]] = shl i64 %index, 4
-; INTER: %[[I1:.+]] = or disjoint i64 %[[S1]], 16
; INTER: %next.gep2 = getelementptr i8, ptr %a, i64 %[[I1]]
-; INTER: %[[S2:.+]] = shl i64 %index, 4
-; INTER: %[[I2:.+]] = or disjoint i64 %[[S2]], 32
; INTER: %next.gep3 = getelementptr i8, ptr %a, i64 %[[I2]]
-; INTER: %[[S3:.+]] = shl i64 %index, 4
-; INTER: %[[I3:.+]] = or disjoint i64 %[[S3]], 48
; INTER: %next.gep4 = getelementptr i8, ptr %a, i64 %[[I3]]
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
;
@@ -361,15 +358,12 @@ for.end:
; CHECK: vector.body
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: [[SHL1:%.+]] = shl i64 %index, 4
+; CHECK: %[[I1:.+]] = or disjoint i64 [[SHL1]], 16
+; CHECK: %[[I2:.+]] = or disjoint i64 [[SHL1]], 32
+; CHECK: %[[I3:.+]] = or disjoint i64 [[SHL1]], 48
; CHECK: %next.gep = getelementptr i8, ptr %a, i64 [[SHL1]]
-; CHECK: [[SHL2:%.+]] = shl i64 %index, 4
-; CHECK: %[[I1:.+]] = or disjoint i64 [[SHL2]], 16
; CHECK: %next.gep2 = getelementptr i8, ptr %a, i64 %[[I1]]
-; CHECK: [[SHL3:%.+]] = shl i64 %index, 4
-; CHECK: %[[I2:.+]] = or disjoint i64 [[SHL3]], 32
; CHECK: %next.gep3 = getelementptr i8, ptr %a, i64 %[[I2]]
-; CHECK: [[SHL4:%.+]] = shl i64 %index, 4
-; CHECK: %[[I3:.+]] = or disjoint i64 [[SHL4]], 48
; CHECK: %next.gep4 = getelementptr i8, ptr %a, i64 %[[I3]]
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
;
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
index b451d4b4e546..0a37e5ea0ca0 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -1537,92 +1537,85 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VEC_PHI9:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP49:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP39:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
-; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1
-; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
-; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
-; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP5]]
-; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 3
-; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP7]]
-; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 4
-; UNROLL-NO-IC-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP9]]
-; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 5
-; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP11]]
-; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 6
-; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP13]]
-; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 7
-; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 200
-; UNROLL-NO-IC-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP15]]
-; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP20:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP5]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP6]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP22:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP7]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP8]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP16]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP25:%.*]] = load double, ptr [[TMP17]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP26:%.*]] = load double, ptr [[TMP18]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP27:%.*]] = load double, ptr [[TMP19]], align 8
+; UNROLL-NO-IC-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP40:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP41:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP31:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 200
+; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 200
+; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 400
+; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 600
+; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 800
+; UNROLL-NO-IC-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 1000
+; UNROLL-NO-IC-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1200
+; UNROLL-NO-IC-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 1400
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP5]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP6]]
+; UNROLL-NO-IC-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP7]]
+; UNROLL-NO-IC-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP5]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP6]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP13:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP7]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP8]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP9]], i64 [[IDXPROM]]
+; UNROLL-NO-IC-NEXT: [[TMP16:%.*]] = load double, ptr [[TMP8]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP9]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = load double, ptr [[TMP10]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP19:%.*]] = load double, ptr [[TMP11]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP20:%.*]] = insertelement <4 x double> poison, double [[TMP16]], i32 0
+; UNROLL-NO-IC-NEXT: [[TMP21:%.*]] = insertelement <4 x double> [[TMP20]], double [[TMP17]], i32 1
+; UNROLL-NO-IC-NEXT: [[TMP22:%.*]] = insertelement <4 x double> [[TMP21]], double [[TMP18]], i32 2
+; UNROLL-NO-IC-NEXT: [[TMP23:%.*]] = insertelement <4 x double> [[TMP22]], double [[TMP19]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP12]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP25:%.*]] = load double, ptr [[TMP13]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP26:%.*]] = load double, ptr [[TMP14]], align 8
+; UNROLL-NO-IC-NEXT: [[TMP27:%.*]] = load double, ptr [[TMP15]], align 8
; UNROLL-NO-IC-NEXT: [[TMP28:%.*]] = insertelement <4 x double> poison, double [[TMP24]], i32 0
; UNROLL-NO-IC-NEXT: [[TMP29:%.*]] = insertelement <4 x double> [[TMP28]], double [[TMP25]], i32 1
; UNROLL-NO-IC-NEXT: [[TMP30:%.*]] = insertelement <4 x double> [[TMP29]], double [[TMP26]], i32 2
-; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = insertelement <4 x double> [[TMP30]], double [[TMP27]], i32 3
-; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = load double, ptr [[TMP20]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = load double, ptr [[TMP21]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = load double, ptr [[TMP22]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = load double, ptr [[TMP23]], align 8
-; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = insertelement <4 x double> poison, double [[TMP32]], i32 0
-; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = insertelement <4 x double> [[TMP36]], double [[TMP33]], i32 1
-; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = insertelement <4 x double> [[TMP37]], double [[TMP34]], i32 2
-; UNROLL-NO-IC-NEXT: [[TMP39]] = insertelement <4 x double> [[TMP38]], double [[TMP35]], i32 3
-; UNROLL-NO-IC-NEXT: [[TMP40:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP31]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = shufflevector <4 x double> [[TMP31]], <4 x double> [[TMP39]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; UNROLL-NO-IC-NEXT: [[TMP42:%.*]] = fmul <4 x double> [[TMP40]], [[TMP31]]
-; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = fmul <4 x double> [[TMP41]], [[TMP39]]
-; UNROLL-NO-IC-NEXT: [[TMP44:%.*]] = fcmp une <4 x double> [[TMP42]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP45:%.*]] = fcmp une <4 x double> [[TMP43]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP46:%.*]] = zext <4 x i1> [[TMP44]] to <4 x i32>
-; UNROLL-NO-IC-NEXT: [[TMP47:%.*]] = zext <4 x i1> [[TMP45]] to <4 x i32>
-; UNROLL-NO-IC-NEXT: [[TMP48]] = add <4 x i32> [[VEC_PHI]], [[TMP46]]
-; UNROLL-NO-IC-NEXT: [[TMP49]] = add <4 x i32> [[VEC_PHI9]], [[TMP47]]
+; UNROLL-NO-IC-NEXT: [[TMP31]] = insertelement <4 x double> [[TMP30]], double [[TMP27]], i32 3
+; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP23]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = shufflevector <4 x double> [[TMP23]], <4 x double> [[TMP31]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = fmul <4 x double> [[TMP32]], [[TMP23]]
+; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = fmul <4 x double> [[TMP33]], [[TMP31]]
+; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = fcmp une <4 x double> [[TMP34]], zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = fcmp une <4 x double> [[TMP35]], zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = zext <4 x i1> [[TMP36]] to <4 x i32>
+; UNROLL-NO-IC-NEXT: [[TMP39:%.*]] = zext <4 x i1> [[TMP37]] to <4 x i32>
+; UNROLL-NO-IC-NEXT: [[TMP40]] = add <4 x i32> [[VEC_PHI]], [[TMP38]]
+; UNROLL-NO-IC-NEXT: [[TMP41]] = add <4 x i32> [[VEC_PHI2]], [[TMP39]]
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; UNROLL-NO-IC-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
-; UNROLL-NO-IC-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; UNROLL-NO-IC-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
+; UNROLL-NO-IC-NEXT: br i1 [[TMP42]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; UNROLL-NO-IC: middle.block:
-; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP49]], [[TMP48]]
-; UNROLL-NO-IC-NEXT: [[TMP51:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP39]], i32 3
+; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP41]], [[TMP40]]
+; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP31]], i32 3
; UNROLL-NO-IC-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; UNROLL-NO-IC: scalar.ph:
; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
; UNROLL-NO-IC-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 10240, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
-; UNROLL-NO-IC-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NO-IC-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP43]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: br label [[FOR_BODY:%.*]]
; UNROLL-NO-IC: for.cond.cleanup:
-; UNROLL-NO-IC-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NO-IC-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP43]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: ret i32 [[A_1_LCSSA]]
; UNROLL-NO-IC: for.body:
; UNROLL-NO-IC-NEXT: [[B_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[I_011:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[A_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_1]], [[FOR_BODY]] ]
-; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP52:%.*]], [[FOR_BODY]] ]
+; UNROLL-NO-IC-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP44:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B_ADDR_012]], i64 [[IDXPROM]]
-; UNROLL-NO-IC-NEXT: [[TMP52]] = load double, ptr [[ARRAYIDX]], align 8
-; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP52]]
+; UNROLL-NO-IC-NEXT: [[TMP44]] = load double, ptr [[ARRAYIDX]], align 8
+; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP44]]
; UNROLL-NO-IC-NEXT: [[TOBOOL:%.*]] = fcmp une double [[MUL]], 0.000000e+00
; UNROLL-NO-IC-NEXT: [[INC:%.*]] = zext i1 [[TOBOOL]] to i32
; UNROLL-NO-IC-NEXT: [[A_1]] = add nsw i32 [[A_010]], [[INC]]
@@ -1640,35 +1633,34 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; UNROLL-NO-VF-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-VF: vector.body:
; UNROLL-NO-VF-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VEC_PHI3:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[VECTOR_RECUR:%.*]] = phi double [ [[J:%.*]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 200
-; UNROLL-NO-VF-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
-; UNROLL-NO-VF-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1
-; UNROLL-NO-VF-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 200
-; UNROLL-NO-VF-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
-; UNROLL-NO-VF-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; UNROLL-NO-VF-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
-; UNROLL-NO-VF-NEXT: [[TMP6:%.*]] = load double, ptr [[TMP4]], align 8
-; UNROLL-NO-VF-NEXT: [[TMP7]] = load double, ptr [[TMP5]], align 8
-; UNROLL-NO-VF-NEXT: [[TMP8:%.*]] = fmul double [[VECTOR_RECUR]], [[TMP6]]
-; UNROLL-NO-VF-NEXT: [[TMP9:%.*]] = fmul double [[TMP6]], [[TMP7]]
-; UNROLL-NO-VF-NEXT: [[TMP10:%.*]] = fcmp une double [[TMP8]], 0.000000e+00
-; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = fcmp une double [[TMP9]], 0.000000e+00
-; UNROLL-NO-VF-NEXT: [[TMP12:%.*]] = zext i1 [[TMP10]] to i32
-; UNROLL-NO-VF-NEXT: [[TMP13:%.*]] = zext i1 [[TMP11]] to i32
-; UNROLL-NO-VF-NEXT: [[TMP14]] = add i32 [[VEC_PHI]], [[TMP12]]
-; UNROLL-NO-VF-NEXT: [[TMP15]] = add i32 [[VEC_PHI3]], [[TMP13]]
+; UNROLL-NO-VF-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[VECTOR_RECUR:%.*]] = phi double [ [[J:%.*]], [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 200
+; UNROLL-NO-VF-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 200
+; UNROLL-NO-VF-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
+; UNROLL-NO-VF-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; UNROLL-NO-VF-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; UNROLL-NO-VF-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
+; UNROLL-NO-VF-NEXT: [[TMP4:%.*]] = load double, ptr [[TMP2]], align 8
+; UNROLL-NO-VF-NEXT: [[TMP5]] = load double, ptr [[TMP3]], align 8
+; UNROLL-NO-VF-NEXT: [[TMP6:%.*]] = fmul double [[VECTOR_RECUR]], [[TMP4]]
+; UNROLL-NO-VF-NEXT: [[TMP7:%.*]] = fmul double [[TMP4]], [[TMP5]]
+; UNROLL-NO-VF-NEXT: [[TMP8:%.*]] = fcmp une double [[TMP6]], 0.000000e+00
+; UNROLL-NO-VF-NEXT: [[TMP9:%.*]] = fcmp une double [[TMP7]], 0.000000e+00
+; UNROLL-NO-VF-NEXT: [[TMP10:%.*]] = zext i1 [[TMP8]] to i32
+; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = zext i1 [[TMP9]] to i32
+; UNROLL-NO-VF-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP10]]
+; UNROLL-NO-VF-NEXT: [[TMP13]] = add i32 [[VEC_PHI2]], [[TMP11]]
; UNROLL-NO-VF-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; UNROLL-NO-VF-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
-; UNROLL-NO-VF-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; UNROLL-NO-VF-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
+; UNROLL-NO-VF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; UNROLL-NO-VF: middle.block:
-; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP14]]
+; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP12]]
; UNROLL-NO-VF-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; UNROLL-NO-VF: scalar.ph:
-; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-VF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
; UNROLL-NO-VF-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 10240, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; UNROLL-NO-VF-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
@@ -1680,10 +1672,10 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; UNROLL-NO-VF-NEXT: [[B_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-VF-NEXT: [[I_011:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-VF-NEXT: [[A_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_1]], [[FOR_BODY]] ]
-; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP17:%.*]], [[FOR_BODY]] ]
+; UNROLL-NO-VF-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP15:%.*]], [[FOR_BODY]] ]
; UNROLL-NO-VF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B_ADDR_012]], i64 [[IDXPROM]]
-; UNROLL-NO-VF-NEXT: [[TMP17]] = load double, ptr [[ARRAYIDX]], align 8
-; UNROLL-NO-VF-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP17]]
+; UNROLL-NO-VF-NEXT: [[TMP15]] = load double, ptr [[ARRAYIDX]], align 8
+; UNROLL-NO-VF-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP15]]
; UNROLL-NO-VF-NEXT: [[TOBOOL:%.*]] = fcmp une double [[MUL]], 0.000000e+00
; UNROLL-NO-VF-NEXT: [[INC:%.*]] = zext i1 [[TOBOOL]] to i32
; UNROLL-NO-VF-NEXT: [[A_1]] = add nsw i32 [[A_010]], [[INC]]
@@ -1702,61 +1694,58 @@ define i32 @PR33613(ptr %b, double %j, i32 %d) {
; SINK-AFTER-NEXT: br label [[VECTOR_BODY:%.*]]
; SINK-AFTER: vector.body:
; SINK-AFTER-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SINK-AFTER-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; SINK-AFTER-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; SINK-AFTER-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; SINK-AFTER-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
-; SINK-AFTER-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 1
-; SINK-AFTER-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
-; SINK-AFTER-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2
-; SINK-AFTER-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP5]]
-; SINK-AFTER-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 3
-; SINK-AFTER-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 200
-; SINK-AFTER-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP7]]
-; SINK-AFTER-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP12:%.*]] = load double, ptr [[TMP8]], align 8
-; SINK-AFTER-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP9]], align 8
-; SINK-AFTER-NEXT: [[TMP14:%.*]] = load double, ptr [[TMP10]], align 8
-; SINK-AFTER-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP11]], align 8
-; SINK-AFTER-NEXT: [[TMP16:%.*]] = insertelement <4 x double> poison, double [[TMP12]], i32 0
-; SINK-AFTER-NEXT: [[TMP17:%.*]] = insertelement <4 x double> [[TMP16]], double [[TMP13]], i32 1
-; SINK-AFTER-NEXT: [[TMP18:%.*]] = insertelement <4 x double> [[TMP17]], double [[TMP14]], i32 2
-; SINK-AFTER-NEXT: [[TMP19]] = insertelement <4 x double> [[TMP18]], double [[TMP15]], i32 3
-; SINK-AFTER-NEXT: [[TMP20:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP19]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; SINK-AFTER-NEXT: [[TMP21:%.*]] = fmul <4 x double> [[TMP20]], [[TMP19]]
-; SINK-AFTER-NEXT: [[TMP22:%.*]] = fcmp une <4 x double> [[TMP21]], zeroinitializer
-; SINK-AFTER-NEXT: [[TMP23:%.*]] = zext <4 x i1> [[TMP22]] to <4 x i32>
-; SINK-AFTER-NEXT: [[TMP24]] = add <4 x i32> [[VEC_PHI]], [[TMP23]]
+; SINK-AFTER-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
+; SINK-AFTER-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x double> [ [[VECTOR_RECUR_INIT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; SINK-AFTER-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 200
+; SINK-AFTER-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; SINK-AFTER-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 200
+; SINK-AFTER-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 400
+; SINK-AFTER-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 600
+; SINK-AFTER-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
+; SINK-AFTER-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP1]]
+; SINK-AFTER-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; SINK-AFTER-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP3]]
+; SINK-AFTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP2]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP3]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[NEXT_GEP4]], i64 [[IDXPROM]]
+; SINK-AFTER-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8
+; SINK-AFTER-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8
+; SINK-AFTER-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8
+; SINK-AFTER-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8
+; SINK-AFTER-NEXT: [[TMP12:%.*]] = insertelement <4 x double> poison, double [[TMP8]], i32 0
+; SINK-AFTER-NEXT: [[TMP13:%.*]] = insertelement <4 x double> [[TMP12]], double [[TMP9]], i32 1
+; SINK-AFTER-NEXT: [[TMP14:%.*]] = insertelement <4 x double> [[TMP13]], double [[TMP10]], i32 2
+; SINK-AFTER-NEXT: [[TMP15]] = insertelement <4 x double> [[TMP14]], double [[TMP11]], i32 3
+; SINK-AFTER-NEXT: [[TMP16:%.*]] = shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> [[TMP15]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; SINK-AFTER-NEXT: [[TMP17:%.*]] = fmul <4 x double> [[TMP16]], [[TMP15]]
+; SINK-AFTER-NEXT: [[TMP18:%.*]] = fcmp une <4 x double> [[TMP17]], zeroinitializer
+; SINK-AFTER-NEXT: [[TMP19:%.*]] = zext <4 x i1> [[TMP18]] to <4 x i32>
+; SINK-AFTER-NEXT: [[TMP20]] = add <4 x i32> [[VEC_PHI]], [[TMP19]]
; SINK-AFTER-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; SINK-AFTER-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
-; SINK-AFTER-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SINK-AFTER-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10240
+; SINK-AFTER-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; SINK-AFTER: middle.block:
-; SINK-AFTER-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP19]], i32 3
+; SINK-AFTER-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP20]])
+; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x double> [[TMP15]], i32 3
; SINK-AFTER-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; SINK-AFTER: scalar.ph:
; SINK-AFTER-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi double [ [[J]], [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[B]], [[ENTRY]] ]
; SINK-AFTER-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ 10240, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
-; SINK-AFTER-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
+; SINK-AFTER-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: br label [[FOR_BODY:%.*]]
; SINK-AFTER: for.cond.cleanup:
-; SINK-AFTER-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP26]], [[MIDDLE_BLOCK]] ]
+; SINK-AFTER-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: ret i32 [[A_1_LCSSA]]
; SINK-AFTER: for.body:
; SINK-AFTER-NEXT: [[B_ADDR_012:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; SINK-AFTER-NEXT: [[I_011:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; SINK-AFTER-NEXT: [[A_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_1]], [[FOR_BODY]] ]
-; SINK-AFTER-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP27:%.*]], [[FOR_BODY]] ]
+; SINK-AFTER-NEXT: [[SCALAR_RECUR:%.*]] = phi double [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[TMP23:%.*]], [[FOR_BODY]] ]
; SINK-AFTER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[B_ADDR_012]], i64 [[IDXPROM]]
-; SINK-AFTER-NEXT: [[TMP27]] = load double, ptr [[ARRAYIDX]], align 8
-; SINK-AFTER-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP27]]
+; SINK-AFTER-NEXT: [[TMP23]] = load double, ptr [[ARRAYIDX]], align 8
+; SINK-AFTER-NEXT: [[MUL:%.*]] = fmul double [[SCALAR_RECUR]], [[TMP23]]
; SINK-AFTER-NEXT: [[TOBOOL:%.*]] = fcmp une double [[MUL]], 0.000000e+00
; SINK-AFTER-NEXT: [[INC:%.*]] = zext i1 [[TOBOOL]] to i32
; SINK-AFTER-NEXT: [[A_1]] = add nsw i32 [[A_010]], [[INC]]
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll
index 410947704fea..31d862a3438a 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction-unroll.ll
@@ -32,35 +32,36 @@ define void @non_constant_scalar_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
-; STRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP5]]
-; STRIDED-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 1
-; STRIDED-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP7]]
-; STRIDED-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 2
-; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP9]]
-; STRIDED-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 3
-; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], [[TMP1]]
-; STRIDED-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP11]]
; STRIDED-NEXT: [[DOTCAST:%.*]] = trunc i64 [[INDEX]] to i32
; STRIDED-NEXT: [[OFFSET_IDX:%.*]] = add i32 30, [[DOTCAST]]
-; STRIDED-NEXT: [[TMP12:%.*]] = add i32 [[OFFSET_IDX]], 0
-; STRIDED-NEXT: [[TMP13:%.*]] = add i32 [[OFFSET_IDX]], 1
-; STRIDED-NEXT: [[TMP14:%.*]] = add i32 [[OFFSET_IDX]], 2
-; STRIDED-NEXT: [[TMP15:%.*]] = add i32 [[OFFSET_IDX]], 3
-; STRIDED-NEXT: [[TMP16:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP12]]
-; STRIDED-NEXT: [[TMP17:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP13]]
-; STRIDED-NEXT: [[TMP18:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP14]]
-; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP15]]
-; STRIDED-NEXT: store ptr [[NEXT_GEP]], ptr [[TMP16]], align 4
-; STRIDED-NEXT: store ptr [[NEXT_GEP2]], ptr [[TMP17]], align 4
-; STRIDED-NEXT: store ptr [[NEXT_GEP3]], ptr [[TMP18]], align 4
-; STRIDED-NEXT: store ptr [[NEXT_GEP4]], ptr [[TMP19]], align 4
+; STRIDED-NEXT: [[TMP3:%.*]] = add i32 [[OFFSET_IDX]], 0
+; STRIDED-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX]], 1
+; STRIDED-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX]], 2
+; STRIDED-NEXT: [[TMP6:%.*]] = add i32 [[OFFSET_IDX]], 3
+; STRIDED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], [[TMP1]]
+; STRIDED-NEXT: [[TMP7:%.*]] = mul i64 0, [[TMP1]]
+; STRIDED-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP7]]
+; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP1]]
+; STRIDED-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP9]]
+; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 2, [[TMP1]]
+; STRIDED-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP11]]
+; STRIDED-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP1]]
+; STRIDED-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX2]], [[TMP13]]
+; STRIDED-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr null, i64 [[TMP8]]
+; STRIDED-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr null, i64 [[TMP10]]
+; STRIDED-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr null, i64 [[TMP12]]
+; STRIDED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr null, i64 [[TMP14]]
+; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[TMP3]]
+; STRIDED-NEXT: [[TMP20:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP4]]
+; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP5]]
+; STRIDED-NEXT: [[TMP22:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP6]]
+; STRIDED-NEXT: store ptr [[TMP15]], ptr [[TMP19]], align 4
+; STRIDED-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4
+; STRIDED-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4
+; STRIDED-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; STRIDED-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967264
-; STRIDED-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967264
+; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; STRIDED: middle.block:
; STRIDED-NEXT: br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; STRIDED: scalar.ph:
@@ -68,13 +69,13 @@ define void @non_constant_scalar_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ null, [[ENTRY]] ]
; STRIDED-NEXT: br label [[FOR_COND:%.*]]
; STRIDED: for.cond:
-; STRIDED-NEXT: [[TMP21:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
+; STRIDED-NEXT: [[TMP24:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; STRIDED-NEXT: [[P_0:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_COND]] ]
; STRIDED-NEXT: [[ADD_PTR]] = getelementptr i8, ptr [[P_0]], i32 [[MUL]]
-; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP21]]
+; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP24]]
; STRIDED-NEXT: store ptr [[P_0]], ptr [[ARRAYIDX]], align 4
-; STRIDED-NEXT: [[INC]] = add i32 [[TMP21]], 1
-; STRIDED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP21]], 0
+; STRIDED-NEXT: [[INC]] = add i32 [[TMP24]], 1
+; STRIDED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP24]], 0
; STRIDED-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_END]], label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
; STRIDED: for.end:
; STRIDED-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
index 27f6f7b1cb4e..35037968160c 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
@@ -23,56 +23,53 @@ define void @a(ptr readnone %b) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE10:%.*]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], -1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 -3
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 0, [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr null, i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 -3
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i8> [[WIDE_LOAD]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq <4 x i8> [[REVERSE]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP7]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
-; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <4 x i8> [[REVERSE]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP6]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
+; CHECK-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP10]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP9]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1
-; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
+; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
; CHECK: pred.store.if5:
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], -1
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP13]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP14]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], -1
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr null, i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP2]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP12]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
; CHECK: pred.store.continue6:
-; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2
-; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
+; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK: pred.store.if7:
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], -1
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP17]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP3]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP18]], align 1
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], -2
+; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP3]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP15]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
; CHECK: pred.store.continue8:
-; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3
-; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
+; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]]
; CHECK: pred.store.if9:
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], -1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP21]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP4]], i64 -1
-; CHECK-NEXT: store i8 95, ptr [[TMP22]], align 1
+; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], -3
+; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr null, i64 [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[NEXT_GEP4]], i64 -1
+; CHECK-NEXT: store i8 95, ptr [[TMP18]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
; CHECK: pred.store.continue10:
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
@@ -86,8 +83,8 @@ define void @a(ptr readnone %b) {
; CHECK: for.body:
; CHECK-NEXT: [[C_05:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[C_05]], i64 -1
-; CHECK-NEXT: [[TMP24:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
-; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP24]], 0
+; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[INCDEC_PTR]], align 1
+; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP20]], 0
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END]], label [[IF_THEN:%.*]]
; CHECK: if.then:
; CHECK-NEXT: store i8 95, ptr [[INCDEC_PTR]], align 1
@@ -145,22 +142,22 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 8
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START_1]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 1, i64 2, i64 3>
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, <4 x ptr> [[TMP3]], i64 1
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: store <4 x ptr> [[TMP4]], ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1>
-; CHECK-NEXT: store <4 x i8> [[TMP8]], ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, <4 x ptr> [[TMP1]], i64 1
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr ptr, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: store <4 x ptr> [[TMP3]], ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: store <4 x i8> [[TMP7]], ptr [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 4
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
index 0c659a550b31..92ca77bc841c 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -92,6 +92,7 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE12:%.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
@@ -102,39 +103,35 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i64 [[VEC_IV6]], 14
; CHECK-NEXT: br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP4]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
; CHECK: pred.store.if7:
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
-; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP5]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP1]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
; CHECK: pred.store.continue8:
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
; CHECK: pred.store.if9:
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 8
-; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 16
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP6]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP2]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
; CHECK: pred.store.continue10:
; CHECK-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12]]
; CHECK: pred.store.if11:
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 8
-; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 24
+; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP7]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP3]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
; CHECK: pred.store.continue12:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
index 1dddbfe20a2e..ca9dfdc6f6d2 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-before-execute.ll
@@ -23,11 +23,12 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
-; CHECK-NEXT: EMIT ir<%p.src> = WIDEN-POINTER-INDUCTION ir<%A>, 1
-; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[PADD:%.+]]> = ptradd ir<%A>, vp<[[STEPS]]>
+; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VPTR]]>
; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
-; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN store vp<[[VPTR2]]>, ir<%add>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV:%.+]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VTC]]>
@@ -54,11 +55,12 @@ define void @test_tc_less_than_16(ptr %A, i64 %N) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
-; CHECK-NEXT: EMIT ir<%p.src> = WIDEN-POINTER-INDUCTION ir<%A>, 1
-; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
+; CHECK-NEXT: EMIT vp<[[PADD:%.+]]> = ptradd ir<%A>, vp<[[STEPS]]>
+; CHECK-NEXT: vp<[[VPTR:%.]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VPTR]]>
; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%l>, ir<10>
-; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer ir<%p.src>
+; CHECK-NEXT: vp<[[VPTR2:%.+]]> = vector-pointer vp<[[PADD]]>
; CHECK-NEXT: WIDEN store vp<[[VPTR2]]>, ir<%add>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV:%.+]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-cond ir<true>
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
index 9b9c3e704852..89b3a6da16c1 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
@@ -1113,8 +1113,10 @@ define void @ptr_induction_remove_dead_recipe(ptr %start, ptr %end) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
-; CHECK-NEXT: EMIT ir<%ptr.iv> = WIDEN-POINTER-INDUCTION ir<%start>, -1
-; CHECK-NEXT: CLONE ir<%ptr.iv.next> = getelementptr inbounds ir<%ptr.iv>, ir<-1>
+; CHECK-NEXT: vp<[[DEV_IV:%.+]]> = DERIVED-IV ir<0> + vp<%3> * ir<-1>
+; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DEV_IV]]>, ir<-1>
+; CHECK-NEXT: EMIT vp<[[PTR_IV:%.+]]> = ptradd ir<%start>, vp<[[STEPS]]>
+; CHECK-NEXT: CLONE ir<%ptr.iv.next> = getelementptr inbounds vp<[[PTR_IV]]>, ir<-1>
; CHECK-NEXT: vp<[[VEC_PTR:%.+]]> = vector-pointer (reverse) ir<%ptr.iv.next>
; CHECK-NEXT: WIDEN ir<%l> = load vp<[[VEC_PTR]]>
; CHECK-NEXT: WIDEN ir<%c.1> = icmp eq ir<%l>, ir<0>
@@ -1127,7 +1129,7 @@ define void @ptr_induction_remove_dead_recipe(ptr %start, ptr %end) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
-; CHECK-NEXT: REPLICATE ir<%ptr.iv.next> = getelementptr inbounds ir<%ptr.iv>, ir<-1>
+; CHECK-NEXT: REPLICATE ir<%ptr.iv.next> = getelementptr inbounds vp<[[PTR_IV]]>, ir<-1>
; CHECK-NEXT: REPLICATE store ir<95>, ir<%ptr.iv.next>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/NewGVN/pr31483.ll b/llvm/test/Transforms/NewGVN/pr31483.ll
index 0e7461c2612b..82e9a2ab286e 100644
--- a/llvm/test/Transforms/NewGVN/pr31483.ll
+++ b/llvm/test/Transforms/NewGVN/pr31483.ll
@@ -41,7 +41,7 @@ define signext i32 @ham(ptr %arg, ptr %arg1) #0 {
; CHECK: bb22:
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb23:
-; CHECK-NEXT: call void @llvm.va_end(ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[TMP]])
; CHECK-NEXT: ret i32 undef
;
bb:
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index 73bcee5fb74f..36bcda4c43ca 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -49,7 +49,7 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER16]]
-; CHECK: while.body.preheader16:
+; CHECK: while.body.preheader18:
; CHECK-NEXT: [[BLKCNT_06_PH:%.*]] = phi i32 [ [[BLOCKSIZE]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[PSRCA_ADDR_05_PH:%.*]] = phi ptr [ [[PSRCA]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[PDST_ADDR_04_PH:%.*]] = phi ptr [ [[PDST]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END9]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll
index 8f1c52c59163..708cdc9ca45e 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/merge-functions.ll
@@ -14,8 +14,7 @@ define i1 @test1(i32 %c) {
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 20
; CHECK-NEXT: [[SWITCH_CAST:%.*]] = trunc i32 [[SWITCH_TABLEIDX]] to i20
; CHECK-NEXT: [[SWITCH_DOWNSHIFT:%.*]] = lshr i20 -490991, [[SWITCH_CAST]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i20 [[SWITCH_DOWNSHIFT]], 1
-; CHECK-NEXT: [[SWITCH_MASKED:%.*]] = icmp ne i20 [[TMP1]], 0
+; CHECK-NEXT: [[SWITCH_MASKED:%.*]] = trunc i20 [[SWITCH_DOWNSHIFT]] to i1
; CHECK-NEXT: [[I_0:%.*]] = select i1 [[TMP0]], i1 [[SWITCH_MASKED]], i1 false
; CHECK-NEXT: ret i1 [[I_0]]
;
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
index e61b254b7a5f..495ec0a63399 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr67803.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK
-; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK
-; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512
+; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s
+; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s
+; RUN: opt < %s -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
define <4 x i64> @PR67803(<4 x i64> %x, <4 x i64> %y, <4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: @PR67803(
@@ -11,16 +11,14 @@ define <4 x i64> @PR67803(<4 x i64> %x, <4 x i64> %y, <4 x i64> %a, <4 x i64> %b
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i32> [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[CMP_I21:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[SEXT_I22:%.*]] = sext <4 x i1> [[CMP_I21]] to <4 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[SEXT_I22]] to <2 x i64>
; CHECK-NEXT: [[CMP_I:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[SEXT_I]] to <2 x i64>
-; CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[SEXT_I22]], <4 x i32> [[SEXT_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <32 x i8> [[TMP5]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <32 x i8> [[TMP7]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i64> [[SHUFFLE_I]] to <32 x i8>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <32 x i8> [[TMP9]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP11:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP6]], <16 x i8> [[TMP8]], <16 x i8> [[TMP10]])
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
@@ -28,42 +26,13 @@ define <4 x i64> @PR67803(<4 x i64> %x, <4 x i64> %y, <4 x i64> %a, <4 x i64> %b
; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <32 x i8> [[TMP13]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP15:%.*]] = bitcast <4 x i64> [[B]] to <32 x i8>
; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <32 x i8> [[TMP15]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT: [[TMP17:%.*]] = bitcast <4 x i64> [[SHUFFLE_I]] to <32 x i8>
+; CHECK-NEXT: [[TMP17:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <32 x i8> [[TMP17]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP19:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP14]], <16 x i8> [[TMP16]], <16 x i8> [[TMP18]])
; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <2 x i64>
; CHECK-NEXT: [[SHUFFLE_I23:%.*]] = shufflevector <2 x i64> [[TMP12]], <2 x i64> [[TMP20]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: ret <4 x i64> [[SHUFFLE_I23]]
;
-; AVX512-LABEL: @PR67803(
-; AVX512-NEXT: entry:
-; AVX512-NEXT: [[TMP0:%.*]] = bitcast <4 x i64> [[X:%.*]] to <8 x i32>
-; AVX512-NEXT: [[TMP1:%.*]] = bitcast <4 x i64> [[Y:%.*]] to <8 x i32>
-; AVX512-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i32> [[TMP0]], [[TMP1]]
-; AVX512-NEXT: [[CMP_I21:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; AVX512-NEXT: [[SEXT_I22:%.*]] = sext <4 x i1> [[CMP_I21]] to <4 x i32>
-; AVX512-NEXT: [[CMP_I:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
-; AVX512-NEXT: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
-; AVX512-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[SEXT_I22]], <4 x i32> [[SEXT_I]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; AVX512-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; AVX512-NEXT: [[TMP5:%.*]] = shufflevector <32 x i8> [[TMP4]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; AVX512-NEXT: [[TMP6:%.*]] = bitcast <4 x i64> [[B:%.*]] to <32 x i8>
-; AVX512-NEXT: [[TMP7:%.*]] = shufflevector <32 x i8> [[TMP6]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; AVX512-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; AVX512-NEXT: [[TMP9:%.*]] = shufflevector <32 x i8> [[TMP8]], <32 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; AVX512-NEXT: [[TMP10:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP5]], <16 x i8> [[TMP7]], <16 x i8> [[TMP9]])
-; AVX512-NEXT: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP10]] to <2 x i64>
-; AVX512-NEXT: [[TMP12:%.*]] = bitcast <4 x i64> [[A]] to <32 x i8>
-; AVX512-NEXT: [[TMP13:%.*]] = shufflevector <32 x i8> [[TMP12]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; AVX512-NEXT: [[TMP14:%.*]] = bitcast <4 x i64> [[B]] to <32 x i8>
-; AVX512-NEXT: [[TMP15:%.*]] = shufflevector <32 x i8> [[TMP14]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; AVX512-NEXT: [[TMP16:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; AVX512-NEXT: [[TMP17:%.*]] = shufflevector <32 x i8> [[TMP16]], <32 x i8> poison, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; AVX512-NEXT: [[TMP18:%.*]] = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[TMP13]], <16 x i8> [[TMP15]], <16 x i8> [[TMP17]])
-; AVX512-NEXT: [[TMP19:%.*]] = bitcast <16 x i8> [[TMP18]] to <2 x i64>
-; AVX512-NEXT: [[SHUFFLE_I23:%.*]] = shufflevector <2 x i64> [[TMP11]], <2 x i64> [[TMP19]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; AVX512-NEXT: ret <4 x i64> [[SHUFFLE_I23]]
-;
entry:
%0 = bitcast <4 x i64> %x to <8 x i32>
%extract = shufflevector <8 x i32> %0, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/llvm/test/Transforms/Reassociate/vaarg_movable.ll b/llvm/test/Transforms/Reassociate/vaarg_movable.ll
index 337877a54a90..4e45b219fccd 100644
--- a/llvm/test/Transforms/Reassociate/vaarg_movable.ll
+++ b/llvm/test/Transforms/Reassociate/vaarg_movable.ll
@@ -10,13 +10,13 @@ define i32 @func(i32 %dummy, ...) {
;
; CHECK-LABEL: @func(
; CHECK-NEXT: [[VARARGS:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.va_start(ptr [[VARARGS]])
+; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VARARGS]])
; CHECK-NEXT: [[V0:%.*]] = va_arg ptr [[VARARGS]], i32
; CHECK-NEXT: [[V1:%.*]] = va_arg ptr [[VARARGS]], i32
; CHECK-NEXT: [[V0_NEG:%.*]] = sub i32 0, [[V0]]
; CHECK-NEXT: [[SUB:%.*]] = add i32 [[V0_NEG]], 1
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[V1]]
-; CHECK-NEXT: call void @llvm.va_end(ptr [[VARARGS]])
+; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VARARGS]])
; CHECK-NEXT: ret i32 [[ADD]]
;
%varargs = alloca ptr, align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll
new file mode 100644
index 000000000000..436fba3261d6
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/init-ext-node-not-truncable.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr="+v" < %s -slp-threshold=-5 | FileCheck %s
+
+@h = global [16 x i64] zeroinitializer
+
+define void @test() {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store <2 x i64> <i64 -1, i64 0>, ptr @h, align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %sext.0 = sext i8 0 to i32
+ %sext.1 = sext i8 0 to i32
+
+ %lshr.0 = lshr i32 0, %sext.0
+ %lshr.1 = lshr i32 0, %sext.1
+
+ %or.0 = or i32 %lshr.0, -1
+ %or.1 = or i32 %lshr.1, 0
+
+ %zext.0 = zext i32 %or.0 to i64
+ %zext.1 = zext i32 %or.1 to i64
+
+ store i64 %zext.0, ptr @h, align 8
+ store i64 %zext.1, ptr getelementptr inbounds ([16 x i64], ptr @h, i64 0, i64 1), align 8
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll
new file mode 100644
index 000000000000..6388cc2dedc7
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/partial-vec-invalid-cost.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=slp-vectorizer -S | FileCheck %s
+
+target triple = "riscv64-unknown-linux-gnu"
+
+define void @partial_vec_invalid_cost() #0 {
+; CHECK-LABEL: define void @partial_vec_invalid_cost(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> zeroinitializer)
+; CHECK-NEXT: [[OP_RDX3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[STORE_THIS:%.*]] = zext i32 [[OP_RDX3]] to i96
+; CHECK-NEXT: store i96 [[STORE_THIS]], ptr null, align 16
+; CHECK-NEXT: ret void
+;
+entry:
+
+ %lshr.1 = lshr i96 0, 0 ; These ops
+ %lshr.2 = lshr i96 0, 0 ; return an
+ %add.0 = add i96 0, 0 ; invalid
+ %add.1 = add i96 0, 0 ; vector cost.
+
+ %trunc.i96.1 = trunc i96 %lshr.1 to i32 ; These ops
+ %trunc.i96.2 = trunc i96 %lshr.2 to i32 ; return an
+ %trunc.i96.3 = trunc i96 %add.0 to i32 ; invalid
+ %trunc.i96.4 = trunc i96 %add.1 to i32 ; vector cost.
+
+ %or.0 = or i32 %trunc.i96.1, %trunc.i96.2
+ %or.1 = or i32 %or.0, %trunc.i96.3
+ %or.2 = or i32 %or.1, %trunc.i96.4
+
+ %zext.0 = zext i1 0 to i32 ; These
+ %zext.1 = zext i1 0 to i32 ; ops
+ %zext.2 = zext i1 0 to i32 ; are
+ %zext.3 = zext i1 0 to i32 ; vectorized
+
+ %or.3 = or i32 %or.2, %zext.0 ; users
+ %or.4 = or i32 %or.3, %zext.1 ; of
+ %or.5 = or i32 %or.4, %zext.2 ; vectorized
+ %or.6 = or i32 %or.5, %zext.3 ; ops
+
+ %store.this = zext i32 %or.6 to i96
+
+ store i96 %store.this, ptr null, align 16
+ ret void
+}
+
+attributes #0 = { "target-features"="+v" }
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll
new file mode 100644
index 000000000000..7771e8369b61
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reduction-extension-after-bitwidth.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=riscv64-unknown-linux-gnu -mattr="+v" --passes=slp-vectorizer < %s | FileCheck %s
+
+define i32 @test(ptr %0, ptr %1) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOAD_5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> <i8 1, i8 1, i8 1, i8 1>)
+; CHECK-NEXT: [[TMP3:%.*]] = sext i8 [[TMP2]] to i32
+; CHECK-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP3]], [[LOAD_5]]
+; CHECK-NEXT: ret i32 [[OP_RDX]]
+;
+entry:
+ %zext.0 = zext i8 1 to i32
+ %zext.1 = zext i8 1 to i32
+ %zext.2 = zext i8 1 to i32
+ %zext.3 = zext i8 1 to i32
+ %select.zext.0 = select i1 false, i32 -1, i32 %zext.0
+ %select.zext.1 = select i1 false, i32 0, i32 %zext.1
+ %select.zext.2 = select i1 false, i32 0, i32 %zext.2
+ %select.zext.3 = select i1 false, i32 0, i32 %zext.3
+
+ %load.5 = load i32, ptr %1, align 4
+
+ %and.0 = and i32 %load.5, %select.zext.0
+ %and.1 = and i32 %and.0, %select.zext.1
+ %and.2 = and i32 %and.1, %select.zext.2
+ %and.3 = and i32 %and.2, %select.zext.3
+
+ ret i32 %and.3
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll
new file mode 100644
index 000000000000..26f3fcae3a33
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/small-tree-not-schedulable-bv-node.ll
@@ -0,0 +1,263 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=riscv64-unknown-linux-gnu -slp-threshold=-100 -mattr=+v < %s | FileCheck %s
+
+define void @test1() personality ptr null {
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] personality ptr null {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CALL33:%.*]] = invoke ptr null(i64 0, ptr null)
+; CHECK-NEXT: to label [[INVOKE_CONT32:%.*]] unwind label [[LPAD31_LOOPEXIT:%.*]]
+; CHECK: invoke.cont32:
+; CHECK-NEXT: invoke void null(ptr null, ptr null)
+; CHECK-NEXT: to label [[INVOKE_CONT37:%.*]] unwind label [[LPAD34_LOOPEXIT:%.*]]
+; CHECK: invoke.cont37:
+; CHECK-NEXT: unreachable
+; CHECK: lpad31.loopexit:
+; CHECK-NEXT: [[LPAD_LOOPEXIT:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[EHCLEANUP47:%.*]]
+; CHECK: lpad34.loopexit:
+; CHECK-NEXT: [[DOTLCSSA101:%.*]] = phi ptr [ null, [[INVOKE_CONT32]] ]
+; CHECK-NEXT: [[CALL33_LCSSA96:%.*]] = phi ptr [ [[CALL33]], [[INVOKE_CONT32]] ]
+; CHECK-NEXT: [[LPAD_LOOPEXIT56:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[LPAD34_BODY:%.*]]
+; CHECK: lpad34.loopexit.split-lp:
+; CHECK-NEXT: [[LPAD_LOOPEXIT_SPLIT_LP57:%.*]] = landingpad { ptr, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: br label [[LPAD34_BODY]]
+; CHECK: lpad34.body:
+; CHECK-NEXT: [[TMP0:%.*]] = phi ptr [ [[DOTLCSSA101]], [[LPAD34_LOOPEXIT]] ], [ null, [[LPAD34_LOOPEXIT_SPLIT_LP:%.*]] ]
+; CHECK-NEXT: [[CALL3399:%.*]] = phi ptr [ [[CALL33_LCSSA96]], [[LPAD34_LOOPEXIT]] ], [ null, [[LPAD34_LOOPEXIT_SPLIT_LP]] ]
+; CHECK-NEXT: br label [[EHCLEANUP47]]
+; CHECK: ehcleanup47:
+; CHECK-NEXT: resume { ptr, i32 } zeroinitializer
+;
+entry:
+ %call33 = invoke ptr null(i64 0, ptr null)
+ to label %invoke.cont32 unwind label %lpad31.loopexit
+
+invoke.cont32:
+ invoke void null(ptr null, ptr null)
+ to label %invoke.cont37 unwind label %lpad34.loopexit
+
+invoke.cont37:
+ unreachable
+
+lpad31.loopexit:
+ %lpad.loopexit = landingpad { ptr, i32 }
+ cleanup
+ br label %ehcleanup47
+
+lpad34.loopexit:
+ %.lcssa101 = phi ptr [ null, %invoke.cont32 ]
+ %call33.lcssa96 = phi ptr [ %call33, %invoke.cont32 ]
+ %lpad.loopexit56 = landingpad { ptr, i32 }
+ cleanup
+ br label %lpad34.body
+
+lpad34.loopexit.split-lp:
+ %lpad.loopexit.split-lp57 = landingpad { ptr, i32 }
+ cleanup
+ br label %lpad34.body
+
+lpad34.body:
+ %0 = phi ptr [ %.lcssa101, %lpad34.loopexit ], [ null, %lpad34.loopexit.split-lp ]
+ %call3399 = phi ptr [ %call33.lcssa96, %lpad34.loopexit ], [ null, %lpad34.loopexit.split-lp ]
+ br label %ehcleanup47
+
+ehcleanup47:
+ resume { ptr, i32 } zeroinitializer
+}
+
+define i32 @test2(i64 %idx.ext.i48.pre-phi) {
+; CHECK-LABEL: define i32 @test2(
+; CHECK-SAME: i64 [[IDX_EXT_I48_PRE_PHI:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[DO_ACTION:%.*]]
+; CHECK: do_action:
+; CHECK-NEXT: switch i32 0, label [[SW_DEFAULT:%.*]] [
+; CHECK-NEXT: i32 1, label [[CLEANUP185:%.*]]
+; CHECK-NEXT: i32 2, label [[CLEANUP185]]
+; CHECK-NEXT: i32 0, label [[CLEANUP185]]
+; CHECK-NEXT: i32 4, label [[CLEANUP185]]
+; CHECK-NEXT: i32 5, label [[CLEANUP185]]
+; CHECK-NEXT: i32 6, label [[CLEANUP185]]
+; CHECK-NEXT: i32 7, label [[CLEANUP185]]
+; CHECK-NEXT: i32 8, label [[CLEANUP185]]
+; CHECK-NEXT: i32 9, label [[CLEANUP185]]
+; CHECK-NEXT: i32 10, label [[CLEANUP185]]
+; CHECK-NEXT: i32 11, label [[CLEANUP185]]
+; CHECK-NEXT: i32 12, label [[CLEANUP185]]
+; CHECK-NEXT: i32 13, label [[CLEANUP185]]
+; CHECK-NEXT: i32 14, label [[CLEANUP185]]
+; CHECK-NEXT: i32 15, label [[CLEANUP185]]
+; CHECK-NEXT: i32 16, label [[CLEANUP185]]
+; CHECK-NEXT: i32 17, label [[CLEANUP185]]
+; CHECK-NEXT: i32 18, label [[CLEANUP185]]
+; CHECK-NEXT: i32 19, label [[CLEANUP185]]
+; CHECK-NEXT: i32 20, label [[CLEANUP185]]
+; CHECK-NEXT: i32 21, label [[CLEANUP185]]
+; CHECK-NEXT: i32 22, label [[CLEANUP185]]
+; CHECK-NEXT: i32 23, label [[CLEANUP185]]
+; CHECK-NEXT: i32 24, label [[CLEANUP185]]
+; CHECK-NEXT: i32 25, label [[CLEANUP185]]
+; CHECK-NEXT: i32 26, label [[CLEANUP185]]
+; CHECK-NEXT: i32 27, label [[CLEANUP185]]
+; CHECK-NEXT: i32 28, label [[CLEANUP185]]
+; CHECK-NEXT: i32 29, label [[CLEANUP185]]
+; CHECK-NEXT: i32 30, label [[CLEANUP185]]
+; CHECK-NEXT: i32 31, label [[CLEANUP185]]
+; CHECK-NEXT: i32 32, label [[CLEANUP185]]
+; CHECK-NEXT: i32 33, label [[CLEANUP185]]
+; CHECK-NEXT: i32 34, label [[CLEANUP185]]
+; CHECK-NEXT: i32 35, label [[CLEANUP185]]
+; CHECK-NEXT: i32 36, label [[CLEANUP185]]
+; CHECK-NEXT: i32 37, label [[CLEANUP185]]
+; CHECK-NEXT: i32 38, label [[CLEANUP185]]
+; CHECK-NEXT: i32 39, label [[CLEANUP185]]
+; CHECK-NEXT: i32 40, label [[CLEANUP185]]
+; CHECK-NEXT: i32 41, label [[CLEANUP185]]
+; CHECK-NEXT: i32 42, label [[CLEANUP185]]
+; CHECK-NEXT: i32 43, label [[CLEANUP185]]
+; CHECK-NEXT: i32 44, label [[CLEANUP185]]
+; CHECK-NEXT: i32 45, label [[CLEANUP185]]
+; CHECK-NEXT: i32 46, label [[CLEANUP185]]
+; CHECK-NEXT: i32 47, label [[CLEANUP185]]
+; CHECK-NEXT: i32 48, label [[CLEANUP185]]
+; CHECK-NEXT: i32 49, label [[CLEANUP185]]
+; CHECK-NEXT: i32 50, label [[CLEANUP185]]
+; CHECK-NEXT: i32 51, label [[CLEANUP185]]
+; CHECK-NEXT: i32 52, label [[CLEANUP185]]
+; CHECK-NEXT: i32 53, label [[CLEANUP185]]
+; CHECK-NEXT: i32 54, label [[CLEANUP185]]
+; CHECK-NEXT: i32 55, label [[CLEANUP185]]
+; CHECK-NEXT: i32 56, label [[CLEANUP185]]
+; CHECK-NEXT: i32 57, label [[DO_ACTION]]
+; CHECK-NEXT: i32 58, label [[CLEANUP185]]
+; CHECK-NEXT: i32 59, label [[CLEANUP185]]
+; CHECK-NEXT: i32 60, label [[DO_ACTION]]
+; CHECK-NEXT: i32 61, label [[DO_ACTION]]
+; CHECK-NEXT: i32 62, label [[CLEANUP185]]
+; CHECK-NEXT: i32 70, label [[SW_BB175:%.*]]
+; CHECK-NEXT: i32 64, label [[CLEANUP185]]
+; CHECK-NEXT: i32 65, label [[DO_ACTION]]
+; CHECK-NEXT: i32 66, label [[DO_ACTION]]
+; CHECK-NEXT: i32 67, label [[CLEANUP185]]
+; CHECK-NEXT: i32 72, label [[CLEANUP185]]
+; CHECK-NEXT: i32 69, label [[DO_ACTION]]
+; CHECK-NEXT: i32 71, label [[CLEANUP185]]
+; CHECK-NEXT: ]
+; CHECK: yy_get_previous_state.exit.loopexit:
+; CHECK-NEXT: br label [[YY_FIND_ACTION_BACKEDGE:%.*]]
+; CHECK: yy_find_action.backedge:
+; CHECK-NEXT: [[YY_BP_1_BE:%.*]] = phi ptr [ [[ADD_PTR_I49:%.*]], [[SW_BB175]] ], [ null, [[YY_GET_PREVIOUS_STATE_EXIT_LOOPEXIT:%.*]] ]
+; CHECK-NEXT: [[YY_CP_2_BE:%.*]] = phi ptr [ [[ARRAYIDX178:%.*]], [[SW_BB175]] ], [ null, [[YY_GET_PREVIOUS_STATE_EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: br label [[DO_ACTION]]
+; CHECK: sw.bb175:
+; CHECK-NEXT: [[ARRAYIDX178]] = getelementptr i8, ptr null, i64 0
+; CHECK-NEXT: [[ADD_PTR_I49]] = getelementptr i8, ptr null, i64 [[IDX_EXT_I48_PRE_PHI]]
+; CHECK-NEXT: [[CMP5_I50:%.*]] = icmp ult ptr [[ADD_PTR_I49]], [[ARRAYIDX178]]
+; CHECK-NEXT: br label [[YY_FIND_ACTION_BACKEDGE]]
+; CHECK: sw.default:
+; CHECK-NEXT: unreachable
+; CHECK: cleanup185:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br label %do_action
+
+do_action:
+ switch i32 0, label %sw.default [
+ i32 1, label %cleanup185
+ i32 2, label %cleanup185
+ i32 0, label %cleanup185
+ i32 4, label %cleanup185
+ i32 5, label %cleanup185
+ i32 6, label %cleanup185
+ i32 7, label %cleanup185
+ i32 8, label %cleanup185
+ i32 9, label %cleanup185
+ i32 10, label %cleanup185
+ i32 11, label %cleanup185
+ i32 12, label %cleanup185
+ i32 13, label %cleanup185
+ i32 14, label %cleanup185
+ i32 15, label %cleanup185
+ i32 16, label %cleanup185
+ i32 17, label %cleanup185
+ i32 18, label %cleanup185
+ i32 19, label %cleanup185
+ i32 20, label %cleanup185
+ i32 21, label %cleanup185
+ i32 22, label %cleanup185
+ i32 23, label %cleanup185
+ i32 24, label %cleanup185
+ i32 25, label %cleanup185
+ i32 26, label %cleanup185
+ i32 27, label %cleanup185
+ i32 28, label %cleanup185
+ i32 29, label %cleanup185
+ i32 30, label %cleanup185
+ i32 31, label %cleanup185
+ i32 32, label %cleanup185
+ i32 33, label %cleanup185
+ i32 34, label %cleanup185
+ i32 35, label %cleanup185
+ i32 36, label %cleanup185
+ i32 37, label %cleanup185
+ i32 38, label %cleanup185
+ i32 39, label %cleanup185
+ i32 40, label %cleanup185
+ i32 41, label %cleanup185
+ i32 42, label %cleanup185
+ i32 43, label %cleanup185
+ i32 44, label %cleanup185
+ i32 45, label %cleanup185
+ i32 46, label %cleanup185
+ i32 47, label %cleanup185
+ i32 48, label %cleanup185
+ i32 49, label %cleanup185
+ i32 50, label %cleanup185
+ i32 51, label %cleanup185
+ i32 52, label %cleanup185
+ i32 53, label %cleanup185
+ i32 54, label %cleanup185
+ i32 55, label %cleanup185
+ i32 56, label %cleanup185
+ i32 57, label %do_action
+ i32 58, label %cleanup185
+ i32 59, label %cleanup185
+ i32 60, label %do_action
+ i32 61, label %do_action
+ i32 62, label %cleanup185
+ i32 70, label %sw.bb175
+ i32 64, label %cleanup185
+ i32 65, label %do_action
+ i32 66, label %do_action
+ i32 67, label %cleanup185
+ i32 72, label %cleanup185
+ i32 69, label %do_action
+ i32 71, label %cleanup185
+ ]
+
+yy_get_previous_state.exit.loopexit:
+ br label %yy_find_action.backedge
+
+yy_find_action.backedge:
+ %yy_bp.1.be = phi ptr [ %add.ptr.i49, %sw.bb175 ], [ null, %yy_get_previous_state.exit.loopexit ]
+ %yy_cp.2.be = phi ptr [ %arrayidx178, %sw.bb175 ], [ null, %yy_get_previous_state.exit.loopexit ]
+ br label %do_action
+
+sw.bb175:
+ %arrayidx178 = getelementptr i8, ptr null, i64 0
+ %add.ptr.i49 = getelementptr i8, ptr null, i64 %idx.ext.i48.pre-phi
+ %cmp5.i50 = icmp ult ptr %add.ptr.i49, %arrayidx178
+ br label %yy_find_action.backedge
+
+sw.default:
+ unreachable
+
+cleanup185:
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll
new file mode 100644
index 000000000000..2d69c7c984dc
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/trunc-to-large-than-bw.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr="+v" < %s | FileCheck %s
+
+@c = global [12 x i64] zeroinitializer
+
+define i32 @test() {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i64(ptr align 8 @c, i64 24, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 4)
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[TMP0]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], <i32 65535, i32 65535, i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP3:%.*]] = xor <4 x i32> [[TMP2]], <i32 65535, i32 65535, i32 65535, i32 65535>
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP3]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.umax.i32(i32 [[TMP5]], i32 1)
+; CHECK-NEXT: ret i32 [[TMP6]]
+;
+entry:
+ %0 = load i64, ptr @c, align 8
+ %conv = trunc i64 %0 to i32
+ %conv3 = and i32 %conv, 65535
+ %conv4 = xor i32 %conv3, 65535
+ %.conv4 = tail call i32 @llvm.umax.i32(i32 1, i32 %conv4)
+ %1 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 3), align 8
+ %conv.1 = trunc i64 %1 to i32
+ %conv3.1 = and i32 %conv.1, 65535
+ %conv4.1 = xor i32 %conv3.1, 65535
+ %.conv4.1 = tail call i32 @llvm.umax.i32(i32 %.conv4, i32 %conv4.1)
+ %2 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 6), align 8
+ %conv.2 = trunc i64 %2 to i32
+ %conv3.2 = and i32 %conv.2, 65535
+ %conv4.2 = xor i32 %conv3.2, 65535
+ %.conv4.2 = tail call i32 @llvm.umax.i32(i32 %.conv4.1, i32 %conv4.2)
+ %3 = load i64, ptr getelementptr inbounds ([12 x i64], ptr @c, i64 0, i64 9), align 8
+ %conv.3 = trunc i64 %3 to i32
+ %conv3.3 = and i32 %conv.3, 65535
+ %conv4.3 = xor i32 %conv3.3, 65535
+ %.conv4.3 = tail call i32 @llvm.umax.i32(i32 %.conv4.2, i32 %conv4.3)
+ ret i32 %.conv4.3
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll b/llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll
new file mode 100644
index 000000000000..7b4e2b0ce911
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/SystemZ/minbitwidth-root-trunc.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=systemz -mcpu=z15 %s | FileCheck %s
+
+define void @test(ptr %a, i8 %0, i16 %b.promoted.i) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[TMP0:%.*]], i16 [[B_PROMOTED_I:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP0]] to i128
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i16> poison, i16 [[B_PROMOTED_I]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[TMP3]], <4 x i16> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i128> poison, i128 [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i128> [[TMP5]], <4 x i128> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = trunc <4 x i128> [[TMP6]] to <4 x i16>
+; CHECK-NEXT: [[TMP8:%.*]] = or <4 x i16> [[TMP4]], [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i16 [[TMP9]] to i64
+; CHECK-NEXT: [[OP_RDX:%.*]] = and i64 [[TMP11]], 1
+; CHECK-NEXT: store i64 [[OP_RDX]], ptr [[A]], align 8
+; CHECK-NEXT: ret void
+;
+ %2 = zext i8 %0 to i128
+ %3 = zext i16 %b.promoted.i to i128
+ %4 = or i128 %3, %2
+ %5 = trunc i128 %4 to i64
+ %6 = and i64 %5, 1
+ %7 = zext i16 %b.promoted.i to i128
+ %8 = or i128 %7, %2
+ %9 = trunc i128 %8 to i64
+ %10 = and i64 %6, %9
+ %11 = zext i16 %b.promoted.i to i128
+ %12 = or i128 %11, %2
+ %13 = trunc i128 %12 to i64
+ %14 = and i64 %10, %13
+ %15 = zext i16 %b.promoted.i to i128
+ %16 = or i128 %15, %2
+ %17 = trunc i128 %16 to i64
+ %18 = and i64 %14, %17
+ store i64 %18, ptr %a, align 8
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll
new file mode 100644
index 000000000000..27c9655f94d3
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/call-arg-reduced-by-minbitwidth.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-pc-windows-msvc19.34.0 < %s | FileCheck %s
+
+define void @test(ptr %0, i8 %1, i1 %cmp12.i) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[TMP0:%.*]], i8 [[TMP1:%.*]], i1 [[CMP12_I:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i1> poison, i1 [[CMP12_I]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i8> poison, i8 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP4]], <8 x i8> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: br label [[PRE:%.*]]
+; CHECK: pre:
+; CHECK-NEXT: [[TMP6:%.*]] = zext <8 x i8> [[TMP5]] to <8 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = call <8 x i32> @llvm.umax.v8i32(<8 x i32> [[TMP6]], <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i8>
+; CHECK-NEXT: [[TMP9:%.*]] = add <8 x i8> [[TMP8]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP10:%.*]] = select <8 x i1> [[TMP3]], <8 x i8> [[TMP9]], <8 x i8> [[TMP5]]
+; CHECK-NEXT: store <8 x i8> [[TMP10]], ptr [[TMP0]], align 1
+; CHECK-NEXT: br label [[PRE]]
+;
+entry:
+ %idx11 = getelementptr i8, ptr %0, i64 1
+ %idx22 = getelementptr i8, ptr %0, i64 2
+ %idx33 = getelementptr i8, ptr %0, i64 3
+ %idx44 = getelementptr i8, ptr %0, i64 4
+ %idx55 = getelementptr i8, ptr %0, i64 5
+ %idx66 = getelementptr i8, ptr %0, i64 6
+ %idx77 = getelementptr i8, ptr %0, i64 7
+ br label %pre
+
+pre:
+ %conv.i = zext i8 %1 to i32
+ %2 = tail call i32 @llvm.umax.i32(i32 %conv.i, i32 1)
+ %.sroa.speculated.i = add i32 %2, 1
+ %intensity.0.i = select i1 %cmp12.i, i32 %.sroa.speculated.i, i32 %conv.i
+ %conv14.i = trunc i32 %intensity.0.i to i8
+ store i8 %conv14.i, ptr %0, align 1
+ %conv.i.1 = zext i8 %1 to i32
+ %3 = tail call i32 @llvm.umax.i32(i32 %conv.i.1, i32 1)
+ %ss1 = add i32 %3, 1
+ %ii1 = select i1 %cmp12.i, i32 %ss1, i32 %conv.i.1
+ %conv14.i.1 = trunc i32 %ii1 to i8
+ store i8 %conv14.i.1, ptr %idx11, align 1
+ %conv.i.2 = zext i8 %1 to i32
+ %4 = tail call i32 @llvm.umax.i32(i32 %conv.i.2, i32 1)
+ %ss2 = add i32 %4, 1
+ %ii2 = select i1 %cmp12.i, i32 %ss2, i32 %conv.i.2
+ %conv14.i.2 = trunc i32 %ii2 to i8
+ store i8 %conv14.i.2, ptr %idx22, align 1
+ %conv.i.3 = zext i8 %1 to i32
+ %5 = tail call i32 @llvm.umax.i32(i32 %conv.i.3, i32 1)
+ %ss3 = add i32 %5, 1
+ %ii3 = select i1 %cmp12.i, i32 %ss3, i32 %conv.i.3
+ %conv14.i.3 = trunc i32 %ii3 to i8
+ store i8 %conv14.i.3, ptr %idx33, align 1
+ %conv.i.4 = zext i8 %1 to i32
+ %6 = tail call i32 @llvm.umax.i32(i32 %conv.i.4, i32 1)
+ %ss4 = add i32 %6, 1
+ %ii4 = select i1 %cmp12.i, i32 %ss4, i32 %conv.i.4
+ %conv14.i.4 = trunc i32 %ii4 to i8
+ store i8 %conv14.i.4, ptr %idx44, align 1
+ %conv.i.5 = zext i8 %1 to i32
+ %7 = tail call i32 @llvm.umax.i32(i32 %conv.i.5, i32 1)
+ %ss5 = add i32 %7, 1
+ %ii5 = select i1 %cmp12.i, i32 %ss5, i32 %conv.i.5
+ %conv14.i.5 = trunc i32 %ii5 to i8
+ store i8 %conv14.i.5, ptr %idx55, align 1
+ %conv.i.6 = zext i8 %1 to i32
+ %8 = tail call i32 @llvm.umax.i32(i32 %conv.i.6, i32 1)
+ %ss6 = add i32 %8, 1
+ %ii6 = select i1 %cmp12.i, i32 %ss6, i32 %conv.i.6
+ %conv14.i.6 = trunc i32 %ii6 to i8
+ store i8 %conv14.i.6, ptr %idx66, align 1
+ %conv.i.7 = zext i8 %1 to i32
+ %9 = tail call i32 @llvm.umax.i32(i32 %conv.i.7, i32 1)
+ %ss7 = add i32 %9, 1
+ %ii7 = select i1 %cmp12.i, i32 %ss7, i32 %conv.i.7
+ %conv14.i.7 = trunc i32 %ii7 to i8
+ store i8 %conv14.i.7, ptr %idx77, align 1
+ br label %pre
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll b/llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll
new file mode 100644
index 000000000000..2acbe89b9775
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/gather-nodes-different-bb.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux -mattr="-avx512pf,+avx512f,+avx512bw" -slp-threshold=-100 < %s | FileCheck %s
+
+define i1 @foo(i32 %a) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i32 0, [[A:%.*]]
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[LOCAL:%.*]] = sub nsw i32 0, 0
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[ADD:%.*]] = icmp eq i32 [[TMP0]], [[LOCAL]]
+; CHECK-NEXT: ret i1 [[ADD]]
+;
+entry:
+ %0 = sub nsw i32 0, %a
+ br label %bb1
+
+bb1:
+ %local = sub nsw i32 0, 0
+ %ins1 = insertelement <2 x i32> poison, i32 %0, i32 0
+ %add = icmp eq i32 %0, %local
+ ret i1 %add
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
index 5ee801607653..f4a471493f1b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/int-bitcast-minbitwidth.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-6 < %s | FileCheck %s
+; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-9 < %s | FileCheck %s
define void @t(i64 %v) {
; CHECK-LABEL: define void @t(
@@ -7,10 +7,9 @@ define void @t(i64 %v) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i64> poison, i64 [[V]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[TMP0]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[TMP1]] to <4 x i16>
-; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i16> [[TMP2]], <i16 5, i16 6, i16 3, i16 2>
-; CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[TMP1]] to <4 x i32>
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], <i32 5, i32 6, i32 3, i32 2>
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP3]])
; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 65535
; CHECK-NEXT: store i32 [[TMP6]], ptr null, align 4
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll
new file mode 100644
index 000000000000..f376ca71c776
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi-node-bitwidt-op-not.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define i32 @test(ptr %b, ptr %c, i32 %0, ptr %a, i1 %tobool3.not) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ptr [[B:%.*]], ptr [[C:%.*]], i32 [[TMP0:%.*]], ptr [[A:%.*]], i1 [[TOBOOL3_NOT:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[TOBOOL3_NOT]], label [[BB1:%.*]], label [[BB2:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = ashr <4 x i32> [[TMP2]], <i32 16, i32 16, i32 16, i32 16>
+; CHECK-NEXT: [[TMP4:%.*]] = icmp slt <4 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i1> [[TMP4]] to <4 x i16>
+; CHECK-NEXT: br label [[BB3:%.*]]
+; CHECK: bb2:
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sgt <4 x i32> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = zext <4 x i1> [[TMP8]] to <4 x i32>
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i1> poison, i1 [[TOBOOL3_NOT]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i1> [[TMP10]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP7]], <4 x i32> [[TMP9]]
+; CHECK-NEXT: [[TMP13:%.*]] = shl <4 x i32> [[TMP12]], <i32 16, i32 16, i32 16, i32 16>
+; CHECK-NEXT: [[TMP14:%.*]] = ashr <4 x i32> [[TMP13]], <i32 16, i32 16, i32 16, i32 16>
+; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i32> [[TMP14]] to <4 x i16>
+; CHECK-NEXT: br i1 true, label [[BB3]], label [[BB2]]
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP16:%.*]] = phi <4 x i16> [ [[TMP5]], [[BB1]] ], [ [[TMP15]], [[BB2]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i16> [[TMP16]], i32 0
+; CHECK-NEXT: [[TMP18:%.*]] = sext i16 [[TMP17]] to i32
+; CHECK-NEXT: store i32 [[TMP18]], ptr [[B]], align 16
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i16> [[TMP16]], i32 1
+; CHECK-NEXT: [[TMP20:%.*]] = sext i16 [[TMP19]] to i32
+; CHECK-NEXT: store i32 [[TMP20]], ptr [[A]], align 8
+; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i16> [[TMP16]], i32 2
+; CHECK-NEXT: [[TMP22:%.*]] = sext i16 [[TMP21]] to i32
+; CHECK-NEXT: store i32 [[TMP22]], ptr [[C]], align 16
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i16> [[TMP16]], i32 3
+; CHECK-NEXT: [[TMP24:%.*]] = sext i16 [[TMP23]] to i32
+; CHECK-NEXT: store i32 [[TMP24]], ptr [[B]], align 8
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br i1 %tobool3.not, label %bb1, label %bb2
+
+bb1:
+ %conv1.i.us = ashr i32 %0, 16
+ %cmp2.i.us = icmp slt i32 %conv1.i.us, %0
+ %sext26.us = zext i1 %cmp2.i.us to i32
+ %conv1.i.us.5 = ashr i32 %0, 16
+ %cmp2.i.us.5 = icmp slt i32 %conv1.i.us.5, %0
+ %sext26.us.5 = zext i1 %cmp2.i.us.5 to i32
+ %conv1.i.us.6 = ashr i32 %0, 16
+ %cmp2.i.us.6 = icmp slt i32 %conv1.i.us.6, %0
+ %sext26.us.6 = zext i1 %cmp2.i.us.6 to i32
+ %conv1.i.us.7 = ashr i32 %0, 16
+ %cmp2.i.us.7 = icmp slt i32 %conv1.i.us.7, %0
+ %sext26.us.7 = zext i1 %cmp2.i.us.7 to i32
+ br label %bb3
+
+bb2:
+ %cmp2.i = icmp sgt i32 %0, 0
+ %1 = zext i1 %cmp2.i to i32
+ %cond.i = select i1 %tobool3.not, i32 %0, i32 %1
+ %sext26 = shl i32 %cond.i, 16
+ %conv13 = ashr i32 %sext26, 16
+ %cmp2.i.5 = icmp sgt i32 %0, 0
+ %2 = zext i1 %cmp2.i.5 to i32
+ %cond.i.5 = select i1 %tobool3.not, i32 %0, i32 %2
+ %sext26.5 = shl i32 %cond.i.5, 16
+ %conv13.5 = ashr i32 %sext26.5, 16
+ %cmp2.i.6 = icmp sgt i32 %0, 0
+ %3 = zext i1 %cmp2.i.6 to i32
+ %cond.i.6 = select i1 %tobool3.not, i32 %0, i32 %3
+ %sext26.6 = shl i32 %cond.i.6, 16
+ %conv13.6 = ashr i32 %sext26.6, 16
+ %cmp2.i.7 = icmp sgt i32 %0, 0
+ %4 = zext i1 %cmp2.i.7 to i32
+ %cond.i.7 = select i1 %tobool3.not, i32 %0, i32 %4
+ %sext26.7 = shl i32 %cond.i.7, 16
+ %conv13.7 = ashr i32 %sext26.7, 16
+ br i1 true, label %bb3, label %bb2
+
+bb3:
+ %conv13p = phi i32 [ %sext26.us, %bb1 ], [ %conv13, %bb2 ]
+ %conv13.5p = phi i32 [ %sext26.us.5, %bb1 ], [ %conv13.5, %bb2 ]
+ %conv13.6p = phi i32 [ %sext26.us.6, %bb1 ], [ %conv13.6, %bb2 ]
+ %conv13.7p = phi i32 [ %sext26.us.7, %bb1 ], [ %conv13.7, %bb2 ]
+ store i32 %conv13p, ptr %b, align 16
+ store i32 %conv13.5p, ptr %a, align 8
+ store i32 %conv13.6p, ptr %c, align 16
+ store i32 %conv13.7p, ptr %b, align 8
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll
index 4a23abf182e8..cfbbe14186b5 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-possible-strided-node.ll
@@ -116,9 +116,7 @@ define void @test_div() {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARRAYIDX22]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[TMP0]]
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i32> [[TMP3]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = udiv <4 x i64> [[TMP4]], <i64 1, i64 2, i64 1, i64 2>
-; CHECK-NEXT: [[TMP6:%.*]] = trunc <4 x i64> [[TMP5]] to <4 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = udiv <4 x i32> [[TMP3]], <i32 1, i32 2, i32 1, i32 2>
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
; CHECK-NEXT: ret void
;
@@ -170,9 +168,7 @@ define void @test_rem() {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARRAYIDX22]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[TMP0]]
-; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i32> [[TMP3]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = urem <4 x i64> [[TMP4]], <i64 1, i64 2, i64 1, i64 1>
-; CHECK-NEXT: [[TMP6:%.*]] = trunc <4 x i64> [[TMP5]] to <4 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = urem <4 x i32> [[TMP3]], <i32 1, i32 2, i32 1, i32 1>
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr getelementptr inbounds ([4 x i32], ptr null, i64 8, i64 0), align 16
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
index dce85b4b2a19..9682567b173c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder_diamond_match.ll
@@ -11,11 +11,11 @@ define void @test() {
; CHECK-NEXT: [[TMP6:%.*]] = shl <4 x i16> [[TMP5]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i16> [[TMP6]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i16> [[TMP7]], <4 x i16> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT: [[TMP9:%.*]] = add nsw <4 x i16> [[TMP7]], [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = sub nsw <4 x i16> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i16> [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i32> <i32 1, i32 4, i32 3, i32 6>
-; CHECK-NEXT: [[TMP12:%.*]] = add nsw <4 x i16> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = sub nsw <4 x i16> zeroinitializer, [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = add <4 x i16> zeroinitializer, [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub <4 x i16> zeroinitializer, [[TMP11]]
; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x i16> [[TMP12]], <4 x i16> [[TMP13]], <4 x i32> <i32 0, i32 1, i32 6, i32 7>
; CHECK-NEXT: [[TMP15:%.*]] = sext <4 x i16> [[TMP14]] to <4 x i32>
; CHECK-NEXT: store <4 x i32> [[TMP15]], ptr [[TMP2]], align 16
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll
new file mode 100644
index 000000000000..e8b854b7cea6
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/store-abs-minbitwidth.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx512vl -passes=slp-vectorizer -slp-threshold=-3 | FileCheck %s
+
+
+define i32 @test(ptr noalias %in, ptr noalias %inn, ptr %out) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr [[IN:%.*]], align 1
+; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i8, ptr [[IN]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr [[GEP_2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i8>, ptr [[INN:%.*]], align 1
+; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr inbounds i8, ptr [[INN]], i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i8>, ptr [[GEP_5]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x i8> [[TMP3]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x i8> [[TMP2]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: [[TMP8:%.*]] = sext <4 x i8> [[TMP7]] to <4 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i8> [[TMP1]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i8> [[TMP4]], <2 x i8> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP9]], <4 x i8> [[TMP10]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: [[TMP12:%.*]] = sext <4 x i8> [[TMP11]] to <4 x i32>
+; CHECK-NEXT: [[TMP13:%.*]] = sub <4 x i32> [[TMP12]], [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[TMP13]], i1 true)
+; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i32> [[TMP14]] to <4 x i16>
+; CHECK-NEXT: store <4 x i16> [[TMP15]], ptr [[OUT:%.*]], align 2
+; CHECK-NEXT: ret i32 undef
+;
+ %load.1 = load i8, ptr %in, align 1
+ %gep.1 = getelementptr inbounds i8, ptr %in, i64 1
+ %load.2 = load i8, ptr %gep.1, align 1
+ %gep.2 = getelementptr inbounds i8, ptr %in, i64 2
+ %load.3 = load i8, ptr %gep.2, align 1
+ %gep.3 = getelementptr inbounds i8, ptr %in, i64 3
+ %load.4 = load i8, ptr %gep.3, align 1
+ %load.5 = load i8, ptr %inn, align 1
+ %gep.4 = getelementptr inbounds i8, ptr %inn, i64 1
+ %load.6 = load i8, ptr %gep.4, align 1
+ %gep.5 = getelementptr inbounds i8, ptr %inn, i64 2
+ %load.7 = load i8, ptr %gep.5, align 1
+ %gep.6 = getelementptr inbounds i8, ptr %inn, i64 3
+ %load.8 = load i8, ptr %gep.6, align 1
+ %sext1 = sext i8 %load.1 to i32
+ %sext2 = sext i8 %load.2 to i32
+ %sext3 = sext i8 %load.3 to i32
+ %sext4 = sext i8 %load.4 to i32
+ %sext5 = sext i8 %load.5 to i32
+ %sext6 = sext i8 %load.6 to i32
+ %sext7 = sext i8 %load.7 to i32
+ %sext8 = sext i8 %load.8 to i32
+ %sub1 = sub i32 %sext1, %sext5
+ %sub2 = sub i32 %sext2, %sext6
+ %sub3 = sub i32 %sext7, %sext3
+ %sub4 = sub i32 %sext8, %sext4
+ %call1 = call i32 @llvm.abs(i32 %sub1, i1 true)
+ %call2 = call i32 @llvm.abs(i32 %sub2, i1 true)
+ %call3 = call i32 @llvm.abs(i32 %sub3, i1 true)
+ %call4 = call i32 @llvm.abs(i32 %sub4, i1 true)
+ %t1 = trunc i32 %call1 to i16
+ %t2 = trunc i32 %call2 to i16
+ %t3 = trunc i32 %call3 to i16
+ %t4 = trunc i32 %call4 to i16
+ %gep.8 = getelementptr inbounds i16, ptr %out, i64 1
+ %gep.9 = getelementptr inbounds i16, ptr %out, i64 2
+ %gep.10 = getelementptr inbounds i16, ptr %out, i64 3
+ store i16 %t1, ptr %out, align 2
+ store i16 %t2, ptr %gep.8, align 2
+ store i16 %t3, ptr %gep.9, align 2
+ store i16 %t4, ptr %gep.10, align 2
+
+ ret i32 undef
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll b/llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll
new file mode 100644
index 000000000000..531e96405348
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/orig-btiwidth-les-projected.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S --passes=slp-vectorizer < %s | FileCheck %s
+
+define i32 @test(i4 %0) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: i4 [[TMP0:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 0 to i4
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i8 0 to i4
+; CHECK-NEXT: [[ADD_R:%.*]] = or i4 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[ADD_R14:%.*]] = or i4 0, [[TMP2]]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i4 [[ADD_R]], [[ADD_R14]]
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %1 = trunc i8 0 to i4
+ %2 = trunc i8 0 to i4
+ %add.r = or i4 %1, %0
+ %add.r14 = or i4 0, %2
+ %cmp.not = icmp eq i4 %add.r, %add.r14
+ ret i32 0
+}
diff --git a/llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof b/llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof
new file mode 100644
index 000000000000..76a8fc9d19a8
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/Inputs/pseudo-probe-callee-profile-mismatch.prof
@@ -0,0 +1,16 @@
+main:252:0
+ 1: 0
+ 2: 50
+ 5: 50
+ 7: bar:102
+ 1: 51
+ 2: baz:51
+ 1: 51
+ !CFGChecksum: 4294967295
+ !Attributes: 3
+ !CFGChecksum: 281479271677951
+ !Attributes: 2
+ !CFGChecksum: 281582081721716
+bar:1:1
+ 1: 1
+ !CFGChecksum: 281479271677951
diff --git a/llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll b/llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll
new file mode 100644
index 000000000000..df56b55dcdf3
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/csspgo-profile-checksum-mismatch-attr.ll
@@ -0,0 +1,67 @@
+; REQUIRES: x86_64-linux
+; REQUIRES: asserts
+; RUN: opt < %s -passes='thinlto-pre-link<O2>' -pgo-kind=pgo-sample-use-pipeline -sample-profile-file=%S/Inputs/pseudo-probe-callee-profile-mismatch.prof -pass-remarks=inline -S -o %t 2>&1 | FileCheck %s --check-prefix=INLINE
+; RUN: FileCheck %s < %t
+; RUN: FileCheck %s < %t --check-prefix=MERGE
+
+
+; Make sure bar is inlined into main for attr merging verification.
+; INLINE: 'bar' inlined into 'main'
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @baz() #0 {
+entry:
+ ret i32 0
+}
+
+define i32 @bar() #0 !dbg !11 {
+; CHECK: define {{.*}} @bar() {{.*}} #[[#BAR_ATTR:]] !
+entry:
+ %call = call i32 @baz()
+ ret i32 0
+}
+
+define i32 @main() #0 {
+; MERGE: define {{.*}} @main() {{.*}} #[[#MAIN_ATTR:]] !
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %entry
+ %call = call i32 @bar(), !dbg !14
+ br label %for.cond
+}
+
+; CHECK: attributes #[[#BAR_ATTR]] = {{{.*}} "profile-checksum-mismatch" {{.*}}}
+
+; Verify the attribute is not merged into the caller.
+; MERGE-NOT: attributes #[[#MAIN_ATTR]] = {{{.*}} "profile-checksum-mismatch" {{.*}}}
+
+attributes #0 = { "use-sample-profile" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!7}
+!llvm.pseudo_probe_desc = !{!8, !9, !10}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "test.c", directory: "/home", checksumkind: CSK_MD5, checksum: "0df0c950a93a603a7d13f0a9d4623642")
+!2 = !{!3}
+!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression())
+!4 = distinct !DIGlobalVariable(name: "x", scope: !0, file: !1, line: 2, type: !5, isLocal: false, isDefinition: true)
+!5 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !6)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = !{i32 2, !"Debug Info Version", i32 3}
+!8 = !{i64 7546896869197086323, i64 4294967295, !"baz"}
+!9 = !{i64 -2012135647395072713, i64 281530612780802, !"bar"}
+!10 = !{i64 -2624081020897602054, i64 281582081721716, !"main"}
+!11 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !12, scopeLine: 5, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !13)
+!12 = distinct !DISubroutineType(types: !13)
+!13 = !{}
+!14 = !DILocation(line: 15, column: 10, scope: !15)
+!15 = !DILexicalBlockFile(scope: !16, file: !1, discriminator: 186646591)
+!16 = distinct !DILexicalBlock(scope: !17, file: !1, line: 14, column: 40)
+!17 = distinct !DILexicalBlock(scope: !18, file: !1, line: 14, column: 3)
+!18 = distinct !DILexicalBlock(scope: !19, file: !1, line: 14, column: 3)
+!19 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 12, type: !20, scopeLine: 13, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !13)
+!20 = !DISubroutineType(types: !13)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll
new file mode 100644
index 000000000000..4881937df101
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-callee-profile-mismatch.ll
@@ -0,0 +1,63 @@
+; REQUIRES: x86_64-linux
+; REQUIRES: asserts
+; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-callee-profile-mismatch.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-matcher,sample-profile-impl -pass-remarks=inline 2>&1 | FileCheck %s
+
+
+; CHECK: Run stale profile matching for bar
+; CHECK: Callsite with callee:baz is matched from 4 to 2
+; CHECK: 'baz' inlined into 'main' to match profiling context with (cost=always): preinliner at callsite bar:3:8.4 @ main:3:10.7
+
+; CHECK: Probe descriptor missing for Function bar
+; CHECK: Profile is invalid due to CFG mismatch for Function bar
+
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() #0 {
+ %1 = call i32 @bar(), !dbg !13
+ ret i32 0
+}
+
+define available_externally i32 @bar() #1 !dbg !21 {
+ %1 = call i32 @baz(), !dbg !23
+ ret i32 0
+}
+
+define available_externally i32 @baz() #0 !dbg !25 {
+ ret i32 0
+}
+
+attributes #0 = { "use-sample-profile" }
+attributes #1 = { "profile-checksum-mismatch" "use-sample-profile" }
+
+!llvm.dbg.cu = !{!0, !7, !9}
+!llvm.module.flags = !{!11}
+!llvm.pseudo_probe_desc = !{!12}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "test.c", directory: "/home/test", checksumkind: CSK_MD5, checksum: "7220f1a2d70ff869f1a6ab7958e3c393")
+!2 = !{!3}
+!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression())
+!4 = distinct !DIGlobalVariable(name: "x", scope: !0, file: !1, line: 2, type: !5, isLocal: false, isDefinition: true)
+!5 = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: !6)
+!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!7 = distinct !DICompileUnit(language: DW_LANG_C11, file: !8, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!8 = !DIFile(filename: "test1.v1.c", directory: "/home/test", checksumkind: CSK_MD5, checksum: "76696bd6bfe16a9f227fe03cfdb6a82c")
+!9 = distinct !DICompileUnit(language: DW_LANG_C11, file: !10, producer: "clang version 19.0.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!10 = !DIFile(filename: "test2.c", directory: "/home/test", checksumkind: CSK_MD5, checksum: "553093afc026f9c73562eb3b0c5b7532")
+!11 = !{i32 2, !"Debug Info Version", i32 3}
+!12 = !{i64 -2624081020897602054, i64 281582081721716, !"main"}
+!13 = !DILocation(line: 8, column: 10, scope: !14)
+!14 = !DILexicalBlockFile(scope: !15, file: !1, discriminator: 186646591)
+!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 7, column: 40)
+!16 = distinct !DILexicalBlock(scope: !17, file: !1, line: 7, column: 3)
+!17 = distinct !DILexicalBlock(scope: !18, file: !1, line: 7, column: 3)
+!18 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 5, type: !19, scopeLine: 6, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !20)
+!19 = distinct !DISubroutineType(types: !20)
+!20 = !{}
+!21 = distinct !DISubprogram(name: "bar", scope: !8, file: !8, line: 3, type: !22, scopeLine: 3, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !7, retainedNodes: !20)
+!22 = !DISubroutineType(types: !20)
+!23 = !DILocation(line: 6, column: 8, scope: !24)
+!24 = !DILexicalBlockFile(scope: !21, file: !8, discriminator: 186646567)
+!25 = distinct !DISubprogram(name: "baz", scope: !10, file: !10, line: 1, type: !22, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !9, retainedNodes: !20)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll
new file mode 100644
index 000000000000..2bb8f677f40c
--- /dev/null
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch-error.ll
@@ -0,0 +1,7 @@
+; REQUIRES: x86_64-linux
+; RUN: not opt < %S/pseudo-probe-profile-mismatch.ll -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-profile-mismatch.prof -min-functions-for-staleness-error=1 -precent-mismatch-for-staleness-error=1 -S 2>&1 | FileCheck %s
+; RUN: opt < %S/pseudo-probe-profile-mismatch.ll -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-profile-mismatch.prof -min-functions-for-staleness-error=3 -precent-mismatch-for-staleness-error=70 -S 2>&1
+; RUN: opt < %S/pseudo-probe-profile-mismatch.ll -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-profile-mismatch.prof -min-functions-for-staleness-error=4 -precent-mismatch-for-staleness-error=1 -S 2>&1
+
+
+; CHECK: error: {{.*}}: The input profile significantly mismatches current source code. Please recollect profile to avoid performance regression.
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll
index 55225b415d4a..7aabeeca2585 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-lto.ll
@@ -1,6 +1,6 @@
; REQUIRES: x86_64-linux
; REQUIRES: asserts
-; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching-lto.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-impl 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching-lto.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-matcher,sample-profile-impl 2>&1 | FileCheck %s
; CHECK: Run stale profile matching for main
@@ -106,7 +106,7 @@ define available_externally dso_local i32 @bar(i32 noundef %0) local_unnamed_add
ret i32 %2, !dbg !132
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
+attributes #0 = { nounwind uwtable "disable-tail-calls"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" "profile-checksum-mismatch"}
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #3 = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) uwtable "disable-tail-calls"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
index 89477ea5fecf..0d471e43d2a7 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
@@ -1,6 +1,6 @@
; REQUIRES: x86_64-linux
; REQUIRES: asserts
-; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-impl 2>&1 | FileCheck %s
+; RUN: opt < %s -passes=sample-profile -sample-profile-file=%S/Inputs/pseudo-probe-stale-profile-matching.prof --salvage-stale-profile -S --debug-only=sample-profile,sample-profile-matcher,sample-profile-impl 2>&1 | FileCheck %s
; The profiled source code:
@@ -48,6 +48,8 @@
; }
; }
+; Verify not running profile matching for checksum matched function.
+; CHECK-NOT: Run stale profile matching for bar
; CHECK: Run stale profile matching for main
diff --git a/llvm/test/Transforms/SampleProfile/remarks-hotness.ll b/llvm/test/Transforms/SampleProfile/remarks-hotness.ll
index b90b21e9e3c5..36fb3c581817 100644
--- a/llvm/test/Transforms/SampleProfile/remarks-hotness.ll
+++ b/llvm/test/Transforms/SampleProfile/remarks-hotness.ll
@@ -24,7 +24,7 @@
; YAML-PASS: --- !Passed
; YAML-PASS-NEXT: Pass: inline
-; YAML-PASS-NEXT: Name: AlwaysInline
+; YAML-PASS-NEXT: Name: Inlined
; YAML-PASS-NEXT: DebugLoc: { File: remarks-hotness.cpp, Line: 10, Column: 10 }
; YAML-PASS-NEXT: Function: _Z7caller1v
; YAML-PASS-NEXT: Hotness: 401
@@ -36,7 +36,7 @@
; YAML-MISS-NEXT: Function: _Z7caller2v
; YAML-MISS-NEXT: Hotness: 2
-; CHECK-RPASS: '_Z7callee1v' inlined into '_Z7caller1v' with (cost=always): benefit over cost at callsite _Z7caller1v:1:10; (hotness: 401)
+; CHECK-RPASS: '_Z7callee1v' inlined into '_Z7caller1v' with (cost=-30, threshold=4500) at callsite _Z7caller1v:1:10; (hotness: 401)
; CHECK-RPASS-NOT: '_Z7callee2v' not inlined into '_Z7caller2v' because it should never be inlined (cost=never): noinline function attribute (hotness: 2)
; ModuleID = 'remarks-hotness.cpp'
diff --git a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
index a081eddfc456..4a4c94098ab9 100644
--- a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
+++ b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll
@@ -64,8 +64,8 @@ define float @PR39535min_switch(i64 %i, float %x) {
; CHECK-LABEL: @PR39535min_switch(
; CHECK-NEXT: entry:
; CHECK-NEXT: switch i64 [[I:%.*]], label [[END:%.*]] [
-; CHECK-NEXT: i64 1, label [[BB1:%.*]]
-; CHECK-NEXT: i64 2, label [[BB2:%.*]]
+; CHECK-NEXT: i64 1, label [[BB1:%.*]]
+; CHECK-NEXT: i64 2, label [[BB2:%.*]]
; CHECK-NEXT: ]
; CHECK: bb1:
; CHECK-NEXT: br label [[END]]
@@ -154,3 +154,33 @@ F:
%z2 = or disjoint i32 %x, %y
ret i32 %z2
}
+
+define i16 @hoist_trunc_flags_preserve(i1 %C, i32 %x) {
+; CHECK-LABEL: @hoist_trunc_flags_preserve(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[Z1:%.*]] = trunc nuw nsw i32 [[X:%.*]] to i16
+; CHECK-NEXT: ret i16 [[Z1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %z1 = trunc nsw nuw i32 %x to i16
+ ret i16 %z1
+F:
+ %z2 = trunc nsw nuw i32 %x to i16
+ ret i16 %z2
+}
+
+define i16 @hoist_trunc_flags_drop(i1 %C, i32 %x) {
+; CHECK-LABEL: @hoist_trunc_flags_drop(
+; CHECK-NEXT: common.ret:
+; CHECK-NEXT: [[Z1:%.*]] = trunc i32 [[X:%.*]] to i16
+; CHECK-NEXT: ret i16 [[Z1]]
+;
+ br i1 %C, label %T, label %F
+T:
+ %z1 = trunc i32 %x to i16
+ ret i16 %z1
+F:
+ %z2 = trunc nsw nuw i32 %x to i16
+ ret i16 %z2
+}
diff --git a/llvm/test/Transforms/TailCallElim/debugloc.ll b/llvm/test/Transforms/TailCallElim/debugloc.ll
index 3abbd6552efc..49957695a421 100644
--- a/llvm/test/Transforms/TailCallElim/debugloc.ll
+++ b/llvm/test/Transforms/TailCallElim/debugloc.ll
@@ -4,13 +4,13 @@
define void @foo() {
entry:
; CHECK-LABEL: entry:
-; CHECK: br label %tailrecurse, !dbg ![[DbgLoc:[0-9]+]]
+; CHECK: br label %tailrecurse{{$}}
call void @foo() ;; line 1
ret void
; CHECK-LABEL: tailrecurse:
-; CHECK: br label %tailrecurse, !dbg ![[DbgLoc]]
+; CHECK: br label %tailrecurse, !dbg ![[DbgLoc:[0-9]+]]
}
;; Make sure tailrecurse has the call instruction's DL
diff --git a/llvm/test/Verifier/tbaa-struct.ll b/llvm/test/Verifier/tbaa-struct.ll
new file mode 100644
index 000000000000..b8ddc7cee496
--- /dev/null
+++ b/llvm/test/Verifier/tbaa-struct.ll
@@ -0,0 +1,40 @@
+; RUN: llvm-as < %s 2>&1
+
+; FIXME: The verifer should reject the invalid !tbaa.struct nodes below.
+
+define void @test_overlapping_regions(ptr %a1) {
+ %ld = load i8, ptr %a1, align 1, !tbaa.struct !0
+ ret void
+}
+
+define void @test_size_not_integer(ptr %a1) {
+ store i8 1, ptr %a1, align 1, !tbaa.struct !5
+ ret void
+}
+
+define void @test_offset_not_integer(ptr %a1, ptr %a2) {
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a1, ptr align 8 %a2, i64 16, i1 false), !tbaa.struct !6
+ ret void
+}
+
+define void @test_tbaa_missing(ptr %a1, ptr %a2) {
+ tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a1, ptr align 8 %a2, i64 16, i1 false), !tbaa.struct !7
+ ret void
+}
+
+define void @test_tbaa_invalid(ptr %a1) {
+ store i8 1, ptr %a1, align 1, !tbaa.struct !8
+ ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
+
+!0 = !{i64 0, i64 4, !1, i64 1, i64 4, !1}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C++ TBAA"}
+!5 = !{i64 0, !2, !1}
+!6 = !{!2, i64 0, !1}
+!7 = !{i64 0, i64 4, null}
+!8 = !{i64 0, i64 4, !2}
diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
index 80145c5e098e..71e82eca6c3e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/lanai_isel.ll.expected
@@ -7,7 +7,7 @@ define i64 @i64_test(i64 %i) nounwind readnone {
; CHECK-NEXT: t0: ch,glue = EntryToken
; CHECK-NEXT: t5: i32,ch = LDW_RI<Mem:(load (s32) from %fixed-stack.0)> TargetFrameIndex:i32<-2>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t7: i32 = ADD_I_LO TargetFrameIndex:i32<0>, TargetConstant:i32<0>
-; CHECK-NEXT: t29: i32 = OR_I_LO t7, TargetConstant:i32<4>
+; CHECK-NEXT: t29: i32 = OR_I_LO disjoint t7, TargetConstant:i32<4>
; CHECK-NEXT: t22: i32,ch = LDW_RI<Mem:(dereferenceable load (s32) from %ir.loc + 4, basealign 8)> t29, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t5, t22, TargetConstant:i32<0>
; CHECK-NEXT: t3: i32,ch = LDW_RI<Mem:(load (s32) from %fixed-stack.1, align 8)> TargetFrameIndex:i32<-1>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
@@ -52,7 +52,7 @@ define i64 @i16_test(i16 %i) nounwind readnone {
; CHECK-NEXT: t33: i32,ch = CopyFromReg t0, Register:i32 $r0
; CHECK-NEXT: t14: ch,glue = CopyToReg t0, Register:i32 $rv, t33
; CHECK-NEXT: t1: i32 = ADD_I_LO TargetFrameIndex:i32<-1>, TargetConstant:i32<0>
-; CHECK-NEXT: t21: i32 = OR_I_LO t1, TargetConstant:i32<2>
+; CHECK-NEXT: t21: i32 = OR_I_LO disjoint t1, TargetConstant:i32<2>
; CHECK-NEXT: t23: i32,ch = LDHz_RI<Mem:(load (s16) from %fixed-stack.0 + 2, basealign 4)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t22: i32,ch = LDHz_RI<Mem:(dereferenceable load (s16) from %ir.loc)> TargetFrameIndex:i32<0>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t23, t22, TargetConstant:i32<0>
@@ -75,7 +75,7 @@ define i64 @i8_test(i8 %i) nounwind readnone {
; CHECK-NEXT: t33: i32,ch = CopyFromReg t0, Register:i32 $r0
; CHECK-NEXT: t14: ch,glue = CopyToReg t0, Register:i32 $rv, t33
; CHECK-NEXT: t1: i32 = ADD_I_LO TargetFrameIndex:i32<-1>, TargetConstant:i32<0>
-; CHECK-NEXT: t21: i32 = OR_I_LO t1, TargetConstant:i32<3>
+; CHECK-NEXT: t21: i32 = OR_I_LO disjoint t1, TargetConstant:i32<3>
; CHECK-NEXT: t23: i32,ch = LDBz_RI<Mem:(load (s8) from %fixed-stack.0 + 3, basealign 4)> t21, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t22: i32,ch = LDBz_RI<Mem:(dereferenceable load (s8) from %ir.loc)> TargetFrameIndex:i32<0>, TargetConstant:i32<0>, TargetConstant:i32<0>, t0
; CHECK-NEXT: t24: i32 = ADD_R t23, t22, TargetConstant:i32<0>
diff --git a/llvm/test/tools/dxil-dis/debug-info.ll b/llvm/test/tools/dxil-dis/debug-info.ll
index 92dc65481e82..96e023338e5c 100644
--- a/llvm/test/tools/dxil-dis/debug-info.ll
+++ b/llvm/test/tools/dxil-dis/debug-info.ll
@@ -1,4 +1,4 @@
-; RUN: llc --filetype=obj %s -o - | dxil-dis -o - | FileCheck %s
+; RUN: llc --filetype=obj %s -o - -experimental-debuginfo-iterators=false | dxil-dis -o - | FileCheck %s
target triple = "dxil-unknown-shadermodel6.7-library"
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/tools/llc/new-pm/machine-function-properties.mir b/llvm/test/tools/llc/new-pm/machine-function-properties.mir
new file mode 100644
index 000000000000..a9eb88ec6988
--- /dev/null
+++ b/llvm/test/tools/llc/new-pm/machine-function-properties.mir
@@ -0,0 +1,12 @@
+# REQUIRES: asserts
+# RUN: not --crash llc -mtriple=x86_64-pc-linux-gnu -passes=require-all-machine-function-properties -filetype=null %s 2>&1 | FileCheck %s
+
+# CHECK: MachineFunctionProperties required by RequireAllMachineFunctionPropertiesPass pass are not met by function f.
+
+---
+name: f
+selected: false
+body: |
+ bb.0:
+ RET 0
+...
diff --git a/llvm/test/tools/llvm-lib/arm64ec-implib.test b/llvm/test/tools/llvm-lib/arm64ec-implib.test
index 00eddd2a4752..e9987d0ca2e6 100644
--- a/llvm/test/tools/llvm-lib/arm64ec-implib.test
+++ b/llvm/test/tools/llvm-lib/arm64ec-implib.test
@@ -14,6 +14,8 @@ ARMAP-NEXT: Archive EC map
ARMAP-NEXT: #expname in test.dll
ARMAP-NEXT: #funcexp in test.dll
ARMAP-NEXT: #mangledfunc in test.dll
+ARMAP-NEXT: #manglednonamefunc in test.dll
+ARMAP-NEXT: #nonamefunc in test.dll
ARMAP-NEXT: ?test_cpp_func@@$$hYAHPEAX@Z in test.dll
ARMAP-NEXT: ?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAP-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
@@ -23,28 +25,34 @@ ARMAP-NEXT: __imp_aux_?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAP-NEXT: __imp_aux_expname in test.dll
ARMAP-NEXT: __imp_aux_funcexp in test.dll
ARMAP-NEXT: __imp_aux_mangledfunc in test.dll
+ARMAP-NEXT: __imp_aux_manglednonamefunc in test.dll
+ARMAP-NEXT: __imp_aux_nonamefunc in test.dll
ARMAP-NEXT: __imp_dataexp in test.dll
ARMAP-NEXT: __imp_expname in test.dll
ARMAP-NEXT: __imp_funcexp in test.dll
ARMAP-NEXT: __imp_mangledfunc in test.dll
+ARMAP-NEXT: __imp_manglednonamefunc in test.dll
+ARMAP-NEXT: __imp_nonamefunc in test.dll
ARMAP-NEXT: expname in test.dll
ARMAP-NEXT: funcexp in test.dll
ARMAP-NEXT: mangledfunc in test.dll
+ARMAP-NEXT: manglednonamefunc in test.dll
+ARMAP-NEXT: nonamefunc in test.dll
ARMAP-NEXT: test_NULL_THUNK_DATA in test.dll
RUN: llvm-readobj test.lib | FileCheck -check-prefix=READOBJ %s
-READOBJ: File: test.lib(test.dll)
+READOBJ: File: test{{.*}}.lib(test.dll)
READOBJ-NEXT: Format: COFF-ARM64{{$}}
READOBJ-NEXT: Arch: aarch64
READOBJ-NEXT: AddressSize: 64bit
READOBJ-EMPTY:
-READOBJ-NEXT: File: test.lib(test.dll)
+READOBJ-NEXT: File: test{{.*}}.lib(test.dll)
READOBJ-NEXT: Format: COFF-ARM64{{$}}
READOBJ-NEXT: Arch: aarch64
READOBJ-NEXT: AddressSize: 64bit
READOBJ-EMPTY:
-READOBJ-NEXT: File: test.lib(test.dll)
+READOBJ-NEXT: File: test{{.*}}.lib(test.dll)
READOBJ-NEXT: Format: COFF-ARM64{{$}}
READOBJ-NEXT: Arch: aarch64
READOBJ-NEXT: AddressSize: 64bit
@@ -95,6 +103,30 @@ READOBJ-NEXT: Type: data
READOBJ-NEXT: Name type: name
READOBJ-NEXT: Export name: dataexp
READOBJ-NEXT: Symbol: __imp_dataexp
+READOBJ-EMPTY:
+READOBJ-NEXT: File: test.dll
+READOBJ-NEXT: Format: COFF-import-file-ARM64EC
+READOBJ-NEXT: Type: code
+READOBJ-NEXT: Name type: ordinal
+READOBJ-NEXT: Symbol: __imp_nonamefunc
+READOBJ-NEXT: Symbol: nonamefunc
+READOBJ-NEXT: Symbol: __imp_aux_nonamefunc
+READOBJ-NEXT: Symbol: #nonamefunc
+READOBJ-EMPTY:
+READOBJ-NEXT: File: test.dll
+READOBJ-NEXT: Format: COFF-import-file-ARM64EC
+READOBJ-NEXT: Type: code
+READOBJ-NEXT: Name type: ordinal
+READOBJ-NEXT: Symbol: __imp_manglednonamefunc
+READOBJ-NEXT: Symbol: manglednonamefunc
+READOBJ-NEXT: Symbol: __imp_aux_manglednonamefunc
+READOBJ-NEXT: Symbol: #manglednonamefunc
+
+
+Using -machine:arm64x gives the same output.
+RUN: llvm-lib -machine:arm64x -def:test.def -out:testx.lib
+RUN: llvm-nm --print-armap testx.lib | FileCheck -check-prefix=ARMAP %s
+RUN: llvm-readobj testx.lib | FileCheck -check-prefix=READOBJ %s
Creating a new lib containing the existing lib:
RUN: llvm-lib -machine:arm64ec test.lib -out:test2.lib
@@ -107,22 +139,28 @@ RUN: llvm-nm --print-armap testx.lib | FileCheck -check-prefix=ARMAPX %s
ARMAPX: Archive map
ARMAPX-NEXT: #mangledfunc in test.dll
+ARMAPX-NEXT: #manglednonamefunc in test.dll
ARMAPX-NEXT: ?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
ARMAPX-NEXT: __NULL_IMPORT_DESCRIPTOR in test.dll
ARMAPX-NEXT: __imp_#mangledfunc in test.dll
+ARMAPX-NEXT: __imp_#manglednonamefunc in test.dll
ARMAPX-NEXT: __imp_?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __imp_dataexp in test.dll
ARMAPX-NEXT: __imp_expname in test.dll
ARMAPX-NEXT: __imp_funcexp in test.dll
+ARMAPX-NEXT: __imp_nonamefunc in test.dll
ARMAPX-NEXT: expname in test.dll
ARMAPX-NEXT: funcexp in test.dll
+ARMAPX-NEXT: nonamefunc in test.dll
ARMAPX-NEXT: test_NULL_THUNK_DATA in test.dll
ARMAPX-EMPTY:
ARMAPX-NEXT: Archive EC map
ARMAPX-NEXT: #expname in test.dll
ARMAPX-NEXT: #funcexp in test.dll
ARMAPX-NEXT: #mangledfunc in test.dll
+ARMAPX-NEXT: #manglednonamefunc in test.dll
+ARMAPX-NEXT: #nonamefunc in test.dll
ARMAPX-NEXT: ?test_cpp_func@@$$hYAHPEAX@Z in test.dll
ARMAPX-NEXT: ?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __IMPORT_DESCRIPTOR_test in test.dll
@@ -132,13 +170,19 @@ ARMAPX-NEXT: __imp_aux_?test_cpp_func@@YAHPEAX@Z in test.dll
ARMAPX-NEXT: __imp_aux_expname in test.dll
ARMAPX-NEXT: __imp_aux_funcexp in test.dll
ARMAPX-NEXT: __imp_aux_mangledfunc in test.dll
+ARMAPX-NEXT: __imp_aux_manglednonamefunc in test.dll
+ARMAPX-NEXT: __imp_aux_nonamefunc in test.dll
ARMAPX-NEXT: __imp_dataexp in test.dll
ARMAPX-NEXT: __imp_expname in test.dll
ARMAPX-NEXT: __imp_funcexp in test.dll
ARMAPX-NEXT: __imp_mangledfunc in test.dll
+ARMAPX-NEXT: __imp_manglednonamefunc in test.dll
+ARMAPX-NEXT: __imp_nonamefunc in test.dll
ARMAPX-NEXT: expname in test.dll
ARMAPX-NEXT: funcexp in test.dll
ARMAPX-NEXT: mangledfunc in test.dll
+ARMAPX-NEXT: manglednonamefunc in test.dll
+ARMAPX-NEXT: nonamefunc in test.dll
ARMAPX-NEXT: test_NULL_THUNK_DATA in test.dll
RUN: llvm-readobj testx.lib | FileCheck -check-prefix=READOBJX %s
@@ -206,6 +250,24 @@ READOBJX-NEXT: Export name: dataexp
READOBJX-NEXT: Symbol: __imp_dataexp
READOBJX-EMPTY:
READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64EC
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_nonamefunc
+READOBJX-NEXT: Symbol: nonamefunc
+READOBJX-NEXT: Symbol: __imp_aux_nonamefunc
+READOBJX-NEXT: Symbol: #nonamefunc
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64EC
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_manglednonamefunc
+READOBJX-NEXT: Symbol: manglednonamefunc
+READOBJX-NEXT: Symbol: __imp_aux_manglednonamefunc
+READOBJX-NEXT: Symbol: #manglednonamefunc
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
READOBJX-NEXT: Format: COFF-import-file-ARM64
READOBJX-NEXT: Type: code
READOBJX-NEXT: Name type: name
@@ -243,10 +305,26 @@ READOBJX-NEXT: Type: data
READOBJX-NEXT: Name type: name
READOBJX-NEXT: Export name: dataexp
READOBJX-NEXT: Symbol: __imp_dataexp
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_nonamefunc
+READOBJX-NEXT: Symbol: nonamefunc
+READOBJX-EMPTY:
+READOBJX-NEXT: File: test.dll
+READOBJX-NEXT: Format: COFF-import-file-ARM64
+READOBJX-NEXT: Type: code
+READOBJX-NEXT: Name type: ordinal
+READOBJX-NEXT: Symbol: __imp_#manglednonamefunc
+READOBJX-NEXT: Symbol: #manglednonamefunc
RUN: llvm-lib -machine:arm64ec -def:test.def -defArm64Native:test2.def -out:test2.lib
+RUN: llvm-lib -machine:arm64ec -def:test.def -defArm64Native:test2.def -out:test2x.lib
RUN: llvm-nm --print-armap test2.lib | FileCheck -check-prefix=ARMAPX2 %s
+RUN: llvm-nm --print-armap test2x.lib | FileCheck -check-prefix=ARMAPX2 %s
ARMAPX2: Archive map
ARMAPX2-NEXT: __IMPORT_DESCRIPTOR_test2 in test2.dll
@@ -259,6 +337,8 @@ ARMAPX2-NEXT: Archive EC map
ARMAPX2-NEXT: #expname in test2.dll
ARMAPX2-NEXT: #funcexp in test2.dll
ARMAPX2-NEXT: #mangledfunc in test2.dll
+ARMAPX2-NEXT: #manglednonamefunc in test2.dll
+ARMAPX2-NEXT: #nonamefunc in test2.dll
ARMAPX2-NEXT: ?test_cpp_func@@$$hYAHPEAX@Z in test2.dll
ARMAPX2-NEXT: ?test_cpp_func@@YAHPEAX@Z in test2.dll
ARMAPX2-NEXT: __IMPORT_DESCRIPTOR_test2 in test2.dll
@@ -268,13 +348,19 @@ ARMAPX2-NEXT: __imp_aux_?test_cpp_func@@YAHPEAX@Z in test2.dll
ARMAPX2-NEXT: __imp_aux_expname in test2.dll
ARMAPX2-NEXT: __imp_aux_funcexp in test2.dll
ARMAPX2-NEXT: __imp_aux_mangledfunc in test2.dll
+ARMAPX2-NEXT: __imp_aux_manglednonamefunc in test2.dll
+ARMAPX2-NEXT: __imp_aux_nonamefunc in test2.dll
ARMAPX2-NEXT: __imp_dataexp in test2.dll
ARMAPX2-NEXT: __imp_expname in test2.dll
ARMAPX2-NEXT: __imp_funcexp in test2.dll
ARMAPX2-NEXT: __imp_mangledfunc in test2.dll
+ARMAPX2-NEXT: __imp_manglednonamefunc in test2.dll
+ARMAPX2-NEXT: __imp_nonamefunc in test2.dll
ARMAPX2-NEXT: expname in test2.dll
ARMAPX2-NEXT: funcexp in test2.dll
ARMAPX2-NEXT: mangledfunc in test2.dll
+ARMAPX2-NEXT: manglednonamefunc in test2.dll
+ARMAPX2-NEXT: nonamefunc in test2.dll
ARMAPX2-NEXT: test2_NULL_THUNK_DATA in test2.dll
ARMAPX2: test2.dll:
@@ -305,6 +391,18 @@ ARMAPX2-NEXT: test2.dll:
ARMAPX2-NEXT: 00000000 D __imp_dataexp
ARMAPX2-EMPTY:
ARMAPX2-NEXT: test2.dll:
+ARMAPX2-NEXT: 00000000 T #nonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_aux_nonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_nonamefunc
+ARMAPX2-NEXT: 00000000 T nonamefunc
+ARMAPX2-EMPTY:
+ARMAPX2-NEXT: test2.dll:
+ARMAPX2-NEXT: 00000000 T #manglednonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_aux_manglednonamefunc
+ARMAPX2-NEXT: 00000000 T __imp_manglednonamefunc
+ARMAPX2-NEXT: 00000000 T manglednonamefunc
+ARMAPX2-EMPTY:
+ARMAPX2-NEXT: test2.dll:
ARMAPX2-NEXT: 00000000 T __imp_otherfunc
ARMAPX2-NEXT: 00000000 T otherfunc
@@ -399,6 +497,8 @@ EXPORTS
?test_cpp_func@@YAHPEAX@Z
expname=impname
dataexp DATA
+ nonamefunc @1 NONAME
+ #manglednonamefunc @2 NONAME
#--- test2.def
LIBRARY test2.dll
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
index efa81b0ffcd4..f120f5feaf7c 100644
--- a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
+++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
@@ -4093,8 +4093,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 12 7.00 sdiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: 1 20 7.00 sdivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 12 7.00 sdivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 4 0.50 sdot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: 1 4 0.50 sdot z0.d, z1.h, z31.h
+# CHECK-NEXT: 1 4 1.00 sdot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: 1 4 1.00 sdot z0.d, z1.h, z31.h
# CHECK-NEXT: 1 3 0.50 sdot z0.s, z1.b, z31.b
# CHECK-NEXT: 1 3 0.50 sdot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: 1 2 0.50 sel z23.b, p11, z13.b, z8.b
@@ -4569,8 +4569,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 12 7.00 udiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: 1 20 7.00 udivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 12 7.00 udivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 4 0.50 udot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: 1 4 0.50 udot z0.d, z1.h, z31.h
+# CHECK-NEXT: 1 4 1.00 udot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: 1 4 1.00 udot z0.d, z1.h, z31.h
# CHECK-NEXT: 1 3 0.50 udot z0.s, z1.b, z31.b
# CHECK-NEXT: 1 3 0.50 udot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: 1 2 0.50 umax z0.b, z0.b, #0
@@ -4839,7 +4839,7 @@ zip2 z31.s, z31.s, z31.s
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10]
-# CHECK-NEXT: - - - - 88.67 500.67 500.67 797.50 2.50 92.50 92.50 1250.00 923.00 178.50 181.50
+# CHECK-NEXT: - - - - 88.67 500.67 500.67 797.50 2.50 92.50 92.50 1252.00 921.00 178.50 181.50
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0.0] [0.1] [1.0] [1.1] [2] [3.0] [3.1] [4] [5] [6.0] [6.1] [7] [8] [9] [10] Instructions:
@@ -6521,8 +6521,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - sdiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - sdivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - sdivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.d, z1.h, z31.h
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - sdot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - sdot z0.d, z1.h, z31.h
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.s, z1.b, z31.b
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sdot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - sel z23.b, p11, z13.b, z8.b
@@ -6997,8 +6997,8 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - udiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - udivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: - - - - - - - - - - - 7.00 - - - udivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.d, z1.h, z31.h
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - udot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - udot z0.d, z1.h, z31.h
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.s, z1.b, z31.b
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - udot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: - - - - - - - - - - - 0.50 0.50 - - umax z0.b, z0.b, #0
diff --git a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
index c33cc79bd6c1..98b8619f2e04 100644
--- a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
+++ b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-avx1.s
@@ -1564,30 +1564,30 @@ vzeroupper
# CHECK-NEXT: 2 6 0.50 * vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 7 1.00 * vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 7 1.00 * vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 6 0.50 * vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubd %xmm0, %xmm1, %xmm2
@@ -1736,7 +1736,7 @@ vzeroupper
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 257.00 215.25 235.25 176.17 176.17 38.00 432.25 2.25 12.67
+# CHECK-NEXT: - 257.00 215.25 235.25 176.17 176.17 38.00 424.25 2.25 12.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -2274,30 +2274,30 @@ vzeroupper
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubd %xmm0, %xmm1, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
index 5bed312b0fe1..e76d90521afa 100644
--- a/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
+++ b/llvm/test/tools/llvm-mca/X86/Broadwell/resources-sse2.s
@@ -596,30 +596,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: 2 6 1.00 * pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 pslld %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * pslld (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * pslld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psllq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllq %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psllq (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psllq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psllw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllw %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psllw (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psllw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrad $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrad %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrad (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrad (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psraw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psraw %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psraw (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psraw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrld %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrld (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psrlq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlq %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrlq (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrlq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrlw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlw %xmm0, %xmm2
-# CHECK-NEXT: 3 7 1.00 * psrlw (%rax), %xmm2
+# CHECK-NEXT: 2 7 1.00 * psrlw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubb %xmm0, %xmm2
# CHECK-NEXT: 2 6 0.50 * psubb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubd %xmm0, %xmm2
@@ -689,7 +689,7 @@ xorpd (%rax), %xmm2
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 78.00 70.75 95.75 63.17 63.17 14.00 127.25 2.25 4.67
+# CHECK-NEXT: - 78.00 70.75 95.75 63.17 63.17 14.00 119.25 2.25 4.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -882,30 +882,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - pslld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - pslld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - pslld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - pslld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - pslldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrad $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrad %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrad (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrad (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psraw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psraw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psraw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psraw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - psrldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlw (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubb %xmm0, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - psubb (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubd %xmm0, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
index 3da547de54e3..376070d7f4e0 100644
--- a/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
+++ b/llvm/test/tools/llvm-mca/X86/Haswell/resources-avx1.s
@@ -1564,30 +1564,30 @@ vzeroupper
# CHECK-NEXT: 2 7 0.50 * vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 1.00 vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: 3 8 1.00 * vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: 2 8 1.00 * vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsubd %xmm0, %xmm1, %xmm2
@@ -1736,7 +1736,7 @@ vzeroupper
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 336.00 214.58 236.58 176.17 176.17 38.00 435.58 2.25 12.67
+# CHECK-NEXT: - 336.00 214.58 236.58 176.17 176.17 38.00 427.58 2.25 12.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -2274,30 +2274,30 @@ vzeroupper
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpslld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpslld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpslld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsllw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsllw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsllw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrad $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrad %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrad (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsraw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsraw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsraw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrld $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrld %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrld (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlq $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlq %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlq (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - vpsrlw $1, %xmm0, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - vpsrlw %xmm0, %xmm1, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - vpsrlw (%rax), %xmm1, %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - vpsubb (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - vpsubd %xmm0, %xmm1, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s b/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
index 3813ef4707a8..3b4aeb37968f 100644
--- a/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
+++ b/llvm/test/tools/llvm-mca/X86/Haswell/resources-sse2.s
@@ -596,30 +596,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: 2 7 1.00 * pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 pslld %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * pslld (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * pslld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 pslldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psllq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllq %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psllq (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psllq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psllw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psllw %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psllw (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psllw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrad $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrad %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrad (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrad (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psraw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psraw %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psraw (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psraw (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrld $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrld %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrld (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrld (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrldq $1, %xmm2
# CHECK-NEXT: 1 1 1.00 psrlq $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlq %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrlq (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrlq (%rax), %xmm2
# CHECK-NEXT: 1 1 1.00 psrlw $1, %xmm2
# CHECK-NEXT: 2 2 1.00 psrlw %xmm0, %xmm2
-# CHECK-NEXT: 3 8 1.00 * psrlw (%rax), %xmm2
+# CHECK-NEXT: 2 8 1.00 * psrlw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubb %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psubb (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psubd %xmm0, %xmm2
@@ -689,7 +689,7 @@ xorpd (%rax), %xmm2
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9]
-# CHECK-NEXT: - 112.00 70.75 95.75 63.17 63.17 14.00 127.25 2.25 4.67
+# CHECK-NEXT: - 112.00 70.75 95.75 63.17 63.17 14.00 119.25 2.25 4.67
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] Instructions:
@@ -882,30 +882,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: - - - - 0.50 0.50 - 1.00 - - pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - pslld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - pslld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - pslld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - pslld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - pslldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psllw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psllw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psllw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psllw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrad $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrad %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrad (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrad (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psraw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psraw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psraw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psraw (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrld $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrld %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrld (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrld (%rax), %xmm2
# CHECK-NEXT: - - - - - - - 1.00 - - psrldq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlq $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlq %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlq (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlq (%rax), %xmm2
# CHECK-NEXT: - - 1.00 - - - - - - - psrlw $1, %xmm2
# CHECK-NEXT: - - 1.00 - - - - 1.00 - - psrlw %xmm0, %xmm2
-# CHECK-NEXT: - - 1.00 - 0.50 0.50 - 1.00 - - psrlw (%rax), %xmm2
+# CHECK-NEXT: - - 1.00 - 0.50 0.50 - - - - psrlw (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubb %xmm0, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - psubb (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - psubd %xmm0, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s
index f184d5579d06..bd7a4894b45d 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx1.s
@@ -1563,30 +1563,30 @@ vzeroupper
# CHECK-NEXT: 1 1 0.50 vpsignw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpslld $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpslld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpslld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsllq $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsllq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsllq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsllw $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrad $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrad %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrad %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsraw $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrld $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrlq $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrlq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrlq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.50 vpsrlw $1, %xmm0, %xmm2
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: 1 1 0.33 vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: 2 7 0.50 * vpsubb (%rax), %xmm1, %xmm2
@@ -1738,7 +1738,7 @@ vzeroupper
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - 126.00 322.92 233.92 160.50 160.50 19.00 295.92 6.25 19.00 19.00 19.00
+# CHECK-NEXT: - 126.00 322.92 237.92 160.50 160.50 19.00 291.92 6.25 19.00 19.00 19.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -2275,30 +2275,30 @@ vzeroupper
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsignw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsignw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpslld $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpslld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpslld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpslld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - vpslldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsllq $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsllw $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrad $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrad %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrad %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrad (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsraw $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrld $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrld %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrld %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrld (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - vpsrldq $1, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrlq $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlq %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlq %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlq (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrlw $1, %xmm0, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm0, %xmm1, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm1, %xmm2
# CHECK-NEXT: - - 0.33 0.33 - - - 0.33 - - - - vpsubb %xmm0, %xmm1, %xmm2
# CHECK-NEXT: - - 0.33 0.33 0.50 0.50 - 0.33 - - - - vpsubb (%rax), %xmm1, %xmm2
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s
index a0715cef8b00..ee095f0d3bfc 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-avx512bwvl.s
@@ -1768,11 +1768,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: 2 8 0.50 * vpsllw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 0.50 vpsllw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vpsllw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: 2 2 1.00 vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: 2 2 0.67 vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 7 0.50 * vpsllw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 4 1.00 vpsllw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: 2 8 0.50 * vpsllw (%rax), %ymm17, %ymm19
@@ -1804,11 +1804,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: 2 8 0.50 * vpsraw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 0.50 vpsraw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vpsraw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: 2 2 1.00 vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: 2 2 0.67 vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 7 0.50 * vpsraw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 4 1.00 vpsraw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: 2 8 0.50 * vpsraw (%rax), %ymm17, %ymm19
@@ -1844,11 +1844,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: 2 8 0.50 * vpsrlw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: 1 1 0.50 vpsrlw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: 2 8 0.50 * vpsrlw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: 2 2 1.00 vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: 2 2 0.67 vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 7 0.50 * vpsrlw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: 2 4 1.00 vpsrlw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: 2 8 0.50 * vpsrlw (%rax), %ymm17, %ymm19
@@ -2025,7 +2025,7 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - - 255.33 303.33 216.00 216.00 10.00 451.33 - 10.00 10.00 10.00
+# CHECK-NEXT: - - 255.33 307.83 216.00 216.00 10.00 446.83 - 10.00 10.00 10.00
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -2705,11 +2705,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsllw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsllw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsllw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsllw (%rax), %ymm17, %ymm19
@@ -2741,11 +2741,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsraw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsraw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsraw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsraw (%rax), %ymm17, %ymm19
@@ -2781,11 +2781,11 @@ vpunpcklwd (%rax), %ymm17, %ymm19 {z}{k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw $0, (%rax), %ymm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - vpsrlw $0, %ymm16, %ymm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw $0, (%rax), %ymm19 {%k1} {z}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %xmm17, %xmm19
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm16, %xmm17, %xmm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm17, %xmm19
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm17, %xmm19 {%k1}
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - vpsrlw %xmm16, %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %xmm17, %xmm19 {%k1} {z}
# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - vpsrlw %xmm16, %ymm17, %ymm19
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - vpsrlw (%rax), %ymm17, %ymm19
diff --git a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s
index a4a51280b8ca..a1bf6c1e9862 100644
--- a/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s
+++ b/llvm/test/tools/llvm-mca/X86/IceLakeServer/resources-sse2.s
@@ -595,30 +595,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pshuflw $1, %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pslld $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 pslld %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 pslld %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * pslld (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 pslldq $1, %xmm2
# CHECK-NEXT: 1 1 0.50 psllq $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psllq %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psllq %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psllq (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psllw $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psllw %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psllw %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psllw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrad $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrad %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrad %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrad (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psraw $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psraw %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psraw %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psraw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrld $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrld %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrld %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrld (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrldq $1, %xmm2
# CHECK-NEXT: 1 1 0.50 psrlq $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrlq %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrlq %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrlq (%rax), %xmm2
# CHECK-NEXT: 1 1 0.50 psrlw $1, %xmm2
-# CHECK-NEXT: 2 2 1.00 psrlw %xmm0, %xmm2
+# CHECK-NEXT: 2 2 0.67 psrlw %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psrlw (%rax), %xmm2
# CHECK-NEXT: 1 1 0.33 psubb %xmm0, %xmm2
# CHECK-NEXT: 2 7 0.50 * psubb (%rax), %xmm2
@@ -691,7 +691,7 @@ xorpd (%rax), %xmm2
# CHECK: Resource pressure per iteration:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
-# CHECK-NEXT: - 40.00 106.92 98.92 58.50 58.50 7.50 76.42 1.75 8.00 8.00 7.50
+# CHECK-NEXT: - 40.00 106.92 102.92 58.50 58.50 7.50 72.42 1.75 8.00 8.00 7.50
# CHECK: Resource pressure by instruction:
# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] Instructions:
@@ -883,30 +883,30 @@ xorpd (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - pshuflw $1, %xmm0, %xmm2
# CHECK-NEXT: - - - 0.50 0.50 0.50 - 0.50 - - - - pshuflw $1, (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - pslld $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - pslld %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - pslld %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - pslld (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - pslldq $1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psllq $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psllq %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psllq %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psllq (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psllw $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psllw %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psllw %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psllw (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrad $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrad %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrad %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrad (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psraw $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psraw %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psraw %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psraw (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrld $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrld %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrld %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrld (%rax), %xmm2
# CHECK-NEXT: - - - 0.50 - - - 0.50 - - - - psrldq $1, %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrlq $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrlq %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrlq %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrlq (%rax), %xmm2
# CHECK-NEXT: - - 0.50 0.50 - - - - - - - - psrlw $1, %xmm2
-# CHECK-NEXT: - - 0.50 0.50 - - - 1.00 - - - - psrlw %xmm0, %xmm2
+# CHECK-NEXT: - - 0.50 1.00 - - - 0.50 - - - - psrlw %xmm0, %xmm2
# CHECK-NEXT: - - 0.50 0.50 0.50 0.50 - - - - - - psrlw (%rax), %xmm2
# CHECK-NEXT: - - 0.33 0.33 - - - 0.33 - - - - psubb %xmm0, %xmm2
# CHECK-NEXT: - - 0.33 0.33 0.50 0.50 - 0.33 - - - - psubb (%rax), %xmm2
diff --git a/llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test b/llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test
new file mode 100644
index 000000000000..0f3ab808482b
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/ELF/skip-symbol.test
@@ -0,0 +1,100 @@
+## This test checks the functionality of options --skip-symbol and --skip-symbols.
+# RUN: yaml2obj %s -o %t.o
+# RUN: echo 'foo[2-3]' > %t.skip.regex
+
+## Check --skip-symbol functionality when changing symbol bindings.
+# RUN: llvm-objcopy %t.o %t2.o --localize-hidden --skip-symbol=foo3
+# RUN: llvm-readelf -s %t2.o | FileCheck %s --check-prefix=LH-SYM
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo1
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo2
+# LH-SYM-DAG: GLOBAL HIDDEN 1 foo3
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo4
+# LH-SYM-DAG: LOCAL HIDDEN 1 foo5
+
+## Check --skip-symbols functionality when changing symbol bindings.
+# RUN: llvm-objcopy %t.o %t1.o --localize-hidden --skip-symbols=%t.skip.regex --regex
+# RUN: llvm-readelf -s %t1.o | FileCheck %s --check-prefix=LH-SYMS
+# LH-SYMS-DAG: LOCAL HIDDEN 1 foo1
+# LH-SYMS-DAG: GLOBAL HIDDEN 1 foo2
+# LH-SYMS-DAG: GLOBAL HIDDEN 1 foo3
+# LH-SYMS-DAG: LOCAL HIDDEN 1 foo4
+# LH-SYMS-DAG: LOCAL HIDDEN 1 foo5
+
+## Check --skip-symbol functionality when changing symbol names.
+# RUN: echo -e "foo1 bar1\nfoo2 bar2" > %t.renames.list
+# RUN: llvm-objcopy %t.o %t4.o --redefine-syms=%t.renames.list \
+# RUN: --skip-symbol='fo*' --wildcard
+# RUN: llvm-readelf -s %t4.o | FileCheck %s --check-prefix=RS-SYM
+# RS-SYM-DAG: foo1
+# RS-SYM-DAG: foo2
+# RS-SYM-DAG: foo3
+# RS-SYM-DAG: foo4
+# RS-SYM-DAG: foo5
+
+## Check --skip-symbols functionality when changing symbol names.
+# RUN: llvm-objcopy %t.o %t3.o --redefine-syms=%t.renames.list \
+# RUN: --skip-symbols=%t.skip.regex --regex
+# RUN: llvm-readelf -s %t3.o | FileCheck %s --check-prefix=RS-SYMS
+# RS-SYMS-DAG: bar1
+# RS-SYMS-DAG: foo2
+# RS-SYMS-DAG: foo3
+# RS-SYMS-DAG: foo4
+# RS-SYMS-DAG: foo5
+
+## Check the functionality when using skip options multiple times.
+# RUN: echo "foo3" > %t.symbol0.list
+# RUN: echo "foo4" > %t.symbol1.list
+# RUN: llvm-objcopy %t.o %t5.o --set-symbol-visibility='foo*'=internal --wildcard \
+# RUN: --skip-symbol=foo1 --skip-symbol=foo2 \
+# RUN: --skip-symbols=%t.symbol0.list --skip-symbols=%t.symbol1.list
+# RUN: llvm-readelf -s %t5.o | FileCheck %s --check-prefix=BOTH
+# BOTH-DAG: GLOBAL HIDDEN 1 foo1
+# BOTH-DAG: GLOBAL HIDDEN 1 foo2
+# BOTH-DAG: GLOBAL HIDDEN 1 foo3
+# BOTH-DAG: GLOBAL HIDDEN 1 foo4
+## Only foo5 is not skipped.
+# BOTH-DAG: GLOBAL INTERNAL 1 foo5
+
+## Check that using an invalid symbol name regex generates an error.
+# RUN: echo '*.' > %t.symbols.regex
+# RUN: not llvm-objcopy %t.o --skip-symbols=%t.symbols.regex --regex 2>&1 | \
+# RUN: FileCheck %s --check-prefix=SYMBOL
+# RUN: not llvm-objcopy %t.o --skip-symbol='*.' --regex 2>&1 | \
+# RUN: FileCheck %s --check-prefix=SYMBOL
+# SYMBOL: error: cannot compile regular expression '*.': repetition-operator operand invalid
+
+## Check passing an invalid filename generates an error.
+# RUN: not llvm-objcopy %t.o --skip-symbols=no_file 2>&1 | \
+# RUN: FileCheck %s --check-prefix=FILE -DMSG=%errc_ENOENT
+# FILE: error: 'no_file': [[MSG]]
+
+!ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_X86_64
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+Symbols:
+ - Name: foo1
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo2
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo3
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo4
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
+ - Name: foo5
+ Section: .text
+ Binding: STB_GLOBAL
+ Other: [ STV_HIDDEN ]
diff --git a/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test b/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
index 729128281104..cf95b5170026 100644
--- a/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
+++ b/llvm/test/tools/llvm-readobj/ELF/reloc-types-aarch64.test
@@ -119,6 +119,7 @@
# CHECK: Type: R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC (571)
# CHECK: Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12 (572)
# CHECK: Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC (573)
+# CHECK: Type: R_AARCH64_AUTH_ABS64 (580)
# CHECK: Type: R_AARCH64_COPY (1024)
# CHECK: Type: R_AARCH64_GLOB_DAT (1025)
# CHECK: Type: R_AARCH64_JUMP_SLOT (1026)
@@ -128,6 +129,7 @@
# CHECK: Type: R_AARCH64_TLS_TPREL64 (1030)
# CHECK: Type: R_AARCH64_TLSDESC (1031)
# CHECK: Type: R_AARCH64_IRELATIVE (1032)
+# CHECK: Type: R_AARCH64_AUTH_RELATIVE (1041)
--- !ELF
FileHeader:
@@ -254,6 +256,7 @@ Sections:
- Type: R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC
- Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12
- Type: R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC
+ - Type: R_AARCH64_AUTH_ABS64
- Type: R_AARCH64_COPY
- Type: R_AARCH64_GLOB_DAT
- Type: R_AARCH64_JUMP_SLOT
@@ -263,3 +266,4 @@ Sections:
- Type: R_AARCH64_TLS_TPREL64
- Type: R_AARCH64_TLSDESC
- Type: R_AARCH64_IRELATIVE
+ - Type: R_AARCH64_AUTH_RELATIVE
diff --git a/llvm/tools/CMakeLists.txt b/llvm/tools/CMakeLists.txt
index c6116ac81d12..cde57367934e 100644
--- a/llvm/tools/CMakeLists.txt
+++ b/llvm/tools/CMakeLists.txt
@@ -37,12 +37,13 @@ add_llvm_tool_subdirectory(llvm-profdata)
# Projects supported via LLVM_EXTERNAL_*_SOURCE_DIR need to be explicitly
# specified.
-add_llvm_external_project(clang)
add_llvm_external_project(lld)
-add_llvm_external_project(lldb)
add_llvm_external_project(mlir)
-# Flang depends on mlir, so place it afterward
+# ClangIR and Flang depend on mlir, and lldb and Flang depend on clang, so
+# sort them accordingly by placing them afterwards.
+add_llvm_external_project(clang)
add_llvm_external_project(flang)
+add_llvm_external_project(lldb)
add_llvm_external_project(bolt)
# Automatically add remaining sub-directories containing a 'CMakeLists.txt'
diff --git a/llvm/tools/llvm-c-test/debuginfo.c b/llvm/tools/llvm-c-test/debuginfo.c
index 78ccaf12a380..9b5c37b05d90 100644
--- a/llvm/tools/llvm-c-test/debuginfo.c
+++ b/llvm/tools/llvm-c-test/debuginfo.c
@@ -136,12 +136,13 @@ int llvm_test_dibuilder(bool NewDebugInfoFormat) {
LLVMMetadataRef FooParamVar1 =
LLVMDIBuilderCreateParameterVariable(DIB, FunctionMetadata, "a", 1, 1, File,
42, Int64Ty, true, 0);
+
if (LLVMIsNewDbgInfoFormat(M))
- LLVMDIBuilderInsertDeclareRecordAtEnd(
+ LLVMDIBuilderInsertDeclareAtEnd(
DIB, LLVMConstInt(LLVMInt64Type(), 0, false), FooParamVar1,
FooParamExpression, FooParamLocation, FooEntryBlock);
else
- LLVMDIBuilderInsertDeclareAtEnd(
+ LLVMDIBuilderInsertDeclareIntrinsicAtEnd(
DIB, LLVMConstInt(LLVMInt64Type(), 0, false), FooParamVar1,
FooParamExpression, FooParamLocation, FooEntryBlock);
LLVMMetadataRef FooParamVar2 =
@@ -149,11 +150,11 @@ int llvm_test_dibuilder(bool NewDebugInfoFormat) {
42, Int64Ty, true, 0);
if (LLVMIsNewDbgInfoFormat(M))
- LLVMDIBuilderInsertDeclareRecordAtEnd(
+ LLVMDIBuilderInsertDeclareAtEnd(
DIB, LLVMConstInt(LLVMInt64Type(), 0, false), FooParamVar2,
FooParamExpression, FooParamLocation, FooEntryBlock);
else
- LLVMDIBuilderInsertDeclareAtEnd(
+ LLVMDIBuilderInsertDeclareIntrinsicAtEnd(
DIB, LLVMConstInt(LLVMInt64Type(), 0, false), FooParamVar2,
FooParamExpression, FooParamLocation, FooEntryBlock);
@@ -161,11 +162,11 @@ int llvm_test_dibuilder(bool NewDebugInfoFormat) {
LLVMDIBuilderCreateParameterVariable(DIB, FunctionMetadata, "c", 1, 3, File,
42, VectorTy, true, 0);
if (LLVMIsNewDbgInfoFormat(M))
- LLVMDIBuilderInsertDeclareRecordAtEnd(
+ LLVMDIBuilderInsertDeclareAtEnd(
DIB, LLVMConstInt(LLVMInt64Type(), 0, false), FooParamVar3,
FooParamExpression, FooParamLocation, FooEntryBlock);
else
- LLVMDIBuilderInsertDeclareAtEnd(
+ LLVMDIBuilderInsertDeclareIntrinsicAtEnd(
DIB, LLVMConstInt(LLVMInt64Type(), 0, false), FooParamVar3,
FooParamExpression, FooParamLocation, FooEntryBlock);
diff --git a/llvm/tools/llvm-debuginfo-analyzer/README.md b/llvm/tools/llvm-debuginfo-analyzer/README.md
new file mode 100644
index 000000000000..28c4be6f7322
--- /dev/null
+++ b/llvm/tools/llvm-debuginfo-analyzer/README.md
@@ -0,0 +1,170 @@
+# `llvm-debuginfo-analyzer`
+
+These are notes collected during development, review and testing.
+They describe limitations, known issues and future work.
+
+### Remove the use of macros in ``LVReader.h`` that describe the ``bumpallocators``.
+**[D137933](https://reviews.llvm.org/D137933#inline-1389904)**
+
+Use a standard (or LLVM) ``map`` with ``typeinfo`` (would need a specialization
+to expose equality and hasher) for the allocators and the creation
+functions could be a function template.
+
+### Use a **lit test** instead of a **unit test** for the **logical readers**.
+**[D125783](https://reviews.llvm.org/D125783#inline-1324376)**
+
+As the ``DebugInfoLogicalView`` library is sufficiently exposed via the
+``llvm-debuginfo-analyzer`` tool, follow the LLVM general approach and
+use ``lit`` tests to validate the **logical readers**.
+
+Convert the ``unittests``:
+```
+llvm-project/llvm/unittests/DebugInfo/LogicalView/CodeViewReaderTest.cpp
+llvm-project/llvm/unittests/DebugInfo/LogicalView/DWARFReaderTest.cpp
+```
+into ``lit`` tests:
+```
+llvm-project/llvm/test/DebugInfo/LogicalView/CodeViewReader.test
+llvm-project/llvm/test/DebugInfo/LogicalView/DWARFReader.test
+```
+
+### Eliminate calls to ``getInputFileDirectory()`` in the ``unittests``.
+**[D125783](https://reviews.llvm.org/D125783#inline-1324359)**
+
+Rewrite the unittests ``ReaderTest`` and ``CodeViewReaderTest`` to eliminate
+the call:
+```
+ getInputFileDirectory()
+```
+as use of that call is discouraged.
+
+### Fix mismatch between ``%d/%x`` format strings and ``uint64_t`` type.
+**[D137400](https://reviews.llvm.org/D137400) / [58758](https://github.com/llvm/llvm-project/issues/58758)**
+
+Incorrect printing of ``uint64_t`` on ``32-bit`` platforms.
+Add the ``PRIx64`` specifier to the printing code (``format()``).
+
+### Remove ``LVScope::Children`` container.
+**[D137933](https://reviews.llvm.org/D137933#inline-1373902)**
+
+Use a **chaining iterator** over the other containers rather than keep a
+separate container ``Children`` that mirrors their contents.
+
+### Use ``TableGen`` for command line options.
+**[D125777](https://reviews.llvm.org/D125777#inline-1291801)**
+
+The current trend is to use ``TableGen`` for command-line options in tools.
+Change command line options to use ``tablegen`` as many other LLVM tools.
+
+### ``LVDoubleMap`` to return ``optional<ValueType>`` instead of ``null pointer``.
+**[D125783](https://reviews.llvm.org/D125783#inline-1294164)**
+
+The more idiomatic LLVM way to handle this would be to have ``find``
+return ``Optional<ValueType>``.
+
+### Pass references instead of pointers (**Comparison functions**).
+**[D125782](https://reviews.llvm.org/D125782#inline-1293920)**
+
+In the **comparison functions**, pass references instead of pointers (when
+pointers cannot be null).
+
+### Use ``StringMap`` where possible.
+**[D125783](https://reviews.llvm.org/D125783#inline-1294211)**
+
+LLVM has a ``StringMap`` class that is advertised as more efficient than
+``std::map<std::string, ValueType>``. Mainly it does fewer allocations
+because the key is not a ``std::string``.
+
+Replace the use of ``std::map<std::string, ValueType>`` with ``StringMap``.
+One specific case is the ``LVSymbolNames`` definitions.
+
+### Calculate unique offset for CodeView elements.
+In order to have the same logical functionality as the DWARF reader, such
+as:
+
+* find scopes contribution to debug info
+* sort by its physical location
+
+The logical elements must have a unique offset (similar to the DWARF
+``DIE`` offset).
+
+### Move ``initializeFileAndStringTables`` to the CodeView Library.
+There is some code in the CodeView reader that was extracted/adapted
+from ``tools/llvm-readobj/COFFDumper.cpp`` that can be moved to the CodeView
+library.
+
+We had a similar case with code shared with ``llvm-pdbutil`` that was moved
+to the PDB library: **[D122226](https://reviews.llvm.org/D122226)**
+
+### Move ``getSymbolKindName`` and ``formatRegisterId`` to the CodeView Library.
+There is some code in the CodeView reader that was extracted/adapted
+from ``lib/DebugInfo/CodeView/SymbolDumper.cpp`` that can be used.
+
+### Use of ``std::unordered_set`` instead of ``std::set``.
+**[D125784](https://reviews.llvm.org/D125784#inline-1221421)**
+
+Replace the ``std::set`` usage for ``DeducedScopes``, ``UnresolvedScopes`` and
+``IdentifiedNamespaces`` with ``std::unordered_set`` and get the benefit
+of the O(1) while inserting/searching, as the order is not important.
+
+### Optimize ``LVNamespaceDeduction::find`` function.
+**[D125784](https://reviews.llvm.org/D125784#inline-1296195)**
+
+Optimize the ``find`` method to use the proposed code:
+
+```
+ LVStringRefs::iterator Iter = std::find_if(Components.begin(), Components.end(),
+ [](StringRef Name) {
+ return IdentifiedNamespaces.find(Name) == IdentifiedNamespaces.end();
+ });
+ LVStringRefs::size_type FirstNonNamespace = std::distance(Components.begin(), Iter);
+```
+
+### Move all the printing support to a common module.
+Factor out printing functionality from the logical elements into a
+common module.
+
+### Refactor ``LVBinaryReader::processLines``.
+**[D125783](https://reviews.llvm.org/D125783#inline-1246155) /
+[D137156](https://reviews.llvm.org/D137156)**
+
+During the traversal of the debug information sections, we created the
+logical lines representing the **disassembled instructions** from the **text
+section** and the logical lines representing the **line records** from the
+**debug line** section. Using the ranges associated with the logical scopes,
+we will allocate those logical lines to their logical scopes.
+
+Consider the case when any of those lines become orphans, causing
+incorrect scope parent for disassembly or line records.
+
+### Add support for ``-ffunction-sections``.
+**[D125783](https://reviews.llvm.org/D125783#inline-1295012)**
+
+Only linked executables are handled. It does not support relocatable
+files compiled with ``-ffunction-sections``.
+
+### Add support for DWARF v5 `.debug_names` section / CodeView public symbols stream.
+**[D125783](https://reviews.llvm.org/D125783#inline-1294142)**
+
+The DWARF and CodeView readers use the public names information to create
+the instructions (``LVLineAssembler``). Instead of relying on DWARF section
+names (``.debug_pubnames``, ``.debug_names``) and CodeView public symbol stream
+(``S_PUB32``), the readers should collect the needed information while processing
+the debug information.
+
+If the object file supports the above section names and stream, use them
+to create the public names.
+
+### Add support for some extra DWARF locations.
+The following DWARF debug location operands are not supported:
+
+* `DW_OP_const_type`
+* `DW_OP_entry_value`
+* `DW_OP_implicit_value`
+
+### Add support for additional binary formats.
+* Extended COFF (`XCOFF`)
+
+### Add support for ``JSON`` or ``YAML``
+The logical view uses its own and non-standard free form text when
+displaying information on logical elements.
diff --git a/llvm/tools/llvm-debuginfo-analyzer/README.txt b/llvm/tools/llvm-debuginfo-analyzer/README.txt
deleted file mode 100644
index ce7569d27224..000000000000
--- a/llvm/tools/llvm-debuginfo-analyzer/README.txt
+++ /dev/null
@@ -1,221 +0,0 @@
-//===- llvm/tools/llvm-debuginfo-analyzer/README.txt ----------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains notes collected during the development, review and test.
-// It describes limitations, know issues and future work.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Remove the use of macros in 'LVReader.h' that describe the bumpallocators.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D137933#inline-1389904
-
-Use a standard (or LLVM) map with typeinfo (would need a specialization
-to expose equality and hasher) for the allocators and the creation
-functions could be a function template.
-
-//===----------------------------------------------------------------------===//
-// Use a lit test instead of a unit test for the logical readers.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1324376
-
-As the DebugInfoLogicalView library is sufficiently exposed via the
-llvm-debuginfo-analyzer tool, follow the LLVM general approach and
-use LIT tests to validate the logical readers.
-
-Convert the unitests:
- llvm-project/llvm/unittests/DebugInfo/LogicalView/CodeViewReaderTest.cpp
- llvm-project/llvm/unittests/DebugInfo/LogicalView/DWARFReaderTest.cpp
-
-into LIT tests:
- llvm-project/llvm/test/DebugInfo/LogicalView/CodeViewReader.test
- llvm-project/llvm/test/DebugInfo/LogicalView/DWARFReader.test
-
-//===----------------------------------------------------------------------===//
-// Eliminate calls to 'getInputFileDirectory()' in the unit tests.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1324359
-
-Rewrite the unittests 'LFReaderTest' and 'CodeViewReaderTest'to eliminate
-the call:
-
- getInputFileDirectory()
-
-as use of that call is discouraged.
-
-See: Use a lit test instead of a unit test for the logical readers.
-
-//===----------------------------------------------------------------------===//
-// Fix mismatch between %d/%x format strings and uint64_t type.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D137400
-https://github.com/llvm/llvm-project/issues/58758
-
-Incorrect printing of uint64_t on 32-bit platforms.
-Add the PRIx64 specifier to the printing code (format()).
-
-//===----------------------------------------------------------------------===//
-// Remove 'LVScope::Children' container.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D137933#inline-1373902
-
-Use a chaining iterator over the other containers rather than keep a
-separate container 'Children' that mirrors their contents.
-
-//===----------------------------------------------------------------------===//
-// Use TableGen for command line options.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125777#inline-1291801
-
-The current trend is to use TableGen for command-line options in tools.
-Change command line options to use tablegen as many other LLVM tools.
-
-//===----------------------------------------------------------------------===//
-// LVDoubleMap to return optional<ValueType> instead of null pointer.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1294164
-
-The more idiomatic LLVM way to handle this would be to have 'find '
-return Optional<ValueType>.
-
-//===----------------------------------------------------------------------===//
-// Pass references instead of pointers (Comparison functions).
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125782#inline-1293920
-
-In the comparison functions, pass references instead of pointers (when
-pointers cannot be null).
-
-//===----------------------------------------------------------------------===//
-// Use StringMap where possible.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1294211
-
-LLVM has a StringMap class that is advertised as more efficient than
-std::map<std::string, ValueType>. Mainly it does fewer allocations
-because the key is not a std::string.
-
-Replace the use of std::map<std::string, ValueType> with String Map.
-One specific case is the LVSymbolNames definitions.
-
-//===----------------------------------------------------------------------===//
-// Calculate unique offset for CodeView elements.
-//===----------------------------------------------------------------------===//
-In order to have the same logical functionality as the ELF Reader, such
-as:
-
-- find scopes contribution to debug info
-- sort by its physical location
-
-The logical elements must have an unique offset (similar like the DWARF
-DIE offset).
-
-//===----------------------------------------------------------------------===//
-// Move 'initializeFileAndStringTables' to the COFF Library.
-//===----------------------------------------------------------------------===//
-There is some code in the CodeView reader that was extracted/adapted
-from 'tools/llvm-readobj/COFFDumper.cpp' that can be moved to the COFF
-library.
-
-We had a similar case with code shared with llvm-pdbutil that was moved
-to the PDB library: https://reviews.llvm.org/D122226
-
-//===----------------------------------------------------------------------===//
-// Move 'getSymbolKindName'/'formatRegisterId' to the CodeView Library.
-//===----------------------------------------------------------------------===//
-There is some code in the CodeView reader that was extracted/adapted
-from 'lib/DebugInfo/CodeView/SymbolDumper.cpp' that can be used.
-
-//===----------------------------------------------------------------------===//
-// Use of std::unordered_set instead of std::set.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125784#inline-1221421
-
-Replace the std::set usage for DeducedScopes, UnresolvedScopes and
-IdentifiedNamespaces with std::unordered_set and get the benefit
-of the O(1) while inserting/searching, as the order is not important.
-
-//===----------------------------------------------------------------------===//
-// Optimize 'LVNamespaceDeduction::find' funtion.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125784#inline-1296195
-
-Optimize the 'find' method to use the proposed code:
-
- LVStringRefs::iterator Iter = std::find_if(Components.begin(), Components.end(),
- [](StringRef Name) {
- return IdentifiedNamespaces.find(Name) == IdentifiedNamespaces.end();
- });
- LVStringRefs::size_type FirstNonNamespace = std::distance(Components.begin(), Iter);
-
-//===----------------------------------------------------------------------===//
-// Move all the printing support to a common module.
-//===----------------------------------------------------------------------===//
-Factor out printing functionality from the logical elements into a
-common module.
-
-//===----------------------------------------------------------------------===//
-// Refactor 'LVBinaryReader::processLines'.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1246155
-https://reviews.llvm.org/D137156
-
-During the traversal of the debug information sections, we created the
-logical lines representing the disassembled instructions from the text
-section and the logical lines representing the line records from the
-debug line section. Using the ranges associated with the logical scopes,
-we will allocate those logical lines to their logical scopes.
-
-Consider the case when any of those lines become orphans, causing
-incorrect scope parent for disassembly or line records.
-
-//===----------------------------------------------------------------------===//
-// Add support for '-ffunction-sections'.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1295012
-
-Only linked executables are handled. It does not support relocatable
-files compiled with -ffunction-sections.
-
-//===----------------------------------------------------------------------===//
-// Add support for DWARF v5 .debug_names section.
-// Add support for CodeView public symbols stream.
-//===----------------------------------------------------------------------===//
-https://reviews.llvm.org/D125783#inline-1294142
-
-The ELF and CodeView readers use the public names information to create
-the instructions (LVLineAssembler). Instead of relying on DWARF section
-names (.debug_pubnames, .debug_names) and CodeView public symbol stream
-(S_PUB32), the readers collects the needed information while processing
-the debug information.
-
-If the object file supports the above section names and stream, use them
-to create the public names.
-
-//===----------------------------------------------------------------------===//
-// Add support for some extra DWARF locations.
-//===----------------------------------------------------------------------===//
-The following DWARF debug location operands are not supported:
-
-- DW_OP_const_type
-- DW_OP_entry_value
-- DW_OP_implicit_value
-
-//===----------------------------------------------------------------------===//
-// Add support for additional binary formats.
-//===----------------------------------------------------------------------===//
-- Extended COFF (XCOFF)
-
-//===----------------------------------------------------------------------===//
-// Add support for JSON or YAML.
-//===----------------------------------------------------------------------===//
-The logical view uses its own and non-standard free form text when
-displaying information on logical elements.
-
-//===----------------------------------------------------------------------===//
diff --git a/llvm/tools/llvm-dis/llvm-dis.cpp b/llvm/tools/llvm-dis/llvm-dis.cpp
index 8e443318dd7d..49154dc46c57 100644
--- a/llvm/tools/llvm-dis/llvm-dis.cpp
+++ b/llvm/tools/llvm-dis/llvm-dis.cpp
@@ -82,6 +82,8 @@ static cl::opt<bool> PrintThinLTOIndexOnly(
extern cl::opt<bool> WriteNewDbgInfoFormat;
+extern cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInfoFormat;
+
namespace {
static void printDebugLoc(const DebugLoc &DL, formatted_raw_ostream &OS) {
@@ -169,6 +171,10 @@ int main(int argc, char **argv) {
cl::HideUnrelatedOptions({&DisCategory, &getColorCategory()});
cl::ParseCommandLineOptions(argc, argv, "llvm .bc -> .ll disassembler\n");
+ // Load bitcode into the new debug info format by default.
+ if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET)
+ LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_TRUE;
+
LLVMContext Context;
Context.setDiagnosticHandler(
std::make_unique<LLVMDisDiagnosticHandler>(argv[0]));
diff --git a/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp b/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp
index 5c9848f3c688..498308e2edbe 100644
--- a/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp
+++ b/llvm/tools/llvm-exegesis/lib/BenchmarkRunner.cpp
@@ -278,59 +278,20 @@ private:
return FD;
}
- Error createSubProcessAndRunBenchmark(
- StringRef CounterName, SmallVectorImpl<int64_t> &CounterValues,
- ArrayRef<const char *> ValidationCounters,
- SmallVectorImpl<int64_t> &ValidationCounterValues) const {
- int PipeFiles[2];
- int PipeSuccessOrErr = socketpair(AF_UNIX, SOCK_DGRAM, 0, PipeFiles);
- if (PipeSuccessOrErr != 0) {
- return make_error<Failure>(
- "Failed to create a pipe for interprocess communication between "
- "llvm-exegesis and the benchmarking subprocess: " +
- Twine(strerror(errno)));
- }
-
- SubprocessMemory SPMemory;
- Error MemoryInitError = SPMemory.initializeSubprocessMemory(getpid());
- if (MemoryInitError)
- return MemoryInitError;
-
- Error AddMemDefError =
- SPMemory.addMemoryDefinition(Key.MemoryValues, getpid());
- if (AddMemDefError)
- return AddMemDefError;
-
- pid_t ParentOrChildPID = fork();
-
- if (ParentOrChildPID == -1) {
- return make_error<Failure>("Failed to create child process: " +
- Twine(strerror(errno)));
- }
-
- if (ParentOrChildPID == 0) {
- // We are in the child process, close the write end of the pipe.
- close(PipeFiles[1]);
- // Unregister handlers, signal handling is now handled through ptrace in
- // the host process.
- sys::unregisterHandlers();
- prepareAndRunBenchmark(PipeFiles[0], Key);
- // The child process terminates in the above function, so we should never
- // get to this point.
- llvm_unreachable("Child process didn't exit when expected.");
- }
-
+ Error
+ runParentProcess(pid_t ChildPID, int WriteFD, StringRef CounterName,
+ SmallVectorImpl<int64_t> &CounterValues,
+ ArrayRef<const char *> ValidationCounters,
+ SmallVectorImpl<int64_t> &ValidationCounterValues) const {
const ExegesisTarget &ET = State.getExegesisTarget();
- auto CounterOrError = ET.createCounter(
- CounterName, State, ValidationCounters, ParentOrChildPID);
+ auto CounterOrError =
+ ET.createCounter(CounterName, State, ValidationCounters, ChildPID);
if (!CounterOrError)
return CounterOrError.takeError();
pfm::CounterGroup *Counter = CounterOrError.get().get();
- close(PipeFiles[0]);
-
// Make sure to attach to the process (and wait for the sigstop to be
// delivered and for the process to continue) before we write to the counter
// file descriptor. Attaching to the process before writing to the socket
@@ -338,30 +299,30 @@ private:
// attach afterwards, the subprocess might exit before we get to the attach
// call due to effects like scheduler contention, introducing transient
// failures.
- if (ptrace(PTRACE_ATTACH, ParentOrChildPID, NULL, NULL) != 0)
+ if (ptrace(PTRACE_ATTACH, ChildPID, NULL, NULL) != 0)
return make_error<Failure>("Failed to attach to the child process: " +
Twine(strerror(errno)));
- if (wait(NULL) == -1) {
+ if (waitpid(ChildPID, NULL, 0) == -1) {
return make_error<Failure>(
"Failed to wait for child process to stop after attaching: " +
Twine(strerror(errno)));
}
- if (ptrace(PTRACE_CONT, ParentOrChildPID, NULL, NULL) != 0)
+ if (ptrace(PTRACE_CONT, ChildPID, NULL, NULL) != 0)
return make_error<Failure>(
"Failed to continue execution of the child process: " +
Twine(strerror(errno)));
int CounterFileDescriptor = Counter->getFileDescriptor();
Error SendError =
- sendFileDescriptorThroughSocket(PipeFiles[1], CounterFileDescriptor);
+ sendFileDescriptorThroughSocket(WriteFD, CounterFileDescriptor);
if (SendError)
return SendError;
int ChildStatus;
- if (wait(&ChildStatus) == -1) {
+ if (waitpid(ChildPID, &ChildStatus, 0) == -1) {
return make_error<Failure>(
"Waiting for the child process to complete failed: " +
Twine(strerror(errno)));
@@ -395,12 +356,25 @@ private:
// An error was encountered running the snippet, process it
siginfo_t ChildSignalInfo;
- if (ptrace(PTRACE_GETSIGINFO, ParentOrChildPID, NULL, &ChildSignalInfo) ==
- -1) {
+ if (ptrace(PTRACE_GETSIGINFO, ChildPID, NULL, &ChildSignalInfo) == -1) {
return make_error<Failure>("Getting signal info from the child failed: " +
Twine(strerror(errno)));
}
+ // Send SIGKILL rather than SIGTERM as the child process has no SIGTERM
+ // handlers to run, and calling SIGTERM would mean that ptrace will force
+ // it to block in the signal-delivery-stop for the SIGSEGV/other signals,
+ // and upon exit.
+ if (kill(ChildPID, SIGKILL) == -1)
+ return make_error<Failure>("Failed to kill child benchmarking proces: " +
+ Twine(strerror(errno)));
+
+ // Wait for the process to exit so that there are no zombie processes left
+ // around.
+ if (waitpid(ChildPID, NULL, 0) == -1)
+ return make_error<Failure>("Failed to wait for process to die: " +
+ Twine(strerror(errno)));
+
if (ChildSignalInfo.si_signo == SIGSEGV)
return make_error<SnippetSegmentationFault>(
reinterpret_cast<intptr_t>(ChildSignalInfo.si_addr));
@@ -408,6 +382,57 @@ private:
return make_error<SnippetSignal>(ChildSignalInfo.si_signo);
}
+ Error createSubProcessAndRunBenchmark(
+ StringRef CounterName, SmallVectorImpl<int64_t> &CounterValues,
+ ArrayRef<const char *> ValidationCounters,
+ SmallVectorImpl<int64_t> &ValidationCounterValues) const {
+ int PipeFiles[2];
+ int PipeSuccessOrErr = socketpair(AF_UNIX, SOCK_DGRAM, 0, PipeFiles);
+ if (PipeSuccessOrErr != 0) {
+ return make_error<Failure>(
+ "Failed to create a pipe for interprocess communication between "
+ "llvm-exegesis and the benchmarking subprocess: " +
+ Twine(strerror(errno)));
+ }
+
+ SubprocessMemory SPMemory;
+ Error MemoryInitError = SPMemory.initializeSubprocessMemory(getpid());
+ if (MemoryInitError)
+ return MemoryInitError;
+
+ Error AddMemDefError =
+ SPMemory.addMemoryDefinition(Key.MemoryValues, getpid());
+ if (AddMemDefError)
+ return AddMemDefError;
+
+ long ParentTID = SubprocessMemory::getCurrentTID();
+ pid_t ParentOrChildPID = fork();
+
+ if (ParentOrChildPID == -1) {
+ return make_error<Failure>("Failed to create child process: " +
+ Twine(strerror(errno)));
+ }
+
+ if (ParentOrChildPID == 0) {
+ // We are in the child process, close the write end of the pipe.
+ close(PipeFiles[1]);
+ // Unregister handlers, signal handling is now handled through ptrace in
+ // the host process.
+ sys::unregisterHandlers();
+ runChildSubprocess(PipeFiles[0], Key, ParentTID);
+ // The child process terminates in the above function, so we should never
+ // get to this point.
+ llvm_unreachable("Child process didn't exit when expected.");
+ }
+
+ // Close the read end of the pipe as we only need to write to the subprocess
+ // from the parent process.
+ close(PipeFiles[0]);
+ return runParentProcess(ParentOrChildPID, PipeFiles[1], CounterName,
+ CounterValues, ValidationCounters,
+ ValidationCounterValues);
+ }
+
void disableCoreDumps() const {
struct rlimit rlim;
@@ -415,8 +440,8 @@ private:
setrlimit(RLIMIT_CORE, &rlim);
}
- [[noreturn]] void prepareAndRunBenchmark(int Pipe,
- const BenchmarkKey &Key) const {
+ [[noreturn]] void runChildSubprocess(int Pipe, const BenchmarkKey &Key,
+ long ParentTID) const {
// Disable core dumps in the child process as otherwise everytime we
// encounter an execution failure like a segmentation fault, we will create
// a core dump. We report the information directly rather than require the
@@ -473,7 +498,7 @@ private:
Expected<int> AuxMemFDOrError =
SubprocessMemory::setupAuxiliaryMemoryInSubprocess(
- Key.MemoryValues, ParentPID, CounterFileDescriptor);
+ Key.MemoryValues, ParentPID, ParentTID, CounterFileDescriptor);
if (!AuxMemFDOrError)
exit(ChildProcessExitCodeE::AuxiliaryMemorySetupFailed);
diff --git a/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp b/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp
index a49fa077257d..0a947f6e206f 100644
--- a/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp
+++ b/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp
@@ -9,11 +9,13 @@
#include "SubprocessMemory.h"
#include "Error.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/FormatVariadic.h"
#include <cerrno>
#ifdef __linux__
#include <fcntl.h>
#include <sys/mman.h>
+#include <sys/syscall.h>
#include <unistd.h>
#endif
@@ -22,12 +24,21 @@ namespace exegesis {
#if defined(__linux__) && !defined(__ANDROID__)
+long SubprocessMemory::getCurrentTID() {
+ // We're using the raw syscall here rather than the gettid() function provided
+ // by most libcs for compatibility as gettid() was only added to glibc in
+ // version 2.30.
+ return syscall(SYS_gettid);
+}
+
Error SubprocessMemory::initializeSubprocessMemory(pid_t ProcessID) {
// Add the PID to the shared memory name so that if we're running multiple
// processes at the same time, they won't interfere with each other.
// This comes up particularly often when running the exegesis tests with
- // llvm-lit
- std::string AuxiliaryMemoryName = "/auxmem" + std::to_string(ProcessID);
+ // llvm-lit. Additionally add the TID so that downstream consumers
+ // using multiple threads don't run into conflicts.
+ std::string AuxiliaryMemoryName =
+ formatv("/{0}auxmem{1}", getCurrentTID(), ProcessID);
int AuxiliaryMemoryFD = shm_open(AuxiliaryMemoryName.c_str(),
O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (AuxiliaryMemoryFD == -1)
@@ -47,11 +58,15 @@ Error SubprocessMemory::addMemoryDefinition(
pid_t ProcessPID) {
SharedMemoryNames.reserve(MemoryDefinitions.size());
for (auto &[Name, MemVal] : MemoryDefinitions) {
- std::string SharedMemoryName = "/" + std::to_string(ProcessPID) + "memdef" +
- std::to_string(MemVal.Index);
+ std::string SharedMemoryName =
+ formatv("/{0}t{1}memdef{2}", ProcessPID, getCurrentTID(), MemVal.Index);
SharedMemoryNames.push_back(SharedMemoryName);
int SharedMemoryFD =
shm_open(SharedMemoryName.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ if (SharedMemoryFD == -1)
+ return make_error<Failure>(
+ "Failed to create shared memory object for memory definition: " +
+ Twine(strerror(errno)));
if (ftruncate(SharedMemoryFD, MemVal.SizeBytes) != 0) {
return make_error<Failure>("Truncating a memory definiton failed: " +
Twine(strerror(errno)));
@@ -82,13 +97,15 @@ Error SubprocessMemory::addMemoryDefinition(
Expected<int> SubprocessMemory::setupAuxiliaryMemoryInSubprocess(
std::unordered_map<std::string, MemoryValue> MemoryDefinitions,
- pid_t ParentPID, int CounterFileDescriptor) {
- std::string AuxiliaryMemoryName = "/auxmem" + std::to_string(ParentPID);
+ pid_t ParentPID, long ParentTID, int CounterFileDescriptor) {
+ std::string AuxiliaryMemoryName =
+ formatv("/{0}auxmem{1}", ParentTID, ParentPID);
int AuxiliaryMemoryFileDescriptor =
shm_open(AuxiliaryMemoryName.c_str(), O_RDWR, S_IRUSR | S_IWUSR);
if (AuxiliaryMemoryFileDescriptor == -1)
return make_error<Failure>(
- "Getting file descriptor for auxiliary memory failed");
+ "Getting file descriptor for auxiliary memory failed: " +
+ Twine(strerror(errno)));
// set up memory value file descriptors
int *AuxiliaryMemoryMapping =
(int *)mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -97,8 +114,8 @@ Expected<int> SubprocessMemory::setupAuxiliaryMemoryInSubprocess(
return make_error<Failure>("Mapping auxiliary memory failed");
AuxiliaryMemoryMapping[0] = CounterFileDescriptor;
for (auto &[Name, MemVal] : MemoryDefinitions) {
- std::string MemoryValueName = "/" + std::to_string(ParentPID) + "memdef" +
- std::to_string(MemVal.Index);
+ std::string MemoryValueName =
+ formatv("/{0}t{1}memdef{2}", ParentPID, ParentTID, MemVal.Index);
AuxiliaryMemoryMapping[AuxiliaryMemoryOffset + MemVal.Index] =
shm_open(MemoryValueName.c_str(), O_RDWR, S_IRUSR | S_IWUSR);
if (AuxiliaryMemoryMapping[AuxiliaryMemoryOffset + MemVal.Index] == -1)
@@ -133,7 +150,7 @@ Error SubprocessMemory::addMemoryDefinition(
Expected<int> SubprocessMemory::setupAuxiliaryMemoryInSubprocess(
std::unordered_map<std::string, MemoryValue> MemoryDefinitions,
- pid_t ParentPID, int CounterFileDescriptor) {
+ pid_t ParentPID, long ParentTID, int CounterFileDescriptor) {
return make_error<Failure>(
"setupAuxiliaryMemoryInSubprocess is only supported on Linux");
}
diff --git a/llvm/tools/llvm-exegesis/lib/SubprocessMemory.h b/llvm/tools/llvm-exegesis/lib/SubprocessMemory.h
index e20b50cdc811..572d1085d9cf 100644
--- a/llvm/tools/llvm-exegesis/lib/SubprocessMemory.h
+++ b/llvm/tools/llvm-exegesis/lib/SubprocessMemory.h
@@ -35,6 +35,9 @@ public:
static constexpr const size_t AuxiliaryMemoryOffset = 1;
static constexpr const size_t AuxiliaryMemorySize = 4096;
+ // Gets the thread ID for the calling thread.
+ static long getCurrentTID();
+
Error initializeSubprocessMemory(pid_t ProcessID);
// The following function sets up memory definitions. It creates shared
@@ -54,7 +57,7 @@ public:
// section.
static Expected<int> setupAuxiliaryMemoryInSubprocess(
std::unordered_map<std::string, MemoryValue> MemoryDefinitions,
- pid_t ParentPID, int CounterFileDescriptor);
+ pid_t ParentPID, long ParentTID, int CounterFileDescriptor);
~SubprocessMemory();
diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp
index 9e7f2c3ebac4..9049cb5e8580 100644
--- a/llvm/tools/llvm-link/llvm-link.cpp
+++ b/llvm/tools/llvm-link/llvm-link.cpp
@@ -136,6 +136,8 @@ static cl::opt<bool> TryUseNewDbgInfoFormat(
extern cl::opt<bool> UseNewDbgInfoFormat;
+extern cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInfoFormat;
+
static ExitOnError ExitOnErr;
// Read the specified bitcode file in and return it. This routine searches the
@@ -480,6 +482,10 @@ int main(int argc, char **argv) {
cl::HideUnrelatedOptions({&LinkCategory, &getColorCategory()});
cl::ParseCommandLineOptions(argc, argv, "llvm linker\n");
+ // Load bitcode into the new debug info format by default.
+ if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET)
+ LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_TRUE;
+
// RemoveDIs debug-info transition: tests may request that we /try/ to use the
// new debug-info format.
if (TryUseNewDbgInfoFormat) {
diff --git a/llvm/tools/llvm-lto/llvm-lto.cpp b/llvm/tools/llvm-lto/llvm-lto.cpp
index 7943d6952b82..3c452b650cee 100644
--- a/llvm/tools/llvm-lto/llvm-lto.cpp
+++ b/llvm/tools/llvm-lto/llvm-lto.cpp
@@ -270,6 +270,7 @@ static cl::opt<bool> TryUseNewDbgInfoFormat(
cl::init(false), cl::Hidden);
extern cl::opt<bool> UseNewDbgInfoFormat;
+extern cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInfoFormat;
namespace {
@@ -943,6 +944,9 @@ int main(int argc, char **argv) {
InitLLVM X(argc, argv);
cl::HideUnrelatedOptions({&LTOCategory, &getColorCategory()});
cl::ParseCommandLineOptions(argc, argv, "llvm LTO linker\n");
+ // Load bitcode into the new debug info format by default.
+ if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET)
+ LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_TRUE;
// RemoveDIs debug-info transition: tests may request that we /try/ to use the
// new debug-info format.
diff --git a/llvm/tools/llvm-lto2/llvm-lto2.cpp b/llvm/tools/llvm-lto2/llvm-lto2.cpp
index d5de4f6b1a27..f222d02bd7ce 100644
--- a/llvm/tools/llvm-lto2/llvm-lto2.cpp
+++ b/llvm/tools/llvm-lto2/llvm-lto2.cpp
@@ -193,6 +193,7 @@ static cl::opt<bool> TryUseNewDbgInfoFormat(
cl::init(false), cl::Hidden);
extern cl::opt<bool> UseNewDbgInfoFormat;
+extern cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInfoFormat;
static void check(Error E, std::string Msg) {
if (!E)
@@ -228,6 +229,9 @@ static int usage() {
static int run(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv, "Resolution-based LTO test harness");
+ // Load bitcode into the new debug info format by default.
+ if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET)
+ LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_TRUE;
// RemoveDIs debug-info transition: tests may request that we /try/ to use the
// new debug-info format.
diff --git a/llvm/tools/llvm-mc/llvm-mc.cpp b/llvm/tools/llvm-mc/llvm-mc.cpp
index 8eb53e440459..807071a7b9a1 100644
--- a/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -541,11 +541,6 @@ int main(int argc, char **argv) {
std::unique_ptr<MCAsmBackend> MAB(
TheTarget->createMCAsmBackend(*STI, *MRI, MCOptions));
auto FOut = std::make_unique<formatted_raw_ostream>(*OS);
- // FIXME: Workaround for bug in formatted_raw_ostream. Color escape codes
- // are (incorrectly) written directly to the unbuffered raw_ostream wrapped
- // by the formatted_raw_ostream.
- if (Action == AC_CDisassemble)
- FOut->SetUnbuffered();
Str.reset(
TheTarget->createAsmStreamer(Ctx, std::move(FOut), /*asmverbose*/ true,
/*useDwarfDirectory*/ true, IP,
diff --git a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
index a0c6415bf0e6..7269c51a08d6 100644
--- a/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
+++ b/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
@@ -978,6 +978,15 @@ objcopy::parseObjcopyOptions(ArrayRef<const char *> RawArgsArr,
addSymbolsFromFile(Config.SymbolsToKeep, DC.Alloc, Arg->getValue(),
SymbolMatchStyle, ErrorCallback))
return std::move(E);
+ for (auto *Arg : InputArgs.filtered(OBJCOPY_skip_symbol))
+ if (Error E = Config.SymbolsToSkip.addMatcher(NameOrPattern::create(
+ Arg->getValue(), SymbolMatchStyle, ErrorCallback)))
+ return std::move(E);
+ for (auto *Arg : InputArgs.filtered(OBJCOPY_skip_symbols))
+ if (Error E =
+ addSymbolsFromFile(Config.SymbolsToSkip, DC.Alloc, Arg->getValue(),
+ SymbolMatchStyle, ErrorCallback))
+ return std::move(E);
for (auto *Arg : InputArgs.filtered(OBJCOPY_add_symbol)) {
Expected<NewSymbolInfo> SymInfo = parseNewSymbolInfo(Arg->getValue());
if (!SymInfo)
diff --git a/llvm/tools/llvm-objcopy/ObjcopyOpts.td b/llvm/tools/llvm-objcopy/ObjcopyOpts.td
index 3c0e5cd475a3..be02616e8c68 100644
--- a/llvm/tools/llvm-objcopy/ObjcopyOpts.td
+++ b/llvm/tools/llvm-objcopy/ObjcopyOpts.td
@@ -206,6 +206,20 @@ defm keep_symbols
"be repeated to read symbols from many files">,
MetaVarName<"filename">;
+defm skip_symbol : Eq<"skip-symbol", "Do not change parameters of symbol <symbol> "
+ "when executing other options that can change the symbol's "
+ "name, binding or visibility">,
+ MetaVarName<"symbol">;
+
+defm skip_symbols
+ : Eq<"skip-symbols",
+ "Read a list of symbols from <filename> and run as if "
+ "--skip-symbol=<symbol> is set for each one. <filename> "
+ "contains one symbol per line and may contain comments beginning with "
+ "'#'. Leading and trailing whitespace is stripped from each line. May "
+ "be repeated to read symbols from many files">,
+ MetaVarName<"filename">;
+
defm dump_section
: Eq<"dump-section",
"Dump contents of section named <section> into file <file>">,
diff --git a/llvm/tools/llvm-objdump/llvm-objdump.cpp b/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 78cf67b1e630..9b65ea5a99e4 100644
--- a/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -2115,13 +2115,6 @@ disassembleObject(ObjectFile &Obj, const ObjectFile &DbgObj,
formatted_raw_ostream FOS(outs());
- // FIXME: Workaround for bug in formatted_raw_ostream. Color escape codes
- // are (incorrectly) written directly to the unbuffered raw_ostream
- // wrapped by the formatted_raw_ostream.
- if (DisassemblyColor == ColorOutput::Enable ||
- DisassemblyColor == ColorOutput::Auto)
- FOS.SetUnbuffered();
-
std::unordered_map<uint64_t, std::string> AllLabels;
std::unordered_map<uint64_t, std::vector<BBAddrMapLabel>> BBAddrMapLabels;
if (SymbolizeOperands) {
diff --git a/llvm/tools/llvm-profdata/llvm-profdata.cpp b/llvm/tools/llvm-profdata/llvm-profdata.cpp
index 3a7bd061d3d2..e8ee3c238194 100644
--- a/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -300,6 +300,13 @@ cl::opt<bool> DoWritePrevVersion(
cl::desc("Write the previous version of indexed format, to enable "
"some forward compatibility."));
+cl::opt<memprof::IndexedVersion> MemProfVersionRequested(
+ "memprof-version", cl::Hidden, cl::sub(MergeSubcommand),
+ cl::desc("Specify the version of the memprof format to use"),
+ cl::init(memprof::Version0),
+ cl::values(clEnumValN(memprof::Version0, "0", "version 0"),
+ clEnumValN(memprof::Version1, "1", "version 1")));
+
// Options specific to overlap subcommand.
cl::opt<std::string> BaseFilename(cl::Positional, cl::Required,
cl::desc("<base profile file>"),
@@ -588,7 +595,8 @@ struct WriterContext {
WriterContext(bool IsSparse, std::mutex &ErrLock,
SmallSet<instrprof_error, 4> &WriterErrorCodes,
uint64_t ReservoirSize = 0, uint64_t MaxTraceLength = 0)
- : Writer(IsSparse, ReservoirSize, MaxTraceLength, DoWritePrevVersion),
+ : Writer(IsSparse, ReservoirSize, MaxTraceLength, DoWritePrevVersion,
+ MemProfVersionRequested),
ErrLock(ErrLock), WriterErrorCodes(WriterErrorCodes) {}
};
diff --git a/llvm/tools/llvm-readobj/DwarfCFIEHPrinter.h b/llvm/tools/llvm-readobj/DwarfCFIEHPrinter.h
index 2e89463e68d5..94a44e3afccb 100644
--- a/llvm/tools/llvm-readobj/DwarfCFIEHPrinter.h
+++ b/llvm/tools/llvm-readobj/DwarfCFIEHPrinter.h
@@ -113,7 +113,7 @@ void PrinterContext<ELFT>::printEHFrameHdr(const Elf_Phdr *EHFramePHdr) const {
if (!Content)
reportError(Content.takeError(), ObjF.getFileName());
- DataExtractor DE(*Content, ELFT::TargetEndianness == llvm::endianness::little,
+ DataExtractor DE(*Content, ELFT::Endianness == llvm::endianness::little,
ELFT::Is64Bits ? 8 : 4);
DictScope D(W, "Header");
@@ -186,10 +186,9 @@ void PrinterContext<ELFT>::printEHFrame(const Elf_Shdr *EHFrameShdr) const {
// Construct DWARFDataExtractor to handle relocations ("PC Begin" fields).
std::unique_ptr<DWARFContext> DICtx = DWARFContext::create(
ObjF, DWARFContext::ProcessDebugRelocations::Process, nullptr);
- DWARFDataExtractor DE(DICtx->getDWARFObj(),
- DICtx->getDWARFObj().getEHFrameSection(),
- ELFT::TargetEndianness == llvm::endianness::little,
- ELFT::Is64Bits ? 8 : 4);
+ DWARFDataExtractor DE(
+ DICtx->getDWARFObj(), DICtx->getDWARFObj().getEHFrameSection(),
+ ELFT::Endianness == llvm::endianness::little, ELFT::Is64Bits ? 8 : 4);
DWARFDebugFrame EHFrame(Triple::ArchType(ObjF.getArch()), /*IsEH=*/true,
/*EHFrameAddress=*/Address);
if (Error E = EHFrame.parse(DE))
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index d1c05f437042..4b406ef12aec 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -74,6 +74,7 @@
using namespace llvm;
using namespace llvm::object;
+using namespace llvm::support;
using namespace ELF;
#define LLVM_READOBJ_ENUM_CASE(ns, enum) \
@@ -3419,13 +3420,13 @@ template <class ELFT> void ELFDumper<ELFT>::printStackMap() const {
return;
}
- if (Error E = StackMapParser<ELFT::TargetEndianness>::validateHeader(
- *ContentOrErr)) {
+ if (Error E =
+ StackMapParser<ELFT::Endianness>::validateHeader(*ContentOrErr)) {
Warn(std::move(E));
return;
}
- prettyPrintStackMap(W, StackMapParser<ELFT::TargetEndianness>(*ContentOrErr));
+ prettyPrintStackMap(W, StackMapParser<ELFT::Endianness>(*ContentOrErr));
}
template <class ELFT>
@@ -5145,7 +5146,7 @@ static std::string getGNUProperty(uint32_t Type, uint32_t DataSize,
OS << format("<corrupt length: 0x%x>", DataSize);
return OS.str();
}
- PrData = support::endian::read32<ELFT::TargetEndianness>(Data.data());
+ PrData = endian::read32<ELFT::Endianness>(Data.data());
if (PrData == 0) {
OS << "<None>";
return OS.str();
@@ -5169,7 +5170,7 @@ static std::string getGNUProperty(uint32_t Type, uint32_t DataSize,
OS << format("<corrupt length: 0x%x>", DataSize);
return OS.str();
}
- PrData = support::endian::read32<ELFT::TargetEndianness>(Data.data());
+ PrData = endian::read32<ELFT::Endianness>(Data.data());
if (PrData == 0) {
OS << "<None>";
return OS.str();
@@ -5195,7 +5196,7 @@ static std::string getGNUProperty(uint32_t Type, uint32_t DataSize,
OS << format("<corrupt length: 0x%x>", DataSize);
return OS.str();
}
- PrData = support::endian::read32<ELFT::TargetEndianness>(Data.data());
+ PrData = endian::read32<ELFT::Endianness>(Data.data());
if (PrData == 0) {
OS << "<None>";
return OS.str();
@@ -5374,10 +5375,8 @@ static bool printAArch64Note(raw_ostream &OS, uint32_t NoteType,
return false;
}
- uint64_t Platform =
- support::endian::read64<ELFT::TargetEndianness>(Desc.data() + 0);
- uint64_t Version =
- support::endian::read64<ELFT::TargetEndianness>(Desc.data() + 8);
+ uint64_t Platform = endian::read64<ELFT::Endianness>(Desc.data() + 0);
+ uint64_t Version = endian::read64<ELFT::Endianness>(Desc.data() + 8);
OS << format("platform 0x%" PRIx64 ", version 0x%" PRIx64, Platform, Version);
if (Desc.size() > 16)
@@ -5457,16 +5456,14 @@ getFreeBSDNote(uint32_t NoteType, ArrayRef<uint8_t> Desc, bool IsCore) {
case ELF::NT_FREEBSD_ABI_TAG:
if (Desc.size() != 4)
return std::nullopt;
- return FreeBSDNote{
- "ABI tag",
- utostr(support::endian::read32<ELFT::TargetEndianness>(Desc.data()))};
+ return FreeBSDNote{"ABI tag",
+ utostr(endian::read32<ELFT::Endianness>(Desc.data()))};
case ELF::NT_FREEBSD_ARCH_TAG:
return FreeBSDNote{"Arch tag", toStringRef(Desc).str()};
case ELF::NT_FREEBSD_FEATURE_CTL: {
if (Desc.size() != 4)
return std::nullopt;
- unsigned Value =
- support::endian::read32<ELFT::TargetEndianness>(Desc.data());
+ unsigned Value = endian::read32<ELFT::Endianness>(Desc.data());
std::string FlagsStr;
raw_string_ostream OS(FlagsStr);
printFlags(Value, ArrayRef(FreeBSDFeatureCtlFlags), OS);
@@ -6053,7 +6050,7 @@ template <class ELFT> void GNUELFDumper<ELFT>::printNotes() {
} else if (Name == "CORE") {
if (Type == ELF::NT_FILE) {
DataExtractor DescExtractor(
- Descriptor, ELFT::TargetEndianness == llvm::endianness::little,
+ Descriptor, ELFT::Endianness == llvm::endianness::little,
sizeof(Elf_Addr));
if (Expected<CoreNote> NoteOrErr = readCoreNote(DescExtractor)) {
printCoreNote<ELFT>(OS, *NoteOrErr);
@@ -7714,10 +7711,8 @@ static bool printAarch64NoteLLVMStyle(uint32_t NoteType, ArrayRef<uint8_t> Desc,
if (Desc.size() < 16)
return false;
- uint64_t platform =
- support::endian::read64<ELFT::TargetEndianness>(Desc.data() + 0);
- uint64_t version =
- support::endian::read64<ELFT::TargetEndianness>(Desc.data() + 8);
+ uint64_t platform = endian::read64<ELFT::Endianness>(Desc.data() + 0);
+ uint64_t version = endian::read64<ELFT::Endianness>(Desc.data() + 8);
W.printNumber("Platform", platform);
W.printNumber("Version", version);
@@ -7852,7 +7847,7 @@ template <class ELFT> void LLVMELFDumper<ELFT>::printNotes() {
} else if (Name == "CORE") {
if (Type == ELF::NT_FILE) {
DataExtractor DescExtractor(
- Descriptor, ELFT::TargetEndianness == llvm::endianness::little,
+ Descriptor, ELFT::Endianness == llvm::endianness::little,
sizeof(Elf_Addr));
if (Expected<CoreNote> N = readCoreNote(DescExtractor)) {
printCoreNoteLLVMStyle(*N, W);
diff --git a/llvm/tools/obj2yaml/dxcontainer2yaml.cpp b/llvm/tools/obj2yaml/dxcontainer2yaml.cpp
index 69d9b9a2f784..ec4f5c74498f 100644
--- a/llvm/tools/obj2yaml/dxcontainer2yaml.cpp
+++ b/llvm/tools/obj2yaml/dxcontainer2yaml.cpp
@@ -99,6 +99,9 @@ dumpDXContainer(MemoryBufferRef Source) {
else if (const auto *P =
std::get_if<dxbc::PSV::v2::RuntimeInfo>(&PSVInfo->getInfo()))
NewPart.Info = DXContainerYAML::PSVInfo(P);
+ else if (const auto *P =
+ std::get_if<dxbc::PSV::v3::RuntimeInfo>(&PSVInfo->getInfo()))
+ NewPart.Info = DXContainerYAML::PSVInfo(P, PSVInfo->getStringTable());
NewPart.Info->ResourceStride = PSVInfo->getResourceStride();
for (auto Res : PSVInfo->getResources())
NewPart.Info->Resources.push_back(Res);
diff --git a/llvm/tools/spirv-tools/CMakeLists.txt b/llvm/tools/spirv-tools/CMakeLists.txt
index f73dcadd9f86..c0d4556de4c6 100644
--- a/llvm/tools/spirv-tools/CMakeLists.txt
+++ b/llvm/tools/spirv-tools/CMakeLists.txt
@@ -49,7 +49,7 @@ if (SPIRV_DIS)
COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${SPIRV_DIS}" "${LLVM_RUNTIME_OUTPUT_INTDIR}/spirv-dis")
else ()
add_custom_target(spirv-dis
- COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${BINARY_DIR}/tools/spirv-dis" "${LLVM_RUNTIME_OUTPUT_INTDIR}/spirv-dis"
+ COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${BINARY_DIR}/tools/spirv-dis${CMAKE_EXECUTABLE_SUFFIX}" "${LLVM_RUNTIME_OUTPUT_INTDIR}/spirv-dis${CMAKE_EXECUTABLE_SUFFIX}"
DEPENDS SPIRVTools
)
endif ()
@@ -59,7 +59,7 @@ if (SPIRV_VAL)
COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${SPIRV_VAL}" "${LLVM_RUNTIME_OUTPUT_INTDIR}/spirv-val")
else ()
add_custom_target(spirv-val
- COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${BINARY_DIR}/tools/spirv-val" "${LLVM_RUNTIME_OUTPUT_INTDIR}/spirv-val"
+ COMMAND ${CMAKE_COMMAND} -E ${LLVM_LINK_OR_COPY} "${BINARY_DIR}/tools/spirv-val${CMAKE_EXECUTABLE_SUFFIX}" "${LLVM_RUNTIME_OUTPUT_INTDIR}/spirv-val${CMAKE_EXECUTABLE_SUFFIX}"
DEPENDS SPIRVTools
)
endif ()
diff --git a/llvm/tools/verify-uselistorder/verify-uselistorder.cpp b/llvm/tools/verify-uselistorder/verify-uselistorder.cpp
index d929ae09958a..cb07dede1d13 100644
--- a/llvm/tools/verify-uselistorder/verify-uselistorder.cpp
+++ b/llvm/tools/verify-uselistorder/verify-uselistorder.cpp
@@ -68,6 +68,8 @@ static cl::opt<unsigned>
cl::desc("Number of times to shuffle and verify use-lists"),
cl::init(1), cl::cat(Cat));
+extern cl::opt<cl::boolOrDefault> LoadBitcodeIntoNewDbgInfoFormat;
+
namespace {
struct TempFile {
@@ -169,8 +171,7 @@ std::unique_ptr<Module> TempFile::readBitcode(LLVMContext &Context) const {
// verify-uselistoder currently only supports old-style debug info mode.
// FIXME: Update mapping code for RemoveDIs.
- assert(!ModuleOr.get()->IsNewDbgInfoFormat &&
- "Unexpectedly in new debug info mode");
+ ModuleOr.get()->setIsNewDbgInfoFormat(false);
return std::move(ModuleOr.get());
}
@@ -182,7 +183,7 @@ std::unique_ptr<Module> TempFile::readAssembly(LLVMContext &Context) const {
Err.print("verify-uselistorder", errs());
// verify-uselistoder currently only supports old-style debug info mode.
// FIXME: Update mapping code for RemoveDIs.
- assert(!M->IsNewDbgInfoFormat && "Unexpectedly in new debug info mode");
+ M->setIsNewDbgInfoFormat(false);
return M;
}
@@ -544,6 +545,10 @@ int main(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv,
"llvm tool to verify use-list order\n");
+ // Do not load bitcode into the new debug info format by default.
+ if (LoadBitcodeIntoNewDbgInfoFormat == cl::boolOrDefault::BOU_UNSET)
+ LoadBitcodeIntoNewDbgInfoFormat = cl::boolOrDefault::BOU_FALSE;
+
LLVMContext Context;
SMDiagnostic Err;
@@ -551,7 +556,7 @@ int main(int argc, char **argv) {
std::unique_ptr<Module> M = parseIRFile(InputFilename, Err, Context);
// verify-uselistoder currently only supports old-style debug info mode.
// FIXME: Update mapping code for RemoveDIs.
- assert(!M->IsNewDbgInfoFormat && "Unexpectedly in new debug info mode");
+ M->setIsNewDbgInfoFormat(false);
if (!M.get()) {
Err.print(argv[0], errs());
diff --git a/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp b/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp
index 116099eff14a..822707a1f4ed 100644
--- a/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/CSETest.cpp
@@ -233,4 +233,216 @@ TEST_F(AArch64GISelMITest, TestConstantFoldCTL) {
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
+TEST_F(AArch64GISelMITest, TestConstantFoldCTT) {
+ setUp();
+ if (!TM)
+ GTEST_SKIP();
+
+ LLT s32 = LLT::scalar(32);
+
+ GISelCSEInfo CSEInfo;
+ CSEInfo.setCSEConfig(std::make_unique<CSEConfigConstantOnly>());
+ CSEInfo.analyze(*MF);
+ B.setCSEInfo(&CSEInfo);
+ CSEMIRBuilder CSEB(B.getState());
+ auto Cst8 = CSEB.buildConstant(s32, 8);
+ auto *CttzDef = &*CSEB.buildCTTZ(s32, Cst8);
+ EXPECT_TRUE(CttzDef->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(CttzDef->getOperand(1).getCImm()->getZExtValue() == 3);
+
+ // Test vector.
+ auto Cst16 = CSEB.buildConstant(s32, 16);
+ auto Cst32 = CSEB.buildConstant(s32, 32);
+ auto Cst64 = CSEB.buildConstant(s32, 64);
+ LLT VecTy = LLT::fixed_vector(4, s32);
+ auto BV = CSEB.buildBuildVector(VecTy, {Cst8.getReg(0), Cst16.getReg(0),
+ Cst32.getReg(0), Cst64.getReg(0)});
+ CSEB.buildCTTZ(VecTy, BV);
+
+ auto CheckStr = R"(
+ ; CHECK: [[CST8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK: [[CST3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK: [[CST16:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK: [[CST32:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; CHECK: [[CST64:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[BV1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[CST8]]:_(s32), [[CST16]]:_(s32), [[CST32]]:_(s32), [[CST64]]:_(s32)
+ ; CHECK: [[CST27:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+ ; CHECK: [[CST26:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+ ; CHECK: [[CST25:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+ ; CHECK: [[BV2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[CST3]]:_(s32), [[CST27]]:_(s32), [[CST26]]:_(s32), [[CST25]]:_(s32)
+ )";
+
+ EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
+}
+
+TEST_F(AArch64GISelMITest, TestConstantFoldICMP) {
+ setUp();
+ if (!TM)
+ GTEST_SKIP();
+
+ LLT s32 = LLT::scalar(32);
+ LLT s1 = LLT::scalar(1);
+
+ GISelCSEInfo CSEInfo;
+ CSEInfo.setCSEConfig(std::make_unique<CSEConfigConstantOnly>());
+ CSEInfo.analyze(*MF);
+ B.setCSEInfo(&CSEInfo);
+ CSEMIRBuilder CSEB(B.getState());
+
+ auto One = CSEB.buildConstant(s32, 1);
+ auto Two = CSEB.buildConstant(s32, 2);
+ auto MinusOne = CSEB.buildConstant(s32, -1);
+ auto MinusTwo = CSEB.buildConstant(s32, -2);
+
+ // ICMP_EQ
+ {
+ auto I = CSEB.buildICmp(CmpInst::Predicate::ICMP_EQ, s1, One, One);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_NE
+ {
+ auto I = CSEB.buildICmp(CmpInst::Predicate::ICMP_NE, s1, One, Two);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_UGT
+ {
+ auto I = CSEB.buildICmp(CmpInst::Predicate::ICMP_UGT, s1, Two, One);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_UGE
+ {
+ auto I = CSEB.buildICmp(CmpInst::Predicate::ICMP_UGE, s1, One, One);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_ULT
+ {
+ auto I = CSEB.buildICmp(CmpInst::Predicate::ICMP_ULT, s1, One, Two);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_ULE
+ {
+ auto I = CSEB.buildICmp(CmpInst::Predicate::ICMP_ULE, s1, Two, Two);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_SGT
+ {
+ auto I =
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SGT, s1, MinusOne, MinusTwo);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_SGE
+ {
+ auto I =
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SGE, s1, MinusOne, MinusOne);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_SLT
+ {
+ auto I =
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SLT, s1, MinusTwo, MinusOne);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ // ICMP_SLE
+ {
+ auto I =
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SLE, s1, MinusTwo, MinusOne);
+ EXPECT_TRUE(I->getOpcode() == TargetOpcode::G_CONSTANT);
+ EXPECT_TRUE(I->getOperand(1).getCImm()->getZExtValue());
+ }
+
+ LLT VecTy = LLT::fixed_vector(2, s32);
+ LLT DstTy = LLT::fixed_vector(2, s1);
+ auto Three = CSEB.buildConstant(s32, 3);
+ auto MinusThree = CSEB.buildConstant(s32, -3);
+ auto OneOne = CSEB.buildBuildVector(VecTy, {One.getReg(0), One.getReg(0)});
+ auto OneTwo = CSEB.buildBuildVector(VecTy, {One.getReg(0), Two.getReg(0)});
+ auto TwoThree =
+ CSEB.buildBuildVector(VecTy, {Two.getReg(0), Three.getReg(0)});
+ auto MinusOneOne =
+ CSEB.buildBuildVector(VecTy, {MinusOne.getReg(0), MinusOne.getReg(0)});
+ auto MinusOneTwo =
+ CSEB.buildBuildVector(VecTy, {MinusOne.getReg(0), MinusTwo.getReg(0)});
+ auto MinusTwoThree =
+ CSEB.buildBuildVector(VecTy, {MinusTwo.getReg(0), MinusThree.getReg(0)});
+
+ // ICMP_EQ
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, OneOne, OneOne);
+
+ // ICMP_NE
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_NE, DstTy, OneOne, OneTwo);
+
+ // ICMP_UGT
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, TwoThree, OneTwo);
+
+ // ICMP_UGE
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_UGE, DstTy, OneTwo, OneOne);
+
+ // ICMP_ULT
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, OneOne, OneTwo);
+
+ // ICMP_ULE
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_ULE, DstTy, OneTwo, OneOne);
+
+ // ICMP_SGT
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SGT, DstTy, MinusOneTwo,
+ MinusTwoThree);
+
+ // ICMP_SGE
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SGE, DstTy, MinusOneTwo, MinusOneOne);
+
+ // ICMP_SLT
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SLT, DstTy, MinusTwoThree,
+ MinusOneTwo);
+
+ // ICMP_SLE
+ CSEB.buildICmp(CmpInst::Predicate::ICMP_SLE, DstTy, MinusOneTwo, MinusOneOne);
+
+ auto CheckStr = R"(
+ ; CHECK: [[One:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[Two:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+ ; CHECK: [[MinusOne:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK: [[MinusTwo:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2
+ ; CHECK: [[True:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; CHECK: [[Three:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+ ; CHECK: [[MinusThree:%[0-9]+]]:_(s32) = G_CONSTANT i32 -3
+ ; CHECK: {{%[0-9]+}}:_(<2 x s32>) = G_BUILD_VECTOR [[One]]:_(s32), [[One]]:_(s32)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s32>) = G_BUILD_VECTOR [[One]]:_(s32), [[Two]]:_(s32)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s32>) = G_BUILD_VECTOR [[Two]]:_(s32), [[Three]]:_(s32)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s32>) = G_BUILD_VECTOR [[MinusOne]]:_(s32), [[MinusOne]]:_(s32)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s32>) = G_BUILD_VECTOR [[MinusOne]]:_(s32), [[MinusTwo]]:_(s32)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s32>) = G_BUILD_VECTOR [[MinusTwo]]:_(s32), [[MinusThree]]:_(s32)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[True]]:_(s1)
+ ; CHECK: [[False:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[False]]:_(s1), [[True]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[True]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[True]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[False]]:_(s1), [[True]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[False]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[True]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[False]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[True]]:_(s1)
+ ; CHECK: {{%[0-9]+}}:_(<2 x s1>) = G_BUILD_VECTOR [[True]]:_(s1), [[True]]:_(s1)
+ )";
+
+ EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
+}
+
} // namespace
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
index f3659bc69563..ef80eed8d180 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
@@ -1100,6 +1100,25 @@ TEST_F(AMDGPUGISelMITest, TestIsKnownToBeAPowerOfTwo) {
EXPECT_TRUE(isKnownToBeAPowerOfTwo(CopyOrPow2, *MRI, &KB));
}
+static void AddRangeMetadata(LLVMContext &Context, MachineInstr *Load) {
+ IntegerType *Int8Ty = Type::getInt8Ty(Context);
+
+ // Value must be in [0, 2)
+ Metadata *LowAndHigh[] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int8Ty, 0)),
+ ConstantAsMetadata::get(ConstantInt::get(Int8Ty, 2))};
+ auto NewMDNode = MDNode::get(Context, LowAndHigh);
+ const MachineMemOperand *OldMMO = *Load->memoperands_begin();
+ MachineMemOperand *NewMMO =
+ Load->getParent()->getParent()->getMachineMemOperand(
+ OldMMO->getPointerInfo(), OldMMO->getFlags(), OldMMO->getMemoryType(),
+ OldMMO->getAlign(), OldMMO->getAAInfo(), NewMDNode);
+ MachineIRBuilder MIB(*Load);
+ MIB.buildLoadInstr(Load->getOpcode(), Load->getOperand(0),
+ Load->getOperand(1), *NewMMO);
+ Load->eraseFromParent();
+}
+
TEST_F(AArch64GISelMITest, TestMetadata) {
StringRef MIRString = " %imp:_(p0) = G_IMPLICIT_DEF\n"
" %load:_(s8) = G_LOAD %imp(p0) :: (load (s8))\n"
@@ -1120,20 +1139,7 @@ TEST_F(AArch64GISelMITest, TestMetadata) {
MachineInstr *And = MRI->getVRegDef(SrcReg);
MachineInstr *Ext = MRI->getVRegDef(And->getOperand(1).getReg());
MachineInstr *Load = MRI->getVRegDef(Ext->getOperand(1).getReg());
- IntegerType *Int8Ty = Type::getInt8Ty(Context);
-
- // Value must be in [0, 2)
- Metadata *LowAndHigh[] = {
- ConstantAsMetadata::get(ConstantInt::get(Int8Ty, 0)),
- ConstantAsMetadata::get(ConstantInt::get(Int8Ty, 2))};
- auto NewMDNode = MDNode::get(Context, LowAndHigh);
- const MachineMemOperand *OldMMO = *Load->memoperands_begin();
- MachineMemOperand NewMMO(OldMMO->getPointerInfo(), OldMMO->getFlags(),
- OldMMO->getSizeInBits(), OldMMO->getAlign(),
- OldMMO->getAAInfo(), NewMDNode);
- MachineIRBuilder MIB(*Load);
- MIB.buildLoad(Load->getOperand(0), Load->getOperand(1), NewMMO);
- Load->eraseFromParent();
+ AddRangeMetadata(Context, Load);
GISelKnownBits Info(*MF);
KnownBits Res = Info.getKnownBits(And->getOperand(1).getReg());
@@ -1148,6 +1154,66 @@ TEST_F(AArch64GISelMITest, TestMetadata) {
EXPECT_EQ(Mask.getZExtValue(), Res.Zero.getZExtValue());
}
+TEST_F(AArch64GISelMITest, TestMetadataExt) {
+ StringRef MIRString = " %imp:_(p0) = G_IMPLICIT_DEF\n"
+ " %load:_(s32) = G_LOAD %imp(p0) :: (load (s8))\n"
+ " %copy:_(s32) = COPY %load(s32)\n";
+ setUp(MIRString);
+ if (!TM)
+ GTEST_SKIP();
+
+ Register CopyReg = Copies[Copies.size() - 1];
+ MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+ Register SrcReg = FinalCopy->getOperand(1).getReg();
+ MachineInstr *Load = MRI->getVRegDef(SrcReg);
+ AddRangeMetadata(Context, Load);
+
+ GISelKnownBits Info(*MF);
+ KnownBits Res = Info.getKnownBits(SrcReg);
+ EXPECT_TRUE(Res.One.isZero());
+ EXPECT_EQ(Res.Zero.getZExtValue(), 0xfeu);
+}
+
+TEST_F(AArch64GISelMITest, TestMetadataZExt) {
+ StringRef MIRString = " %imp:_(p0) = G_IMPLICIT_DEF\n"
+ " %load:_(s32) = G_ZEXTLOAD %imp(p0) :: (load (s8))\n"
+ " %copy:_(s32) = COPY %load(s32)\n";
+ setUp(MIRString);
+ if (!TM)
+ GTEST_SKIP();
+
+ Register CopyReg = Copies[Copies.size() - 1];
+ MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+ Register SrcReg = FinalCopy->getOperand(1).getReg();
+ MachineInstr *Load = MRI->getVRegDef(SrcReg);
+ AddRangeMetadata(Context, Load);
+
+ GISelKnownBits Info(*MF);
+ KnownBits Res = Info.getKnownBits(SrcReg);
+ EXPECT_TRUE(Res.One.isZero());
+ EXPECT_EQ(Res.Zero.getZExtValue(), 0xfffffffe);
+}
+
+TEST_F(AArch64GISelMITest, TestMetadataSExt) {
+ StringRef MIRString = " %imp:_(p0) = G_IMPLICIT_DEF\n"
+ " %load:_(s32) = G_SEXTLOAD %imp(p0) :: (load (s8))\n"
+ " %copy:_(s32) = COPY %load(s32)\n";
+ setUp(MIRString);
+ if (!TM)
+ GTEST_SKIP();
+
+ Register CopyReg = Copies[Copies.size() - 1];
+ MachineInstr *FinalCopy = MRI->getVRegDef(CopyReg);
+ Register SrcReg = FinalCopy->getOperand(1).getReg();
+ MachineInstr *Load = MRI->getVRegDef(SrcReg);
+ AddRangeMetadata(Context, Load);
+
+ GISelKnownBits Info(*MF);
+ KnownBits Res = Info.getKnownBits(SrcReg);
+ EXPECT_TRUE(Res.One.isZero());
+ EXPECT_EQ(Res.Zero.getZExtValue(), 0xfffffffe);
+}
+
TEST_F(AArch64GISelMITest, TestKnownBitsExt) {
StringRef MIRString = " %c1:_(s16) = G_CONSTANT i16 1\n"
" %x:_(s16) = G_IMPLICIT_DEF\n"
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
index ab180cd1cc27..dd6edd35a846 100644
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -1008,7 +1008,7 @@ TEST_F(AArch64GISelMITest, TestVectorMetadata) {
auto *NewMDNode = MDNode::get(Context, LowAndHigh);
const MachineMemOperand *OldMMO = *Load->memoperands_begin();
MachineMemOperand NewMMO(OldMMO->getPointerInfo(), OldMMO->getFlags(),
- OldMMO->getSizeInBits(), OldMMO->getAlign(),
+ OldMMO->getMemoryType(), OldMMO->getAlign(),
OldMMO->getAAInfo(), NewMDNode);
MachineIRBuilder MIB(*Load);
MIB.buildLoad(Load->getOperand(0), Load->getOperand(1), NewMMO);
diff --git a/llvm/unittests/CodeGen/MFCommon.inc b/llvm/unittests/CodeGen/MFCommon.inc
index 7de7eabdd1f6..1997e8052297 100644
--- a/llvm/unittests/CodeGen/MFCommon.inc
+++ b/llvm/unittests/CodeGen/MFCommon.inc
@@ -23,9 +23,10 @@ class BogusRegisterInfo : public TargetRegisterInfo {
public:
BogusRegisterInfo()
: TargetRegisterInfo(nullptr, BogusRegisterClasses, BogusRegisterClasses,
- nullptr, nullptr, LaneBitmask(~0u), nullptr, nullptr) {
+ nullptr, nullptr, nullptr, LaneBitmask(~0u), nullptr,
+ nullptr) {
InitMCRegisterInfo(nullptr, 0, 0, 0, nullptr, 0, nullptr, 0, nullptr,
- nullptr, nullptr, nullptr, nullptr, 0, nullptr, nullptr);
+ nullptr, nullptr, nullptr, nullptr, 0, nullptr);
}
const MCPhysReg *
diff --git a/llvm/unittests/IR/ConstantRangeTest.cpp b/llvm/unittests/IR/ConstantRangeTest.cpp
index 34a162a5514e..8ec120d70e99 100644
--- a/llvm/unittests/IR/ConstantRangeTest.cpp
+++ b/llvm/unittests/IR/ConstantRangeTest.cpp
@@ -2479,6 +2479,24 @@ TEST_F(ConstantRangeTest, castOps) {
ConstantRange IntToPtr = A.castOp(Instruction::IntToPtr, 64);
EXPECT_EQ(64u, IntToPtr.getBitWidth());
EXPECT_TRUE(IntToPtr.isFullSet());
+
+ ConstantRange UIToFP = A.castOp(Instruction::UIToFP, 16);
+ EXPECT_EQ(16u, UIToFP.getBitWidth());
+ EXPECT_TRUE(UIToFP.isFullSet());
+
+ ConstantRange UIToFP2 = A.castOp(Instruction::UIToFP, 64);
+ ConstantRange B(APInt(64, 0), APInt(64, 65536));
+ EXPECT_EQ(64u, UIToFP2.getBitWidth());
+ EXPECT_EQ(B, UIToFP2);
+
+ ConstantRange SIToFP = A.castOp(Instruction::SIToFP, 16);
+ EXPECT_EQ(16u, SIToFP.getBitWidth());
+ EXPECT_TRUE(SIToFP.isFullSet());
+
+ ConstantRange SIToFP2 = A.castOp(Instruction::SIToFP, 64);
+ ConstantRange C(APInt(64, -32768), APInt(64, 32768));
+ EXPECT_EQ(64u, SIToFP2.getBitWidth());
+ EXPECT_EQ(C, SIToFP2);
}
TEST_F(ConstantRangeTest, binaryAnd) {
diff --git a/llvm/unittests/IR/PatternMatch.cpp b/llvm/unittests/IR/PatternMatch.cpp
index 533a30bfba45..4d0c2e4220fe 100644
--- a/llvm/unittests/IR/PatternMatch.cpp
+++ b/llvm/unittests/IR/PatternMatch.cpp
@@ -494,6 +494,45 @@ TEST_F(PatternMatchTest, Unless) {
EXPECT_FALSE(m_Unless(m_c_Add(m_Zero(), m_One())).match(X));
}
+TEST_F(PatternMatchTest, BitWise) {
+ Value *Or = IRB.CreateOr(IRB.getInt32(1), IRB.getInt32(0));
+ Value *Xor = IRB.CreateXor(IRB.getInt32(1), IRB.getInt32(0));
+ Value *And = IRB.CreateXor(IRB.getInt32(1), IRB.getInt32(0));
+ Constant *T = IRB.getInt1(true);
+ Constant *F = IRB.getInt1(false);
+ Value *Alloca = IRB.CreateAlloca(IRB.getInt1Ty());
+ Value *X = IRB.CreateLoad(IRB.getInt1Ty(), Alloca);
+ Value *Y = IRB.CreateLoad(IRB.getInt1Ty(), Alloca);
+ Value *LAnd = IRB.CreateSelect(X, Y, F);
+ Value *LOr = IRB.CreateSelect(X, T, Y);
+ Value *Add = IRB.CreateAdd(IRB.getInt32(1), IRB.getInt32(0));
+
+ EXPECT_TRUE(m_BitwiseLogic(m_One(), m_Zero()).match(Or));
+ EXPECT_TRUE(m_BitwiseLogic(m_One(), m_Zero()).match(Xor));
+ EXPECT_TRUE(m_BitwiseLogic(m_One(), m_Zero()).match(And));
+ EXPECT_FALSE(m_BitwiseLogic(m_Value(), m_Value()).match(LAnd));
+ EXPECT_FALSE(m_BitwiseLogic(m_Value(), m_Value()).match(LOr));
+ EXPECT_FALSE(m_BitwiseLogic(m_Value(), m_Value()).match(Add));
+
+ EXPECT_FALSE(m_BitwiseLogic(m_Zero(), m_One()).match(Or));
+ EXPECT_FALSE(m_BitwiseLogic(m_Zero(), m_One()).match(Xor));
+ EXPECT_FALSE(m_BitwiseLogic(m_Zero(), m_One()).match(And));
+
+ EXPECT_TRUE(m_c_BitwiseLogic(m_One(), m_Zero()).match(Or));
+ EXPECT_TRUE(m_c_BitwiseLogic(m_One(), m_Zero()).match(Xor));
+ EXPECT_TRUE(m_c_BitwiseLogic(m_One(), m_Zero()).match(And));
+ EXPECT_FALSE(m_c_BitwiseLogic(m_Value(), m_Value()).match(LAnd));
+ EXPECT_FALSE(m_c_BitwiseLogic(m_Value(), m_Value()).match(LOr));
+ EXPECT_FALSE(m_c_BitwiseLogic(m_Value(), m_Value()).match(Add));
+
+ EXPECT_TRUE(m_c_BitwiseLogic(m_Zero(), m_One()).match(Or));
+ EXPECT_TRUE(m_c_BitwiseLogic(m_Zero(), m_One()).match(Xor));
+ EXPECT_TRUE(m_c_BitwiseLogic(m_Zero(), m_One()).match(And));
+
+ EXPECT_FALSE(m_c_BitwiseLogic(m_One(), m_One()).match(Or));
+ EXPECT_FALSE(m_c_BitwiseLogic(m_Zero(), m_Zero()).match(Xor));
+}
+
TEST_F(PatternMatchTest, ZExtSExtSelf) {
LLVMContext &Ctx = IRB.getContext();
@@ -909,6 +948,16 @@ TEST_F(PatternMatchTest, OverflowingBinOps) {
EXPECT_EQ(L, MatchL);
EXPECT_EQ(R, MatchR);
MatchL = MatchR = nullptr;
+
+ EXPECT_TRUE(
+ m_c_NUWAdd(m_Specific(L), m_Specific(R)).match(IRB.CreateNUWAdd(L, R)));
+ EXPECT_TRUE(
+ m_c_NUWAdd(m_Specific(R), m_Specific(L)).match(IRB.CreateNUWAdd(L, R)));
+ EXPECT_FALSE(
+ m_c_NUWAdd(m_Specific(R), m_ZeroInt()).match(IRB.CreateNUWAdd(L, R)));
+ EXPECT_FALSE(
+ m_NUWAdd(m_Specific(R), m_Specific(L)).match(IRB.CreateNUWAdd(L, R)));
+
EXPECT_TRUE(
m_NUWSub(m_Value(MatchL), m_Value(MatchR)).match(IRB.CreateNUWSub(L, R)));
EXPECT_EQ(L, MatchL);
diff --git a/llvm/unittests/Linker/LinkModulesTest.cpp b/llvm/unittests/Linker/LinkModulesTest.cpp
index 182ce73178c1..884e20e89c5c 100644
--- a/llvm/unittests/Linker/LinkModulesTest.cpp
+++ b/llvm/unittests/Linker/LinkModulesTest.cpp
@@ -72,7 +72,7 @@ protected:
BasicBlock *ExitBB;
};
-static void expectNoDiags(const DiagnosticInfo &DI, void *C) {
+static void expectNoDiags(const DiagnosticInfo *DI, void *C) {
llvm_unreachable("expectNoDiags called!");
}
diff --git a/llvm/unittests/ProfileData/InstrProfTest.cpp b/llvm/unittests/ProfileData/InstrProfTest.cpp
index cd4552a039b3..c9323420bda7 100644
--- a/llvm/unittests/ProfileData/InstrProfTest.cpp
+++ b/llvm/unittests/ProfileData/InstrProfTest.cpp
@@ -366,7 +366,8 @@ IndexedMemProfRecord makeRecord(
const MemInfoBlock &Block = MemInfoBlock()) {
llvm::memprof::IndexedMemProfRecord MR;
for (const auto &Frames : AllocFrames)
- MR.AllocSites.emplace_back(Frames, Block);
+ MR.AllocSites.emplace_back(Frames, llvm::memprof::hashCallStack(Frames),
+ Block);
for (const auto &Frames : CallSiteFrames)
MR.CallSites.push_back(Frames);
return MR;
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index f5e4a4aff2ed..1cca44e9b037 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -280,7 +280,8 @@ TEST(MemProf, RecordSerializationRoundTrip) {
IndexedMemProfRecord Record;
for (const auto &ACS : AllocCallStacks) {
// Use the same info block for both allocation sites.
- Record.AllocSites.emplace_back(ACS, Info);
+ Record.AllocSites.emplace_back(ACS, llvm::memprof::hashCallStack(ACS),
+ Info);
}
Record.CallSites.assign(CallSites);
@@ -376,7 +377,9 @@ TEST(MemProf, BaseMemProfReader) {
Block.AllocCount = 1U, Block.TotalAccessDensity = 4,
Block.TotalLifetime = 200001;
std::array<FrameId, 2> CallStack{F1.hash(), F2.hash()};
- FakeRecord.AllocSites.emplace_back(/*CS=*/CallStack, /*MB=*/Block);
+ FakeRecord.AllocSites.emplace_back(
+ /*CS=*/CallStack, /*CSId=*/llvm::memprof::hashCallStack(CallStack),
+ /*MB=*/Block);
ProfData.insert({F1.hash(), FakeRecord});
MemProfReader Reader(FrameIdMap, ProfData);
diff --git a/llvm/unittests/Support/ThreadPool.cpp b/llvm/unittests/Support/ThreadPool.cpp
index d74c625d1229..381b4fc2a26b 100644
--- a/llvm/unittests/Support/ThreadPool.cpp
+++ b/llvm/unittests/Support/ThreadPool.cpp
@@ -126,7 +126,7 @@ using ThreadPoolImpls = ::testing::Types<
#endif
SingleThreadExecutor>;
-TYPED_TEST_SUITE(ThreadPoolTest, ThreadPoolImpls);
+TYPED_TEST_SUITE(ThreadPoolTest, ThreadPoolImpls, );
#define CHECK_UNSUPPORTED() \
do { \
diff --git a/llvm/unittests/TableGen/CMakeLists.txt b/llvm/unittests/TableGen/CMakeLists.txt
index 7830e0218045..57b237306b19 100644
--- a/llvm/unittests/TableGen/CMakeLists.txt
+++ b/llvm/unittests/TableGen/CMakeLists.txt
@@ -9,10 +9,10 @@ tablegen(LLVM AutomataTables.inc -gen-searchable-tables)
tablegen(LLVM AutomataAutomata.inc -gen-automata)
add_public_tablegen_target(AutomataTestTableGen)
-add_llvm_unittest(TableGenTests DISABLE_LLVM_LINK_LLVM_DYLIB
+add_llvm_unittest(TableGenTests
AutomataTest.cpp
CodeExpanderTest.cpp
ParserEntryPointTest.cpp
)
-target_link_libraries(TableGenTests PRIVATE LLVMTableGenGlobalISel LLVMTableGen)
+target_link_libraries(TableGenTests PRIVATE LLVMTableGenCommon LLVMTableGen)
diff --git a/llvm/unittests/TableGen/CodeExpanderTest.cpp b/llvm/unittests/TableGen/CodeExpanderTest.cpp
index 4a9a0e8c114b..1528884ffdf6 100644
--- a/llvm/unittests/TableGen/CodeExpanderTest.cpp
+++ b/llvm/unittests/TableGen/CodeExpanderTest.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "GlobalISel/CodeExpander.h"
-#include "GlobalISel/CodeExpansions.h"
+#include "Common/GlobalISel/CodeExpander.h"
+#include "Common/GlobalISel/CodeExpansions.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
diff --git a/llvm/unittests/TargetParser/TargetParserTest.cpp b/llvm/unittests/TargetParser/TargetParserTest.cpp
index a7d0b1687a7f..2c72a7229b52 100644
--- a/llvm/unittests/TargetParser/TargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/TargetParserTest.cpp
@@ -2347,13 +2347,6 @@ AArch64ExtensionDependenciesBaseArchTestParams
{},
{"aes", "sha2", "sha3", "sm4"}},
- // +sve implies +f32mm if the base architecture is v8.6A+ or v9.1A+, but
- // not earlier architectures.
- {AArch64::ARMV8_5A, {"sve"}, {"sve"}, {"f32mm"}},
- {AArch64::ARMV9A, {"sve"}, {"sve"}, {"f32mm"}},
- {AArch64::ARMV8_6A, {"sve"}, {"sve", "f32mm"}, {}},
- {AArch64::ARMV9_1A, {"sve"}, {"sve", "f32mm"}, {}},
-
// +fp16 implies +fp16fml for v8.4A+, but not v9.0-A+
{AArch64::ARMV8_3A, {"fp16"}, {"fullfp16"}, {"fp16fml"}},
{AArch64::ARMV9A, {"fp16"}, {"fullfp16"}, {"fp16fml"}},
@@ -2520,10 +2513,10 @@ AArch64ExtensionDependenciesBaseCPUTestParams
{}},
{"cortex-a520",
{},
- {"v9.2a", "bf16", "crc", "dotprod", "f32mm", "flagm",
- "fp-armv8", "fullfp16", "fp16fml", "i8mm", "lse", "mte",
- "pauth", "perfmon", "predres", "ras", "rcpc", "rdm",
- "sb", "neon", "ssbs", "sve", "sve2-bitperm", "sve2"},
+ {"v9.2a", "bf16", "crc", "dotprod", "flagm", "fp-armv8",
+ "fullfp16", "fp16fml", "i8mm", "lse", "mte", "pauth",
+ "perfmon", "predres", "ras", "rcpc", "rdm", "sb",
+ "neon", "ssbs", "sve", "sve2-bitperm", "sve2"},
{}},
// Negative modifiers
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
index be8be7acbe38..777675b623f3 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
@@ -96,7 +96,7 @@ TEST_F(VPlanHCFGTest, testBuildHCFGInnerLoop) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Add an external value to check we do not print the list of external values,
// as this is not required with the new printing.
- Plan->getVPValueOrAddLiveIn(&*F->arg_begin());
+ Plan->getOrAddLiveIn(&*F->arg_begin());
std::string FullDump;
raw_string_ostream OS(FullDump);
Plan->printDOT(OS);
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
index 33d5e2759af5..e537aac75515 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
@@ -1237,8 +1237,8 @@ TEST(VPRecipeTest, dump) {
BinaryOperator::CreateAdd(UndefValue::get(Int32), UndefValue::get(Int32));
AI->setName("a");
SmallVector<VPValue *, 2> Args;
- VPValue *ExtVPV1 = Plan.getVPValueOrAddLiveIn(ConstantInt::get(Int32, 1));
- VPValue *ExtVPV2 = Plan.getVPValueOrAddLiveIn(ConstantInt::get(Int32, 2));
+ VPValue *ExtVPV1 = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
+ VPValue *ExtVPV2 = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
Args.push_back(ExtVPV1);
Args.push_back(ExtVPV2);
VPWidenRecipe *WidenR =
diff --git a/llvm/unittests/tools/llvm-exegesis/X86/SubprocessMemoryTest.cpp b/llvm/unittests/tools/llvm-exegesis/X86/SubprocessMemoryTest.cpp
index c07ec188a602..7c23e7b7e9c5 100644
--- a/llvm/unittests/tools/llvm-exegesis/X86/SubprocessMemoryTest.cpp
+++ b/llvm/unittests/tools/llvm-exegesis/X86/SubprocessMemoryTest.cpp
@@ -17,6 +17,7 @@
#include <endian.h>
#include <fcntl.h>
#include <sys/mman.h>
+#include <sys/syscall.h>
#include <unistd.h>
#endif // __linux__
@@ -49,7 +50,9 @@ protected:
std::string getSharedMemoryName(const unsigned TestNumber,
const unsigned DefinitionNumber) {
- return "/" + std::to_string(getSharedMemoryNumber(TestNumber)) + "memdef" +
+ long CurrentTID = syscall(SYS_gettid);
+ return "/" + std::to_string(getSharedMemoryNumber(TestNumber)) + "t" +
+ std::to_string(CurrentTID) + "memdef" +
std::to_string(DefinitionNumber);
}
diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 5df7990d8fc2..8b82ce899a48 100644
--- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -95,12 +95,12 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenInstAlias.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "SubtargetFeatureInfo.h"
-#include "Types.h"
+#include "Common/CodeGenInstAlias.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/SubtargetFeatureInfo.h"
+#include "Common/Types.h"
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
@@ -2519,7 +2519,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info, raw_ostream &OS) {
// Check for register operands, including sub-classes.
OS << " if (Operand.isReg()) {\n";
OS << " MatchClassKind OpKind;\n";
- OS << " switch (Operand.getReg()) {\n";
+ OS << " switch (Operand.getReg().id()) {\n";
OS << " default: OpKind = InvalidMatchClass; break;\n";
for (const auto &RC : Info.RegisterClasses)
OS << " case " << RC.first->getValueAsString("Namespace")
diff --git a/llvm/utils/TableGen/AsmWriterEmitter.cpp b/llvm/utils/TableGen/AsmWriterEmitter.cpp
index a27061ee585a..16661cd29edc 100644
--- a/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -11,13 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "AsmWriterInst.h"
-#include "CodeGenInstAlias.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "SequenceToOffsetTable.h"
-#include "Types.h"
+#include "Basic/SequenceToOffsetTable.h"
+#include "Common/AsmWriterInst.h"
+#include "Common/CodeGenInstAlias.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
diff --git a/llvm/utils/TableGen/Basic/CMakeLists.txt b/llvm/utils/TableGen/Basic/CMakeLists.txt
new file mode 100644
index 000000000000..5a899e3b7c80
--- /dev/null
+++ b/llvm/utils/TableGen/Basic/CMakeLists.txt
@@ -0,0 +1,21 @@
+# The basic TableGen library contains as little dependencies as possible.
+# In particular, it does not depend on vt_gen -> it does not use ValueTypes.
+#
+# This library is the only thing included in `llvm-min-tablegen`.
+
+set(LLVM_LINK_COMPONENTS
+ Support
+ TableGen
+ )
+
+add_llvm_library(LLVMTableGenBasic OBJECT EXCLUDE_FROM_ALL
+ CodeGenIntrinsics.cpp
+ SDNodeProperties.cpp
+)
+set_target_properties(LLVMTableGenBasic PROPERTIES FOLDER "Tablegenning")
+
+# Users may include its headers as "Basic/*.h"
+target_include_directories(LLVMTableGenBasic
+ INTERFACE
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>
+ )
diff --git a/llvm/utils/TableGen/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
index 7cb86ad95266..7cb86ad95266 100644
--- a/llvm/utils/TableGen/CodeGenIntrinsics.cpp
+++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
diff --git a/llvm/utils/TableGen/CodeGenIntrinsics.h b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h
index da9e386d64d1..da9e386d64d1 100644
--- a/llvm/utils/TableGen/CodeGenIntrinsics.h
+++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h
diff --git a/llvm/utils/TableGen/SDNodeProperties.cpp b/llvm/utils/TableGen/Basic/SDNodeProperties.cpp
index 2aec41aac625..2aec41aac625 100644
--- a/llvm/utils/TableGen/SDNodeProperties.cpp
+++ b/llvm/utils/TableGen/Basic/SDNodeProperties.cpp
diff --git a/llvm/utils/TableGen/SDNodeProperties.h b/llvm/utils/TableGen/Basic/SDNodeProperties.h
index 571542328524..571542328524 100644
--- a/llvm/utils/TableGen/SDNodeProperties.h
+++ b/llvm/utils/TableGen/Basic/SDNodeProperties.h
diff --git a/llvm/utils/TableGen/SequenceToOffsetTable.h b/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h
index 5766b682b58a..5766b682b58a 100644
--- a/llvm/utils/TableGen/SequenceToOffsetTable.h
+++ b/llvm/utils/TableGen/Basic/SequenceToOffsetTable.h
diff --git a/llvm/utils/TableGen/CMakeLists.txt b/llvm/utils/TableGen/CMakeLists.txt
index 0100bf345ec2..577aeded4be7 100644
--- a/llvm/utils/TableGen/CMakeLists.txt
+++ b/llvm/utils/TableGen/CMakeLists.txt
@@ -1,26 +1,25 @@
-add_subdirectory(GlobalISel)
+# Basic utilities which is the strict minimum needed to build
+# llvm-min-tblgen.
+add_subdirectory(Basic)
+# Common utilities are all of the reusable components and helper
+# code needed by the backends.
+add_subdirectory(Common)
-add_llvm_library(LLVMTableGenCommon STATIC OBJECT EXCLUDE_FROM_ALL
+set(LLVM_LINK_COMPONENTS Support)
+
+# llvm-min-tablegen only contains a subset of backends necessary to
+# build llvm/include. It must not depend on TableGenCommon, as
+# TableGenCommon depends on this already to generate things such as
+# ValueType definitions.
+add_tablegen(llvm-min-tblgen LLVM_HEADERS
+ TableGen.cpp
Attributes.cpp
- CodeGenIntrinsics.cpp
DirectiveEmitter.cpp
IntrinsicEmitter.cpp
RISCVTargetDefEmitter.cpp
- SDNodeProperties.cpp
VTEmitter.cpp
- PARTIAL_SOURCES_INTENDED
-
- LINK_COMPONENTS
- Support
- TableGen
- )
-set_target_properties(LLVMTableGenCommon PROPERTIES FOLDER "Tablegenning")
+ $<TARGET_OBJECTS:obj.LLVMTableGenBasic>
-set(LLVM_LINK_COMPONENTS Support)
-
-add_tablegen(llvm-min-tblgen LLVM_HEADERS
- TableGen.cpp
- $<TARGET_OBJECTS:obj.LLVMTableGenCommon>
PARTIAL_SOURCES_INTENDED
)
set_target_properties(llvm-min-tblgen PROPERTIES FOLDER "Tablegenning")
@@ -35,63 +34,51 @@ add_tablegen(llvm-tblgen LLVM
EXPORT LLVM
AsmMatcherEmitter.cpp
AsmWriterEmitter.cpp
- AsmWriterInst.cpp
- CTagsEmitter.cpp
+ Attributes.cpp
CallingConvEmitter.cpp
CodeEmitterGen.cpp
- CodeGenDAGPatterns.cpp
- CodeGenHwModes.cpp
- CodeGenInstAlias.cpp
- CodeGenInstruction.cpp
CodeGenMapTable.cpp
- CodeGenRegisters.cpp
- CodeGenSchedule.cpp
- CodeGenTarget.cpp
+ CompressInstEmitter.cpp
+ CTagsEmitter.cpp
DAGISelEmitter.cpp
DAGISelMatcherEmitter.cpp
DAGISelMatcherGen.cpp
DAGISelMatcherOpt.cpp
- DAGISelMatcher.cpp
DecoderEmitter.cpp
DFAEmitter.cpp
DFAPacketizerEmitter.cpp
+ DirectiveEmitter.cpp
DisassemblerEmitter.cpp
DXILEmitter.cpp
ExegesisEmitter.cpp
FastISelEmitter.cpp
GlobalISelCombinerEmitter.cpp
GlobalISelEmitter.cpp
- GlobalISelMatchTable.cpp
- GlobalISelMatchTableExecutorEmitter.cpp
- InfoByHwMode.cpp
- InstrInfoEmitter.cpp
InstrDocsEmitter.cpp
- OptEmitter.cpp
+ InstrInfoEmitter.cpp
+ IntrinsicEmitter.cpp
+ MacroFusionPredicatorEmitter.cpp
OptParserEmitter.cpp
OptRSTEmitter.cpp
- PredicateExpander.cpp
PseudoLoweringEmitter.cpp
- CompressInstEmitter.cpp
- MacroFusionPredicatorEmitter.cpp
RegisterBankEmitter.cpp
RegisterInfoEmitter.cpp
+ RISCVTargetDefEmitter.cpp
SearchableTableEmitter.cpp
SubtargetEmitter.cpp
- SubtargetFeatureInfo.cpp
TableGen.cpp
- Types.cpp
- VarLenCodeEmitterGen.cpp
- X86DisassemblerTables.cpp
+ VTEmitter.cpp
+ WebAssemblyDisassemblerEmitter.cpp
X86CompressEVEXTablesEmitter.cpp
+ X86DisassemblerTables.cpp
X86FoldTablesEmitter.cpp
X86MnemonicTables.cpp
X86ModRMFilters.cpp
X86RecognizableInstr.cpp
- WebAssemblyDisassemblerEmitter.cpp
+ $<TARGET_OBJECTS:obj.LLVMTableGenBasic>
$<TARGET_OBJECTS:obj.LLVMTableGenCommon>
DEPENDS
intrinsics_gen # via llvm-min-tablegen
)
-target_link_libraries(llvm-tblgen PRIVATE LLVMTableGenGlobalISel)
set_target_properties(llvm-tblgen PROPERTIES FOLDER "Tablegenning")
diff --git a/llvm/utils/TableGen/CallingConvEmitter.cpp b/llvm/utils/TableGen/CallingConvEmitter.cpp
index 3c3a2874ce80..ec6ef56a66fa 100644
--- a/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenTarget.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp
index 9194c13ccdcb..a57885f22d7e 100644
--- a/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -22,11 +22,11 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenHwModes.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
-#include "InfoByHwMode.h"
-#include "VarLenCodeEmitterGen.h"
+#include "Common/CodeGenHwModes.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/InfoByHwMode.h"
+#include "Common/VarLenCodeEmitterGen.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/llvm/utils/TableGen/CodeGenMapTable.cpp b/llvm/utils/TableGen/CodeGenMapTable.cpp
index 03af0b49ba97..fbf1d47c0327 100644
--- a/llvm/utils/TableGen/CodeGenMapTable.cpp
+++ b/llvm/utils/TableGen/CodeGenMapTable.cpp
@@ -75,8 +75,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
diff --git a/llvm/utils/TableGen/AsmWriterInst.cpp b/llvm/utils/TableGen/Common/AsmWriterInst.cpp
index 1fa609e90005..1fa609e90005 100644
--- a/llvm/utils/TableGen/AsmWriterInst.cpp
+++ b/llvm/utils/TableGen/Common/AsmWriterInst.cpp
diff --git a/llvm/utils/TableGen/AsmWriterInst.h b/llvm/utils/TableGen/Common/AsmWriterInst.h
index f0ebf799d910..f0ebf799d910 100644
--- a/llvm/utils/TableGen/AsmWriterInst.h
+++ b/llvm/utils/TableGen/Common/AsmWriterInst.h
diff --git a/llvm/utils/TableGen/Common/CMakeLists.txt b/llvm/utils/TableGen/Common/CMakeLists.txt
new file mode 100644
index 000000000000..c31ed5a1de69
--- /dev/null
+++ b/llvm/utils/TableGen/Common/CMakeLists.txt
@@ -0,0 +1,51 @@
+# The common library is similar to the basic library except it can
+# depend on vt_gen.
+#
+# This library contains the bulk of the supporting code for all
+# TableGen backends. It's split off as a separate library to
+# allow unit-testing those components.
+
+set(LLVM_LINK_COMPONENTS
+ Support
+ TableGen
+ )
+
+add_llvm_library(LLVMTableGenCommon STATIC OBJECT EXCLUDE_FROM_ALL
+ GlobalISel/CodeExpander.cpp
+ GlobalISel/CombinerUtils.cpp
+ GlobalISel/CXXPredicates.cpp
+ GlobalISel/GlobalISelMatchTable.cpp
+ GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
+ GlobalISel/MatchDataInfo.cpp
+ GlobalISel/PatternParser.cpp
+ GlobalISel/Patterns.cpp
+
+ AsmWriterInst.cpp
+ CodeGenDAGPatterns.cpp
+ CodeGenHwModes.cpp
+ CodeGenInstAlias.cpp
+ CodeGenInstruction.cpp
+ CodeGenRegisters.cpp
+ CodeGenSchedule.cpp
+ CodeGenTarget.cpp
+ DAGISelMatcher.cpp
+ InfoByHwMode.cpp
+ OptEmitter.cpp
+ PredicateExpander.cpp
+ SubtargetFeatureInfo.cpp
+ Types.cpp
+ VarLenCodeEmitterGen.cpp
+
+ LINK_LIBS
+ LLVMTableGenBasic
+
+ DEPENDS
+ vt_gen
+ )
+set_target_properties(LLVMTableGenCommon PROPERTIES FOLDER "Tablegenning")
+
+# Users may include its headers as "Common/*.h"
+target_include_directories(LLVMTableGenCommon
+ PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>
+ )
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
index 076d0427a859..076d0427a859 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
index 823c40c922cb..7fcd39a9e940 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.h
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
@@ -14,9 +14,9 @@
#ifndef LLVM_UTILS_TABLEGEN_CODEGENDAGPATTERNS_H
#define LLVM_UTILS_TABLEGEN_CODEGENDAGPATTERNS_H
-#include "CodeGenIntrinsics.h"
+#include "Basic/CodeGenIntrinsics.h"
+#include "Basic/SDNodeProperties.h"
#include "CodeGenTarget.h"
-#include "SDNodeProperties.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerUnion.h"
diff --git a/llvm/utils/TableGen/CodeGenHwModes.cpp b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
index fec74d29c8bb..fec74d29c8bb 100644
--- a/llvm/utils/TableGen/CodeGenHwModes.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenHwModes.cpp
diff --git a/llvm/utils/TableGen/CodeGenHwModes.h b/llvm/utils/TableGen/Common/CodeGenHwModes.h
index 23723b7bd4af..23723b7bd4af 100644
--- a/llvm/utils/TableGen/CodeGenHwModes.h
+++ b/llvm/utils/TableGen/Common/CodeGenHwModes.h
diff --git a/llvm/utils/TableGen/CodeGenInstAlias.cpp b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
index d217059542b1..d217059542b1 100644
--- a/llvm/utils/TableGen/CodeGenInstAlias.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.cpp
diff --git a/llvm/utils/TableGen/CodeGenInstAlias.h b/llvm/utils/TableGen/Common/CodeGenInstAlias.h
index 2a05273e7270..2a05273e7270 100644
--- a/llvm/utils/TableGen/CodeGenInstAlias.h
+++ b/llvm/utils/TableGen/Common/CodeGenInstAlias.h
diff --git a/llvm/utils/TableGen/CodeGenInstruction.cpp b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
index 18a4e7b0f18b..18a4e7b0f18b 100644
--- a/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenInstruction.cpp
diff --git a/llvm/utils/TableGen/CodeGenInstruction.h b/llvm/utils/TableGen/Common/CodeGenInstruction.h
index b658259b4892..b658259b4892 100644
--- a/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/llvm/utils/TableGen/Common/CodeGenInstruction.h
diff --git a/llvm/utils/TableGen/CodeGenRegisters.cpp b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
index 40af0d3077b2..624e8d5d54ba 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
@@ -47,19 +47,24 @@ using namespace llvm;
// CodeGenSubRegIndex
//===----------------------------------------------------------------------===//
-CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
+CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum,
+ const CodeGenHwModes &CGH)
: TheDef(R), EnumValue(Enum), AllSuperRegsCovered(true), Artificial(true) {
Name = std::string(R->getName());
if (R->getValue("Namespace"))
Namespace = std::string(R->getValueAsString("Namespace"));
- Size = R->getValueAsInt("Size");
- Offset = R->getValueAsInt("Offset");
+
+ if (const RecordVal *RV = R->getValue("SubRegRanges"))
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue()))
+ Range = SubRegRangeByHwMode(DI->getDef(), CGH);
+ if (!Range.hasDefault())
+ Range.insertSubRegRangeForMode(DefaultMode, SubRegRange(R));
}
CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
unsigned Enum)
: TheDef(nullptr), Name(std::string(N)), Namespace(std::string(Nspace)),
- Size(-1), Offset(-1), EnumValue(Enum), AllSuperRegsCovered(true),
+ Range(SubRegRange(-1, -1)), EnumValue(Enum), AllSuperRegsCovered(true),
Artificial(true) {}
std::string CodeGenSubRegIndex::getQualifiedName() const {
@@ -81,7 +86,7 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
"ComposedOf must have exactly two entries");
CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
- CodeGenSubRegIndex *X = A->addComposite(B, this);
+ CodeGenSubRegIndex *X = A->addComposite(B, this, RegBank.getHwModes());
if (X)
PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
}
@@ -518,7 +523,8 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
// Each part of Cand is a sub-register of this. Make the full Cand also
// a sub-register with a concatenated sub-register index.
- CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
+ CodeGenSubRegIndex *Concat =
+ RegBank.getConcatSubRegIndex(Parts, RegBank.getHwModes());
std::pair<CodeGenSubRegIndex *, CodeGenRegister *> NewSubReg =
std::pair(Concat, Cand);
@@ -542,7 +548,7 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
SubReg.second->getName() +
" in " + getName());
- NewIdx->addComposite(SubReg.first, SubIdx);
+ NewIdx->addComposite(SubReg.first, SubIdx, RegBank.getHwModes());
}
}
}
@@ -1315,7 +1321,7 @@ CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
if (Idx)
return Idx;
- SubRegIndices.emplace_back(Def, SubRegIndices.size() + 1);
+ SubRegIndices.emplace_back(Def, SubRegIndices.size() + 1, getHwModes());
Idx = &SubRegIndices.back();
return Idx;
}
@@ -1379,12 +1385,13 @@ CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
// None exists, synthesize one.
std::string Name = A->getName() + "_then_" + B->getName();
Comp = createSubRegIndex(Name, A->getNamespace());
- A->addComposite(B, Comp);
+ A->addComposite(B, Comp, getHwModes());
return Comp;
}
CodeGenSubRegIndex *CodeGenRegBank::getConcatSubRegIndex(
- const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
+ const SmallVector<CodeGenSubRegIndex *, 8> &Parts,
+ const CodeGenHwModes &CGH) {
assert(Parts.size() > 1 && "Need two parts to concatenate");
#ifndef NDEBUG
for (CodeGenSubRegIndex *Idx : Parts) {
@@ -1399,28 +1406,47 @@ CodeGenSubRegIndex *CodeGenRegBank::getConcatSubRegIndex(
// None exists, synthesize one.
std::string Name = Parts.front()->getName();
- // Determine whether all parts are contiguous.
- bool isContinuous = true;
- unsigned Size = Parts.front()->Size;
- unsigned LastOffset = Parts.front()->Offset;
- unsigned LastSize = Parts.front()->Size;
- unsigned UnknownSize = (uint16_t)-1;
+ const unsigned UnknownSize = (uint16_t)-1;
+
for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
Name += '_';
Name += Parts[i]->getName();
- if (Size == UnknownSize || Parts[i]->Size == UnknownSize)
- Size = UnknownSize;
- else
- Size += Parts[i]->Size;
- if (LastSize == UnknownSize || Parts[i]->Offset != (LastOffset + LastSize))
- isContinuous = false;
- LastOffset = Parts[i]->Offset;
- LastSize = Parts[i]->Size;
}
+
Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
- Idx->Size = Size;
- Idx->Offset = isContinuous ? Parts.front()->Offset : -1;
Idx->ConcatenationOf.assign(Parts.begin(), Parts.end());
+
+ unsigned NumModes = CGH.getNumModeIds();
+ for (unsigned M = 0; M < NumModes; ++M) {
+ const CodeGenSubRegIndex *Part = Parts.front();
+
+ // Determine whether all parts are contiguous.
+ bool IsContinuous = true;
+ const SubRegRange &FirstPartRange = Part->Range.get(M);
+ unsigned Size = FirstPartRange.Size;
+ unsigned LastOffset = FirstPartRange.Offset;
+ unsigned LastSize = FirstPartRange.Size;
+
+ for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
+ Part = Parts[i];
+ Name += '_';
+ Name += Part->getName();
+
+ const SubRegRange &PartRange = Part->Range.get(M);
+ if (Size == UnknownSize || PartRange.Size == UnknownSize)
+ Size = UnknownSize;
+ else
+ Size += PartRange.Size;
+ if (LastSize == UnknownSize ||
+ PartRange.Offset != (LastOffset + LastSize))
+ IsContinuous = false;
+ LastOffset = PartRange.Offset;
+ LastSize = PartRange.Size;
+ }
+ unsigned Offset = IsContinuous ? FirstPartRange.Offset : -1;
+ Idx->Range.get(M) = SubRegRange(Size, Offset);
+ }
+
return Idx;
}
@@ -1504,7 +1530,8 @@ void CodeGenRegBank::computeComposites() {
assert(Idx3 && "Sub-register doesn't have an index");
// Conflicting composition? Emit a warning but allow it.
- if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, Idx3)) {
+ if (CodeGenSubRegIndex *Prev =
+ Idx1->addComposite(Idx2, Idx3, getHwModes())) {
// If the composition was not user-defined, always emit a warning.
if (!UserDefined.count({Idx1, Idx2}) ||
agree(compose(Idx1, Idx2), SubRegAction.at(Idx3)))
diff --git a/llvm/utils/TableGen/CodeGenRegisters.h b/llvm/utils/TableGen/Common/CodeGenRegisters.h
index c34f376ea99d..9058baea2b23 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/llvm/utils/TableGen/Common/CodeGenRegisters.h
@@ -68,8 +68,7 @@ class CodeGenSubRegIndex {
std::string Namespace;
public:
- uint16_t Size;
- uint16_t Offset;
+ SubRegRangeByHwMode Range;
const unsigned EnumValue;
mutable LaneBitmask LaneMask;
mutable SmallVector<MaskRolPair, 1> CompositionLaneMaskTransform;
@@ -86,7 +85,7 @@ public:
// indexes are not used to create new register classes.
bool Artificial;
- CodeGenSubRegIndex(Record *R, unsigned Enum);
+ CodeGenSubRegIndex(Record *R, unsigned Enum, const CodeGenHwModes &CGH);
CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum);
CodeGenSubRegIndex(CodeGenSubRegIndex &) = delete;
@@ -108,19 +107,42 @@ public:
// Add a composite subreg index: this+A = B.
// Return a conflicting composite, or NULL
- CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A,
- CodeGenSubRegIndex *B) {
+ CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A, CodeGenSubRegIndex *B,
+ const CodeGenHwModes &CGH) {
assert(A && B);
std::pair<CompMap::iterator, bool> Ins = Composed.insert(std::pair(A, B));
+
// Synthetic subreg indices that aren't contiguous (for instance ARM
// register tuples) don't have a bit range, so it's OK to let
// B->Offset == -1. For the other cases, accumulate the offset and set
// the size here. Only do so if there is no offset yet though.
- if ((Offset != (uint16_t)-1 && A->Offset != (uint16_t)-1) &&
- (B->Offset == (uint16_t)-1)) {
- B->Offset = Offset + A->Offset;
- B->Size = A->Size;
+ unsigned NumModes = CGH.getNumModeIds();
+ // Skip default mode.
+ for (unsigned M = 0; M < NumModes; ++M) {
+ // Handle DefaultMode last.
+ if (M == DefaultMode)
+ continue;
+ SubRegRange &Range = this->Range.get(M);
+ SubRegRange &ARange = A->Range.get(M);
+ SubRegRange &BRange = B->Range.get(M);
+
+ if (Range.Offset != (uint16_t)-1 && ARange.Offset != (uint16_t)-1 &&
+ BRange.Offset == (uint16_t)-1) {
+ BRange.Offset = Range.Offset + ARange.Offset;
+ BRange.Size = ARange.Size;
+ }
+ }
+
+ // Now handle default.
+ SubRegRange &Range = this->Range.get(DefaultMode);
+ SubRegRange &ARange = A->Range.get(DefaultMode);
+ SubRegRange &BRange = B->Range.get(DefaultMode);
+ if (Range.Offset != (uint16_t)-1 && ARange.Offset != (uint16_t)-1 &&
+ BRange.Offset == (uint16_t)-1) {
+ BRange.Offset = Range.Offset + ARange.Offset;
+ BRange.Size = ARange.Size;
}
+
return (Ins.second || Ins.first->second == B) ? nullptr : Ins.first->second;
}
@@ -681,7 +703,8 @@ public:
// Find or create a sub-register index representing the concatenation of
// non-overlapping sibling indices.
CodeGenSubRegIndex *
- getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &);
+ getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &,
+ const CodeGenHwModes &CGH);
const std::deque<CodeGenRegister> &getRegisters() const { return Registers; }
diff --git a/llvm/utils/TableGen/CodeGenSchedule.cpp b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
index 0e81623a6aa3..0e81623a6aa3 100644
--- a/llvm/utils/TableGen/CodeGenSchedule.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenSchedule.cpp
diff --git a/llvm/utils/TableGen/CodeGenSchedule.h b/llvm/utils/TableGen/Common/CodeGenSchedule.h
index 61980e7e196e..61980e7e196e 100644
--- a/llvm/utils/TableGen/CodeGenSchedule.h
+++ b/llvm/utils/TableGen/Common/CodeGenSchedule.h
diff --git a/llvm/utils/TableGen/CodeGenTarget.cpp b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
index e1cf33e7f62f..e1cf33e7f62f 100644
--- a/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.cpp
diff --git a/llvm/utils/TableGen/CodeGenTarget.h b/llvm/utils/TableGen/Common/CodeGenTarget.h
index e109c717dc01..df4c22ebb379 100644
--- a/llvm/utils/TableGen/CodeGenTarget.h
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.h
@@ -16,10 +16,10 @@
#ifndef LLVM_UTILS_TABLEGEN_CODEGENTARGET_H
#define LLVM_UTILS_TABLEGEN_CODEGENTARGET_H
+#include "Basic/SDNodeProperties.h"
#include "CodeGenHwModes.h"
#include "CodeGenInstruction.h"
#include "InfoByHwMode.h"
-#include "SDNodeProperties.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
diff --git a/llvm/utils/TableGen/DAGISelMatcher.cpp b/llvm/utils/TableGen/Common/DAGISelMatcher.cpp
index 3298965ab41d..3298965ab41d 100644
--- a/llvm/utils/TableGen/DAGISelMatcher.cpp
+++ b/llvm/utils/TableGen/Common/DAGISelMatcher.cpp
diff --git a/llvm/utils/TableGen/DAGISelMatcher.h b/llvm/utils/TableGen/Common/DAGISelMatcher.h
index d4fe513e2e96..d4fe513e2e96 100644
--- a/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/llvm/utils/TableGen/Common/DAGISelMatcher.h
diff --git a/llvm/utils/TableGen/GlobalISel/CXXPredicates.cpp b/llvm/utils/TableGen/Common/GlobalISel/CXXPredicates.cpp
index e39293ebfe7a..e39293ebfe7a 100644
--- a/llvm/utils/TableGen/GlobalISel/CXXPredicates.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/CXXPredicates.cpp
diff --git a/llvm/utils/TableGen/GlobalISel/CXXPredicates.h b/llvm/utils/TableGen/Common/GlobalISel/CXXPredicates.h
index 01610a13110d..01610a13110d 100644
--- a/llvm/utils/TableGen/GlobalISel/CXXPredicates.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/CXXPredicates.h
diff --git a/llvm/utils/TableGen/GlobalISel/CodeExpander.cpp b/llvm/utils/TableGen/Common/GlobalISel/CodeExpander.cpp
index b0baf194bafe..b0baf194bafe 100644
--- a/llvm/utils/TableGen/GlobalISel/CodeExpander.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/CodeExpander.cpp
diff --git a/llvm/utils/TableGen/GlobalISel/CodeExpander.h b/llvm/utils/TableGen/Common/GlobalISel/CodeExpander.h
index 0b1e6ceab52c..0b1e6ceab52c 100644
--- a/llvm/utils/TableGen/GlobalISel/CodeExpander.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/CodeExpander.h
diff --git a/llvm/utils/TableGen/GlobalISel/CodeExpansions.h b/llvm/utils/TableGen/Common/GlobalISel/CodeExpansions.h
index b82c3257b321..b82c3257b321 100644
--- a/llvm/utils/TableGen/GlobalISel/CodeExpansions.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/CodeExpansions.h
diff --git a/llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.cpp b/llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.cpp
new file mode 100644
index 000000000000..37e630605095
--- /dev/null
+++ b/llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.cpp
@@ -0,0 +1,23 @@
+//===- CombinerUtils.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CombinerUtils.h"
+#include "llvm/ADT/StringSet.h"
+
+namespace llvm {
+
+StringRef insertStrRef(StringRef S) {
+ if (S.empty())
+ return {};
+
+ static StringSet<> Pool;
+ auto [It, Inserted] = Pool.insert(S);
+ return It->getKey();
+}
+
+} // namespace llvm
diff --git a/llvm/utils/TableGen/GlobalISel/CombinerUtils.h b/llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.h
index 8cb2514a10e8..82a64c63edbd 100644
--- a/llvm/utils/TableGen/GlobalISel/CombinerUtils.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/CombinerUtils.h
@@ -65,6 +65,10 @@ inline const DagInit *getDagWithOperatorOfSubClass(const Init &N,
return I;
return nullptr;
}
+
+/// Copies a StringRef into a static pool to preserve it.
+StringRef insertStrRef(StringRef S);
+
} // namespace llvm
#endif
diff --git a/llvm/utils/TableGen/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
index 45fb41b89f27..19d42b7688da 100644
--- a/llvm/utils/TableGen/GlobalISelMatchTable.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp
@@ -7,8 +7,8 @@
//===----------------------------------------------------------------------===//
#include "GlobalISelMatchTable.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/LEB128.h"
@@ -1077,30 +1077,25 @@ OperandPredicateMatcher::~OperandPredicateMatcher() {}
bool OperandPredicateMatcher::isHigherPriorityThan(
const OperandPredicateMatcher &B) const {
// Generally speaking, an instruction is more important than an Int or a
- // LiteralInt because it can cover more nodes but theres an exception to
+ // LiteralInt because it can cover more nodes but there's an exception to
// this. G_CONSTANT's are less important than either of those two because they
// are more permissive.
- const InstructionOperandMatcher *AOM =
- dyn_cast<InstructionOperandMatcher>(this);
- const InstructionOperandMatcher *BOM =
- dyn_cast<InstructionOperandMatcher>(&B);
+ const auto *AOM = dyn_cast<InstructionOperandMatcher>(this);
+ const auto *BOM = dyn_cast<InstructionOperandMatcher>(&B);
bool AIsConstantInsn = AOM && AOM->getInsnMatcher().isConstantInstruction();
bool BIsConstantInsn = BOM && BOM->getInsnMatcher().isConstantInstruction();
- if (AOM && BOM) {
- // The relative priorities between a G_CONSTANT and any other instruction
- // don't actually matter but this code is needed to ensure a strict weak
- // ordering. This is particularly important on Windows where the rules will
- // be incorrectly sorted without it.
- if (AIsConstantInsn != BIsConstantInsn)
- return AIsConstantInsn < BIsConstantInsn;
- return false;
- }
+ // The relative priorities between a G_CONSTANT and any other instruction
+ // don't actually matter but this code is needed to ensure a strict weak
+ // ordering. This is particularly important on Windows where the rules will
+ // be incorrectly sorted without it.
+ if (AOM && BOM)
+ return !AIsConstantInsn && BIsConstantInsn;
- if (AOM && AIsConstantInsn && (B.Kind == OPM_Int || B.Kind == OPM_LiteralInt))
+ if (AIsConstantInsn && (B.Kind == OPM_Int || B.Kind == OPM_LiteralInt))
return false;
- if (BOM && BIsConstantInsn && (Kind == OPM_Int || Kind == OPM_LiteralInt))
+ if (BIsConstantInsn && (Kind == OPM_Int || Kind == OPM_LiteralInt))
return true;
return Kind < B.Kind;
diff --git a/llvm/utils/TableGen/GlobalISelMatchTable.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
index b1ab7da8db44..aa86fad763d1 100644
--- a/llvm/utils/TableGen/GlobalISelMatchTable.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
@@ -16,7 +16,7 @@
#ifndef LLVM_UTILS_TABLEGEN_GLOBALISELMATCHTABLE_H
#define LLVM_UTILS_TABLEGEN_GLOBALISELMATCHTABLE_H
-#include "CodeGenDAGPatterns.h"
+#include "Common/CodeGenDAGPatterns.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
diff --git a/llvm/utils/TableGen/GlobalISelMatchTableExecutorEmitter.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
index 5697899a915a..5697899a915a 100644
--- a/llvm/utils/TableGen/GlobalISelMatchTableExecutorEmitter.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp
diff --git a/llvm/utils/TableGen/GlobalISelMatchTableExecutorEmitter.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h
index 7e952d6df309..d2b6a74c7577 100644
--- a/llvm/utils/TableGen/GlobalISelMatchTableExecutorEmitter.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h
@@ -15,7 +15,7 @@
#ifndef LLVM_UTILS_TABLEGEN_GLOBALISELMATCHTABLEEXECUTOREMITTER_H
#define LLVM_UTILS_TABLEGEN_GLOBALISELMATCHTABLEEXECUTOREMITTER_H
-#include "SubtargetFeatureInfo.h"
+#include "Common/SubtargetFeatureInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
diff --git a/llvm/utils/TableGen/GlobalISel/MatchDataInfo.cpp b/llvm/utils/TableGen/Common/GlobalISel/MatchDataInfo.cpp
index b5c9e4f8c248..b5c9e4f8c248 100644
--- a/llvm/utils/TableGen/GlobalISel/MatchDataInfo.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/MatchDataInfo.cpp
diff --git a/llvm/utils/TableGen/GlobalISel/MatchDataInfo.h b/llvm/utils/TableGen/Common/GlobalISel/MatchDataInfo.h
index abe1245bc67d..abe1245bc67d 100644
--- a/llvm/utils/TableGen/GlobalISel/MatchDataInfo.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/MatchDataInfo.h
diff --git a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
new file mode 100644
index 000000000000..1d6c4c73a264
--- /dev/null
+++ b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.cpp
@@ -0,0 +1,462 @@
+//===- PatternParser.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Common/GlobalISel/PatternParser.h"
+#include "Basic/CodeGenIntrinsics.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/GlobalISel/CombinerUtils.h"
+#include "Common/GlobalISel/Patterns.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+namespace llvm {
+namespace gi {
+static constexpr StringLiteral MIFlagsEnumClassName = "MIFlagEnum";
+
+namespace {
+class PrettyStackTraceParse : public PrettyStackTraceEntry {
+ const Record &Def;
+
+public:
+ PrettyStackTraceParse(const Record &Def) : Def(Def) {}
+
+ void print(raw_ostream &OS) const override {
+ if (Def.isSubClassOf("GICombineRule"))
+ OS << "Parsing GICombineRule '" << Def.getName() << '\'';
+ else if (Def.isSubClassOf(PatFrag::ClassName))
+ OS << "Parsing " << PatFrag::ClassName << " '" << Def.getName() << '\'';
+ else
+ OS << "Parsing '" << Def.getName() << '\'';
+ OS << '\n';
+ }
+};
+} // namespace
+
+bool PatternParser::parsePatternList(
+ const DagInit &List,
+ function_ref<bool(std::unique_ptr<Pattern>)> ParseAction,
+ StringRef Operator, StringRef AnonPatNamePrefix) {
+ if (List.getOperatorAsDef(DiagLoc)->getName() != Operator) {
+ PrintError(DiagLoc, "Expected " + Operator + " operator");
+ return false;
+ }
+
+ if (List.getNumArgs() == 0) {
+ PrintError(DiagLoc, Operator + " pattern list is empty");
+ return false;
+ }
+
+ // The match section consists of a list of matchers and predicates. Parse each
+ // one and add the equivalent GIMatchDag nodes, predicates, and edges.
+ for (unsigned I = 0; I < List.getNumArgs(); ++I) {
+ Init *Arg = List.getArg(I);
+ std::string Name = List.getArgName(I)
+ ? List.getArgName(I)->getValue().str()
+ : ("__" + AnonPatNamePrefix + "_" + Twine(I)).str();
+
+ if (auto Pat = parseInstructionPattern(*Arg, Name)) {
+ if (!ParseAction(std::move(Pat)))
+ return false;
+ continue;
+ }
+
+ if (auto Pat = parseWipMatchOpcodeMatcher(*Arg, Name)) {
+ if (!ParseAction(std::move(Pat)))
+ return false;
+ continue;
+ }
+
+ // Parse arbitrary C++ code
+ if (const auto *StringI = dyn_cast<StringInit>(Arg)) {
+ auto CXXPat = std::make_unique<CXXPattern>(*StringI, insertStrRef(Name));
+ if (!ParseAction(std::move(CXXPat)))
+ return false;
+ continue;
+ }
+
+ PrintError(DiagLoc,
+ "Failed to parse pattern: '" + Arg->getAsString() + '\'');
+ return false;
+ }
+
+ return true;
+}
+
+static const CodeGenInstruction &
+getInstrForIntrinsic(const CodeGenTarget &CGT, const CodeGenIntrinsic *I) {
+ StringRef Opc;
+ if (I->isConvergent) {
+ Opc = I->hasSideEffects ? "G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS"
+ : "G_INTRINSIC_CONVERGENT";
+ } else {
+ Opc = I->hasSideEffects ? "G_INTRINSIC_W_SIDE_EFFECTS" : "G_INTRINSIC";
+ }
+
+ RecordKeeper &RK = I->TheDef->getRecords();
+ return CGT.getInstruction(RK.getDef(Opc));
+}
+
+static const CodeGenIntrinsic *getCodeGenIntrinsic(Record *R) {
+ // Intrinsics need to have a static lifetime because the match table keeps
+ // references to CodeGenIntrinsic objects.
+ static DenseMap<const Record *, std::unique_ptr<CodeGenIntrinsic>>
+ AllIntrinsics;
+
+ auto &Ptr = AllIntrinsics[R];
+ if (!Ptr)
+ Ptr = std::make_unique<CodeGenIntrinsic>(R, std::vector<Record *>());
+ return Ptr.get();
+}
+
+std::unique_ptr<Pattern>
+PatternParser::parseInstructionPattern(const Init &Arg, StringRef Name) {
+ const DagInit *DagPat = dyn_cast<DagInit>(&Arg);
+ if (!DagPat)
+ return nullptr;
+
+ std::unique_ptr<InstructionPattern> Pat;
+ if (const DagInit *IP = getDagWithOperatorOfSubClass(Arg, "Instruction")) {
+ auto &Instr = CGT.getInstruction(IP->getOperatorAsDef(DiagLoc));
+ Pat =
+ std::make_unique<CodeGenInstructionPattern>(Instr, insertStrRef(Name));
+ } else if (const DagInit *IP =
+ getDagWithOperatorOfSubClass(Arg, "Intrinsic")) {
+ Record *TheDef = IP->getOperatorAsDef(DiagLoc);
+ const CodeGenIntrinsic *Intrin = getCodeGenIntrinsic(TheDef);
+ const CodeGenInstruction &Instr = getInstrForIntrinsic(CGT, Intrin);
+ Pat =
+ std::make_unique<CodeGenInstructionPattern>(Instr, insertStrRef(Name));
+ cast<CodeGenInstructionPattern>(*Pat).setIntrinsic(Intrin);
+ } else if (const DagInit *PFP =
+ getDagWithOperatorOfSubClass(Arg, PatFrag::ClassName)) {
+ const Record *Def = PFP->getOperatorAsDef(DiagLoc);
+ const PatFrag *PF = parsePatFrag(Def);
+ if (!PF)
+ return nullptr; // Already diagnosed by parsePatFrag
+ Pat = std::make_unique<PatFragPattern>(*PF, insertStrRef(Name));
+ } else if (const DagInit *BP =
+ getDagWithOperatorOfSubClass(Arg, BuiltinPattern::ClassName)) {
+ Pat = std::make_unique<BuiltinPattern>(*BP->getOperatorAsDef(DiagLoc),
+ insertStrRef(Name));
+ } else
+ return nullptr;
+
+ for (unsigned K = 0; K < DagPat->getNumArgs(); ++K) {
+ Init *Arg = DagPat->getArg(K);
+ if (auto *DagArg = getDagWithSpecificOperator(*Arg, "MIFlags")) {
+ if (!parseInstructionPatternMIFlags(*Pat, DagArg))
+ return nullptr;
+ continue;
+ }
+
+ if (!parseInstructionPatternOperand(*Pat, Arg, DagPat->getArgName(K)))
+ return nullptr;
+ }
+
+ if (!Pat->checkSemantics(DiagLoc))
+ return nullptr;
+
+ return std::move(Pat);
+}
+
+std::unique_ptr<Pattern>
+PatternParser::parseWipMatchOpcodeMatcher(const Init &Arg, StringRef Name) {
+ const DagInit *Matcher = getDagWithSpecificOperator(Arg, "wip_match_opcode");
+ if (!Matcher)
+ return nullptr;
+
+ if (Matcher->getNumArgs() == 0) {
+ PrintError(DiagLoc, "Empty wip_match_opcode");
+ return nullptr;
+ }
+
+ // Each argument is an opcode that can match.
+ auto Result = std::make_unique<AnyOpcodePattern>(insertStrRef(Name));
+ for (const auto &Arg : Matcher->getArgs()) {
+ Record *OpcodeDef = getDefOfSubClass(*Arg, "Instruction");
+ if (OpcodeDef) {
+ Result->addOpcode(&CGT.getInstruction(OpcodeDef));
+ continue;
+ }
+
+ PrintError(DiagLoc, "Arguments to wip_match_opcode must be instructions");
+ return nullptr;
+ }
+
+ return std::move(Result);
+}
+
+bool PatternParser::parseInstructionPatternOperand(InstructionPattern &IP,
+ const Init *OpInit,
+ const StringInit *OpName) {
+ const auto ParseErr = [&]() {
+ PrintError(DiagLoc,
+ "cannot parse operand '" + OpInit->getAsUnquotedString() + "' ");
+ if (OpName)
+ PrintNote(DiagLoc,
+ "operand name is '" + OpName->getAsUnquotedString() + '\'');
+ return false;
+ };
+
+ // untyped immediate, e.g. 0
+ if (const auto *IntImm = dyn_cast<IntInit>(OpInit)) {
+ std::string Name = OpName ? OpName->getAsUnquotedString() : "";
+ IP.addOperand(IntImm->getValue(), insertStrRef(Name), PatternType());
+ return true;
+ }
+
+ // typed immediate, e.g. (i32 0)
+ if (const auto *DagOp = dyn_cast<DagInit>(OpInit)) {
+ if (DagOp->getNumArgs() != 1)
+ return ParseErr();
+
+ const Record *TyDef = DagOp->getOperatorAsDef(DiagLoc);
+ auto ImmTy = PatternType::get(DiagLoc, TyDef,
+ "cannot parse immediate '" +
+ DagOp->getAsUnquotedString() + '\'');
+ if (!ImmTy)
+ return false;
+
+ if (!IP.hasAllDefs()) {
+ PrintError(DiagLoc, "out operand of '" + IP.getInstName() +
+ "' cannot be an immediate");
+ return false;
+ }
+
+ const auto *Val = dyn_cast<IntInit>(DagOp->getArg(0));
+ if (!Val)
+ return ParseErr();
+
+ std::string Name = OpName ? OpName->getAsUnquotedString() : "";
+ IP.addOperand(Val->getValue(), insertStrRef(Name), *ImmTy);
+ return true;
+ }
+
+ // Typed operand e.g. $x/$z in (G_FNEG $x, $z)
+ if (auto *DefI = dyn_cast<DefInit>(OpInit)) {
+ if (!OpName) {
+ PrintError(DiagLoc, "expected an operand name after '" +
+ OpInit->getAsString() + '\'');
+ return false;
+ }
+ const Record *Def = DefI->getDef();
+ auto Ty = PatternType::get(DiagLoc, Def, "cannot parse operand type");
+ if (!Ty)
+ return false;
+ IP.addOperand(insertStrRef(OpName->getAsUnquotedString()), *Ty);
+ return true;
+ }
+
+ // Untyped operand e.g. $x/$z in (G_FNEG $x, $z)
+ if (isa<UnsetInit>(OpInit)) {
+ assert(OpName && "Unset w/ no OpName?");
+ IP.addOperand(insertStrRef(OpName->getAsUnquotedString()), PatternType());
+ return true;
+ }
+
+ return ParseErr();
+}
+
+bool PatternParser::parseInstructionPatternMIFlags(InstructionPattern &IP,
+ const DagInit *Op) {
+ auto *CGIP = dyn_cast<CodeGenInstructionPattern>(&IP);
+ if (!CGIP) {
+ PrintError(DiagLoc,
+ "matching/writing MIFlags is only allowed on CodeGenInstruction "
+ "patterns");
+ return false;
+ }
+
+ const auto CheckFlagEnum = [&](const Record *R) {
+ if (!R->isSubClassOf(MIFlagsEnumClassName)) {
+ PrintError(DiagLoc, "'" + R->getName() + "' is not a subclass of '" +
+ MIFlagsEnumClassName + "'");
+ return false;
+ }
+
+ return true;
+ };
+
+ if (CGIP->getMIFlagsInfo()) {
+ PrintError(DiagLoc, "MIFlags can only be present once on an instruction");
+ return false;
+ }
+
+ auto &FI = CGIP->getOrCreateMIFlagsInfo();
+ for (unsigned K = 0; K < Op->getNumArgs(); ++K) {
+ const Init *Arg = Op->getArg(K);
+
+ // Match/set a flag: (MIFlags FmNoNans)
+ if (const auto *Def = dyn_cast<DefInit>(Arg)) {
+ const Record *R = Def->getDef();
+ if (!CheckFlagEnum(R))
+ return false;
+
+ FI.addSetFlag(R);
+ continue;
+ }
+
+ // Do not match a flag/unset a flag: (MIFlags (not FmNoNans))
+ if (const DagInit *NotDag = getDagWithSpecificOperator(*Arg, "not")) {
+ for (const Init *NotArg : NotDag->getArgs()) {
+ const DefInit *DefArg = dyn_cast<DefInit>(NotArg);
+ if (!DefArg) {
+ PrintError(DiagLoc, "cannot parse '" + NotArg->getAsUnquotedString() +
+ "': expected a '" + MIFlagsEnumClassName +
+ "'");
+ return false;
+ }
+
+ const Record *R = DefArg->getDef();
+ if (!CheckFlagEnum(R))
+ return false;
+
+ FI.addUnsetFlag(R);
+ continue;
+ }
+
+ continue;
+ }
+
+ // Copy flags from a matched instruction: (MIFlags $mi)
+ if (isa<UnsetInit>(Arg)) {
+ FI.addCopyFlag(insertStrRef(Op->getArgName(K)->getAsUnquotedString()));
+ continue;
+ }
+ }
+
+ return true;
+}
+
+std::unique_ptr<PatFrag> PatternParser::parsePatFragImpl(const Record *Def) {
+ auto StackTrace = PrettyStackTraceParse(*Def);
+ if (!Def->isSubClassOf(PatFrag::ClassName))
+ return nullptr;
+
+ const DagInit *Ins = Def->getValueAsDag("InOperands");
+ if (Ins->getOperatorAsDef(Def->getLoc())->getName() != "ins") {
+ PrintError(Def, "expected 'ins' operator for " + PatFrag::ClassName +
+ " in operands list");
+ return nullptr;
+ }
+
+ const DagInit *Outs = Def->getValueAsDag("OutOperands");
+ if (Outs->getOperatorAsDef(Def->getLoc())->getName() != "outs") {
+ PrintError(Def, "expected 'outs' operator for " + PatFrag::ClassName +
+ " out operands list");
+ return nullptr;
+ }
+
+ auto Result = std::make_unique<PatFrag>(*Def);
+ if (!parsePatFragParamList(*Outs, [&](StringRef Name, unsigned Kind) {
+ Result->addOutParam(insertStrRef(Name), (PatFrag::ParamKind)Kind);
+ return true;
+ }))
+ return nullptr;
+
+ if (!parsePatFragParamList(*Ins, [&](StringRef Name, unsigned Kind) {
+ Result->addInParam(insertStrRef(Name), (PatFrag::ParamKind)Kind);
+ return true;
+ }))
+ return nullptr;
+
+ const ListInit *Alts = Def->getValueAsListInit("Alternatives");
+ unsigned AltIdx = 0;
+ for (const Init *Alt : *Alts) {
+ const auto *PatDag = dyn_cast<DagInit>(Alt);
+ if (!PatDag) {
+ PrintError(Def, "expected dag init for PatFrag pattern alternative");
+ return nullptr;
+ }
+
+ PatFrag::Alternative &A = Result->addAlternative();
+ const auto AddPat = [&](std::unique_ptr<Pattern> Pat) {
+ A.Pats.push_back(std::move(Pat));
+ return true;
+ };
+
+ SaveAndRestore<ArrayRef<SMLoc>> DiagLocSAR(DiagLoc, Def->getLoc());
+ if (!parsePatternList(
+ *PatDag, AddPat, "pattern",
+ /*AnonPatPrefix*/
+ (Def->getName() + "_alt" + Twine(AltIdx++) + "_pattern").str()))
+ return nullptr;
+ }
+
+ if (!Result->buildOperandsTables() || !Result->checkSemantics())
+ return nullptr;
+
+ return Result;
+}
+
+bool PatternParser::parsePatFragParamList(
+ const DagInit &OpsList,
+ function_ref<bool(StringRef, unsigned)> ParseAction) {
+ for (unsigned K = 0; K < OpsList.getNumArgs(); ++K) {
+ const StringInit *Name = OpsList.getArgName(K);
+ const Init *Ty = OpsList.getArg(K);
+
+ if (!Name) {
+ PrintError(DiagLoc, "all operands must be named'");
+ return false;
+ }
+ const std::string NameStr = Name->getAsUnquotedString();
+
+ PatFrag::ParamKind OpKind;
+ if (isSpecificDef(*Ty, "gi_imm"))
+ OpKind = PatFrag::PK_Imm;
+ else if (isSpecificDef(*Ty, "root"))
+ OpKind = PatFrag::PK_Root;
+ else if (isa<UnsetInit>(Ty) ||
+ isSpecificDef(*Ty, "gi_mo")) // no type = gi_mo.
+ OpKind = PatFrag::PK_MachineOperand;
+ else {
+ PrintError(
+ DiagLoc,
+ '\'' + NameStr +
+ "' operand type was expected to be 'root', 'gi_imm' or 'gi_mo'");
+ return false;
+ }
+
+ if (!ParseAction(NameStr, (unsigned)OpKind))
+ return false;
+ }
+
+ return true;
+}
+
+const PatFrag *PatternParser::parsePatFrag(const Record *Def) {
+ // Cache already parsed PatFrags to avoid doing extra work.
+ static DenseMap<const Record *, std::unique_ptr<PatFrag>> ParsedPatFrags;
+
+ auto It = ParsedPatFrags.find(Def);
+ if (It != ParsedPatFrags.end()) {
+ SeenPatFrags.insert(It->second.get());
+ return It->second.get();
+ }
+
+ std::unique_ptr<PatFrag> NewPatFrag = parsePatFragImpl(Def);
+ if (!NewPatFrag) {
+ PrintError(Def, "Could not parse " + PatFrag::ClassName + " '" +
+ Def->getName() + "'");
+ // Put a nullptr in the map so we don't attempt parsing this again.
+ ParsedPatFrags[Def] = nullptr;
+ return nullptr;
+ }
+
+ const auto *Res = NewPatFrag.get();
+ ParsedPatFrags[Def] = std::move(NewPatFrag);
+ SeenPatFrags.insert(Res);
+ return Res;
+}
+
+} // namespace gi
+} // namespace llvm
diff --git a/llvm/utils/TableGen/Common/GlobalISel/PatternParser.h b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.h
new file mode 100644
index 000000000000..cd6f524075cd
--- /dev/null
+++ b/llvm/utils/TableGen/Common/GlobalISel/PatternParser.h
@@ -0,0 +1,118 @@
+//===- PatternParser.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Contains tools to parse MIR patterns from TableGen DAG elements.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_GLOBALISEL_PATTERNPARSER_H
+#define LLVM_UTILS_GLOBALISEL_PATTERNPARSER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/SMLoc.h"
+#include <memory>
+
+namespace llvm {
+class CodeGenTarget;
+class DagInit;
+class Init;
+class Record;
+class StringRef;
+class StringInit;
+
+namespace gi {
+class InstructionPattern;
+class Pattern;
+class PatFrag;
+
+/// Helper class to parse MIR Pattern lists.
+///
+/// e.g., `(match (G_FADD $x, $y, $z), (G_FNEG $y, $z))`
+class PatternParser {
+ const CodeGenTarget &CGT;
+ ArrayRef<SMLoc> DiagLoc;
+
+ mutable SmallPtrSet<const PatFrag *, 2> SeenPatFrags;
+
+public:
+ PatternParser(const CodeGenTarget &CGT, ArrayRef<SMLoc> DiagLoc)
+ : CGT(CGT), DiagLoc(DiagLoc) {}
+
+ /// Parses a list of patterns such as:
+ /// (Operator (Pattern1 ...), (Pattern2 ...))
+ /// \param List DagInit of the expected pattern list.
+ /// \param ParseAction Callback to handle a succesfully parsed pattern.
+ /// \param Operator The name of the operator, e.g. "match"
+ /// \param AnonPatNamePrefix Prefix for anonymous pattern names.
+ /// \return true on success, false on failure.
+ bool
+ parsePatternList(const DagInit &List,
+ function_ref<bool(std::unique_ptr<Pattern>)> ParseAction,
+ StringRef Operator, StringRef AnonPatNamePrefix);
+
+ /// \returns all PatFrags encountered by this PatternParser.
+ const auto &getSeenPatFrags() const { return SeenPatFrags; }
+
+private:
+ /// Parse any InstructionPattern from a TableGen Init.
+ /// \param Arg Init to parse.
+ /// \param PatName Name of the pattern that will be parsed.
+ /// \return the parsed pattern on success, nullptr on failure.
+ std::unique_ptr<Pattern> parseInstructionPattern(const Init &Arg,
+ StringRef PatName);
+
+ /// Parse a WipOpcodeMatcher from a TableGen Init.
+ /// \param Arg Init to parse.
+ /// \param PatName Name of the pattern that will be parsed.
+ /// \return the parsed pattern on success, nullptr on failure.
+ std::unique_ptr<Pattern> parseWipMatchOpcodeMatcher(const Init &Arg,
+ StringRef PatName);
+
+ /// Parses an Operand of an InstructionPattern from a TableGen Init.
+ /// \param IP InstructionPattern for which we're parsing.
+ /// \param OpInit Init to parse.
+ /// \param OpName Name of the operand to parse.
+ /// \return true on success, false on failure.
+ bool parseInstructionPatternOperand(InstructionPattern &IP,
+ const Init *OpInit,
+ const StringInit *OpName);
+
+ /// Parses a MIFlag for an InstructionPattern from a TableGen Init.
+ /// \param IP InstructionPattern for which we're parsing.
+ /// \param Op Init to parse.
+ /// \return true on success, false on failure.
+ bool parseInstructionPatternMIFlags(InstructionPattern &IP,
+ const DagInit *Op);
+
+ /// (Uncached) PatFrag parsing implementation.
+ /// \param Def PatFrag def to parsee.
+ /// \return the parsed PatFrag on success, nullptr on failure.
+ std::unique_ptr<PatFrag> parsePatFragImpl(const Record *Def);
+
+ /// Parses the in or out parameter list of a PatFrag.
+ /// \param OpsList Init to parse.
+ /// \param ParseAction Callback on successful parse, with the name of
+ /// the parameter and its \ref PatFrag::ParamKind
+ /// \return true on success, false on failure.
+ bool
+ parsePatFragParamList(const DagInit &OpsList,
+ function_ref<bool(StringRef, unsigned)> ParseAction);
+
+ /// Cached PatFrag parser. This avoids duplicate work by keeping track of
+ /// already-parsed PatFrags.
+ /// \param Def PatFrag def to parsee.
+ /// \return the parsed PatFrag on success, nullptr on failure.
+ const PatFrag *parsePatFrag(const Record *Def);
+};
+
+} // namespace gi
+} // namespace llvm
+
+#endif
diff --git a/llvm/utils/TableGen/GlobalISel/Patterns.cpp b/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
index 758eac2dfebd..388bf7e9e833 100644
--- a/llvm/utils/TableGen/GlobalISel/Patterns.cpp
+++ b/llvm/utils/TableGen/Common/GlobalISel/Patterns.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
#include "Patterns.h"
-#include "../CodeGenInstruction.h"
-#include "../CodeGenIntrinsics.h"
+#include "Basic/CodeGenIntrinsics.h"
#include "CXXPredicates.h"
#include "CodeExpander.h"
#include "CodeExpansions.h"
+#include "Common/CodeGenInstruction.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/llvm/utils/TableGen/GlobalISel/Patterns.h b/llvm/utils/TableGen/Common/GlobalISel/Patterns.h
index dac092556548..dac092556548 100644
--- a/llvm/utils/TableGen/GlobalISel/Patterns.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/Patterns.h
diff --git a/llvm/utils/TableGen/InfoByHwMode.cpp b/llvm/utils/TableGen/Common/InfoByHwMode.cpp
index 4a64421c013c..cacf4ece6671 100644
--- a/llvm/utils/TableGen/InfoByHwMode.cpp
+++ b/llvm/utils/TableGen/Common/InfoByHwMode.cpp
@@ -115,7 +115,7 @@ ValueTypeByHwMode llvm::getValueTypeByHwMode(Record *Rec,
return ValueTypeByHwMode(Rec, llvm::getValueType(Rec));
}
-RegSizeInfo::RegSizeInfo(Record *R, const CodeGenHwModes &CGH) {
+RegSizeInfo::RegSizeInfo(Record *R) {
RegSize = R->getValueAsInt("RegSize");
SpillSize = R->getValueAsInt("SpillSize");
SpillAlignment = R->getValueAsInt("SpillAlignment");
@@ -139,7 +139,7 @@ void RegSizeInfo::writeToStream(raw_ostream &OS) const {
RegSizeInfoByHwMode::RegSizeInfoByHwMode(Record *R, const CodeGenHwModes &CGH) {
const HwModeSelect &MS = CGH.getHwModeSelect(R);
for (const HwModeSelect::PairType &P : MS.Items) {
- auto I = Map.insert({P.first, RegSizeInfo(P.second, CGH)});
+ auto I = Map.insert({P.first, RegSizeInfo(P.second)});
assert(I.second && "Duplicate entry?");
(void)I;
}
@@ -183,6 +183,20 @@ void RegSizeInfoByHwMode::writeToStream(raw_ostream &OS) const {
OS << '}';
}
+SubRegRange::SubRegRange(Record *R) {
+ Size = R->getValueAsInt("Size");
+ Offset = R->getValueAsInt("Offset");
+}
+
+SubRegRangeByHwMode::SubRegRangeByHwMode(Record *R, const CodeGenHwModes &CGH) {
+ const HwModeSelect &MS = CGH.getHwModeSelect(R);
+ for (const HwModeSelect::PairType &P : MS.Items) {
+ auto I = Map.insert({P.first, SubRegRange(P.second)});
+ assert(I.second && "Duplicate entry?");
+ (void)I;
+ }
+}
+
EncodingInfoByHwMode::EncodingInfoByHwMode(Record *R,
const CodeGenHwModes &CGH) {
const HwModeSelect &MS = CGH.getHwModeSelect(R);
diff --git a/llvm/utils/TableGen/InfoByHwMode.h b/llvm/utils/TableGen/Common/InfoByHwMode.h
index 001509e5317f..dd0b9830d757 100644
--- a/llvm/utils/TableGen/InfoByHwMode.h
+++ b/llvm/utils/TableGen/Common/InfoByHwMode.h
@@ -176,12 +176,14 @@ struct ValueTypeByHwMode : public InfoByHwMode<MVT> {
ValueTypeByHwMode getValueTypeByHwMode(Record *Rec, const CodeGenHwModes &CGH);
+raw_ostream &operator<<(raw_ostream &OS, const ValueTypeByHwMode &T);
+
struct RegSizeInfo {
unsigned RegSize;
unsigned SpillSize;
unsigned SpillAlignment;
- RegSizeInfo(Record *R, const CodeGenHwModes &CGH);
+ RegSizeInfo(Record *R);
RegSizeInfo() = default;
bool operator<(const RegSizeInfo &I) const;
bool operator==(const RegSizeInfo &I) const {
@@ -213,10 +215,27 @@ struct RegSizeInfoByHwMode : public InfoByHwMode<RegSizeInfo> {
}
};
-raw_ostream &operator<<(raw_ostream &OS, const ValueTypeByHwMode &T);
raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfo &T);
raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfoByHwMode &T);
+struct SubRegRange {
+ uint16_t Size;
+ uint16_t Offset;
+
+ SubRegRange(Record *R);
+ SubRegRange(uint16_t Size, uint16_t Offset) : Size(Size), Offset(Offset) {}
+};
+
+struct SubRegRangeByHwMode : public InfoByHwMode<SubRegRange> {
+ SubRegRangeByHwMode(Record *R, const CodeGenHwModes &CGH);
+ SubRegRangeByHwMode(SubRegRange Range) { Map.insert({DefaultMode, Range}); }
+ SubRegRangeByHwMode() = default;
+
+ void insertSubRegRangeForMode(unsigned Mode, SubRegRange Info) {
+ Map.insert(std::pair(Mode, Info));
+ }
+};
+
struct EncodingInfoByHwMode : public InfoByHwMode<Record *> {
EncodingInfoByHwMode(Record *R, const CodeGenHwModes &CGH);
EncodingInfoByHwMode() = default;
diff --git a/llvm/utils/TableGen/OptEmitter.cpp b/llvm/utils/TableGen/Common/OptEmitter.cpp
index 7fcf3074e093..7fcf3074e093 100644
--- a/llvm/utils/TableGen/OptEmitter.cpp
+++ b/llvm/utils/TableGen/Common/OptEmitter.cpp
diff --git a/llvm/utils/TableGen/OptEmitter.h b/llvm/utils/TableGen/Common/OptEmitter.h
index c8f9246ef1e6..c8f9246ef1e6 100644
--- a/llvm/utils/TableGen/OptEmitter.h
+++ b/llvm/utils/TableGen/Common/OptEmitter.h
diff --git a/llvm/utils/TableGen/PredicateExpander.cpp b/llvm/utils/TableGen/Common/PredicateExpander.cpp
index d0a35ff82df6..d0a35ff82df6 100644
--- a/llvm/utils/TableGen/PredicateExpander.cpp
+++ b/llvm/utils/TableGen/Common/PredicateExpander.cpp
diff --git a/llvm/utils/TableGen/PredicateExpander.h b/llvm/utils/TableGen/Common/PredicateExpander.h
index a0dc63023978..a0dc63023978 100644
--- a/llvm/utils/TableGen/PredicateExpander.h
+++ b/llvm/utils/TableGen/Common/PredicateExpander.h
diff --git a/llvm/utils/TableGen/SubtargetFeatureInfo.cpp b/llvm/utils/TableGen/Common/SubtargetFeatureInfo.cpp
index 819abfa965a5..819abfa965a5 100644
--- a/llvm/utils/TableGen/SubtargetFeatureInfo.cpp
+++ b/llvm/utils/TableGen/Common/SubtargetFeatureInfo.cpp
diff --git a/llvm/utils/TableGen/SubtargetFeatureInfo.h b/llvm/utils/TableGen/Common/SubtargetFeatureInfo.h
index b1016ff24e88..b1016ff24e88 100644
--- a/llvm/utils/TableGen/SubtargetFeatureInfo.h
+++ b/llvm/utils/TableGen/Common/SubtargetFeatureInfo.h
diff --git a/llvm/utils/TableGen/Types.cpp b/llvm/utils/TableGen/Common/Types.cpp
index 35b79b320dc3..35b79b320dc3 100644
--- a/llvm/utils/TableGen/Types.cpp
+++ b/llvm/utils/TableGen/Common/Types.cpp
diff --git a/llvm/utils/TableGen/Types.h b/llvm/utils/TableGen/Common/Types.h
index 74f0f9f2792c..74f0f9f2792c 100644
--- a/llvm/utils/TableGen/Types.h
+++ b/llvm/utils/TableGen/Common/Types.h
diff --git a/llvm/utils/TableGen/VarLenCodeEmitterGen.cpp b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp
index bfb7e5c33317..4263d8f41715 100644
--- a/llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp
@@ -337,8 +337,8 @@ static void emitInstBits(raw_ostream &IS, raw_ostream &SS, const APInt &Bits,
return;
}
- IS.indent(4) << "{/*NumBits*/" << Bits.getBitWidth() << ", "
- << "/*Index*/" << Index << "},";
+ IS.indent(4) << "{/*NumBits*/" << Bits.getBitWidth() << ", " << "/*Index*/"
+ << Index << "},";
SS.indent(4);
for (unsigned I = 0; I < Bits.getNumWords(); ++I, ++Index)
@@ -371,8 +371,8 @@ void VarLenCodeEmitterGen::emitInstructionBaseValues(
if (ModeIt == InstIt->second.end())
ModeIt = InstIt->second.find(Universal);
if (ModeIt == InstIt->second.end()) {
- IS.indent(4) << "{/*NumBits*/0, /*Index*/0},\t"
- << "// " << R->getName() << " no encoding\n";
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0},\t" << "// " << R->getName()
+ << " no encoding\n";
continue;
}
const VarLenInst &VLI = ModeIt->second;
@@ -492,10 +492,9 @@ std::string VarLenCodeEmitterGen::getInstructionCaseForEncoding(
SS << ", /*Pos=*/" << utostr(Offset) << ", Scratch, Fixups, STI);\n";
- SS.indent(I) << "Inst.insertBits("
- << "Scratch.extractBits(" << utostr(NumBits) << ", "
- << utostr(LoBit) << ")"
- << ", " << Offset << ");\n";
+ SS.indent(I) << "Inst.insertBits(" << "Scratch.extractBits("
+ << utostr(NumBits) << ", " << utostr(LoBit) << ")" << ", "
+ << Offset << ");\n";
HighScratchAccess = std::max(HighScratchAccess, NumBits + LoBit);
}
diff --git a/llvm/utils/TableGen/VarLenCodeEmitterGen.h b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.h
index 2b55fd1720aa..2b55fd1720aa 100644
--- a/llvm/utils/TableGen/VarLenCodeEmitterGen.h
+++ b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.h
diff --git a/llvm/utils/TableGen/CompressInstEmitter.cpp b/llvm/utils/TableGen/CompressInstEmitter.cpp
index f703fff0ef3e..fcf77934faac 100644
--- a/llvm/utils/TableGen/CompressInstEmitter.cpp
+++ b/llvm/utils/TableGen/CompressInstEmitter.cpp
@@ -64,9 +64,9 @@
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
diff --git a/llvm/utils/TableGen/DAGISelEmitter.cpp b/llvm/utils/TableGen/DAGISelEmitter.cpp
index 336cee09b90c..b43a8e659dd9 100644
--- a/llvm/utils/TableGen/DAGISelEmitter.cpp
+++ b/llvm/utils/TableGen/DAGISelEmitter.cpp
@@ -10,10 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
-#include "DAGISelMatcher.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/DAGISelMatcher.h"
#include "llvm/Support/Debug.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
diff --git a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index 533b8c423690..dcecac4380ce 100644
--- a/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -10,12 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "DAGISelMatcher.h"
-#include "SDNodeProperties.h"
+#include "Basic/SDNodeProperties.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/DAGISelMatcher.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringMap.h"
diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index e8bdabaa0c7e..99babdf07316 100644
--- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -6,13 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "DAGISelMatcher.h"
-#include "InfoByHwMode.h"
-#include "SDNodeProperties.h"
+#include "Basic/SDNodeProperties.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/DAGISelMatcher.h"
+#include "Common/InfoByHwMode.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/TableGen/Error.h"
diff --git a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
index 047d285f9914..224102e49d98 100644
--- a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -10,9 +10,9 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "DAGISelMatcher.h"
-#include "SDNodeProperties.h"
+#include "Basic/SDNodeProperties.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/DAGISelMatcher.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/llvm/utils/TableGen/DFAEmitter.cpp b/llvm/utils/TableGen/DFAEmitter.cpp
index ce8cc2a078d7..567184d3d5ee 100644
--- a/llvm/utils/TableGen/DFAEmitter.cpp
+++ b/llvm/utils/TableGen/DFAEmitter.cpp
@@ -21,7 +21,7 @@
//===----------------------------------------------------------------------===//
#include "DFAEmitter.h"
-#include "SequenceToOffsetTable.h"
+#include "Basic/SequenceToOffsetTable.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/UniqueVector.h"
diff --git a/llvm/utils/TableGen/DFAPacketizerEmitter.cpp b/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
index 26ea1846ffae..3c74df048660 100644
--- a/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
+++ b/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -14,8 +14,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenSchedule.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenSchedule.h"
+#include "Common/CodeGenTarget.h"
#include "DFAEmitter.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index af1efb8aa99f..f2504775d557 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenTarget.h"
-#include "SequenceToOffsetTable.h"
+#include "Basic/SequenceToOffsetTable.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -74,13 +74,13 @@ static ParameterKind getParameterKind(const Record *R) {
auto VTRec = R->getValueAsDef("VT");
switch (getValueType(VTRec)) {
case MVT::isVoid:
- return ParameterKind::VOID;
+ return ParameterKind::Void;
case MVT::f16:
- return ParameterKind::HALF;
+ return ParameterKind::Half;
case MVT::f32:
- return ParameterKind::FLOAT;
+ return ParameterKind::Float;
case MVT::f64:
- return ParameterKind::DOUBLE;
+ return ParameterKind::Double;
case MVT::i1:
return ParameterKind::I1;
case MVT::i8:
@@ -91,11 +91,11 @@ static ParameterKind getParameterKind(const Record *R) {
return ParameterKind::I32;
case MVT::fAny:
case MVT::iAny:
- return ParameterKind::OVERLOAD;
+ return ParameterKind::Overload;
case MVT::Other:
// Handle DXIL-specific overload types
if (R->getValueAsInt("isHalfOrFloat") || R->getValueAsInt("isI16OrI32")) {
- return ParameterKind::OVERLOAD;
+ return ParameterKind::Overload;
}
LLVM_FALLTHROUGH;
default:
@@ -201,16 +201,16 @@ DXILOperationDesc::DXILOperationDesc(const Record *R) {
/// \return std::string string representation of input Kind
static std::string getParameterKindStr(ParameterKind Kind) {
switch (Kind) {
- case ParameterKind::INVALID:
- return "INVALID";
- case ParameterKind::VOID:
- return "VOID";
- case ParameterKind::HALF:
- return "HALF";
- case ParameterKind::FLOAT:
- return "FLOAT";
- case ParameterKind::DOUBLE:
- return "DOUBLE";
+ case ParameterKind::Invalid:
+ return "Invalid";
+ case ParameterKind::Void:
+ return "Void";
+ case ParameterKind::Half:
+ return "Half";
+ case ParameterKind::Float:
+ return "Float";
+ case ParameterKind::Double:
+ return "Double";
case ParameterKind::I1:
return "I1";
case ParameterKind::I8:
@@ -221,14 +221,14 @@ static std::string getParameterKindStr(ParameterKind Kind) {
return "I32";
case ParameterKind::I64:
return "I64";
- case ParameterKind::OVERLOAD:
- return "OVERLOAD";
- case ParameterKind::CBUFFER_RET:
- return "CBUFFER_RET";
- case ParameterKind::RESOURCE_RET:
- return "RESOURCE_RET";
- case ParameterKind::DXIL_HANDLE:
- return "DXIL_HANDLE";
+ case ParameterKind::Overload:
+ return "Overload";
+ case ParameterKind::CBufferRet:
+ return "CBufferRet";
+ case ParameterKind::ResourceRet:
+ return "ResourceRet";
+ case ParameterKind::DXILHandle:
+ return "DXILHandle";
}
llvm_unreachable("Unknown llvm::dxil::ParameterKind enum");
}
@@ -462,7 +462,7 @@ static void emitDXILOperationTable(std::vector<DXILOperationDesc> &Ops,
[](raw_ostream &ParamOS, ParameterKind Kind) {
ParamOS << "ParameterKind::" << getParameterKindStr(Kind);
},
- "ParameterKind::INVALID");
+ "ParameterKind::Invalid");
OS << " };\n\n";
OS << " unsigned Index = Prop.ParameterTableOffset;\n";
OS << " return DXILOpParameterKindTable + Index;\n";
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index 732f34ed04c5..494dc93faace 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenHwModes.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
-#include "InfoByHwMode.h"
+#include "Common/CodeGenHwModes.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/InfoByHwMode.h"
+#include "Common/VarLenCodeEmitterGen.h"
#include "TableGenBackends.h"
-#include "VarLenCodeEmitterGen.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/CachedHashString.h"
diff --git a/llvm/utils/TableGen/DisassemblerEmitter.cpp b/llvm/utils/TableGen/DisassemblerEmitter.cpp
index 2d653af4d302..d41750075b41 100644
--- a/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenTarget.h"
+#include "Common/CodeGenTarget.h"
#include "TableGenBackends.h"
#include "WebAssemblyDisassemblerEmitter.h"
#include "X86DisassemblerTables.h"
diff --git a/llvm/utils/TableGen/FastISelEmitter.cpp b/llvm/utils/TableGen/FastISelEmitter.cpp
index f04c6e3b3bf0..acfdc20316b7 100644
--- a/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -16,11 +16,11 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "InfoByHwMode.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/InfoByHwMode.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
diff --git a/llvm/utils/TableGen/GlobalISel/CMakeLists.txt b/llvm/utils/TableGen/GlobalISel/CMakeLists.txt
deleted file mode 100644
index 7262c4058399..000000000000
--- a/llvm/utils/TableGen/GlobalISel/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-set(LLVM_LINK_COMPONENTS
- Support
- TableGen
- )
-
-add_llvm_library(LLVMTableGenGlobalISel STATIC DISABLE_LLVM_LINK_LLVM_DYLIB
- CodeExpander.cpp
- CXXPredicates.cpp
- MatchDataInfo.cpp
- Patterns.cpp
-
- DEPENDS
- vt_gen
- )
-
-# Users may include its headers as "GlobalISel/*.h"
-target_include_directories(LLVMTableGenGlobalISel
- INTERFACE
- $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>
- )
diff --git a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
index dee3cb4d71a4..ef9e9ff04f85 100644
--- a/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelCombinerEmitter.cpp
@@ -26,18 +26,19 @@
///
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenIntrinsics.h"
-#include "CodeGenTarget.h"
-#include "GlobalISel/CXXPredicates.h"
-#include "GlobalISel/CodeExpander.h"
-#include "GlobalISel/CodeExpansions.h"
-#include "GlobalISel/CombinerUtils.h"
-#include "GlobalISel/MatchDataInfo.h"
-#include "GlobalISel/Patterns.h"
-#include "GlobalISelMatchTable.h"
-#include "GlobalISelMatchTableExecutorEmitter.h"
-#include "SubtargetFeatureInfo.h"
+#include "Basic/CodeGenIntrinsics.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/GlobalISel/CXXPredicates.h"
+#include "Common/GlobalISel/CodeExpander.h"
+#include "Common/GlobalISel/CodeExpansions.h"
+#include "Common/GlobalISel/CombinerUtils.h"
+#include "Common/GlobalISel/GlobalISelMatchTable.h"
+#include "Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h"
+#include "Common/GlobalISel/MatchDataInfo.h"
+#include "Common/GlobalISel/PatternParser.h"
+#include "Common/GlobalISel/Patterns.h"
+#include "Common/SubtargetFeatureInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Hashing.h"
@@ -80,7 +81,6 @@ cl::opt<bool> DebugTypeInfer("gicombiner-debug-typeinfer",
constexpr StringLiteral CXXApplyPrefix = "GICXXCustomAction_CombineApply";
constexpr StringLiteral CXXPredPrefix = "GICXXPred_MI_Predicate_";
-constexpr StringLiteral MIFlagsEnumClassName = "MIFlagEnum";
//===- CodeExpansions Helpers --------------------------------------------===//
@@ -109,17 +109,6 @@ void declareTempRegExpansion(CodeExpansions &CE, unsigned TempRegID,
//===- Misc. Helpers -----------------------------------------------------===//
-/// Copies a StringRef into a static pool to preserve it.
-/// Most Pattern classes use StringRef so we need this.
-StringRef insertStrRef(StringRef S) {
- if (S.empty())
- return {};
-
- static StringSet<> Pool;
- auto [It, Inserted] = Pool.insert(S);
- return It->getKey();
-}
-
template <typename Container> auto keys(Container &&C) {
return map_range(C, [](auto &Entry) -> auto & { return Entry.first; });
}
@@ -639,8 +628,9 @@ public:
SubtargetFeatureInfoMap &SubtargetFeatures,
Record &RuleDef, unsigned ID,
std::vector<RuleMatcher> &OutRMs)
- : CGT(CGT), SubtargetFeatures(SubtargetFeatures), RuleDef(RuleDef),
- RuleID(ID), OutRMs(OutRMs) {}
+ : Parser(CGT, RuleDef.getLoc()), CGT(CGT),
+ SubtargetFeatures(SubtargetFeatures), RuleDef(RuleDef), RuleID(ID),
+ OutRMs(OutRMs) {}
/// Parses all fields in the RuleDef record.
bool parseAll();
@@ -718,26 +708,6 @@ private:
bool buildRuleOperandsTable();
bool parseDefs(const DagInit &Def);
- bool
- parsePatternList(const DagInit &List,
- function_ref<bool(std::unique_ptr<Pattern>)> ParseAction,
- StringRef Operator, ArrayRef<SMLoc> DiagLoc,
- StringRef AnonPatNamePrefix) const;
-
- std::unique_ptr<Pattern> parseInstructionPattern(const Init &Arg,
- StringRef PatName) const;
- std::unique_ptr<Pattern> parseWipMatchOpcodeMatcher(const Init &Arg,
- StringRef PatName) const;
- bool parseInstructionPatternOperand(InstructionPattern &IP,
- const Init *OpInit,
- const StringInit *OpName) const;
- bool parseInstructionPatternMIFlags(InstructionPattern &IP,
- const DagInit *Op) const;
- std::unique_ptr<PatFrag> parsePatFragImpl(const Record *Def) const;
- bool parsePatFragParamList(
- ArrayRef<SMLoc> DiagLoc, const DagInit &OpsList,
- function_ref<bool(StringRef, PatFrag::ParamKind)> ParseAction) const;
- const PatFrag *parsePatFrag(const Record *Def) const;
bool emitMatchPattern(CodeExpansions &CE, const PatternAlternatives &Alts,
const InstructionPattern &IP);
@@ -781,6 +751,7 @@ private:
DenseSet<const Pattern *> &SeenPats, OperandDefLookupFn LookupOperandDef,
OperandMapperFnRef OperandMapper = [](const auto &O) { return O; });
+ PatternParser Parser;
const CodeGenTarget &CGT;
SubtargetFeatureInfoMap &SubtargetFeatures;
Record &RuleDef;
@@ -808,9 +779,6 @@ private:
SmallVector<MatchDataInfo, 2> MatchDatas;
SmallVector<PatternAlternatives, 1> PermutationsToEmit;
-
- // print()/debug-only members.
- mutable SmallPtrSet<const PatFrag *, 2> SeenPatFrags;
};
bool CombineRuleBuilder::parseAll() {
@@ -819,16 +787,16 @@ bool CombineRuleBuilder::parseAll() {
if (!parseDefs(*RuleDef.getValueAsDag("Defs")))
return false;
- if (!parsePatternList(
+ if (!Parser.parsePatternList(
*RuleDef.getValueAsDag("Match"),
[this](auto Pat) { return addMatchPattern(std::move(Pat)); }, "match",
- RuleDef.getLoc(), (RuleDef.getName() + "_match").str()))
+ (RuleDef.getName() + "_match").str()))
return false;
- if (!parsePatternList(
+ if (!Parser.parsePatternList(
*RuleDef.getValueAsDag("Apply"),
[this](auto Pat) { return addApplyPattern(std::move(Pat)); }, "apply",
- RuleDef.getLoc(), (RuleDef.getName() + "_apply").str()))
+ (RuleDef.getName() + "_apply").str()))
return false;
if (!buildRuleOperandsTable() || !typecheckPatterns() || !findRoots() ||
@@ -884,9 +852,10 @@ void CombineRuleBuilder::print(raw_ostream &OS) const {
OS << " )\n";
}
- if (!SeenPatFrags.empty()) {
+ const auto &SeenPFs = Parser.getSeenPatFrags();
+ if (!SeenPFs.empty()) {
OS << " (PatFrags\n";
- for (const auto *PF : SeenPatFrags) {
+ for (const auto *PF : Parser.getSeenPatFrags()) {
PF->print(OS, /*Indent=*/" ");
OS << '\n';
}
@@ -1500,426 +1469,6 @@ bool CombineRuleBuilder::parseDefs(const DagInit &Def) {
return true;
}
-bool CombineRuleBuilder::parsePatternList(
- const DagInit &List,
- function_ref<bool(std::unique_ptr<Pattern>)> ParseAction,
- StringRef Operator, ArrayRef<SMLoc> DiagLoc,
- StringRef AnonPatNamePrefix) const {
- if (List.getOperatorAsDef(RuleDef.getLoc())->getName() != Operator) {
- ::PrintError(DiagLoc, "Expected " + Operator + " operator");
- return false;
- }
-
- if (List.getNumArgs() == 0) {
- ::PrintError(DiagLoc, Operator + " pattern list is empty");
- return false;
- }
-
- // The match section consists of a list of matchers and predicates. Parse each
- // one and add the equivalent GIMatchDag nodes, predicates, and edges.
- for (unsigned I = 0; I < List.getNumArgs(); ++I) {
- Init *Arg = List.getArg(I);
- std::string Name = List.getArgName(I)
- ? List.getArgName(I)->getValue().str()
- : ("__" + AnonPatNamePrefix + "_" + Twine(I)).str();
-
- if (auto Pat = parseInstructionPattern(*Arg, Name)) {
- if (!ParseAction(std::move(Pat)))
- return false;
- continue;
- }
-
- if (auto Pat = parseWipMatchOpcodeMatcher(*Arg, Name)) {
- if (!ParseAction(std::move(Pat)))
- return false;
- continue;
- }
-
- // Parse arbitrary C++ code
- if (const auto *StringI = dyn_cast<StringInit>(Arg)) {
- auto CXXPat = std::make_unique<CXXPattern>(*StringI, insertStrRef(Name));
- if (!ParseAction(std::move(CXXPat)))
- return false;
- continue;
- }
-
- ::PrintError(DiagLoc,
- "Failed to parse pattern: '" + Arg->getAsString() + "'");
- return false;
- }
-
- return true;
-}
-
-static const CodeGenInstruction &
-getInstrForIntrinsic(const CodeGenTarget &CGT, const CodeGenIntrinsic *I) {
- StringRef Opc;
- if (I->isConvergent) {
- Opc = I->hasSideEffects ? "G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS"
- : "G_INTRINSIC_CONVERGENT";
- } else {
- Opc = I->hasSideEffects ? "G_INTRINSIC_W_SIDE_EFFECTS" : "G_INTRINSIC";
- }
-
- RecordKeeper &RK = I->TheDef->getRecords();
- return CGT.getInstruction(RK.getDef(Opc));
-}
-
-static const CodeGenIntrinsic *getCodeGenIntrinsic(Record *R) {
- // Intrinsics need to have a static lifetime because the match table keeps
- // references to CodeGenIntrinsic objects.
- static DenseMap<const Record *, std::unique_ptr<CodeGenIntrinsic>>
- AllIntrinsics;
-
- auto &Ptr = AllIntrinsics[R];
- if (!Ptr)
- Ptr = std::make_unique<CodeGenIntrinsic>(R, std::vector<Record *>());
- return Ptr.get();
-}
-
-std::unique_ptr<Pattern>
-CombineRuleBuilder::parseInstructionPattern(const Init &Arg,
- StringRef Name) const {
- const DagInit *DagPat = dyn_cast<DagInit>(&Arg);
- if (!DagPat)
- return nullptr;
-
- std::unique_ptr<InstructionPattern> Pat;
- if (const DagInit *IP = getDagWithOperatorOfSubClass(Arg, "Instruction")) {
- auto &Instr = CGT.getInstruction(IP->getOperatorAsDef(RuleDef.getLoc()));
- Pat =
- std::make_unique<CodeGenInstructionPattern>(Instr, insertStrRef(Name));
- } else if (const DagInit *IP =
- getDagWithOperatorOfSubClass(Arg, "Intrinsic")) {
- Record *TheDef = IP->getOperatorAsDef(RuleDef.getLoc());
- const CodeGenIntrinsic *Intrin = getCodeGenIntrinsic(TheDef);
- const CodeGenInstruction &Instr = getInstrForIntrinsic(CGT, Intrin);
- Pat =
- std::make_unique<CodeGenInstructionPattern>(Instr, insertStrRef(Name));
- cast<CodeGenInstructionPattern>(*Pat).setIntrinsic(Intrin);
- } else if (const DagInit *PFP =
- getDagWithOperatorOfSubClass(Arg, PatFrag::ClassName)) {
- const Record *Def = PFP->getOperatorAsDef(RuleDef.getLoc());
- const PatFrag *PF = parsePatFrag(Def);
- if (!PF)
- return nullptr; // Already diagnosed by parsePatFrag
- Pat = std::make_unique<PatFragPattern>(*PF, insertStrRef(Name));
- } else if (const DagInit *BP =
- getDagWithOperatorOfSubClass(Arg, BuiltinPattern::ClassName)) {
- Pat = std::make_unique<BuiltinPattern>(
- *BP->getOperatorAsDef(RuleDef.getLoc()), insertStrRef(Name));
- } else
- return nullptr;
-
- for (unsigned K = 0; K < DagPat->getNumArgs(); ++K) {
- Init *Arg = DagPat->getArg(K);
- if (auto *DagArg = getDagWithSpecificOperator(*Arg, "MIFlags")) {
- if (!parseInstructionPatternMIFlags(*Pat, DagArg))
- return nullptr;
- continue;
- }
-
- if (!parseInstructionPatternOperand(*Pat, Arg, DagPat->getArgName(K)))
- return nullptr;
- }
-
- if (!Pat->checkSemantics(RuleDef.getLoc()))
- return nullptr;
-
- return std::move(Pat);
-}
-
-std::unique_ptr<Pattern>
-CombineRuleBuilder::parseWipMatchOpcodeMatcher(const Init &Arg,
- StringRef Name) const {
- const DagInit *Matcher = getDagWithSpecificOperator(Arg, "wip_match_opcode");
- if (!Matcher)
- return nullptr;
-
- if (Matcher->getNumArgs() == 0) {
- PrintError("Empty wip_match_opcode");
- return nullptr;
- }
-
- // Each argument is an opcode that can match.
- auto Result = std::make_unique<AnyOpcodePattern>(insertStrRef(Name));
- for (const auto &Arg : Matcher->getArgs()) {
- Record *OpcodeDef = getDefOfSubClass(*Arg, "Instruction");
- if (OpcodeDef) {
- Result->addOpcode(&CGT.getInstruction(OpcodeDef));
- continue;
- }
-
- PrintError("Arguments to wip_match_opcode must be instructions");
- return nullptr;
- }
-
- return std::move(Result);
-}
-
-bool CombineRuleBuilder::parseInstructionPatternOperand(
- InstructionPattern &IP, const Init *OpInit,
- const StringInit *OpName) const {
- const auto ParseErr = [&]() {
- PrintError("cannot parse operand '" + OpInit->getAsUnquotedString() + "' ");
- if (OpName)
- PrintNote("operand name is '" + OpName->getAsUnquotedString() + "'");
- return false;
- };
-
- // untyped immediate, e.g. 0
- if (const auto *IntImm = dyn_cast<IntInit>(OpInit)) {
- std::string Name = OpName ? OpName->getAsUnquotedString() : "";
- IP.addOperand(IntImm->getValue(), insertStrRef(Name), PatternType());
- return true;
- }
-
- // typed immediate, e.g. (i32 0)
- if (const auto *DagOp = dyn_cast<DagInit>(OpInit)) {
- if (DagOp->getNumArgs() != 1)
- return ParseErr();
-
- const Record *TyDef = DagOp->getOperatorAsDef(RuleDef.getLoc());
- auto ImmTy = PatternType::get(RuleDef.getLoc(), TyDef,
- "cannot parse immediate '" +
- DagOp->getAsUnquotedString() + "'");
- if (!ImmTy)
- return false;
-
- if (!IP.hasAllDefs()) {
- PrintError("out operand of '" + IP.getInstName() +
- "' cannot be an immediate");
- return false;
- }
-
- const auto *Val = dyn_cast<IntInit>(DagOp->getArg(0));
- if (!Val)
- return ParseErr();
-
- std::string Name = OpName ? OpName->getAsUnquotedString() : "";
- IP.addOperand(Val->getValue(), insertStrRef(Name), *ImmTy);
- return true;
- }
-
- // Typed operand e.g. $x/$z in (G_FNEG $x, $z)
- if (auto *DefI = dyn_cast<DefInit>(OpInit)) {
- if (!OpName) {
- PrintError("expected an operand name after '" + OpInit->getAsString() +
- "'");
- return false;
- }
- const Record *Def = DefI->getDef();
- auto Ty =
- PatternType::get(RuleDef.getLoc(), Def, "cannot parse operand type");
- if (!Ty)
- return false;
- IP.addOperand(insertStrRef(OpName->getAsUnquotedString()), *Ty);
- return true;
- }
-
- // Untyped operand e.g. $x/$z in (G_FNEG $x, $z)
- if (isa<UnsetInit>(OpInit)) {
- assert(OpName && "Unset w/ no OpName?");
- IP.addOperand(insertStrRef(OpName->getAsUnquotedString()), PatternType());
- return true;
- }
-
- return ParseErr();
-}
-
-bool CombineRuleBuilder::parseInstructionPatternMIFlags(
- InstructionPattern &IP, const DagInit *Op) const {
- auto *CGIP = dyn_cast<CodeGenInstructionPattern>(&IP);
- if (!CGIP) {
- PrintError("matching/writing MIFlags is only allowed on CodeGenInstruction "
- "patterns");
- return false;
- }
-
- const auto CheckFlagEnum = [&](const Record *R) {
- if (!R->isSubClassOf(MIFlagsEnumClassName)) {
- PrintError("'" + R->getName() + "' is not a subclass of '" +
- MIFlagsEnumClassName + "'");
- return false;
- }
-
- return true;
- };
-
- if (CGIP->getMIFlagsInfo()) {
- PrintError("MIFlags can only be present once on an instruction");
- return false;
- }
-
- auto &FI = CGIP->getOrCreateMIFlagsInfo();
- for (unsigned K = 0; K < Op->getNumArgs(); ++K) {
- const Init *Arg = Op->getArg(K);
-
- // Match/set a flag: (MIFlags FmNoNans)
- if (const auto *Def = dyn_cast<DefInit>(Arg)) {
- const Record *R = Def->getDef();
- if (!CheckFlagEnum(R))
- return false;
-
- FI.addSetFlag(R);
- continue;
- }
-
- // Do not match a flag/unset a flag: (MIFlags (not FmNoNans))
- if (const DagInit *NotDag = getDagWithSpecificOperator(*Arg, "not")) {
- for (const Init *NotArg : NotDag->getArgs()) {
- const DefInit *DefArg = dyn_cast<DefInit>(NotArg);
- if (!DefArg) {
- PrintError("cannot parse '" + NotArg->getAsUnquotedString() +
- "': expected a '" + MIFlagsEnumClassName + "'");
- return false;
- }
-
- const Record *R = DefArg->getDef();
- if (!CheckFlagEnum(R))
- return false;
-
- FI.addUnsetFlag(R);
- continue;
- }
-
- continue;
- }
-
- // Copy flags from a matched instruction: (MIFlags $mi)
- if (isa<UnsetInit>(Arg)) {
- FI.addCopyFlag(insertStrRef(Op->getArgName(K)->getAsUnquotedString()));
- continue;
- }
- }
-
- return true;
-}
-
-std::unique_ptr<PatFrag>
-CombineRuleBuilder::parsePatFragImpl(const Record *Def) const {
- auto StackTrace = PrettyStackTraceParse(*Def);
- if (!Def->isSubClassOf(PatFrag::ClassName))
- return nullptr;
-
- const DagInit *Ins = Def->getValueAsDag("InOperands");
- if (Ins->getOperatorAsDef(Def->getLoc())->getName() != "ins") {
- ::PrintError(Def, "expected 'ins' operator for " + PatFrag::ClassName +
- " in operands list");
- return nullptr;
- }
-
- const DagInit *Outs = Def->getValueAsDag("OutOperands");
- if (Outs->getOperatorAsDef(Def->getLoc())->getName() != "outs") {
- ::PrintError(Def, "expected 'outs' operator for " + PatFrag::ClassName +
- " out operands list");
- return nullptr;
- }
-
- auto Result = std::make_unique<PatFrag>(*Def);
- if (!parsePatFragParamList(Def->getLoc(), *Outs,
- [&](StringRef Name, PatFrag::ParamKind Kind) {
- Result->addOutParam(insertStrRef(Name), Kind);
- return true;
- }))
- return nullptr;
-
- if (!parsePatFragParamList(Def->getLoc(), *Ins,
- [&](StringRef Name, PatFrag::ParamKind Kind) {
- Result->addInParam(insertStrRef(Name), Kind);
- return true;
- }))
- return nullptr;
-
- const ListInit *Alts = Def->getValueAsListInit("Alternatives");
- unsigned AltIdx = 0;
- for (const Init *Alt : *Alts) {
- const auto *PatDag = dyn_cast<DagInit>(Alt);
- if (!PatDag) {
- ::PrintError(Def, "expected dag init for PatFrag pattern alternative");
- return nullptr;
- }
-
- PatFrag::Alternative &A = Result->addAlternative();
- const auto AddPat = [&](std::unique_ptr<Pattern> Pat) {
- A.Pats.push_back(std::move(Pat));
- return true;
- };
-
- if (!parsePatternList(
- *PatDag, AddPat, "pattern", Def->getLoc(),
- /*AnonPatPrefix*/
- (Def->getName() + "_alt" + Twine(AltIdx++) + "_pattern").str()))
- return nullptr;
- }
-
- if (!Result->buildOperandsTables() || !Result->checkSemantics())
- return nullptr;
-
- return Result;
-}
-
-bool CombineRuleBuilder::parsePatFragParamList(
- ArrayRef<SMLoc> DiagLoc, const DagInit &OpsList,
- function_ref<bool(StringRef, PatFrag::ParamKind)> ParseAction) const {
- for (unsigned K = 0; K < OpsList.getNumArgs(); ++K) {
- const StringInit *Name = OpsList.getArgName(K);
- const Init *Ty = OpsList.getArg(K);
-
- if (!Name) {
- ::PrintError(DiagLoc, "all operands must be named'");
- return false;
- }
- const std::string NameStr = Name->getAsUnquotedString();
-
- PatFrag::ParamKind OpKind;
- if (isSpecificDef(*Ty, "gi_imm"))
- OpKind = PatFrag::PK_Imm;
- else if (isSpecificDef(*Ty, "root"))
- OpKind = PatFrag::PK_Root;
- else if (isa<UnsetInit>(Ty) ||
- isSpecificDef(*Ty, "gi_mo")) // no type = gi_mo.
- OpKind = PatFrag::PK_MachineOperand;
- else {
- ::PrintError(
- DiagLoc,
- "'" + NameStr +
- "' operand type was expected to be 'root', 'gi_imm' or 'gi_mo'");
- return false;
- }
-
- if (!ParseAction(NameStr, OpKind))
- return false;
- }
-
- return true;
-}
-
-const PatFrag *CombineRuleBuilder::parsePatFrag(const Record *Def) const {
- // Cache already parsed PatFrags to avoid doing extra work.
- static DenseMap<const Record *, std::unique_ptr<PatFrag>> ParsedPatFrags;
-
- auto It = ParsedPatFrags.find(Def);
- if (It != ParsedPatFrags.end()) {
- SeenPatFrags.insert(It->second.get());
- return It->second.get();
- }
-
- std::unique_ptr<PatFrag> NewPatFrag = parsePatFragImpl(Def);
- if (!NewPatFrag) {
- ::PrintError(Def, "Could not parse " + PatFrag::ClassName + " '" +
- Def->getName() + "'");
- // Put a nullptr in the map so we don't attempt parsing this again.
- ParsedPatFrags[Def] = nullptr;
- return nullptr;
- }
-
- const auto *Res = NewPatFrag.get();
- ParsedPatFrags[Def] = std::move(NewPatFrag);
- SeenPatFrags.insert(Res);
- return Res;
-}
-
bool CombineRuleBuilder::emitMatchPattern(CodeExpansions &CE,
const PatternAlternatives &Alts,
const InstructionPattern &IP) {
@@ -2910,6 +2459,7 @@ void GICombinerEmitter::emitRunCustomAction(raw_ostream &OS) {
OS << " switch(ApplyID) {\n";
for (const auto &Apply : ApplyCode) {
OS << " case " << Apply->getEnumNameWithPrefix(CXXApplyPrefix) << ":{\n"
+ << " Helper.getBuilder().setInstrAndDebugLoc(*State.MIs[0]);\n"
<< " " << join(split(Apply->Code, '\n'), "\n ") << '\n'
<< " return;\n";
OS << " }\n";
@@ -2956,8 +2506,8 @@ GICombinerEmitter::buildMatchTable(MutableArrayRef<RuleMatcher> Rules) {
const Matcher *B) {
auto *L = static_cast<const RuleMatcher *>(A);
auto *R = static_cast<const RuleMatcher *>(B);
- return std::tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
- std::tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
+ return std::make_tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
+ std::make_tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
});
for (Matcher *Rule : InputRules)
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index e86057422cd7..25e302ce1ca4 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -30,15 +30,15 @@
///
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenIntrinsics.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "GlobalISelMatchTable.h"
-#include "GlobalISelMatchTableExecutorEmitter.h"
-#include "InfoByHwMode.h"
-#include "SubtargetFeatureInfo.h"
+#include "Basic/CodeGenIntrinsics.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/GlobalISel/GlobalISelMatchTable.h"
+#include "Common/GlobalISel/GlobalISelMatchTableExecutorEmitter.h"
+#include "Common/InfoByHwMode.h"
+#include "Common/SubtargetFeatureInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
diff --git a/llvm/utils/TableGen/InstrDocsEmitter.cpp b/llvm/utils/TableGen/InstrDocsEmitter.cpp
index efabf6bb7ba6..f948540e18db 100644
--- a/llvm/utils/TableGen/InstrDocsEmitter.cpp
+++ b/llvm/utils/TableGen/InstrDocsEmitter.cpp
@@ -18,9 +18,9 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include <string>
diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 2d08447429d9..36f8fa146539 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -11,15 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenDAGPatterns.h"
-#include "CodeGenInstruction.h"
-#include "CodeGenSchedule.h"
-#include "CodeGenTarget.h"
-#include "PredicateExpander.h"
-#include "SequenceToOffsetTable.h"
-#include "SubtargetFeatureInfo.h"
+#include "Basic/SequenceToOffsetTable.h"
+#include "Common/CodeGenDAGPatterns.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenSchedule.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/PredicateExpander.h"
+#include "Common/SubtargetFeatureInfo.h"
+#include "Common/Types.h"
#include "TableGenBackends.h"
-#include "Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
diff --git a/llvm/utils/TableGen/IntrinsicEmitter.cpp b/llvm/utils/TableGen/IntrinsicEmitter.cpp
index 50a34eac7ca3..a7e99fa4c050 100644
--- a/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenIntrinsics.h"
-#include "SequenceToOffsetTable.h"
+#include "Basic/CodeGenIntrinsics.h"
+#include "Basic/SequenceToOffsetTable.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp b/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
index 91c3b0b4359c..e9e63fa8d0de 100644
--- a/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
+++ b/llvm/utils/TableGen/MacroFusionPredicatorEmitter.cpp
@@ -38,8 +38,8 @@
//
//===---------------------------------------------------------------------===//
-#include "CodeGenTarget.h"
-#include "PredicateExpander.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/PredicateExpander.h"
#include "llvm/Support/Debug.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
diff --git a/llvm/utils/TableGen/OptParserEmitter.cpp b/llvm/utils/TableGen/OptParserEmitter.cpp
index c25f6c59cab3..6334af53f88f 100644
--- a/llvm/utils/TableGen/OptParserEmitter.cpp
+++ b/llvm/utils/TableGen/OptParserEmitter.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "OptEmitter.h"
+#include "Common/OptEmitter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
diff --git a/llvm/utils/TableGen/OptRSTEmitter.cpp b/llvm/utils/TableGen/OptRSTEmitter.cpp
index 5a7f079dc168..75b7cbdf2988 100644
--- a/llvm/utils/TableGen/OptRSTEmitter.cpp
+++ b/llvm/utils/TableGen/OptRSTEmitter.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "OptEmitter.h"
+#include "Common/OptEmitter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/TableGen/Record.h"
diff --git a/llvm/utils/TableGen/PseudoLoweringEmitter.cpp b/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
index 7f692f29192d..01cfd4a1d982 100644
--- a/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
+++ b/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
diff --git a/llvm/utils/TableGen/RegisterBankEmitter.cpp b/llvm/utils/TableGen/RegisterBankEmitter.cpp
index 8b59411c5bc3..5546e727af38 100644
--- a/llvm/utils/TableGen/RegisterBankEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterBankEmitter.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "InfoByHwMode.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/InfoByHwMode.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/TableGen/Error.h"
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index d074e31c6245..ee8830edeedb 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -12,12 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenHwModes.h"
-#include "CodeGenRegisters.h"
-#include "CodeGenTarget.h"
-#include "InfoByHwMode.h"
-#include "SequenceToOffsetTable.h"
-#include "Types.h"
+#include "Basic/SequenceToOffsetTable.h"
+#include "Common/CodeGenHwModes.h"
+#include "Common/CodeGenRegisters.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/InfoByHwMode.h"
+#include "Common/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
@@ -955,16 +955,6 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
SubRegIdxSeqs.emit(OS, printSubRegIndex);
OS << "};\n\n";
- // Emit the table of sub-register index sizes.
- OS << "extern const MCRegisterInfo::SubRegCoveredBits " << TargetName
- << "SubRegIdxRanges[] = {\n";
- OS << " { " << (uint16_t)-1 << ", " << (uint16_t)-1 << " },\n";
- for (const auto &Idx : SubRegIndices) {
- OS << " { " << Idx.Offset << ", " << Idx.Size << " },\t// "
- << Idx.getName() << "\n";
- }
- OS << "};\n\n";
-
// Emit the string table.
RegStrings.layout();
RegStrings.emitStringLiteralDef(OS, Twine("extern const char ") + TargetName +
@@ -1101,8 +1091,7 @@ void RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
<< TargetName << "LaneMaskLists, " << TargetName << "RegStrings, "
<< TargetName << "RegClassStrings, " << TargetName << "SubRegIdxLists, "
<< (std::distance(SubRegIndices.begin(), SubRegIndices.end()) + 1) << ",\n"
- << TargetName << "SubRegIdxRanges, " << TargetName
- << "RegEncodingTable);\n\n";
+ << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, false);
@@ -1253,6 +1242,19 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
}
OS << "\" };\n\n";
+ // Emit the table of sub-register index sizes.
+ OS << "static const TargetRegisterInfo::SubRegCoveredBits "
+ "SubRegIdxRangeTable[] = {\n";
+ for (unsigned M = 0; M < NumModes; ++M) {
+ OS << " { " << (uint16_t)-1 << ", " << (uint16_t)-1 << " },\n";
+ for (const auto &Idx : SubRegIndices) {
+ const SubRegRange &Range = Idx.Range.get(M);
+ OS << " { " << Range.Offset << ", " << Range.Size << " },\t// "
+ << Idx.getName() << "\n";
+ }
+ }
+ OS << "};\n\n";
+
// Emit SubRegIndex lane masks, including 0.
OS << "\nstatic const LaneBitmask SubRegIndexLaneMaskTable[] = {\n "
"LaneBitmask::getAll(),\n";
@@ -1634,8 +1636,6 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "extern const char " << TargetName << "RegClassStrings[];\n";
OS << "extern const MCPhysReg " << TargetName << "RegUnitRoots[][2];\n";
OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[];\n";
- OS << "extern const MCRegisterInfo::SubRegCoveredBits " << TargetName
- << "SubRegIdxRanges[];\n";
OS << "extern const uint16_t " << TargetName << "RegEncodingTable[];\n";
EmitRegMappingTables(OS, Regs, true);
@@ -1646,7 +1646,8 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
" unsigned PC, unsigned HwMode)\n"
<< " : TargetRegisterInfo(&" << TargetName << "RegInfoDesc"
<< ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() << ",\n"
- << " SubRegIndexNameTable, SubRegIndexLaneMaskTable,\n"
+ << " SubRegIndexNameTable, SubRegIdxRangeTable, "
+ "SubRegIndexLaneMaskTable,\n"
<< " ";
printMask(OS, RegBank.CoveringLanes);
OS << ", RegClassInfos, VTLists, HwMode) {\n"
@@ -1661,7 +1662,6 @@ void RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
<< " " << TargetName << "RegClassStrings,\n"
<< " " << TargetName << "SubRegIdxLists,\n"
<< " " << SubRegIndicesSize + 1 << ",\n"
- << " " << TargetName << "SubRegIdxRanges,\n"
<< " " << TargetName << "RegEncodingTable);\n\n";
EmitRegMapping(OS, Regs, true);
@@ -1867,7 +1867,13 @@ void RegisterInfoEmitter::debugDump(raw_ostream &OS) {
OS << "SubRegIndex " << SRI.getName() << ":\n";
OS << "\tLaneMask: " << PrintLaneMask(SRI.LaneMask) << '\n';
OS << "\tAllSuperRegsCovered: " << SRI.AllSuperRegsCovered << '\n';
- OS << "\tOffset, Size: " << SRI.Offset << ", " << SRI.Size << '\n';
+ OS << "\tOffset: {";
+ for (unsigned M = 0; M != NumModes; ++M)
+ OS << ' ' << getModeName(M) << ':' << SRI.Range.get(M).Offset;
+ OS << " }\n\tSize: {";
+ for (unsigned M = 0; M != NumModes; ++M)
+ OS << ' ' << getModeName(M) << ':' << SRI.Range.get(M).Size;
+ OS << " }\n";
}
for (const CodeGenRegister &R : RegBank.getRegisters()) {
diff --git a/llvm/utils/TableGen/SearchableTableEmitter.cpp b/llvm/utils/TableGen/SearchableTableEmitter.cpp
index 51f18f360ed3..48ee23db957d 100644
--- a/llvm/utils/TableGen/SearchableTableEmitter.cpp
+++ b/llvm/utils/TableGen/SearchableTableEmitter.cpp
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenIntrinsics.h"
-#include "CodeGenTarget.h"
+#include "Basic/CodeGenIntrinsics.h"
+#include "Common/CodeGenTarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
diff --git a/llvm/utils/TableGen/SubtargetEmitter.cpp b/llvm/utils/TableGen/SubtargetEmitter.cpp
index d350d7de139f..2e2c57b802ee 100644
--- a/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -10,10 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenHwModes.h"
-#include "CodeGenSchedule.h"
-#include "CodeGenTarget.h"
-#include "PredicateExpander.h"
+#include "Common/CodeGenHwModes.h"
+#include "Common/CodeGenSchedule.h"
+#include "Common/CodeGenTarget.h"
+#include "Common/PredicateExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp b/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
index 928129f24fcb..e9436ab16e44 100644
--- a/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
+++ b/llvm/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
@@ -14,7 +14,7 @@
//===----------------------------------------------------------------------===//
#include "WebAssemblyDisassemblerEmitter.h"
-#include "CodeGenInstruction.h"
+#include "Common/CodeGenInstruction.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Record.h"
diff --git a/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp b/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
index 0a9abbfe186e..c721502a395f 100644
--- a/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
#include "X86RecognizableInstr.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 1319042e48d0..5871e678b16e 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
#include "X86RecognizableInstr.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FormattedStream.h"
diff --git a/llvm/utils/TableGen/X86MnemonicTables.cpp b/llvm/utils/TableGen/X86MnemonicTables.cpp
index aeafee157462..d9ceed40f7c7 100644
--- a/llvm/utils/TableGen/X86MnemonicTables.cpp
+++ b/llvm/utils/TableGen/X86MnemonicTables.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
+#include "Common/CodeGenInstruction.h"
+#include "Common/CodeGenTarget.h"
#include "X86RecognizableInstr.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.h b/llvm/utils/TableGen/X86RecognizableInstr.h
index 68af68fb5aa0..12fb41750cb3 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -16,7 +16,7 @@
#ifndef LLVM_UTILS_TABLEGEN_X86RECOGNIZABLEINSTR_H
#define LLVM_UTILS_TABLEGEN_X86RECOGNIZABLEINSTR_H
-#include "CodeGenInstruction.h"
+#include "Common/CodeGenInstruction.h"
#include "llvm/Support/X86DisassemblerDecoderCommon.h"
#include <cstdint>
#include <string>
diff --git a/llvm/utils/bisect-skip-count b/llvm/utils/bisect-skip-count
index efdd2c937e15..1a64eba381dc 100755
--- a/llvm/utils/bisect-skip-count
+++ b/llvm/utils/bisect-skip-count
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# This script is used to bisect skip and count arguments for --debug-counter.
# It is similar to bisect, except it understands how to increase skip and decrease count
#
diff --git a/llvm/utils/git/code-format-helper.py b/llvm/utils/git/code-format-helper.py
index 1113bf02570b..f1207026704e 100755
--- a/llvm/utils/git/code-format-helper.py
+++ b/llvm/utils/git/code-format-helper.py
@@ -44,6 +44,7 @@ class FormatArgs:
token: str = None
verbose: bool = True
issue_number: int = 0
+ write_comment_to_file: bool = False
def __init__(self, args: argparse.Namespace = None) -> None:
if not args is None:
@@ -53,12 +54,14 @@ class FormatArgs:
self.token = args.token
self.changed_files = args.changed_files
self.issue_number = args.issue_number
+ self.write_comment_to_file = args.write_comment_to_file
class FormatHelper:
COMMENT_TAG = "<!--LLVM CODE FORMAT COMMENT: {fmt}-->"
name: str
friendly_name: str
+ comment: dict = None
@property
def comment_tag(self) -> str:
@@ -119,6 +122,14 @@ View the diff from {self.name} here.
comment_text = self.comment_tag + "\n\n" + comment_text
existing_comment = self.find_comment(pr)
+
+ if args.write_comment_to_file:
+ if create_new or existing_comment:
+ self.comment = {"body": comment_text}
+ if existing_comment:
+ self.comment["id"] = existing_comment.id
+ return
+
if existing_comment:
existing_comment.edit(comment_text)
elif create_new:
@@ -310,6 +321,8 @@ def hook_main():
if fmt.has_tool():
if not fmt.run(args.changed_files, args):
failed_fmts.append(fmt.name)
+ if fmt.comment:
+ comments.append(fmt.comment)
else:
print(f"Couldn't find {fmt.name}, can't check " + fmt.friendly_name.lower())
@@ -350,6 +363,11 @@ if __name__ == "__main__":
type=str,
help="Comma separated list of files that has been changed",
)
+ parser.add_argument(
+ "--write-comment-to-file",
+ action="store_true",
+ help="Don't post comments on the PR, instead write the comments and metadata a file called 'comment'",
+ )
args = FormatArgs(parser.parse_args())
@@ -358,9 +376,18 @@ if __name__ == "__main__":
changed_files = args.changed_files.split(",")
failed_formatters = []
+ comments = []
for fmt in ALL_FORMATTERS:
if not fmt.run(changed_files, args):
failed_formatters.append(fmt.name)
+ if fmt.comment:
+ comments.append(fmt.comment)
+
+ if len(comments):
+ with open("comments", "w") as f:
+ import json
+
+ json.dump(comments, f)
if len(failed_formatters) > 0:
print(f"error: some formatters failed: {' '.join(failed_formatters)}")
diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py
index b21f14eca445..1b5141e42594 100755
--- a/llvm/utils/git/github-automation.py
+++ b/llvm/utils/git/github-automation.py
@@ -24,18 +24,19 @@ Hi!
This issue may be a good introductory issue for people new to working on LLVM. If you would like to work on this issue, your first steps are:
-1. In the comments of the issue, request for it to be assigned to you.
-2. Fix the issue locally.
-3. [Run the test suite](https://llvm.org/docs/TestingGuide.html#unit-and-regression-tests) locally. Remember that the subdirectories under `test/` create fine-grained testing targets, so you can e.g. use `make check-clang-ast` to only run Clang's AST tests.
-4. Create a Git commit.
-5. Run [`git clang-format HEAD~1`](https://clang.llvm.org/docs/ClangFormat.html#git-integration) to format your changes.
-6. Open a [pull request](https://github.com/llvm/llvm-project/pulls) to the [upstream repository](https://github.com/llvm/llvm-project) on GitHub. Detailed instructions can be found [in GitHub's documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request).
+1. Check that no other contributor has already been assigned to this issue. If you believe that no one is actually working on it despite an assignment, ping the person. After one week without a response, the assignee may be changed.
+1. In the comments of this issue, request for it to be assigned to you, or just create a [pull request](https://github.com/llvm/llvm-project/pulls) after following the steps below. [Mention](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) this issue in the description of the pull request.
+1. Fix the issue locally.
+1. [Run the test suite](https://llvm.org/docs/TestingGuide.html#unit-and-regression-tests) locally. Remember that the subdirectories under `test/` create fine-grained testing targets, so you can e.g. use `make check-clang-ast` to only run Clang's AST tests.
+1. Create a Git commit.
+1. Run [`git clang-format HEAD~1`](https://clang.llvm.org/docs/ClangFormat.html#git-integration) to format your changes.
+1. Open a [pull request](https://github.com/llvm/llvm-project/pulls) to the [upstream repository](https://github.com/llvm/llvm-project) on GitHub. Detailed instructions can be found [in GitHub's documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request). [Mention](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) this issue in the description of the pull request.
If you have any further questions about this issue, don't hesitate to ask via a comment in the thread below.
"""
-def _get_curent_team(team_name, teams) -> Optional[github.Team.Team]:
+def _get_current_team(team_name, teams) -> Optional[github.Team.Team]:
for team in teams:
if team_name == team.name.lower():
return team
@@ -69,7 +70,7 @@ class IssueSubscriber:
self._team_name = "issue-subscribers-{}".format(label_name).lower()
def run(self) -> bool:
- team = _get_curent_team(self.team_name, self.org.get_teams())
+ team = _get_current_team(self.team_name, self.org.get_teams())
if not team:
print(f"couldn't find team named {self.team_name}")
return False
@@ -124,7 +125,7 @@ class PRSubscriber:
def run(self) -> bool:
patch = None
- team = _get_curent_team(self.team_name, self.org.get_teams())
+ team = _get_current_team(self.team_name, self.org.get_teams())
if not team:
print(f"couldn't find team named {self.team_name}")
return False
@@ -200,7 +201,7 @@ Author: {self.pr.user.name} ({self.pr.user.login})
)
return True
- def _get_curent_team(self) -> Optional[github.Team.Team]:
+ def _get_current_team(self) -> Optional[github.Team.Team]:
for team in self.org.get_teams():
if self.team_name == team.name.lower():
return team
@@ -280,7 +281,7 @@ class PRBuildbotInformation:
@{self.author} Congratulations on having your first Pull Request (PR) merged into the LLVM Project!
Your changes will be combined with recent changes from other authors, then tested
-by our [build bots](https://lab.llvm.org/buildbot/). If there is a problem with a build, you may recieve a report in an email or a comment on this PR.
+by our [build bots](https://lab.llvm.org/buildbot/). If there is a problem with a build, you may receive a report in an email or a comment on this PR.
Please check whether problems have been caused by your change specifically, as
the builds can include changes from many authors. It is not uncommon for your
@@ -638,7 +639,7 @@ class ReleaseWorkflow:
parser = argparse.ArgumentParser()
parser.add_argument(
- "--token", type=str, required=True, help="GitHub authentiation token"
+ "--token", type=str, required=True, help="GitHub authentication token"
)
parser.add_argument(
"--repo",
@@ -668,7 +669,7 @@ release_workflow_parser.add_argument(
"--llvm-project-dir",
type=str,
default=".",
- help="directory containing the llvm-project checout",
+ help="directory containing the llvm-project checkout",
)
release_workflow_parser.add_argument(
"--issue-number", type=int, required=True, help="The issue number to update"
diff --git a/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn b/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn
index c2999a67f58a..103954e5756d 100644
--- a/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/unittests/Interpreter/BUILD.gn
@@ -8,6 +8,7 @@ unittest("ClangReplInterpreterTests") {
"//clang/lib/Frontend",
"//clang/lib/Interpreter",
"//llvm/lib/IR",
+ "//llvm/lib/Target:TargetsToBuild",
"//llvm/lib/TargetParser",
"//llvm/lib/Testing/Support",
]
diff --git a/llvm/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn b/llvm/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn
index 01c2b6ced336..adaf6c476dfe 100644
--- a/llvm/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/unittests/StaticAnalyzer/BUILD.gn
@@ -19,6 +19,8 @@ unittest("StaticAnalysisTests") {
"CallEventTest.cpp",
"ConflictingEvalCallsTest.cpp",
"FalsePositiveRefutationBRVisitorTest.cpp",
+ "IsCLibraryFunctionTest.cpp",
+ "MemRegionDescriptiveNameTest.cpp",
"NoStateChangeFuncVisitorTest.cpp",
"ParamRegionTest.cpp",
"RangeSetTest.cpp",
diff --git a/llvm/utils/gn/secondary/compiler-rt/test/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/test/BUILD.gn
index efb324713cfe..d533e79e6374 100644
--- a/llvm/utils/gn/secondary/compiler-rt/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/compiler-rt/test/BUILD.gn
@@ -65,6 +65,7 @@ write_cmake_config("lit_common_configured") {
"SANITIZER_USE_STATIC_CXX_ABI_PYBOOL=False",
"SANITIZER_USE_STATIC_LLVM_UNWINDER_PYBOOL=False",
"COMPILER_RT_HAS_AARCH64_SME_PYBOOL=False",
+ "COMPILER_RT_DARWIN_LINKER_VERSION=",
"COMPILER_RT_HAS_LLD_PYBOOL=True",
"COMPILER_RT_HAS_GWP_ASAN_PYBOOL=False",
"HAVE_RPC_XDR_H=0",
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 3c8b80526773..8a2ab18bf953 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -287,6 +287,7 @@ if (current_toolchain == default_toolchain) {
"__algorithm/shift_right.h",
"__algorithm/shuffle.h",
"__algorithm/sift_down.h",
+ "__algorithm/simd_utils.h",
"__algorithm/sort.h",
"__algorithm/sort_heap.h",
"__algorithm/stable_partition.h",
@@ -499,22 +500,27 @@ if (current_toolchain == default_toolchain) {
"__fwd/array.h",
"__fwd/bit_reference.h",
"__fwd/complex.h",
+ "__fwd/deque.h",
"__fwd/format.h",
"__fwd/fstream.h",
"__fwd/functional.h",
"__fwd/ios.h",
"__fwd/istream.h",
"__fwd/mdspan.h",
+ "__fwd/memory.h",
"__fwd/memory_resource.h",
"__fwd/ostream.h",
"__fwd/pair.h",
+ "__fwd/queue.h",
"__fwd/span.h",
"__fwd/sstream.h",
+ "__fwd/stack.h",
"__fwd/streambuf.h",
"__fwd/string.h",
"__fwd/string_view.h",
"__fwd/subrange.h",
"__fwd/tuple.h",
+ "__fwd/vector.h",
"__hash_table",
"__ios/fpos.h",
"__iterator/access.h",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/MCTargetDesc/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/MCTargetDesc/BUILD.gn
index 12d875cf40c9..5ba91fcec83a 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/MCTargetDesc/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/MCTargetDesc/BUILD.gn
@@ -104,6 +104,7 @@ static_library("MCTargetDesc") {
"AMDGPUMCAsmInfo.cpp",
"AMDGPUMCCodeEmitter.cpp",
"AMDGPUMCExpr.cpp",
+ "AMDGPUMCKernelDescriptor.cpp",
"AMDGPUMCTargetDesc.cpp",
"AMDGPUTargetStreamer.cpp",
"R600InstPrinter.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/IPO/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/IPO/BUILD.gn
index 2003e86e90b9..0d134c7bdffb 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Transforms/IPO/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/IPO/BUILD.gn
@@ -57,6 +57,7 @@ static_library("IPO") {
"SCCP.cpp",
"SampleContextTracker.cpp",
"SampleProfile.cpp",
+ "SampleProfileMatcher.cpp",
"SampleProfileProbe.cpp",
"StripDeadPrototypes.cpp",
"StripSymbols.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/TableGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/TableGen/BUILD.gn
index ce04c967c248..a08001d0b319 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/TableGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/TableGen/BUILD.gn
@@ -19,7 +19,7 @@ unittest("TableGenTests") {
":AutomataTables",
"//llvm/lib/Support",
"//llvm/lib/TableGen",
- "//llvm/utils/TableGen/GlobalISel",
+ "//llvm/utils/TableGen/Common",
]
include_dirs = [ "//llvm/utils/TableGen" ]
sources = [
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
index 58335000950d..53a9d8d01519 100644
--- a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
@@ -1,51 +1,45 @@
source_set("llvm-min-tblgen-sources") {
sources = [
"Attributes.cpp",
- "CodeGenIntrinsics.cpp",
"DirectiveEmitter.cpp",
"IntrinsicEmitter.cpp",
"RISCVTargetDefEmitter.cpp",
- "SDNodeProperties.cpp",
"TableGen.cpp",
"VTEmitter.cpp",
]
- deps = [ "//llvm/lib/Support" ]
+ deps = [
+ "Basic",
+ "//llvm/lib/Support",
+ ]
}
executable("llvm-min-tblgen") {
deps = [
":llvm-min-tblgen-sources",
- "//llvm/lib/TableGen",
+ "Basic",
]
}
executable("llvm-tblgen") {
deps = [
":llvm-min-tblgen-sources",
+ "Basic",
+ "Common",
"//llvm/include/llvm/Config:llvm-config",
"//llvm/lib/CodeGenTypes",
"//llvm/lib/Support",
"//llvm/lib/TableGen",
- "//llvm/utils/TableGen/GlobalISel",
]
+ include_dirs = [ "." ]
sources = [
"AsmMatcherEmitter.cpp",
"AsmWriterEmitter.cpp",
- "AsmWriterInst.cpp",
"CTagsEmitter.cpp",
"CallingConvEmitter.cpp",
"CodeEmitterGen.cpp",
- "CodeGenDAGPatterns.cpp",
- "CodeGenHwModes.cpp",
- "CodeGenInstAlias.cpp",
- "CodeGenInstruction.cpp",
"CodeGenMapTable.cpp",
- "CodeGenRegisters.cpp",
- "CodeGenSchedule.cpp",
- "CodeGenTarget.cpp",
"CompressInstEmitter.cpp",
"DAGISelEmitter.cpp",
- "DAGISelMatcher.cpp",
"DAGISelMatcherEmitter.cpp",
"DAGISelMatcherGen.cpp",
"DAGISelMatcherOpt.cpp",
@@ -58,24 +52,16 @@ executable("llvm-tblgen") {
"FastISelEmitter.cpp",
"GlobalISelCombinerEmitter.cpp",
"GlobalISelEmitter.cpp",
- "GlobalISelMatchTable.cpp",
- "GlobalISelMatchTableExecutorEmitter.cpp",
- "InfoByHwMode.cpp",
"InstrDocsEmitter.cpp",
"InstrInfoEmitter.cpp",
"MacroFusionPredicatorEmitter.cpp",
- "OptEmitter.cpp",
"OptParserEmitter.cpp",
"OptRSTEmitter.cpp",
- "PredicateExpander.cpp",
"PseudoLoweringEmitter.cpp",
"RegisterBankEmitter.cpp",
"RegisterInfoEmitter.cpp",
"SearchableTableEmitter.cpp",
"SubtargetEmitter.cpp",
- "SubtargetFeatureInfo.cpp",
- "Types.cpp",
- "VarLenCodeEmitterGen.cpp",
"WebAssemblyDisassemblerEmitter.cpp",
"X86CompressEVEXTablesEmitter.cpp",
"X86DisassemblerTables.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/Basic/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/Basic/BUILD.gn
new file mode 100644
index 000000000000..2ebe393fa0fd
--- /dev/null
+++ b/llvm/utils/gn/secondary/llvm/utils/TableGen/Basic/BUILD.gn
@@ -0,0 +1,10 @@
+static_library("Basic") {
+ deps = [
+ "//llvm/lib/Support",
+ "//llvm/lib/TableGen",
+ ]
+ sources = [
+ "CodeGenIntrinsics.cpp",
+ "SDNodeProperties.cpp",
+ ]
+}
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/Common/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/Common/BUILD.gn
new file mode 100644
index 000000000000..daa3278d56d7
--- /dev/null
+++ b/llvm/utils/gn/secondary/llvm/utils/TableGen/Common/BUILD.gn
@@ -0,0 +1,34 @@
+static_library("Common") {
+ deps = [
+ "//llvm/include/llvm/CodeGen:GenVT",
+ "//llvm/lib/CodeGenTypes",
+ "//llvm/lib/Support",
+ "//llvm/lib/TableGen",
+ ]
+ include_dirs = [ ".." ]
+ sources = [
+ "AsmWriterInst.cpp",
+ "CodeGenDAGPatterns.cpp",
+ "CodeGenHwModes.cpp",
+ "CodeGenInstAlias.cpp",
+ "CodeGenInstruction.cpp",
+ "CodeGenRegisters.cpp",
+ "CodeGenSchedule.cpp",
+ "CodeGenTarget.cpp",
+ "DAGISelMatcher.cpp",
+ "GlobalISel/CXXPredicates.cpp",
+ "GlobalISel/CodeExpander.cpp",
+ "GlobalISel/CombinerUtils.cpp",
+ "GlobalISel/GlobalISelMatchTable.cpp",
+ "GlobalISel/GlobalISelMatchTableExecutorEmitter.cpp",
+ "GlobalISel/MatchDataInfo.cpp",
+ "GlobalISel/PatternParser.cpp",
+ "GlobalISel/Patterns.cpp",
+ "InfoByHwMode.cpp",
+ "OptEmitter.cpp",
+ "PredicateExpander.cpp",
+ "SubtargetFeatureInfo.cpp",
+ "Types.cpp",
+ "VarLenCodeEmitterGen.cpp",
+ ]
+}
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/GlobalISel/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/GlobalISel/BUILD.gn
deleted file mode 100644
index 8b0ca8561517..000000000000
--- a/llvm/utils/gn/secondary/llvm/utils/TableGen/GlobalISel/BUILD.gn
+++ /dev/null
@@ -1,13 +0,0 @@
-static_library("GlobalISel") {
- deps = [
- "//llvm/include/llvm/CodeGen:GenVT",
- "//llvm/lib/CodeGenTypes",
- "//llvm/lib/Support",
- ]
- sources = [
- "CXXPredicates.cpp",
- "CodeExpander.cpp",
- "MatchDataInfo.cpp",
- "Patterns.cpp",
- ]
-}
diff --git a/mlir/docs/DataLayout.md b/mlir/docs/DataLayout.md
index b9dde30519d6..21430c44ca95 100644
--- a/mlir/docs/DataLayout.md
+++ b/mlir/docs/DataLayout.md
@@ -77,6 +77,7 @@ public:
llvm::TypeSize getTypeSizeInBits(Type type) const;
uint64_t getTypeABIAlignment(Type type) const;
uint64_t getTypePreferredAlignment(Type type) const;
+ std::optional<uint64_t> getTypeIndexBitwidth(Type type) const;
};
```
@@ -267,7 +268,8 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
>} {}
```
-specifies that `index` has 32 bits. All other layout properties of `index` match
+specifies that `index` has 32 bits and index computations should be performed
+using 32-bit precision as well. All other layout properties of `index` match
those of the integer type with the same bitwidth defined above.
In absence of the corresponding entry, `index` is assumed to be a 64-bit
@@ -287,7 +289,7 @@ The default data layout assumes 8-bit bytes.
### DLTI Dialect
-The [DLTI](Dialects/DLTI.md) dialect provides the attributes implementing
+The [DLTI](../Dialects/DLTIDialect/) dialect provides the attributes implementing
`DataLayoutSpecInterface` and `DataLayoutEntryInterface`, as well as a dialect
attribute that can be used to attach the specification to a given operation. The
verifier of this attribute triggers those of the specification and checks the
diff --git a/mlir/examples/transform/Ch4/include/MyExtension.td b/mlir/examples/transform/Ch4/include/MyExtension.td
index 6c83ff0f46c8..660680334178 100644
--- a/mlir/examples/transform/Ch4/include/MyExtension.td
+++ b/mlir/examples/transform/Ch4/include/MyExtension.td
@@ -14,7 +14,7 @@
#ifndef MY_EXTENSION
#define MY_EXTENSION
-include "mlir/Dialect/Transform/IR/MatchInterfaces.td"
+include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td"
include "mlir/IR/OpBase.td"
diff --git a/mlir/include/mlir/Analysis/Presburger/Matrix.h b/mlir/include/mlir/Analysis/Presburger/Matrix.h
index 4484ebc747e6..c20a7bcecd52 100644
--- a/mlir/include/mlir/Analysis/Presburger/Matrix.h
+++ b/mlir/include/mlir/Analysis/Presburger/Matrix.h
@@ -244,6 +244,9 @@ protected:
SmallVector<T, 16> data;
};
+extern template class Matrix<MPInt>;
+extern template class Matrix<Fraction>;
+
// An inherited class for integer matrices, with no new data attributes.
// This is only used for the matrix-related methods which apply only
// to integers (hermite normal form computation and row normalisation).
diff --git a/mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitC.h b/mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitC.h
new file mode 100644
index 000000000000..734ffdba520c
--- /dev/null
+++ b/mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitC.h
@@ -0,0 +1,21 @@
+//===- MemRefToEmitC.h - Convert MemRef to EmitC --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_CONVERSION_MEMREFTOEMITC_MEMREFTOEMITC_H
+#define MLIR_CONVERSION_MEMREFTOEMITC_MEMREFTOEMITC_H
+
+namespace mlir {
+class RewritePatternSet;
+class TypeConverter;
+
+void populateMemRefToEmitCTypeConversion(TypeConverter &typeConverter);
+
+void populateMemRefToEmitCConversionPatterns(RewritePatternSet &patterns,
+ TypeConverter &converter);
+} // namespace mlir
+
+#endif // MLIR_CONVERSION_MEMREFTOEMITC_MEMREFTOEMITC_H
diff --git a/mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitCPass.h b/mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitCPass.h
new file mode 100644
index 000000000000..4a63014c19ad
--- /dev/null
+++ b/mlir/include/mlir/Conversion/MemRefToEmitC/MemRefToEmitCPass.h
@@ -0,0 +1,20 @@
+//===- MemRefToEmitCPass.h - A Pass to convert MemRef to EmitC ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef MLIR_CONVERSION_MEMREFTOEMITC_MEMREFTOEMITCPASS_H
+#define MLIR_CONVERSION_MEMREFTOEMITC_MEMREFTOEMITCPASS_H
+
+#include <memory>
+
+namespace mlir {
+class Pass;
+
+#define GEN_PASS_DECL_CONVERTMEMREFTOEMITC
+#include "mlir/Conversion/Passes.h.inc"
+} // namespace mlir
+
+#endif // MLIR_CONVERSION_MEMREFTOEMITC_MEMREFTOEMITCPASS_H
diff --git a/mlir/include/mlir/Conversion/Passes.h b/mlir/include/mlir/Conversion/Passes.h
index f2aa4fb53540..2179ae18ac07 100644
--- a/mlir/include/mlir/Conversion/Passes.h
+++ b/mlir/include/mlir/Conversion/Passes.h
@@ -45,6 +45,7 @@
#include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
#include "mlir/Conversion/MathToLibm/MathToLibm.h"
#include "mlir/Conversion/MathToSPIRV/MathToSPIRVPass.h"
+#include "mlir/Conversion/MemRefToEmitC/MemRefToEmitCPass.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/MemRefToSPIRV/MemRefToSPIRVPass.h"
#include "mlir/Conversion/NVGPUToNVVM/NVGPUToNVVM.h"
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index bd81cc6d5323..d094ee3b36ab 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -172,10 +172,6 @@ def ConvertArithToSPIRV : Pass<"convert-arith-to-spirv"> {
"bool", /*default=*/"true",
"Emulate narrower scalar types with 32-bit ones if not supported by "
"the target">,
- Option<"enableFastMath", "enable-fast-math",
- "bool", /*default=*/"false",
- "Enable fast math mode (assuming no NaN and infinity for floating "
- "point values) when performing conversion">
];
}
@@ -754,6 +750,15 @@ def ConvertMathToFuncs : Pass<"convert-math-to-funcs", "ModuleOp"> {
}
//===----------------------------------------------------------------------===//
+// MemRefToEmitC
+//===----------------------------------------------------------------------===//
+
+def ConvertMemRefToEmitC : Pass<"convert-memref-to-emitc"> {
+ let summary = "Convert MemRef dialect to EmitC dialect";
+ let dependentDialects = ["emitc::EmitCDialect"];
+}
+
+//===----------------------------------------------------------------------===//
// MemRefToLLVM
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Arith/IR/Arith.h b/mlir/include/mlir/Dialect/Arith/IR/Arith.h
index 971c78f4a86a..00cdb13feb29 100644
--- a/mlir/include/mlir/Dialect/Arith/IR/Arith.h
+++ b/mlir/include/mlir/Dialect/Arith/IR/Arith.h
@@ -53,6 +53,7 @@ namespace arith {
class ConstantIntOp : public arith::ConstantOp {
public:
using arith::ConstantOp::ConstantOp;
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<ConstantOp>(); }
/// Build a constant int op that produces an integer of the specified width.
static void build(OpBuilder &builder, OperationState &result, int64_t value,
@@ -74,6 +75,7 @@ public:
class ConstantFloatOp : public arith::ConstantOp {
public:
using arith::ConstantOp::ConstantOp;
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<ConstantOp>(); }
/// Build a constant float op that produces a float of the specified type.
static void build(OpBuilder &builder, OperationState &result,
@@ -90,7 +92,7 @@ public:
class ConstantIndexOp : public arith::ConstantOp {
public:
using arith::ConstantOp::ConstantOp;
-
+ static ::mlir::TypeID resolveTypeID() { return TypeID::get<ConstantOp>(); }
/// Build a constant int op that produces an index.
static void build(OpBuilder &builder, OperationState &result, int64_t value);
diff --git a/mlir/include/mlir/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.h b/mlir/include/mlir/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.h
new file mode 100644
index 000000000000..a2b3a9bb655b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.h
@@ -0,0 +1,20 @@
+//===- BufferViewFlowOpInterfaceImpl.h - Buffer View Analysis ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_ARITH_TRANSFORMS_BUFFERVIEWFLOWOPINTERFACEIMPL_H
+#define MLIR_DIALECT_ARITH_TRANSFORMS_BUFFERVIEWFLOWOPINTERFACEIMPL_H
+
+namespace mlir {
+class DialectRegistry;
+
+namespace arith {
+void registerBufferViewFlowOpInterfaceExternalModels(DialectRegistry &registry);
+} // namespace arith
+} // namespace mlir
+
+#endif // MLIR_DIALECT_ARITH_TRANSFORMS_BUFFERVIEWFLOWOPINTERFACEIMPL_H
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h
new file mode 100644
index 000000000000..84e67fe72b62
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h
@@ -0,0 +1,27 @@
+//===- BufferViewFlowOpInterface.h - Buffer View Flow Analysis --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_BUFFERIZATION_IR_BUFFERVIEWFLOWOPINTERFACE_H_
+#define MLIR_DIALECT_BUFFERIZATION_IR_BUFFERVIEWFLOWOPINTERFACE_H_
+
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Support/LLVM.h"
+
+namespace mlir {
+class ValueRange;
+
+namespace bufferization {
+
+using RegisterDependenciesFn = std::function<void(ValueRange, ValueRange)>;
+
+} // namespace bufferization
+} // namespace mlir
+
+#include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h.inc"
+
+#endif // MLIR_DIALECT_BUFFERIZATION_IR_BUFFERVIEWFLOWOPINTERFACE_H_
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td
new file mode 100644
index 000000000000..58885d742266
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td
@@ -0,0 +1,73 @@
+//===-- BufferViewFlowOpInterface.td - Buffer View Flow ----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BUFFER_VIEW_FLOW_OP_INTERFACE
+#define BUFFER_VIEW_FLOW_OP_INTERFACE
+
+include "mlir/IR/OpBase.td"
+
+def BufferViewFlowOpInterface :
+ OpInterface<"BufferViewFlowOpInterface"> {
+ let description = [{
+ An op interface for the buffer view flow analysis. This interface describes
+ buffer dependencies between operands and op results/region entry block
+ arguments.
+ }];
+ let cppNamespace = "::mlir::bufferization";
+ let methods = [
+ InterfaceMethod<
+ /*desc=*/[{
+ Populate buffer dependencies between operands and op results/region
+ entry block arguments.
+
+ Implementations should register dependencies between an operand ("X")
+ and an op result/region entry block argument ("Y") if Y may depend
+ on X. Y depends on X if Y and X are the same buffer or if Y is a
+ subview of X.
+
+ Example:
+ ```
+ %r = arith.select %c, %m1, %m2 : memref<5xf32>
+ ```
+ In the above example, %r may depend on %m1 or %m2 and a correct
+ interface implementation should call:
+ - "registerDependenciesFn(%m1, %r)"
+ - "registerDependenciesFn(%m2, %r)"
+ }],
+ /*retType=*/"void",
+ /*methodName=*/"populateDependencies",
+ /*args=*/(ins
+ "::mlir::bufferization::RegisterDependenciesFn"
+ :$registerDependenciesFn)
+ >,
+ InterfaceMethod<
+ /*desc=*/[{
+ Return "true" if the given value may be a terminal buffer. A buffer
+ value is "terminal" if it cannot be traced back any further in the
+ buffer view flow analysis.
+
+ Examples: A buffer could be terminal because:
+ - it is a newly allocated buffer (e.g., "memref.alloc"),
+ - or: because there is not enough compile-time information available
+ to make a definite decision (e.g., "memref.realloc" may reallocate
+ but we do not know for sure; another example are call ops where we
+ would have to analyze the body of the callee).
+
+ Implementations can assume that the given SSA value is an OpResult of
+ this operation or a region entry block argument of this operation.
+ }],
+ /*retType=*/"bool",
+ /*methodName=*/"mayBeTerminalBuffer",
+ /*args=*/(ins "Value":$value),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/"return false;"
+ >,
+ ];
+}
+
+#endif // BUFFER_VIEW_FLOW_OP_INTERFACE
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index 3a61a4b34765..2d8add82383b 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -366,10 +366,6 @@ struct BufferizationOptions {
DefaultMemorySpaceFn defaultMemorySpaceFn =
[](TensorType t) -> std::optional<Attribute> { return Attribute(); };
- /// Seed for the analysis fuzzer. If set to `0`, the fuzzer is deactivated.
- /// Should be used only with `testAnalysisOnly = true`.
- unsigned analysisFuzzerSeed = 0;
-
/// If set to `true`, the analysis is skipped. A buffer is copied before every
/// write. This flag cannot be used together with `testAnalysisOnly = true`.
bool copyBeforeWrite = false;
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
index 9dc6afcaab31..4f609ddff9a4 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -10,6 +10,7 @@
#define BUFFERIZATION_OPS
include "mlir/Dialect/Bufferization/IR/AllocationOpInterface.td"
+include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferizationBase.td"
include "mlir/Interfaces/DestinationStyleOpInterface.td"
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt
index 31a553f9a32f..13a5bc370a4f 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/CMakeLists.txt
@@ -3,6 +3,7 @@ add_mlir_doc(BufferizationOps BufferizationOps Dialects/ -gen-dialect-doc)
add_mlir_interface(AllocationOpInterface)
add_mlir_interface(BufferDeallocationOpInterface)
add_mlir_interface(BufferizableOpInterface)
+add_mlir_interface(BufferViewFlowOpInterface)
set(LLVM_TARGET_DEFINITIONS BufferizationEnums.td)
mlir_tablegen(BufferizationEnums.h.inc -gen-enum-decls)
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h
index 24825db69f90..4015231c845d 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h
@@ -53,6 +53,7 @@ public:
///
/// Results in resolve(B) returning {B, C}
ValueSetT resolve(Value value) const;
+ ValueSetT resolveReverse(Value value) const;
/// Removes the given values from all alias sets.
void remove(const SetVector<Value> &aliasValues);
@@ -63,6 +64,9 @@ public:
/// results have to be changed.
void rename(Value from, Value to);
+ /// Returns "true" if the given value may be a terminal.
+ bool mayBeTerminalBuffer(Value value) const;
+
private:
/// This function constructs a mapping from values to its immediate
/// dependencies.
@@ -70,6 +74,44 @@ private:
/// Maps values to all immediate dependencies this value can have.
ValueMapT dependencies;
+ ValueMapT reverseDependencies;
+
+ /// A set of all SSA values that may be terminal buffers.
+ DenseSet<Value> terminals;
+};
+
+/// An is-same-buffer analysis that checks if two SSA values belong to the same
+/// buffer allocation or not.
+class BufferOriginAnalysis {
+public:
+ BufferOriginAnalysis(Operation *op);
+
+ /// Return "true" if `v1` and `v2` originate from the same buffer allocation.
+ /// Return "false" if `v1` and `v2` originate from different allocations.
+ /// Return "nullopt" if we do not know for sure.
+ ///
+ /// Example 1: isSameAllocation(%0, %1) == true
+ /// ```
+ /// %0 = memref.alloc()
+ /// %1 = memref.subview %0
+ /// ```
+ ///
+ /// Example 2: isSameAllocation(%0, %1) == false
+ /// ```
+ /// %0 = memref.alloc()
+ /// %1 = memref.alloc()
+ /// ```
+ ///
+ /// Example 3: isSameAllocation(%0, %2) == nullopt
+ /// ```
+ /// %0 = memref.alloc()
+ /// %1 = memref.alloc()
+ /// %2 = arith.select %c, %0, %1
+ /// ```
+ std::optional<bool> isSameAllocation(Value v1, Value v2);
+
+private:
+ BufferViewFlowAnalysis analysis;
};
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
index a29af853eb21..d50a3042aeea 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
@@ -24,7 +24,12 @@ class OneShotAnalysisState;
/// Options for analysis-enabled bufferization.
struct OneShotBufferizationOptions : public BufferizationOptions {
- enum class AnalysisHeuristic { BottomUp, TopDown };
+ enum class AnalysisHeuristic {
+ BottomUp,
+ TopDown,
+ BottomUpFromTerminators,
+ Fuzzer
+ };
OneShotBufferizationOptions() = default;
@@ -42,6 +47,11 @@ struct OneShotBufferizationOptions : public BufferizationOptions {
/// Specify the functions that should not be analyzed. copyBeforeWrite will be
/// set to true when bufferizing them.
llvm::ArrayRef<std::string> noAnalysisFuncFilter;
+
+ /// Seed for the analysis fuzzer. Used only if the heuristic is set to
+ /// `AnalysisHeuristic::Fuzzer`. The fuzzer should be used only with
+ /// `testAnalysisOnly = true`.
+ unsigned analysisFuzzerSeed = 0;
};
/// State for analysis-enabled bufferization. This class keeps track of alias
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
index 1c3cdec81a39..1303dc2c9ae1 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -459,6 +459,24 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
argument is read/written and which returned values are aliasing/equivalent.
For debugging purposes, such information can be printed with
`test-analysis-only`.
+
+ The order in which ops are analyzed is important. The analysis is greedy and
+ ops that are analyzed earlier are more likely to bufferize in-place. The
+ heuristic can be set with `analysis-heuristic`. At the moment, the following
+ heuristics are available:
+
+ * `bottom-up` (default): Analyze ops from bottom to top.
+ * `top-down`: Analyze ops from top to bottom.
+ * `fuzzer`: Randomize the ordering of ops with `analysis-fuzzer-seed`.
+ * `bottom-up-from-terminators`: Traverse the reverse use-def chains of
+ tensor IR, starting from region branch terminators (bottom-up). Nested
+ regions are traversed before enclosing regions. Analyze the traversed ops
+ first, then analyze the remaining ops bottom-up. This heuristic is useful
+ for bufferizing loop constructs. One-Shot Bufferize currently supports
+ only such IR where yielded tensor values bufferize to equivalent region
+ iter_args, and first analyzing all ops on the path from the "yielding" op
+ to the beginning of the loop body makes it more likely for the region
+ iter_args and yielded values to bufferize to equivalent buffers.
}];
let options = [
Option<"allowReturnAllocsFromLoops", "allow-return-allocs-from-loops",
diff --git a/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td b/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
index e19d714cadf8..d660292478b1 100644
--- a/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
+++ b/mlir/include/mlir/Dialect/Complex/IR/ComplexOps.td
@@ -225,6 +225,8 @@ def DivOp : ComplexArithmeticOp<"div"> {
%a = complex.div %b, %c : complex<f32>
```
}];
+
+ let hasFolder = 1;
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
index 1b1824a28e99..91bd3702f93b 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
@@ -546,7 +546,7 @@ def LLVM_DISubprogramAttr : LLVM_Attr<"DISubprogram", "di_subprogram",
"DIFileAttr":$file,
OptionalParameter<"unsigned">:$line,
OptionalParameter<"unsigned">:$scopeLine,
- "DISubprogramFlags":$subprogramFlags,
+ OptionalParameter<"DISubprogramFlags">:$subprogramFlags,
OptionalParameter<"DISubroutineTypeAttr">:$type
);
let builders = [
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
index 06df4a601b7a..9341a5a11cd6 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
@@ -30,7 +30,6 @@
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/ThreadLocalCache.h"
-#include "mlir/Transforms/Mem2Reg.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index a7b269eb41ee..04d797031245 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -705,4 +705,61 @@ def FramePointerKindEnum : LLVM_EnumAttr<
let cppNamespace = "::mlir::LLVM::framePointerKind";
}
+//===----------------------------------------------------------------------===//
+// RoundingMode
+//===----------------------------------------------------------------------===//
+
+// These values must match llvm::RoundingMode ones.
+// See llvm/include/llvm/ADT/FloatingPointMode.h.
+def RoundTowardZero
+ : LLVM_EnumAttrCase<"TowardZero", "towardzero", "TowardZero", 0>;
+def RoundNearestTiesToEven
+ : LLVM_EnumAttrCase<"NearestTiesToEven", "tonearest", "NearestTiesToEven", 1>;
+def RoundTowardPositive
+ : LLVM_EnumAttrCase<"TowardPositive", "upward", "TowardPositive", 2>;
+def RoundTowardNegative
+ : LLVM_EnumAttrCase<"TowardNegative", "downward", "TowardNegative", 3>;
+def RoundNearestTiesToAway
+ : LLVM_EnumAttrCase<"NearestTiesToAway", "tonearestaway", "NearestTiesToAway", 4>;
+def RoundDynamic
+ : LLVM_EnumAttrCase<"Dynamic", "dynamic", "Dynamic", 7>;
+// Needed as llvm::RoundingMode defines this.
+def RoundInvalid
+ : LLVM_EnumAttrCase<"Invalid", "invalid", "Invalid", -1>;
+
+// RoundingModeAttr should not be used in operations definitions.
+// Use ValidRoundingModeAttr instead.
+def RoundingModeAttr : LLVM_EnumAttr<
+ "RoundingMode",
+ "::llvm::RoundingMode",
+ "LLVM Rounding Mode",
+ [RoundTowardZero, RoundNearestTiesToEven, RoundTowardPositive,
+ RoundTowardNegative, RoundNearestTiesToAway, RoundDynamic, RoundInvalid]> {
+ let cppNamespace = "::mlir::LLVM";
+}
+
+def ValidRoundingModeAttr : ConfinedAttr<RoundingModeAttr, [IntMinValue<0>]>;
+
+//===----------------------------------------------------------------------===//
+// FPExceptionBehavior
+//===----------------------------------------------------------------------===//
+
+// These values must match llvm::fp::ExceptionBehavior ones.
+// See llvm/include/llvm/IR/FPEnv.h.
+def FPExceptionBehaviorIgnore
+ : LLVM_EnumAttrCase<"Ignore", "ignore", "ebIgnore", 0>;
+def FPExceptionBehaviorMayTrap
+ : LLVM_EnumAttrCase<"MayTrap", "maytrap", "ebMayTrap", 1>;
+def FPExceptionBehaviorStrict
+ : LLVM_EnumAttrCase<"Strict", "strict", "ebStrict", 2>;
+
+def FPExceptionBehaviorAttr : LLVM_EnumAttr<
+ "FPExceptionBehavior",
+ "::llvm::fp::ExceptionBehavior",
+ "LLVM Exception Behavior",
+ [FPExceptionBehaviorIgnore, FPExceptionBehaviorMayTrap,
+ FPExceptionBehaviorStrict]> {
+ let cppNamespace = "::mlir::LLVM";
+}
+
#endif // LLVMIR_ENUMS
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td
index e7a1da8ee560..cee752aeb269 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td
@@ -290,6 +290,73 @@ def GetResultPtrElementType : OpInterface<"GetResultPtrElementType"> {
];
}
+def FPExceptionBehaviorOpInterface : OpInterface<"FPExceptionBehaviorOpInterface"> {
+ let description = [{
+ An interface for operations receiving an exception behavior attribute
+ controlling FP exception behavior.
+ }];
+
+ let cppNamespace = "::mlir::LLVM";
+
+ let methods = [
+ InterfaceMethod<
+ /*desc=*/ "Returns a FPExceptionBehavior attribute for the operation",
+ /*returnType=*/ "FPExceptionBehaviorAttr",
+ /*methodName=*/ "getFPExceptionBehaviorAttr",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{
+ auto op = cast<ConcreteOp>(this->getOperation());
+ return op.getFpExceptionBehaviorAttr();
+ }]
+ >,
+ StaticInterfaceMethod<
+ /*desc=*/ [{Returns the name of the FPExceptionBehaviorAttr
+ attribute for the operation}],
+ /*returnType=*/ "StringRef",
+ /*methodName=*/ "getFPExceptionBehaviorAttrName",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{
+ return "fpExceptionBehavior";
+ }]
+ >
+ ];
+}
+
+def RoundingModeOpInterface : OpInterface<"RoundingModeOpInterface"> {
+ let description = [{
+ An interface for operations receiving a rounding mode attribute
+ controlling FP rounding mode.
+ }];
+
+ let cppNamespace = "::mlir::LLVM";
+
+ let methods = [
+ InterfaceMethod<
+ /*desc=*/ "Returns a RoundingMode attribute for the operation",
+ /*returnType=*/ "RoundingModeAttr",
+ /*methodName=*/ "getRoundingModeAttr",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{
+ auto op = cast<ConcreteOp>(this->getOperation());
+ return op.getRoundingmodeAttr();
+ }]
+ >,
+ StaticInterfaceMethod<
+ /*desc=*/ [{Returns the name of the RoundingModeAttr attribute
+ for the operation}],
+ /*returnType=*/ "StringRef",
+ /*methodName=*/ "getRoundingModeAttrName",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{
+ return "roundingmode";
+ }]
+ >,
+ ];
+}
//===----------------------------------------------------------------------===//
// LLVM dialect type interfaces.
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index b88f1186a44b..28526f1a1560 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -311,6 +311,91 @@ def LLVM_InvariantEndOp : LLVM_ZeroResultIntrOp<"invariant.end", [2],
"qualified(type($ptr))";
}
+// Constrained Floating-Point Intrinsics.
+
+class LLVM_ConstrainedIntr<string mnem, int numArgs,
+ bit overloadedResult, list<int> overloadedOperands,
+ bit hasRoundingMode>
+ : LLVM_OneResultIntrOp<"experimental.constrained." # mnem,
+ /*overloadedResults=*/
+ !cond(!gt(overloadedResult, 0) : [0],
+ true : []),
+ overloadedOperands,
+ /*traits=*/[Pure, DeclareOpInterfaceMethods<FPExceptionBehaviorOpInterface>]
+ # !cond(
+ !gt(hasRoundingMode, 0) : [DeclareOpInterfaceMethods<RoundingModeOpInterface>],
+ true : []),
+ /*requiresFastmath=*/0,
+ /*immArgPositions=*/[],
+ /*immArgAttrNames=*/[]> {
+ dag regularArgs = !dag(ins, !listsplat(LLVM_Type, numArgs), !foreach(i, !range(numArgs), "arg_" #i));
+ dag attrArgs = !con(!cond(!gt(hasRoundingMode, 0) : (ins ValidRoundingModeAttr:$roundingmode),
+ true : (ins)),
+ (ins FPExceptionBehaviorAttr:$fpExceptionBehavior));
+ let arguments = !con(regularArgs, attrArgs);
+ let llvmBuilder = [{
+ SmallVector<llvm::Value *> args =
+ moduleTranslation.lookupValues(opInst.getOperands());
+ SmallVector<llvm::Type *> overloadedTypes; }] #
+ !cond(!gt(overloadedResult, 0) : [{
+ // Take into account overloaded result type.
+ overloadedTypes.push_back($_resultType); }],
+ // No overloaded result type.
+ true : "") # [{
+ llvm::transform(ArrayRef<unsigned>}] # overloadedOperandsCpp # [{,
+ std::back_inserter(overloadedTypes),
+ [&args](unsigned index) { return args[index]->getType(); });
+ llvm::Module *module = builder.GetInsertBlock()->getModule();
+ llvm::Function *callee =
+ llvm::Intrinsic::getDeclaration(module,
+ llvm::Intrinsic::experimental_constrained_}] #
+ mnem # [{, overloadedTypes); }] #
+ !cond(!gt(hasRoundingMode, 0) : [{
+ // Get rounding mode using interface.
+ llvm::RoundingMode rounding =
+ moduleTranslation.translateRoundingMode($roundingmode); }],
+ true : [{
+ // No rounding mode.
+ std::optional<llvm::RoundingMode> rounding; }]) # [{
+ llvm::fp::ExceptionBehavior except =
+ moduleTranslation.translateFPExceptionBehavior($fpExceptionBehavior);
+ $res = builder.CreateConstrainedFPCall(callee, args, "", rounding, except);
+ }];
+ let mlirBuilder = [{
+ SmallVector<Value> mlirOperands;
+ SmallVector<NamedAttribute> mlirAttrs;
+ if (failed(moduleImport.convertIntrinsicArguments(
+ llvmOperands.take_front( }] # numArgs # [{),
+ {}, {}, mlirOperands, mlirAttrs))) {
+ return failure();
+ }
+
+ FPExceptionBehaviorAttr fpExceptionBehaviorAttr =
+ $_fpExceptionBehavior_attr($fpExceptionBehavior);
+ mlirAttrs.push_back(
+ $_builder.getNamedAttr(
+ $_qualCppClassName::getFPExceptionBehaviorAttrName(),
+ fpExceptionBehaviorAttr)); }] #
+ !cond(!gt(hasRoundingMode, 0) : [{
+ RoundingModeAttr roundingModeAttr = $_roundingMode_attr($roundingmode);
+ mlirAttrs.push_back(
+ $_builder.getNamedAttr($_qualCppClassName::getRoundingModeAttrName(),
+ roundingModeAttr));
+ }], true : "") # [{
+ $res = $_builder.create<$_qualCppClassName>($_location,
+ $_resultType, mlirOperands, mlirAttrs);
+ }];
+}
+
+def LLVM_ConstrainedFPTruncIntr
+ : LLVM_ConstrainedIntr<"fptrunc", /*numArgs=*/1,
+ /*overloadedResult=*/1, /*overloadedOperands=*/[0],
+ /*hasRoundingMode=*/1> {
+ let assemblyFormat = [{
+ $arg_0 $roundingmode $fpExceptionBehavior attr-dict `:` type($arg_0) `to` type(results)
+ }];
+}
+
// Intrinsics with multiple returns.
class LLVM_ArithWithOverflowOp<string mnem>
@@ -526,19 +611,19 @@ def LLVM_DbgLabelOp : LLVM_IntrOp<"dbg.label", [], [], [], 0> {
// Variadic function intrinsics.
//
-def LLVM_VaStartOp : LLVM_ZeroResultIntrOp<"vastart">,
+def LLVM_VaStartOp : LLVM_ZeroResultIntrOp<"vastart", [0]>,
Arguments<(ins LLVM_AnyPointer:$arg_list)> {
let assemblyFormat = "$arg_list attr-dict `:` qualified(type($arg_list))";
let summary = "Initializes `arg_list` for subsequent variadic argument extractions.";
}
-def LLVM_VaCopyOp : LLVM_ZeroResultIntrOp<"vacopy">,
+def LLVM_VaCopyOp : LLVM_ZeroResultIntrOp<"vacopy", [0]>,
Arguments<(ins LLVM_AnyPointer:$dest_list, LLVM_AnyPointer:$src_list)> {
let assemblyFormat = "$src_list `to` $dest_list attr-dict `:` type(operands)";
let summary = "Copies the current argument position from `src_list` to `dest_list`.";
}
-def LLVM_VaEndOp : LLVM_ZeroResultIntrOp<"vaend">,
+def LLVM_VaEndOp : LLVM_ZeroResultIntrOp<"vaend", [0]>,
Arguments<(ins LLVM_AnyPointer:$arg_list)> {
let assemblyFormat = "$arg_list attr-dict `:` qualified(type($arg_list))";
let summary = "Destroys `arg_list`, which has been initialized by `intr.vastart` or `intr.vacopy`.";
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index b6aa73dad229..7b9a9cf017c5 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -170,6 +170,10 @@ class LLVM_OpBase<Dialect dialect, string mnemonic, list<Trait> traits = []> :
// - $_float_attr - substituted by a call to a float attribute matcher;
// - $_var_attr - substituted by a call to a variable attribute matcher;
// - $_label_attr - substituted by a call to a label attribute matcher;
+ // - $_roundingMode_attr - substituted by a call to a rounding mode
+ // attribute matcher;
+ // - $_fpExceptionBehavior_attr - substituted by a call to a FP exception
+ // behavior attribute matcher;
// - $_resultType - substituted with the MLIR result type;
// - $_location - substituted with the MLIR location;
// - $_builder - substituted with the MLIR builder;
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index b523374f6c06..f8f9264b3889 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -323,7 +323,8 @@ def LLVM_GEPOp : LLVM_Op<"getelementptr", [Pure,
}
def LLVM_LoadOp : LLVM_MemAccessOpBase<"load",
- [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ [DeclareOpInterfaceMethods<DestructurableAccessorOpInterface>,
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
DeclareOpInterfaceMethods<PromotableMemOpInterface>,
DeclareOpInterfaceMethods<SafeMemorySlotAccessOpInterface>]> {
dag args = (ins LLVM_AnyPointer:$addr,
@@ -402,7 +403,8 @@ def LLVM_LoadOp : LLVM_MemAccessOpBase<"load",
}
def LLVM_StoreOp : LLVM_MemAccessOpBase<"store",
- [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ [DeclareOpInterfaceMethods<DestructurableAccessorOpInterface>,
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
DeclareOpInterfaceMethods<PromotableMemOpInterface>,
DeclareOpInterfaceMethods<SafeMemorySlotAccessOpInterface>]> {
dag args = (ins LLVM_LoadableType:$value,
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
index 96cdbf01b4bd..b7176aa93ff1 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
@@ -123,7 +123,7 @@ def LLVMFunctionType : LLVMType<"LLVMFunction", "func"> {
def LLVMPointerType : LLVMType<"LLVMPointer", "ptr", [
DeclareTypeInterfaceMethods<DataLayoutTypeInterface, [
- "areCompatible", "verifyEntries"]>]> {
+ "getIndexBitwidth", "areCompatible", "verifyEntries"]>]> {
let summary = "LLVM pointer type";
let description = [{
The `!llvm.ptr` type is an LLVM pointer type. This type typically represents
diff --git a/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h b/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h
index b32ac56d7079..cacb241bfd7a 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/Transforms/TypeConsistency.h
@@ -29,18 +29,6 @@ namespace LLVM {
/// interpret pointee types as consistently as possible.
std::unique_ptr<Pass> createTypeConsistencyPass();
-/// Transforms uses of pointers to a whole struct to uses of pointers to the
-/// first element of a struct. This is achieved by inserting a GEP to the first
-/// element when possible.
-template <class User>
-class AddFieldGetterToStructDirectUse : public OpRewritePattern<User> {
-public:
- using OpRewritePattern<User>::OpRewritePattern;
-
- LogicalResult matchAndRewrite(User user,
- PatternRewriter &rewriter) const override;
-};
-
/// Canonicalizes GEPs of which the base type and the pointer's type hint do not
/// match. This is done by replacing the original GEP into a GEP with the type
/// hint as a base type when an element of the hinted type aligns with the
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 92d844eefb72..5ee363ed3257 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -391,7 +391,6 @@ def ReduceOp : LinalgStructuredBase_Op<"reduce", [
def TransposeOp : LinalgStructuredBase_Op<"transpose", [
DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
- SameVariadicOperandSize,
SingleBlockImplicitTerminator<"YieldOp">]> {
let summary = "Transpose operator";
let description = [{
@@ -470,7 +469,6 @@ def TransposeOp : LinalgStructuredBase_Op<"transpose", [
def BroadcastOp : LinalgStructuredBase_Op<"broadcast", [
DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
- SameVariadicOperandSize,
SingleBlockImplicitTerminator<"YieldOp">]> {
let summary = "Static broadcast operator";
let description = [{
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.h b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.h
index d6bbcf88b79f..fdebcb031b11 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.h
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.h
@@ -10,8 +10,8 @@
#define MLIR_DIALECT_LINALG_TRANSFORMOPS_LINALGMATCHOPS_H
#include "mlir/Dialect/Linalg/IR/Linalg.h"
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformAttrs.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
namespace mlir {
namespace transform {
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td
index dfeb8ae5d5dd..cdc29d053e5a 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgMatchOps.td
@@ -10,7 +10,7 @@
#define LINALG_MATCH_OPS
include "mlir/Dialect/Linalg/TransformOps/LinalgTransformEnums.td"
-include "mlir/Dialect/Transform/IR/MatchInterfaces.td"
+include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformAttrs.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/IR/TransformTypes.td"
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 4f34016066b4..c260fe3f7a46 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1918,7 +1918,9 @@ def TileUsingForallOp :
It is the user's responsibility to ensure that `num_threads/tile_sizes` is
a valid tiling specification (i.e. that only tiles parallel dimensions,
- e.g. in the Linalg case).
+ e.g. in the Linalg case). If the dimension is not parallelizable, a warning
+ is issued to notify the user that the generated code is not safe to
+ parallelize.
If non-empty, the `mapping` is added as an attribute to the
resulting `scf.forall`.
diff --git a/mlir/include/mlir/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.h b/mlir/include/mlir/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.h
new file mode 100644
index 000000000000..714518a21e97
--- /dev/null
+++ b/mlir/include/mlir/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.h
@@ -0,0 +1,20 @@
+//===- BufferViewFlowOpInterfaceImpl.h - Buffer View Analysis ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_MEMREF_TRANSFORMS_BUFFERVIEWFLOWOPINTERFACEIMPL_H
+#define MLIR_DIALECT_MEMREF_TRANSFORMS_BUFFERVIEWFLOWOPINTERFACEIMPL_H
+
+namespace mlir {
+class DialectRegistry;
+
+namespace memref {
+void registerBufferViewFlowOpInterfaceExternalModels(DialectRegistry &registry);
+} // namespace memref
+} // namespace mlir
+
+#endif // MLIR_DIALECT_MEMREF_TRANSFORMS_BUFFERVIEWFLOWOPINTERFACEIMPL_H
diff --git a/mlir/include/mlir/Dialect/SCF/TransformOps/SCFTransformOps.td b/mlir/include/mlir/Dialect/SCF/TransformOps/SCFTransformOps.td
index 6f94cee5b019..5eefe2664d0a 100644
--- a/mlir/include/mlir/Dialect/SCF/TransformOps/SCFTransformOps.td
+++ b/mlir/include/mlir/Dialect/SCF/TransformOps/SCFTransformOps.td
@@ -333,23 +333,24 @@ def TakeAssumedBranchOp : Op<Transform_Dialect, "scf.take_assumed_branch", [
}];
}
-def LoopFuseSibling : Op<Transform_Dialect, "loop.fuse_sibling",
+def LoopFuseSiblingOp : Op<Transform_Dialect, "loop.fuse_sibling",
[FunctionalStyleTransformOpTrait, MemoryEffectsOpInterface,
DeclareOpInterfaceMethods<TransformOpInterface>]> {
let summary = "Fuse a loop into another loop, assuming the fusion is legal.";
let description = [{
Fuses the `target` loop into the `source` loop assuming they are
- independent of each other. It is the responsibility of the user to ensure
- that the given two loops are independent of each other, this operation will
- not performa any legality checks and will simply fuse the two given loops.
+ independent of each other. In the fused loop, the arguments, body and
+ results of `target` are placed _before_ those of `source`.
- Currently, the only fusion supported is when both `target` and `source`
- are `scf.forall` operations. For `scf.forall` fusion, the bounds and the
- mapping must match, otherwise a silencable failure is produced.
+ For fusion of two `scf.for` loops, the bounds and step size must match. For
+ fusion of two `scf.forall` loops, the bounds and the mapping must match.
+  Otherwise a silenceable failure is produced.
- The input handles `target` and `source` must map to exactly one operation,
- a definite failure is produced otherwise.
+ The `target` and `source` handles must refer to exactly one operation,
+ otherwise a definite failure is produced. It is the responsibility of the
+ user to ensure that the `target` and `source` loops are independent of each
+ other -- this op will only perform rudimentary legality checks.
#### Return modes
@@ -362,10 +363,6 @@ def LoopFuseSibling : Op<Transform_Dialect, "loop.fuse_sibling",
let results = (outs TransformHandleTypeInterface:$fused_loop);
let assemblyFormat = "$target `into` $source attr-dict "
" `:` functional-type(operands, results)";
-
- let builders = [
- OpBuilder<(ins "Value":$loop, "Value":$fused_loop)>
- ];
}
#endif // SCF_TRANSFORM_OPS
diff --git a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
index 9bdd6eb83387..883d11bcc4df 100644
--- a/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
+++ b/mlir/include/mlir/Dialect/SCF/Utils/Utils.h
@@ -162,6 +162,16 @@ scf::ForallOp fuseIndependentSiblingForallLoops(scf::ForallOp target,
scf::ForallOp source,
RewriterBase &rewriter);
+/// Given two scf.for loops, `target` and `source`, fuses `target` into
+/// `source`. Assumes that the given loops are siblings and are independent of
+/// each other.
+///
+/// This function does not perform any legality checks and simply fuses the
+/// loops. The caller is responsible for ensuring that the loops are legal to
+/// fuse.
+scf::ForOp fuseIndependentSiblingForLoops(scf::ForOp target, scf::ForOp source,
+ RewriterBase &rewriter);
+
} // namespace mlir
#endif // MLIR_DIALECT_SCF_UTILS_UTILS_H_
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
index 3ee239d6e1e3..61c5a7a6394f 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
@@ -659,6 +659,8 @@ def SPIRV_SGreaterThanOp : SPIRV_LogicalBinaryOp<"SGreaterThan",
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -688,6 +690,8 @@ def SPIRV_SGreaterThanEqualOp : SPIRV_LogicalBinaryOp<"SGreaterThanEqual",
%5 = spirv.SGreaterThanEqual %2, %3 : vector<4xi32>
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -716,6 +720,8 @@ def SPIRV_SLessThanOp : SPIRV_LogicalBinaryOp<"SLessThan",
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -745,6 +751,8 @@ def SPIRV_SLessThanEqualOp : SPIRV_LogicalBinaryOp<"SLessThanEqual",
%5 = spirv.SLessThanEqual %2, %3 : vector<4xi32>
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -830,6 +838,8 @@ def SPIRV_UGreaterThanOp : SPIRV_LogicalBinaryOp<"UGreaterThan",
%5 = spirv.UGreaterThan %2, %3 : vector<4xi32>
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -859,6 +869,8 @@ def SPIRV_UGreaterThanEqualOp : SPIRV_LogicalBinaryOp<"UGreaterThanEqual",
%5 = spirv.UGreaterThanEqual %2, %3 : vector<4xi32>
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -886,6 +898,8 @@ def SPIRV_ULessThanOp : SPIRV_LogicalBinaryOp<"ULessThan",
%5 = spirv.ULessThan %2, %3 : vector<4xi32>
```
}];
+
+ let hasFolder = 1;
}
// -----
@@ -949,6 +963,8 @@ def SPIRV_ULessThanEqualOp : SPIRV_LogicalBinaryOp<"ULessThanEqual",
%5 = spirv.ULessThanEqual %2, %3 : vector<4xi32>
```
}];
+
+ let hasFolder = 1;
}
#endif // MLIR_DIALECT_SPIRV_IR_LOGICAL_OPS
diff --git a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
index 933d62e35fce..09eecafc0c8a 100644
--- a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
+++ b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
@@ -55,11 +55,6 @@ struct SPIRVConversionOptions {
/// values will be packed into one 32-bit value to be memory efficient.
bool emulateLT32BitScalarTypes{true};
- /// Whether to enable fast math mode during conversion. If true, various
- /// patterns would assume no NaN/infinity numbers as inputs, and thus there
- /// will be no special guards emitted to check and handle such cases.
- bool enableFastMathMode{false};
-
/// Use 64-bit integers when converting index types.
bool use64bitIndex{false};
};
diff --git a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.h b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.h
index ac4d38e0c5b1..d0fc85ccc9de 100644
--- a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.h
+++ b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.h
@@ -18,12 +18,18 @@
namespace mlir {
namespace spirv {
-/// Appends to a pattern list additional patterns to expand extended
-/// multiplication ops into regular arithmetic ops. Extended multiplication ops
-/// are not supported by the WebGPU Shading Language (WGSL).
+/// Appends patterns to expand extended multiplication and addition ops into
+/// regular arithmetic ops. Extended arithmetic ops are not supported by the
+/// WebGPU Shading Language (WGSL).
void populateSPIRVExpandExtendedMultiplicationPatterns(
RewritePatternSet &patterns);
+/// Appends patterns to expand non-finite arithmetic ops `IsNan` and `IsInf`.
+/// These are not supported by the WebGPU Shading Language (WGSL). We follow
+/// fast math assumptions and assume that all floating point values are finite.
+void populateSPIRVExpandNonFiniteArithmeticPatterns(
+ RewritePatternSet &patterns);
+
} // namespace spirv
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.h b/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.h
index 54a9e2aec805..8c3124909052 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.h
@@ -9,9 +9,9 @@
#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMOPS_SPARSETENSORTRANSFORMOPS_H
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMOPS_SPARSETENSORTRANSFORMOPS_H
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformAttrs.h"
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/RegionKindInterface.h"
diff --git a/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.td b/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.td
index 9f0436e701b8..e340228795cd 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/TransformOps/SparseTensorTransformOps.td
@@ -11,7 +11,7 @@
#ifndef SPARSETENSOR_TRANSFORM_OPS
#define SPARSETENSOR_TRANSFORM_OPS
-include "mlir/Dialect/Transform/IR/MatchInterfaces.td"
+include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformAttrs.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/IR/TransformTypes.td"
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index 0ecded75c5d8..306e4a439520 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1942,7 +1942,7 @@ def Tosa_ConstOp : Tosa_Op<"const", [ConstantLike, Pure,
);
let results = (outs
- TensorOf<[AnyTypeOf<[Tosa_AnyNumber_Plus_F64, Tosa_Int4]>]>:$output
+ TensorOf<[AnyTypeOf<[Tosa_AnyNumber_Plus_F64]>]>:$output
);
let hasFolder = 1;
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
index 5a4d6ff464f1..cff3de0a69af 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
@@ -38,29 +38,17 @@ class Tosa_QuantizedType<string n, list<int> params, bit signed>
// Used to express accumulator results or compare results.
//===----------------------------------------------------------------------===//
-def Tosa_UInt8 : UI<8>;
-def Tosa_UInt16 : UI<16>;
-
def Tosa_Int4 : I<4>;
def Tosa_Int8 : I<8>;
-def Tosa_Int16 : I<16>;
def Tosa_Int32 : I<32>;
-def Tosa_Int48 : I<48>;
def Tosa_Int64 : I<64>;
-def Tosa_SignedInt : AnyTypeOf<[Tosa_Int8,
- Tosa_Int16,
- Tosa_Int32,
- Tosa_Int48,
- Tosa_Int64]>;
-
-def Tosa_Bool : I<1>;
-
-// No unsigned unquantized int types.
-def Tosa_Int : AnyTypeOf<[Tosa_Bool,
- Tosa_UInt8,
- Tosa_UInt16,
- Tosa_SignedInt]>;
+// The TOSA dialect allows more types than the TOSA standard to allow for
+// experimentation. For historical reasons, signless is used in the place of
+// signed.
+// The TosaValidation pass can be used to check for standard conformance.
+def Tosa_Int : AnyTypeOf<[AnyUnsignedInteger,
+ AnySignlessInteger]>;
def Tosa_Int32Or64 : AnyTypeOf<[Tosa_Int32,
Tosa_Int64]>;
@@ -172,9 +160,6 @@ class Tosa_TypeLike<list<Type> types, string description = ""> : TypeConstraint<
def Tosa_IntLike : Tosa_TypeLike<[Tosa_Int], "signless-integer-like">;
def Tosa_Int8Like : Tosa_TypeLike<[Tosa_Int8], "signless-integer-8-bit-like">;
-def Tosa_Int16Like : Tosa_TypeLike<[Tosa_Int16], "signless-integer-16-bit-like">;
-def Tosa_Int32Like : Tosa_TypeLike<[Tosa_Int32], "signless-integer-32-bit-like">;
-def Tosa_Int64Like : Tosa_TypeLike<[Tosa_Int64], "signless-integer-64-bit-like">;
//===----------------------------------------------------------------------===//
// Attribute predicates and classes.
diff --git a/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.h b/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.h
index 05abe5adbe80..ea541c9515b8 100644
--- a/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.h
+++ b/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.h
@@ -10,8 +10,8 @@
#define MLIR_DIALECT_TRANSFORM_DEBUGEXTENSION_DEBUGEXTENSIONOPS_H
#include "mlir/Bytecode/BytecodeOpInterface.h"
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
diff --git a/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.td b/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.td
index dc9b7c4229ac..0275f241fda3 100644
--- a/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.td
+++ b/mlir/include/mlir/Dialect/Transform/DebugExtension/DebugExtensionOps.td
@@ -16,7 +16,7 @@
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/OpBase.td"
-include "mlir/Dialect/Transform/IR/MatchInterfaces.td"
+include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.td"
include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
diff --git a/mlir/include/mlir/Dialect/Transform/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Transform/IR/CMakeLists.txt
index 3ccac2f5c165..df5af7ae710d 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Transform/IR/CMakeLists.txt
@@ -24,7 +24,3 @@ add_dependencies(mlir-headers MLIRTransformDialectEnumIncGen)
add_mlir_dialect(TransformOps transform)
add_mlir_doc(TransformOps TransformOps Dialects/ -gen-op-doc -dialect=transform)
-add_mlir_interface(MatchInterfaces)
-add_dependencies(MLIRMatchInterfacesIncGen MLIRTransformInterfacesIncGen)
-add_mlir_doc(TransformInterfaces MatchOpInterfaces Dialects/ -gen-op-interface-docs)
-
diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformDialect.h b/mlir/include/mlir/Dialect/Transform/IR/TransformDialect.h
index db27f2c6fc49..128eacdbe6ab 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/TransformDialect.h
+++ b/mlir/include/mlir/Dialect/Transform/IR/TransformDialect.h
@@ -252,21 +252,21 @@ private:
template <typename OpTy>
void TransformDialect::addOperationIfNotRegistered() {
- StringRef name = OpTy::getOperationName();
std::optional<RegisteredOperationName> opName =
- RegisteredOperationName::lookup(name, getContext());
+ RegisteredOperationName::lookup(TypeID::get<OpTy>(), getContext());
if (!opName) {
addOperations<OpTy>();
#ifndef NDEBUG
+ StringRef name = OpTy::getOperationName();
detail::checkImplementsTransformOpInterface(name, getContext());
#endif // NDEBUG
return;
}
- if (opName->getTypeID() == TypeID::get<OpTy>())
+ if (LLVM_LIKELY(opName->getTypeID() == TypeID::get<OpTy>()))
return;
- reportDuplicateOpRegistration(name);
+ reportDuplicateOpRegistration(OpTy::getOperationName());
}
template <typename Type>
diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.h b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.h
index 6c10fcf75804..88185a07966d 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.h
+++ b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.h
@@ -10,10 +10,10 @@
#define MLIR_DIALECT_TRANSFORM_IR_TRANSFORMOPS_H
#include "mlir/Bytecode/BytecodeOpInterface.h"
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformAttrs.h"
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
#include "mlir/Dialect/Transform/IR/TransformTypes.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td
index 9caa7632c177..bf1a8016cd9d 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td
+++ b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td
@@ -18,7 +18,7 @@ include "mlir/Interfaces/FunctionInterfaces.td"
include "mlir/IR/OpAsmInterface.td"
include "mlir/IR/RegionKindInterface.td"
include "mlir/IR/SymbolInterfaces.td"
-include "mlir/Dialect/Transform/IR/MatchInterfaces.td"
+include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformAttrs.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td"
diff --git a/mlir/include/mlir/Dialect/Transform/Interfaces/CMakeLists.txt b/mlir/include/mlir/Dialect/Transform/Interfaces/CMakeLists.txt
index b3396b67b4f7..14ce5b82b811 100644
--- a/mlir/include/mlir/Dialect/Transform/Interfaces/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/Transform/Interfaces/CMakeLists.txt
@@ -9,3 +9,9 @@ mlir_tablegen(TransformTypeInterfaces.cpp.inc -gen-type-interface-defs)
add_public_tablegen_target(MLIRTransformDialectTypeInterfacesIncGen)
add_dependencies(mlir-headers MLIRTransformDialectTypeInterfacesIncGen)
add_mlir_doc(TransformInterfaces TransformTypeInterfaces Dialects/ -gen-type-interface-docs)
+
+add_mlir_interface(MatchInterfaces)
+add_dependencies(MLIRMatchInterfacesIncGen MLIRTransformInterfacesIncGen)
+add_dependencies(mlir-headers MLIRMatchInterfacesIncGen)
+add_mlir_doc(MatchInterfaces MatchOpInterfaces Dialects/ -gen-op-interface-docs)
+
diff --git a/mlir/include/mlir/Dialect/Transform/IR/MatchInterfaces.h b/mlir/include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.h
index 13a52b54201e..ad3e375c326f 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/MatchInterfaces.h
+++ b/mlir/include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.h
@@ -218,6 +218,6 @@ expandTargetSpecification(Location loc, bool isAll, bool isInverted,
} // namespace transform
} // namespace mlir
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h.inc"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h.inc"
#endif // MLIR_DIALECT_TRANSFORM_IR_MATCHINTERFACES_H
diff --git a/mlir/include/mlir/Dialect/Transform/IR/MatchInterfaces.td b/mlir/include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.td
index 56d2ac648599..56d2ac648599 100644
--- a/mlir/include/mlir/Dialect/Transform/IR/MatchInterfaces.td
+++ b/mlir/include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.td
diff --git a/mlir/include/mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h b/mlir/include/mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h
new file mode 100644
index 000000000000..31e19ff1ad39
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h
@@ -0,0 +1,104 @@
+//===- ScalableValueBoundsConstraintSet.h - Scalable Value Bounds ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VECTOR_IR_SCALABLEVALUEBOUNDSCONSTRAINTSET_H
+#define MLIR_DIALECT_VECTOR_IR_SCALABLEVALUEBOUNDSCONSTRAINTSET_H
+
+#include "mlir/Analysis/Presburger/IntegerRelation.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/Interfaces/ValueBoundsOpInterface.h"
+
+namespace mlir::vector {
+
+namespace detail {
+
+/// Parent class for the value bounds RTTIExtends. Uses protected inheritance to
+/// hide all ValueBoundsConstraintSet methods by default (as some do not use the
+/// ScalableValueBoundsConstraintSet, so may produce unexpected results).
+struct ValueBoundsConstraintSet : protected ::mlir::ValueBoundsConstraintSet {
+ using ::mlir::ValueBoundsConstraintSet::ValueBoundsConstraintSet;
+};
+} // namespace detail
+
+/// A version of `ValueBoundsConstraintSet` that can solve for scalable bounds.
+struct ScalableValueBoundsConstraintSet
+ : public llvm::RTTIExtends<ScalableValueBoundsConstraintSet,
+ detail::ValueBoundsConstraintSet> {
+ ScalableValueBoundsConstraintSet(MLIRContext *context, unsigned vscaleMin,
+ unsigned vscaleMax)
+ : RTTIExtends(context), vscaleMin(vscaleMin), vscaleMax(vscaleMax){};
+
+ using RTTIExtends::bound;
+ using RTTIExtends::StopConditionFn;
+
+ /// A thin wrapper over an `AffineMap` which can represent a constant bound,
+ /// or a scalable bound (in terms of vscale). The `AffineMap` will always
+ /// take at most one parameter, vscale, and returns a single result, which is
+ /// the bound of value.
+ struct ConstantOrScalableBound {
+ AffineMap map;
+
+ struct BoundSize {
+ int64_t baseSize{0};
+ bool scalable{false};
+ };
+
+ /// Get the (possibly) scalable size of the bound, returns failure if
+ /// the bound cannot be represented as a single quantity.
+ FailureOr<BoundSize> getSize() const;
+ };
+
+ /// Computes a (possibly) scalable bound for a given value. This is
+ /// similar to `ValueBoundsConstraintSet::computeConstantBound()`, but
+ /// uses knowledge of the range of vscale to compute either a constant
+ /// bound, an expression in terms of vscale, or failure if no bound can
+ /// be computed.
+ ///
+ /// The resulting `AffineMap` will always take at most one parameter,
+ /// vscale, and return a single result, which is the bound of `value`.
+ ///
+ /// Note: `vscaleMin` must be `<=` to `vscaleMax`. If `vscaleMin` ==
+ /// `vscaleMax`, the resulting bound (if found), will be constant.
+ static FailureOr<ConstantOrScalableBound>
+ computeScalableBound(Value value, std::optional<int64_t> dim,
+ unsigned vscaleMin, unsigned vscaleMax,
+ presburger::BoundType boundType, bool closedUB = true,
+ StopConditionFn stopCondition = nullptr);
+
+  /// Get the value of vscale. Returns `nullptr` if vscale has not been
+  /// encountered.
+ Value getVscaleValue() const { return vscale; }
+
+ /// Sets the value of vscale. Asserts if vscale has already been set.
+ void setVscale(vector::VectorScaleOp vscaleOp) {
+ assert(!vscale && "expected vscale to be unset");
+ vscale = vscaleOp.getResult();
+ }
+
+ /// The minimum possible value of vscale.
+ unsigned getVscaleMin() const { return vscaleMin; }
+
+ /// The maximum possible value of vscale.
+ unsigned getVscaleMax() const { return vscaleMax; }
+
+ static char ID;
+
+private:
+ const unsigned vscaleMin;
+ const unsigned vscaleMax;
+
+ // This will be set when the first `vector.vscale` operation is found within
+ // the `ValueBoundsOpInterface` implementation then reused from there on.
+ Value vscale = nullptr;
+};
+
+using ConstantOrScalableBound =
+ ScalableValueBoundsConstraintSet::ConstantOrScalableBound;
+
+} // namespace mlir::vector
+
+#endif // MLIR_DIALECT_VECTOR_IR_SCALABLEVALUEBOUNDSCONSTRAINTSET_H
diff --git a/mlir/include/mlir/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.h b/mlir/include/mlir/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.h
new file mode 100644
index 000000000000..4794bc9016c6
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.h
@@ -0,0 +1,20 @@
+//===- ValueBoundsOpInterfaceImpl.h - Impl. of ValueBoundsOpInterface -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_VECTOR_IR_VALUEBOUNDSOPINTERFACEIMPL_H
+#define MLIR_DIALECT_VECTOR_IR_VALUEBOUNDSOPINTERFACEIMPL_H
+
+namespace mlir {
+class DialectRegistry;
+
+namespace vector {
+void registerValueBoundsOpInterfaceExternalModels(DialectRegistry &registry);
+} // namespace vector
+} // namespace mlir
+
+#endif // MLIR_DIALECT_VECTOR_IR_VALUEBOUNDSOPINTERFACEIMPL_H
diff --git a/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransforms.h b/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransforms.h
index 08d3bb157a0e..1f7d6411cd5a 100644
--- a/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransforms.h
+++ b/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransforms.h
@@ -110,8 +110,10 @@ void transferOpflowOpt(RewriterBase &rewriter, Operation *rootOp);
/// Cast away the leading unit dim, if exists, for the given contract op.
/// Return success if the transformation applies; return failure otherwise.
-LogicalResult castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
- RewriterBase &rewriter);
+FailureOr<Value>
+castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
+ MaskingOpInterface maskingOp,
+ RewriterBase &rewriter);
} // namespace vector
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 35e76a8b623a..f88fbdf9e627 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -127,8 +127,8 @@ SmallVector<OpFoldResult> getMixedSizesXfer(bool hasTensorSemantics,
/// responsible for providing an updated ("rewritten") version of:
/// a. the source Op when mask _is not_ present,
/// b. the source Op and the masking Op when mask _is_ present.
-/// Note that the return value from `matchAndRewriteMaskableOp` depends on the
-/// case above.
+/// To use this pattern, implement `matchAndRewriteMaskableOp`. Note that
+/// the return value will depend on the case above.
template <class SourceOp>
struct MaskableOpRewritePattern : OpRewritePattern<SourceOp> {
using OpRewritePattern<SourceOp>::OpRewritePattern;
@@ -162,14 +162,24 @@ private:
}
public:
- // Matches SourceOp that can potentially be masked with `maskingOp`. If the
- // latter is present, returns an updated masking op (with a replacement for
- // `sourceOp` nested inside). Otherwise, returns an updated `sourceOp`.
+ // Matches `sourceOp` that can potentially be masked with `maskingOp`. If the
+ // latter is present, returns a replacement for `maskingOp`. Otherwise,
+ // returns a replacement for `sourceOp`.
virtual FailureOr<Value>
matchAndRewriteMaskableOp(SourceOp sourceOp, MaskingOpInterface maskingOp,
PatternRewriter &rewriter) const = 0;
};
+/// Returns true if the input Vector type can be linearized.
+///
+/// Linearization is meant in the sense of flattening vectors, e.g.:
+/// * vector<NxMxKxi32> -> vector<N*M*Kxi32>
+/// In this sense, Vectors that are either:
+/// * already linearized, or
+/// * contain more than 1 scalable dimension,
+/// are not linearizable.
+bool isLinearizableVector(VectorType type);
+
} // namespace vector
/// Constructs a permutation map of invariant memref indices to vector
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index 773957a8b511..80e3fec22694 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -143,6 +143,12 @@ public:
MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETCOORDINATES)
#undef DECL_GETCOORDINATES
+ /// Gets coordinates-overhead storage buffer for the given level.
+#define DECL_GETCOORDINATESBUFFER(INAME, C) \
+ virtual void getCoordinatesBuffer(std::vector<C> **, uint64_t);
+ MLIR_SPARSETENSOR_FOREVERY_FIXED_O(DECL_GETCOORDINATESBUFFER)
+#undef DECL_GETCOORDINATESBUFFER
+
/// Gets primary storage.
#define DECL_GETVALUES(VNAME, V) virtual void getValues(std::vector<V> **);
MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES)
@@ -251,6 +257,31 @@ public:
assert(lvl < getLvlRank());
*out = &coordinates[lvl];
}
+ void getCoordinatesBuffer(std::vector<C> **out, uint64_t lvl) final {
+ assert(out && "Received nullptr for out parameter");
+ assert(lvl < getLvlRank());
+ // Note that the sparse tensor support library always stores COO in SoA
+ // format, even when AoS is requested. This is never an issue, since all
+ // actual code/library generation requests "views" into the coordinate
+ // storage for the individual levels, which is trivially provided for
+ // both AoS and SoA (as well as all the other storage formats). The only
+ // exception is when the buffer version of coordinate storage is requested
+ // (currently only for printing). In that case, we do the following
+ // potentially expensive transformation to provide that view. If this
+ // operation becomes more common beyond debugging, we should consider
+ // implementing proper AoS in the support library as well.
+ uint64_t lvlRank = getLvlRank();
+ uint64_t nnz = values.size();
+ crdBuffer.clear();
+ crdBuffer.reserve(nnz * (lvlRank - lvl));
+ for (uint64_t i = 0; i < nnz; i++) {
+ for (uint64_t l = lvl; l < lvlRank; l++) {
+ assert(i < coordinates[l].size());
+ crdBuffer.push_back(coordinates[l][i]);
+ }
+ }
+ *out = &crdBuffer;
+ }
void getValues(std::vector<V> **out) final {
assert(out && "Received nullptr for out parameter");
*out = &values;
@@ -529,10 +560,14 @@ private:
return -1u;
}
+ // Sparse tensor storage components.
std::vector<std::vector<P>> positions;
std::vector<std::vector<C>> coordinates;
std::vector<V> values;
+
+ // Auxiliary data structures.
std::vector<uint64_t> lvlCursor;
+ std::vector<C> crdBuffer; // just for AoS view
};
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index d916186c835c..396f76fd8f92 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -77,6 +77,14 @@ MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOSITIONS)
MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
#undef DECL_SPARSECOORDINATES
+/// Tensor-storage method to obtain direct access to the coordinates array
+/// buffer for the given level (provides an AoS view into the library).
+#define DECL_SPARSECOORDINATES(CNAME, C) \
+ MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_sparseCoordinatesBuffer##CNAME( \
+ StridedMemRefType<C, 1> *out, void *tensor, index_type lvl);
+MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
+#undef DECL_SPARSECOORDINATES
+
/// Tensor-storage method to insert elements in lexicographical
/// level-coordinate order.
#define DECL_LEXINSERT(VNAME, V) \
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 43b6d2b38416..3beade017d1a 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -490,7 +490,7 @@ private:
template <typename OpT>
RegisteredOperationName getCheckRegisteredInfo(MLIRContext *ctx) {
std::optional<RegisteredOperationName> opName =
- RegisteredOperationName::lookup(OpT::getOperationName(), ctx);
+ RegisteredOperationName::lookup(TypeID::get<OpT>(), ctx);
if (LLVM_UNLIKELY(!opName)) {
llvm::report_fatal_error(
"Building op `" + OpT::getOperationName() +
diff --git a/mlir/include/mlir/IR/Dialect.h b/mlir/include/mlir/IR/Dialect.h
index 6c8a170a03c7..f7c1f4df16fc 100644
--- a/mlir/include/mlir/IR/Dialect.h
+++ b/mlir/include/mlir/IR/Dialect.h
@@ -210,7 +210,7 @@ public:
/// registration. The promised interface type can be an interface of any type
/// not just a dialect interface, i.e. it may also be an
/// AttributeInterface/OpInterface/TypeInterface/etc.
- template <typename ConcreteT, typename InterfaceT>
+ template <typename InterfaceT, typename ConcreteT>
void declarePromisedInterface() {
unresolvedPromisedInterfaces.insert(
{TypeID::get<ConcreteT>(), InterfaceT::getInterfaceID()});
@@ -221,7 +221,7 @@ public:
// declarePromisedInterfaces<FunctionOpInterface, MyFuncType1, MyFuncType2>()
template <typename InterfaceT, typename... ConcreteT>
void declarePromisedInterfaces() {
- (declarePromisedInterface<ConcreteT, InterfaceT>(), ...);
+ (declarePromisedInterface<InterfaceT, ConcreteT>(), ...);
}
/// Checks if the given interface, which is attempting to be used, is a
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index bd68c2744574..c177ae3594d1 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -1729,8 +1729,7 @@ public:
template <typename... Models>
static void attachInterface(MLIRContext &context) {
std::optional<RegisteredOperationName> info =
- RegisteredOperationName::lookup(ConcreteType::getOperationName(),
- &context);
+ RegisteredOperationName::lookup(TypeID::get<ConcreteType>(), &context);
if (!info)
llvm::report_fatal_error(
"Attempting to attach an interface to an unregistered operation " +
diff --git a/mlir/include/mlir/IR/OperationSupport.h b/mlir/include/mlir/IR/OperationSupport.h
index f2aa6cee8403..90e63ff8fcb3 100644
--- a/mlir/include/mlir/IR/OperationSupport.h
+++ b/mlir/include/mlir/IR/OperationSupport.h
@@ -676,6 +676,11 @@ public:
static std::optional<RegisteredOperationName> lookup(StringRef name,
MLIRContext *ctx);
+ /// Lookup the registered operation information for the given operation.
+ /// Returns std::nullopt if the operation isn't registered.
+ static std::optional<RegisteredOperationName> lookup(TypeID typeID,
+ MLIRContext *ctx);
+
/// Register a new operation in a Dialect object.
/// This constructor is used by Dialect objects when they register the list
/// of operations they contain.
diff --git a/mlir/include/mlir/IR/TensorEncoding.td b/mlir/include/mlir/IR/TensorEncoding.td
index 3991520d72a5..4907dcbb5de9 100644
--- a/mlir/include/mlir/IR/TensorEncoding.td
+++ b/mlir/include/mlir/IR/TensorEncoding.td
@@ -34,8 +34,8 @@ def VerifiableTensorEncoding : AttrInterface<"VerifiableTensorEncoding"> {
/*retTy=*/"::mlir::LogicalResult",
/*methodName=*/"verifyEncoding",
/*args=*/(ins
- "ArrayRef<int64_t>":$shape,
- "Type":$elementType,
+ "::mlir::ArrayRef<int64_t>":$shape,
+ "::mlir::Type":$elementType,
"::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError)
>,
];
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
index 21775e11e071..c558dc53cc7f 100644
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -21,6 +21,7 @@
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/IR/ValueBoundsOpInterfaceImpl.h"
#include "mlir/Dialect/Arith/Transforms/BufferDeallocationOpInterfaceImpl.h"
+#include "mlir/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.h"
#include "mlir/Dialect/Arith/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/ArmNeon/ArmNeonDialect.h"
#include "mlir/Dialect/ArmSME/IR/ArmSME.h"
@@ -52,6 +53,7 @@
#include "mlir/Dialect/MemRef/IR/MemRefMemorySlot.h"
#include "mlir/Dialect/MemRef/IR/ValueBoundsOpInterfaceImpl.h"
#include "mlir/Dialect/MemRef/Transforms/AllocationOpInterfaceImpl.h"
+#include "mlir/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.h"
#include "mlir/Dialect/MemRef/Transforms/RuntimeOpVerification.h"
#include "mlir/Dialect/Mesh/IR/MeshDialect.h"
#include "mlir/Dialect/NVGPU/IR/NVGPUDialect.h"
@@ -82,6 +84,7 @@
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
#include "mlir/Dialect/Transform/PDLExtension/PDLExtension.h"
#include "mlir/Dialect/UB/IR/UBOps.h"
+#include "mlir/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Vector/Transforms/SubsetOpInterfaceImpl.h"
@@ -147,6 +150,7 @@ inline void registerAllDialects(DialectRegistry &registry) {
affine::registerValueBoundsOpInterfaceExternalModels(registry);
arith::registerBufferDeallocationOpInterfaceExternalModels(registry);
arith::registerBufferizableOpInterfaceExternalModels(registry);
+ arith::registerBufferViewFlowOpInterfaceExternalModels(registry);
arith::registerValueBoundsOpInterfaceExternalModels(registry);
bufferization::func_ext::registerBufferizableOpInterfaceExternalModels(
registry);
@@ -156,6 +160,7 @@ inline void registerAllDialects(DialectRegistry &registry) {
gpu::registerBufferDeallocationOpInterfaceExternalModels(registry);
linalg::registerAllDialectInterfaceImplementations(registry);
memref::registerAllocationOpInterfaceExternalModels(registry);
+ memref::registerBufferViewFlowOpInterfaceExternalModels(registry);
memref::registerRuntimeVerifiableOpInterfaceExternalModels(registry);
memref::registerValueBoundsOpInterfaceExternalModels(registry);
memref::registerMemorySlotExternalModels(registry);
@@ -174,6 +179,7 @@ inline void registerAllDialects(DialectRegistry &registry) {
tosa::registerShardingInterfaceExternalModels(registry);
vector::registerBufferizableOpInterfaceExternalModels(registry);
vector::registerSubsetOpInterfaceExternalModels(registry);
+ vector::registerValueBoundsOpInterfaceExternalModels(registry);
NVVM::registerNVVMTargetInterfaceExternalModels(registry);
ROCDL::registerROCDLTargetInterfaceExternalModels(registry);
spirv::registerSPIRVTargetInterfaceExternalModels(registry);
diff --git a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
index 4a21f76dfc5d..046354677e6a 100644
--- a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
+++ b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
@@ -57,6 +57,13 @@ uint64_t
getDefaultPreferredAlignment(Type type, const DataLayout &dataLayout,
ArrayRef<DataLayoutEntryInterface> params);
+/// Default handler for the index bitwidth request. Computes the result for
+/// the built-in index type and dispatches to the DataLayoutTypeInterface for
+/// other types.
+std::optional<uint64_t>
+getDefaultIndexBitwidth(Type type, const DataLayout &dataLayout,
+ ArrayRef<DataLayoutEntryInterface> params);
+
/// Default handler for alloca memory space request. Dispatches to the
/// DataLayoutInterface if specified, otherwise returns the default.
Attribute getDefaultAllocaMemorySpace(DataLayoutEntryInterface entry);
@@ -180,6 +187,11 @@ public:
/// Returns the preferred of the given type in the current scope.
uint64_t getTypePreferredAlignment(Type t) const;
+ /// Returns the bitwidth that should be used when performing index
+ /// computations for the given pointer-like type in the current scope. If the
+ /// type is not a pointer-like type, it returns std::nullopt.
+ std::optional<uint64_t> getTypeIndexBitwidth(Type t) const;
+
/// Returns the memory space used for AllocaOps.
Attribute getAllocaMemorySpace() const;
@@ -216,6 +228,7 @@ private:
mutable DenseMap<Type, llvm::TypeSize> bitsizes;
mutable DenseMap<Type, uint64_t> abiAlignments;
mutable DenseMap<Type, uint64_t> preferredAlignments;
+ mutable DenseMap<Type, std::optional<uint64_t>> indexBitwidths;
/// Cache for alloca, global, and program memory spaces.
mutable std::optional<Attribute> allocaMemorySpace;
diff --git a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td
index a8def967fffc..0ee7a116d114 100644
--- a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td
+++ b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.td
@@ -281,6 +281,22 @@ def DataLayoutOpInterface : OpInterface<"DataLayoutOpInterface"> {
}]
>,
StaticInterfaceMethod<
+ /*description=*/"Returns the bitwidth that should be used when "
+ "performing index computations for the type computed "
+ "using the relevant entries. The data layout object can "
+ "be used for recursive queries.",
+ /*retTy=*/"std::optional<uint64_t>",
+ /*methodName=*/"getIndexBitwidth",
+ /*args=*/(ins "::mlir::Type":$type,
+ "const ::mlir::DataLayout &":$dataLayout,
+ "::mlir::DataLayoutEntryListRef":$params),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{
+ return ::mlir::detail::getDefaultIndexBitwidth(type, dataLayout,
+ params);
+ }]
+ >,
+ StaticInterfaceMethod<
/*description=*/"Returns the memory space used by the ABI computed "
"using the relevant entries. The data layout object "
"can be used for recursive queries.",
@@ -401,6 +417,18 @@ def DataLayoutTypeInterface : TypeInterface<"DataLayoutTypeInterface"> {
"::mlir::DataLayoutEntryListRef":$params)
>,
InterfaceMethod<
+ /*description=*/"Returns the bitwidth that should be used when "
+ "performing index computations for the given "
+ "pointer-like type. If the type is not a pointer-like "
+ "type, returns std::nullopt.",
+ /*retTy=*/"std::optional<uint64_t>",
+ /*methodName=*/"getIndexBitwidth",
+ /*args=*/(ins "const ::mlir::DataLayout &":$dataLayout,
+ "::mlir::DataLayoutEntryListRef":$params),
+ /*methodBody=*/"",
+ /*defaultImplementation=*/[{ return std::nullopt; }]
+ >,
+ InterfaceMethod<
/*desc=*/"Returns true if the two lists of entries are compatible, that "
"is, that `newLayout` spec entries can be nested in an op with "
"`oldLayout` spec entries.",
diff --git a/mlir/include/mlir/Interfaces/MemorySlotInterfaces.h b/mlir/include/mlir/Interfaces/MemorySlotInterfaces.h
index 56e5e96aecd1..aaa261be6553 100644
--- a/mlir/include/mlir/Interfaces/MemorySlotInterfaces.h
+++ b/mlir/include/mlir/Interfaces/MemorySlotInterfaces.h
@@ -26,8 +26,7 @@ struct MemorySlot {
/// Memory slot attached with information about its destructuring procedure.
struct DestructurableMemorySlot : public MemorySlot {
- /// Maps an index within the memory slot to the type of the pointer that
- /// will be generated to access the element directly.
+ /// Maps an index within the memory slot to the corresponding subelement type.
DenseMap<Attribute, Type> elementPtrs;
};
diff --git a/mlir/include/mlir/Interfaces/ValueBoundsOpInterface.h b/mlir/include/mlir/Interfaces/ValueBoundsOpInterface.h
index 28dadfb9ecf8..bdfd689c7ac4 100644
--- a/mlir/include/mlir/Interfaces/ValueBoundsOpInterface.h
+++ b/mlir/include/mlir/Interfaces/ValueBoundsOpInterface.h
@@ -15,6 +15,7 @@
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/DestinationStyleOpInterface.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/ExtensibleRTTI.h"
#include <queue>
@@ -63,7 +64,8 @@ using ValueDimList = SmallVector<std::pair<Value, std::optional<int64_t>>>;
///
/// Note: Any modification of existing IR invalides the data stored in this
/// class. Adding new operations is allowed.
-class ValueBoundsConstraintSet {
+class ValueBoundsConstraintSet
+ : public llvm::RTTIExtends<ValueBoundsConstraintSet, llvm::RTTIRoot> {
protected:
/// Helper class that builds a bound for a shaped value dimension or
/// index-typed value.
@@ -107,6 +109,8 @@ protected:
};
public:
+ static char ID;
+
/// The stop condition when traversing the backward slice of a shaped value/
/// index-type value. The traversal continues until the stop condition
/// evaluates to "true" for a value.
@@ -255,6 +259,10 @@ public:
/// Return an expression that represents a constant.
AffineExpr getExpr(int64_t constant);
+ /// Debugging only: Dump the constraint set and the column-to-value/dim
+ /// mapping to llvm::errs.
+ void dump() const;
+
protected:
/// Dimension identifier to indicate a value is index-typed. This is used for
/// internal data structures/API only.
@@ -265,6 +273,16 @@ protected:
ValueBoundsConstraintSet(MLIRContext *ctx);
+ /// Populates the constraint set for a value/map without actually computing
+ /// the bound. Returns the position for the value/map (via the return value
+ /// and `posOut` output parameter).
+ int64_t populateConstraintsSet(Value value,
+ std::optional<int64_t> dim = std::nullopt,
+ StopConditionFn stopCondition = nullptr);
+ int64_t populateConstraintsSet(AffineMap map, ValueDimList mapOperands,
+ StopConditionFn stopCondition = nullptr,
+ int64_t *posOut = nullptr);
+
/// Iteratively process all elements on the worklist until an index-typed
/// value or shaped value meets `stopCondition`. Such values are not processed
/// any further.
diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
index b49d2f539453..b551eb937cfe 100644
--- a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
+++ b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
@@ -152,6 +152,14 @@ public:
/// Converts `value` to a label attribute. Asserts if the matching fails.
DILabelAttr matchLabelAttr(llvm::Value *value);
+ /// Converts `value` to a FP exception behavior attribute. Asserts if the
+ /// matching fails.
+ FPExceptionBehaviorAttr matchFPExceptionBehaviorAttr(llvm::Value *value);
+
+ /// Converts `value` to a rounding mode attribute. Asserts if the matching
+ /// fails.
+ RoundingModeAttr matchRoundingModeAttr(llvm::Value *value);
+
/// Converts `value` to an array of alias scopes or returns failure if the
/// conversion fails.
FailureOr<SmallVector<AliasScopeAttr>>
diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
index fb4392eb223c..310a43e0de96 100644
--- a/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
+++ b/mlir/include/mlir/Target/LLVMIR/ModuleTranslation.h
@@ -201,6 +201,13 @@ public:
/// Translates the given LLVM debug info metadata.
llvm::Metadata *translateDebugInfo(LLVM::DINodeAttr attr);
+ /// Translates the given LLVM rounding mode metadata.
+ llvm::RoundingMode translateRoundingMode(LLVM::RoundingMode rounding);
+
+ /// Translates the given LLVM FP exception behavior metadata.
+ llvm::fp::ExceptionBehavior
+ translateFPExceptionBehavior(LLVM::FPExceptionBehavior exceptionBehavior);
+
/// Translates the contents of the given block to LLVM IR using this
/// translator. The LLVM IR basic block corresponding to the given block is
/// expected to exist in the mapping of this translator. Uses `builder` to
diff --git a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
index 8adc80908de1..4f7f83cdb473 100644
--- a/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
+++ b/mlir/include/mlir/Tools/mlir-opt/MlirOptMain.h
@@ -144,7 +144,6 @@ public:
splitInputFileFlag = std::move(splitMarker);
return *this;
}
- bool shouldSplitInputFile() const { return splitInputFileFlag.empty(); }
StringRef inputSplitMarker() const { return splitInputFileFlag; }
/// Set whether to merge the output chunks into one file using the given
diff --git a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
index 3532785c31b9..db493c1294ba 100644
--- a/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
+++ b/mlir/lib/Conversion/ArithToEmitC/ArithToEmitC.cpp
@@ -55,6 +55,55 @@ public:
}
};
+template <typename ArithOp, typename EmitCOp>
+class IntegerOpConversion final : public OpConversionPattern<ArithOp> {
+public:
+ using OpConversionPattern<ArithOp>::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(ArithOp op, typename ArithOp::Adaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+
+ Type type = this->getTypeConverter()->convertType(op.getType());
+ if (!isa_and_nonnull<IntegerType, IndexType>(type)) {
+ return rewriter.notifyMatchFailure(op, "expected integer type");
+ }
+
+ if (type.isInteger(1)) {
+ // arith expects wrap-around arithmetic, which doesn't happen on `bool`.
+ return rewriter.notifyMatchFailure(op, "i1 type is not implemented");
+ }
+
+ Value lhs = adaptor.getLhs();
+ Value rhs = adaptor.getRhs();
+ Type arithmeticType = type;
+ if ((type.isSignlessInteger() || type.isSignedInteger()) &&
+ !bitEnumContainsAll(op.getOverflowFlags(),
+ arith::IntegerOverflowFlags::nsw)) {
+ // If the C type is signed and the op doesn't guarantee "No Signed Wrap",
+ // we compute in unsigned integers to avoid UB.
+ arithmeticType = rewriter.getIntegerType(type.getIntOrFloatBitWidth(),
+ /*isSigned=*/false);
+ }
+ if (arithmeticType != type) {
+ lhs = rewriter.template create<emitc::CastOp>(op.getLoc(), arithmeticType,
+ lhs);
+ rhs = rewriter.template create<emitc::CastOp>(op.getLoc(), arithmeticType,
+ rhs);
+ }
+
+ Value result = rewriter.template create<EmitCOp>(op.getLoc(),
+ arithmeticType, lhs, rhs);
+
+ if (arithmeticType != type) {
+ result =
+ rewriter.template create<emitc::CastOp>(op.getLoc(), type, result);
+ }
+ rewriter.replaceOp(op, result);
+ return success();
+ }
+};
+
class SelectOpConversion : public OpConversionPattern<arith::SelectOp> {
public:
using OpConversionPattern<arith::SelectOp>::OpConversionPattern;
@@ -96,6 +145,9 @@ void mlir::populateArithToEmitCPatterns(TypeConverter &typeConverter,
ArithOpConversion<arith::DivFOp, emitc::DivOp>,
ArithOpConversion<arith::MulFOp, emitc::MulOp>,
ArithOpConversion<arith::SubFOp, emitc::SubOp>,
+ IntegerOpConversion<arith::AddIOp, emitc::AddOp>,
+ IntegerOpConversion<arith::MulIOp, emitc::MulOp>,
+ IntegerOpConversion<arith::SubIOp, emitc::SubOp>,
SelectOpConversion
>(typeConverter, ctx);
// clang-format on
diff --git a/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp b/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp
index edf81bd7a8f3..7456bf7a87a3 100644
--- a/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp
+++ b/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp
@@ -992,10 +992,9 @@ public:
return failure();
Location loc = op.getLoc();
- auto *converter = getTypeConverter<SPIRVTypeConverter>();
Value replace;
- if (converter->getOptions().enableFastMathMode) {
+ if (bitEnumContainsAll(op.getFastmath(), arith::FastMathFlags::nnan)) {
if (op.getPredicate() == arith::CmpFPredicate::ORD) {
// Ordered comparsion checks if neither operand is NaN.
replace = spirv::ConstantOp::getOne(op.getType(), loc, rewriter);
@@ -1122,7 +1121,7 @@ public:
Value spirvOp =
rewriter.create<SPIRVOp>(loc, dstType, adaptor.getOperands());
- if (converter->getOptions().enableFastMathMode) {
+ if (bitEnumContainsAll(op.getFastmath(), arith::FastMathFlags::nnan)) {
rewriter.replaceOp(op, spirvOp);
return success();
}
@@ -1177,7 +1176,7 @@ public:
rewriter.create<SPIRVOp>(loc, dstType, adaptor.getOperands());
if (!shouldInsertNanGuards<SPIRVOp>() ||
- converter->getOptions().enableFastMathMode) {
+ bitEnumContainsAll(op.getFastmath(), arith::FastMathFlags::nnan)) {
rewriter.replaceOp(op, spirvOp);
return success();
}
@@ -1286,7 +1285,6 @@ struct ConvertArithToSPIRVPass
SPIRVConversionOptions options;
options.emulateLT32BitScalarTypes = this->emulateLT32BitScalarTypes;
- options.enableFastMathMode = this->enableFastMath;
SPIRVTypeConverter typeConverter(targetAttr, options);
// Use UnrealizedConversionCast as the bridge so that we don't need to pull
diff --git a/mlir/lib/Conversion/CMakeLists.txt b/mlir/lib/Conversion/CMakeLists.txt
index 8219cf98575f..41ab7046b91c 100644
--- a/mlir/lib/Conversion/CMakeLists.txt
+++ b/mlir/lib/Conversion/CMakeLists.txt
@@ -35,6 +35,7 @@ add_subdirectory(MathToFuncs)
add_subdirectory(MathToLibm)
add_subdirectory(MathToLLVM)
add_subdirectory(MathToSPIRV)
+add_subdirectory(MemRefToEmitC)
add_subdirectory(MemRefToLLVM)
add_subdirectory(MemRefToSPIRV)
add_subdirectory(NVGPUToNVVM)
diff --git a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
index 76729278ec1b..17f64f1b65b7 100644
--- a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
+++ b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
@@ -196,6 +196,7 @@ struct TrigonometricOpConversion : public OpConversionPattern<TrigonometricOp> {
auto loc = op.getLoc();
auto type = cast<ComplexType>(adaptor.getComplex().getType());
auto elementType = cast<FloatType>(type.getElementType());
+ arith::FastMathFlagsAttr fmf = op.getFastMathFlagsAttr();
Value real =
rewriter.create<complex::ReOp>(loc, elementType, adaptor.getComplex());
@@ -207,14 +208,14 @@ struct TrigonometricOpConversion : public OpConversionPattern<TrigonometricOp> {
// implementation in the subclass to combine them.
Value half = rewriter.create<arith::ConstantOp>(
loc, elementType, rewriter.getFloatAttr(elementType, 0.5));
- Value exp = rewriter.create<math::ExpOp>(loc, imag);
- Value scaledExp = rewriter.create<arith::MulFOp>(loc, half, exp);
- Value reciprocalExp = rewriter.create<arith::DivFOp>(loc, half, exp);
- Value sin = rewriter.create<math::SinOp>(loc, real);
- Value cos = rewriter.create<math::CosOp>(loc, real);
+ Value exp = rewriter.create<math::ExpOp>(loc, imag, fmf);
+ Value scaledExp = rewriter.create<arith::MulFOp>(loc, half, exp, fmf);
+ Value reciprocalExp = rewriter.create<arith::DivFOp>(loc, half, exp, fmf);
+ Value sin = rewriter.create<math::SinOp>(loc, real, fmf);
+ Value cos = rewriter.create<math::CosOp>(loc, real, fmf);
auto resultPair =
- combine(loc, scaledExp, reciprocalExp, sin, cos, rewriter);
+ combine(loc, scaledExp, reciprocalExp, sin, cos, rewriter, fmf);
rewriter.replaceOpWithNewOp<complex::CreateOp>(op, type, resultPair.first,
resultPair.second);
@@ -223,15 +224,17 @@ struct TrigonometricOpConversion : public OpConversionPattern<TrigonometricOp> {
virtual std::pair<Value, Value>
combine(Location loc, Value scaledExp, Value reciprocalExp, Value sin,
- Value cos, ConversionPatternRewriter &rewriter) const = 0;
+ Value cos, ConversionPatternRewriter &rewriter,
+ arith::FastMathFlagsAttr fmf) const = 0;
};
struct CosOpConversion : public TrigonometricOpConversion<complex::CosOp> {
using TrigonometricOpConversion<complex::CosOp>::TrigonometricOpConversion;
- std::pair<Value, Value>
- combine(Location loc, Value scaledExp, Value reciprocalExp, Value sin,
- Value cos, ConversionPatternRewriter &rewriter) const override {
+ std::pair<Value, Value> combine(Location loc, Value scaledExp,
+ Value reciprocalExp, Value sin, Value cos,
+ ConversionPatternRewriter &rewriter,
+ arith::FastMathFlagsAttr fmf) const override {
// Complex cosine is defined as;
// cos(x + iy) = 0.5 * (exp(i(x + iy)) + exp(-i(x + iy)))
// Plugging in:
@@ -241,10 +244,12 @@ struct CosOpConversion : public TrigonometricOpConversion<complex::CosOp> {
// We get:
// Re(cos(x + iy)) = (0.5/t + 0.5*t) * cos x
// Im(cos(x + iy)) = (0.5/t - 0.5*t) * sin x
- Value sum = rewriter.create<arith::AddFOp>(loc, reciprocalExp, scaledExp);
- Value resultReal = rewriter.create<arith::MulFOp>(loc, sum, cos);
- Value diff = rewriter.create<arith::SubFOp>(loc, reciprocalExp, scaledExp);
- Value resultImag = rewriter.create<arith::MulFOp>(loc, diff, sin);
+ Value sum =
+ rewriter.create<arith::AddFOp>(loc, reciprocalExp, scaledExp, fmf);
+ Value resultReal = rewriter.create<arith::MulFOp>(loc, sum, cos, fmf);
+ Value diff =
+ rewriter.create<arith::SubFOp>(loc, reciprocalExp, scaledExp, fmf);
+ Value resultImag = rewriter.create<arith::MulFOp>(loc, diff, sin, fmf);
return {resultReal, resultImag};
}
};
@@ -813,9 +818,10 @@ struct NegOpConversion : public OpConversionPattern<complex::NegOp> {
struct SinOpConversion : public TrigonometricOpConversion<complex::SinOp> {
using TrigonometricOpConversion<complex::SinOp>::TrigonometricOpConversion;
- std::pair<Value, Value>
- combine(Location loc, Value scaledExp, Value reciprocalExp, Value sin,
- Value cos, ConversionPatternRewriter &rewriter) const override {
+ std::pair<Value, Value> combine(Location loc, Value scaledExp,
+ Value reciprocalExp, Value sin, Value cos,
+ ConversionPatternRewriter &rewriter,
+ arith::FastMathFlagsAttr fmf) const override {
// Complex sine is defined as;
// sin(x + iy) = -0.5i * (exp(i(x + iy)) - exp(-i(x + iy)))
// Plugging in:
@@ -825,10 +831,12 @@ struct SinOpConversion : public TrigonometricOpConversion<complex::SinOp> {
// We get:
// Re(sin(x + iy)) = (0.5*t + 0.5/t) * sin x
// Im(cos(x + iy)) = (0.5*t - 0.5/t) * cos x
- Value sum = rewriter.create<arith::AddFOp>(loc, scaledExp, reciprocalExp);
- Value resultReal = rewriter.create<arith::MulFOp>(loc, sum, sin);
- Value diff = rewriter.create<arith::SubFOp>(loc, scaledExp, reciprocalExp);
- Value resultImag = rewriter.create<arith::MulFOp>(loc, diff, cos);
+ Value sum =
+ rewriter.create<arith::AddFOp>(loc, scaledExp, reciprocalExp, fmf);
+ Value resultReal = rewriter.create<arith::MulFOp>(loc, sum, sin, fmf);
+ Value diff =
+ rewriter.create<arith::SubFOp>(loc, scaledExp, reciprocalExp, fmf);
+ Value resultImag = rewriter.create<arith::MulFOp>(loc, diff, cos, fmf);
return {resultReal, resultImag};
}
};
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index d6a5d8cd74d5..b95fba20a00c 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -155,8 +155,6 @@ struct GPUShuffleOpLowering : public ConvertOpToLLVMPattern<gpu::ShuffleOp> {
auto valueTy = adaptor.getValue().getType();
auto int32Type = IntegerType::get(rewriter.getContext(), 32);
auto predTy = IntegerType::get(rewriter.getContext(), 1);
- auto resultTy = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
- {valueTy, predTy});
Value one = rewriter.create<LLVM::ConstantOp>(loc, int32Type, 1);
Value minusOne = rewriter.create<LLVM::ConstantOp>(loc, int32Type, -1);
@@ -176,14 +174,25 @@ struct GPUShuffleOpLowering : public ConvertOpToLLVMPattern<gpu::ShuffleOp> {
rewriter.create<LLVM::SubOp>(loc, int32Type, adaptor.getWidth(), one);
}
- auto returnValueAndIsValidAttr = rewriter.getUnitAttr();
+ bool predIsUsed = !op->getResult(1).use_empty();
+ UnitAttr returnValueAndIsValidAttr = nullptr;
+ Type resultTy = valueTy;
+ if (predIsUsed) {
+ returnValueAndIsValidAttr = rewriter.getUnitAttr();
+ resultTy = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
+ {valueTy, predTy});
+ }
Value shfl = rewriter.create<NVVM::ShflOp>(
loc, resultTy, activeMask, adaptor.getValue(), adaptor.getOffset(),
maskAndClamp, convertShflKind(op.getMode()), returnValueAndIsValidAttr);
- Value shflValue = rewriter.create<LLVM::ExtractValueOp>(loc, shfl, 0);
- Value isActiveSrcLane = rewriter.create<LLVM::ExtractValueOp>(loc, shfl, 1);
-
- rewriter.replaceOp(op, {shflValue, isActiveSrcLane});
+ if (predIsUsed) {
+ Value shflValue = rewriter.create<LLVM::ExtractValueOp>(loc, shfl, 0);
+ Value isActiveSrcLane =
+ rewriter.create<LLVM::ExtractValueOp>(loc, shfl, 1);
+ rewriter.replaceOp(op, {shflValue, isActiveSrcLane});
+ } else {
+ rewriter.replaceOp(op, {shfl, nullptr});
+ }
return success();
}
};
diff --git a/mlir/lib/Conversion/MemRefToEmitC/CMakeLists.txt b/mlir/lib/Conversion/MemRefToEmitC/CMakeLists.txt
new file mode 100644
index 000000000000..8a72e747d024
--- /dev/null
+++ b/mlir/lib/Conversion/MemRefToEmitC/CMakeLists.txt
@@ -0,0 +1,18 @@
+add_mlir_conversion_library(MLIRMemRefToEmitC
+ MemRefToEmitC.cpp
+ MemRefToEmitCPass.cpp
+
+ ADDITIONAL_HEADER_DIRS
+ ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/MemRefToEmitC
+
+ DEPENDS
+ MLIRConversionPassIncGen
+
+ LINK_COMPONENTS
+ Core
+
+ LINK_LIBS PUBLIC
+ MLIREmitCDialect
+ MLIRMemRefDialect
+ MLIRTransforms
+ )
diff --git a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
new file mode 100644
index 000000000000..0e3b64692126
--- /dev/null
+++ b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitC.cpp
@@ -0,0 +1,114 @@
+//===- MemRefToEmitC.cpp - MemRef to EmitC conversion ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements patterns to convert memref ops into emitc ops.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/MemRefToEmitC/MemRefToEmitC.h"
+
+#include "mlir/Dialect/EmitC/IR/EmitC.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+using namespace mlir;
+
+namespace {
+struct ConvertAlloca final : public OpConversionPattern<memref::AllocaOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(memref::AllocaOp op, OpAdaptor operands,
+ ConversionPatternRewriter &rewriter) const override {
+
+ if (!op.getType().hasStaticShape()) {
+ return rewriter.notifyMatchFailure(
+ op.getLoc(), "cannot transform alloca with dynamic shape");
+ }
+
+ if (op.getAlignment().value_or(1) > 1) {
+ // TODO: Allow alignment if it is not more than the natural alignment
+ // of the C array.
+ return rewriter.notifyMatchFailure(
+ op.getLoc(), "cannot transform alloca with alignment requirement");
+ }
+
+ auto resultTy = getTypeConverter()->convertType(op.getType());
+ if (!resultTy) {
+ return rewriter.notifyMatchFailure(op.getLoc(), "cannot convert type");
+ }
+ auto noInit = emitc::OpaqueAttr::get(getContext(), "");
+ rewriter.replaceOpWithNewOp<emitc::VariableOp>(op, resultTy, noInit);
+ return success();
+ }
+};
+
+struct ConvertLoad final : public OpConversionPattern<memref::LoadOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(memref::LoadOp op, OpAdaptor operands,
+ ConversionPatternRewriter &rewriter) const override {
+
+ auto resultTy = getTypeConverter()->convertType(op.getType());
+ if (!resultTy) {
+ return rewriter.notifyMatchFailure(op.getLoc(), "cannot convert type");
+ }
+
+ auto subscript = rewriter.create<emitc::SubscriptOp>(
+ op.getLoc(), operands.getMemref(), operands.getIndices());
+
+ auto noInit = emitc::OpaqueAttr::get(getContext(), "");
+ auto var =
+ rewriter.create<emitc::VariableOp>(op.getLoc(), resultTy, noInit);
+
+ rewriter.create<emitc::AssignOp>(op.getLoc(), var, subscript);
+ rewriter.replaceOp(op, var);
+ return success();
+ }
+};
+
+struct ConvertStore final : public OpConversionPattern<memref::StoreOp> {
+ using OpConversionPattern::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(memref::StoreOp op, OpAdaptor operands,
+ ConversionPatternRewriter &rewriter) const override {
+
+ auto subscript = rewriter.create<emitc::SubscriptOp>(
+ op.getLoc(), operands.getMemref(), operands.getIndices());
+ rewriter.replaceOpWithNewOp<emitc::AssignOp>(op, subscript,
+ operands.getValue());
+ return success();
+ }
+};
+} // namespace
+
+void mlir::populateMemRefToEmitCTypeConversion(TypeConverter &typeConverter) {
+ typeConverter.addConversion(
+ [&](MemRefType memRefType) -> std::optional<Type> {
+ if (!memRefType.hasStaticShape() ||
+ !memRefType.getLayout().isIdentity() || memRefType.getRank() == 0) {
+ return {};
+ }
+ Type convertedElementType =
+ typeConverter.convertType(memRefType.getElementType());
+ if (!convertedElementType)
+ return {};
+ return emitc::ArrayType::get(memRefType.getShape(),
+ convertedElementType);
+ });
+}
+
+void mlir::populateMemRefToEmitCConversionPatterns(RewritePatternSet &patterns,
+ TypeConverter &converter) {
+ patterns.add<ConvertAlloca, ConvertLoad, ConvertStore>(converter,
+ patterns.getContext());
+}
diff --git a/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp
new file mode 100644
index 000000000000..4e5d1912d157
--- /dev/null
+++ b/mlir/lib/Conversion/MemRefToEmitC/MemRefToEmitCPass.cpp
@@ -0,0 +1,55 @@
+//===- MemRefToEmitC.cpp - MemRef to EmitC conversion ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass to convert memref ops into emitc ops.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/MemRefToEmitC/MemRefToEmitCPass.h"
+
+#include "mlir/Conversion/MemRefToEmitC/MemRefToEmitC.h"
+#include "mlir/Dialect/EmitC/IR/EmitC.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+namespace mlir {
+#define GEN_PASS_DEF_CONVERTMEMREFTOEMITC
+#include "mlir/Conversion/Passes.h.inc"
+} // namespace mlir
+
+using namespace mlir;
+
+namespace {
+struct ConvertMemRefToEmitCPass
+ : public impl::ConvertMemRefToEmitCBase<ConvertMemRefToEmitCPass> {
+ void runOnOperation() override {
+ TypeConverter converter;
+
+ // Fallback for other types.
+ converter.addConversion([](Type type) -> std::optional<Type> {
+ if (isa<MemRefType>(type))
+ return {};
+ return type;
+ });
+
+ populateMemRefToEmitCTypeConversion(converter);
+
+ RewritePatternSet patterns(&getContext());
+ populateMemRefToEmitCConversionPatterns(patterns, converter);
+
+ ConversionTarget target(getContext());
+ target.addIllegalDialect<memref::MemRefDialect>();
+ target.addLegalDialect<emitc::EmitCDialect>();
+
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns))))
+ return signalPassFailure();
+ }
+};
+} // namespace
diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
index 0acb2142f3f6..81b9f55cac80 100644
--- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
+++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
@@ -50,11 +50,12 @@ static Value getOffsetForBitwidth(Location loc, Value srcIdx, int sourceBits,
assert(targetBits % sourceBits == 0);
Type type = srcIdx.getType();
IntegerAttr idxAttr = builder.getIntegerAttr(type, targetBits / sourceBits);
- auto idx = builder.create<spirv::ConstantOp>(loc, type, idxAttr);
+ auto idx = builder.createOrFold<spirv::ConstantOp>(loc, type, idxAttr);
IntegerAttr srcBitsAttr = builder.getIntegerAttr(type, sourceBits);
- auto srcBitsValue = builder.create<spirv::ConstantOp>(loc, type, srcBitsAttr);
- auto m = builder.create<spirv::UModOp>(loc, srcIdx, idx);
- return builder.create<spirv::IMulOp>(loc, type, m, srcBitsValue);
+ auto srcBitsValue =
+ builder.createOrFold<spirv::ConstantOp>(loc, type, srcBitsAttr);
+ auto m = builder.createOrFold<spirv::UModOp>(loc, srcIdx, idx);
+ return builder.createOrFold<spirv::IMulOp>(loc, type, m, srcBitsValue);
}
/// Returns an adjusted spirv::AccessChainOp. Based on the
@@ -74,11 +75,11 @@ adjustAccessChainForBitwidth(const SPIRVTypeConverter &typeConverter,
Value lastDim = op->getOperand(op.getNumOperands() - 1);
Type type = lastDim.getType();
IntegerAttr attr = builder.getIntegerAttr(type, targetBits / sourceBits);
- auto idx = builder.create<spirv::ConstantOp>(loc, type, attr);
+ auto idx = builder.createOrFold<spirv::ConstantOp>(loc, type, attr);
auto indices = llvm::to_vector<4>(op.getIndices());
// There are two elements if this is a 1-D tensor.
assert(indices.size() == 2);
- indices.back() = builder.create<spirv::SDivOp>(loc, lastDim, idx);
+ indices.back() = builder.createOrFold<spirv::SDivOp>(loc, lastDim, idx);
Type t = typeConverter.convertType(op.getComponentPtr().getType());
return builder.create<spirv::AccessChainOp>(loc, t, op.getBasePtr(), indices);
}
@@ -91,7 +92,8 @@ static Value castBoolToIntN(Location loc, Value srcBool, Type dstType,
return srcBool;
Value zero = spirv::ConstantOp::getZero(dstType, loc, builder);
Value one = spirv::ConstantOp::getOne(dstType, loc, builder);
- return builder.create<spirv::SelectOp>(loc, dstType, srcBool, one, zero);
+ return builder.createOrFold<spirv::SelectOp>(loc, dstType, srcBool, one,
+ zero);
}
/// Returns the `targetBits`-bit value shifted by the given `offset`, and cast
@@ -111,10 +113,10 @@ static Value shiftValue(Location loc, Value value, Value offset, Value mask,
loc, builder.getIntegerType(targetBits), value);
}
- value = builder.create<spirv::BitwiseAndOp>(loc, value, mask);
+ value = builder.createOrFold<spirv::BitwiseAndOp>(loc, value, mask);
}
- return builder.create<spirv::ShiftLeftLogicalOp>(loc, value.getType(), value,
- offset);
+ return builder.createOrFold<spirv::ShiftLeftLogicalOp>(loc, value.getType(),
+ value, offset);
}
/// Returns true if the allocations of memref `type` generated from `allocOp`
@@ -165,7 +167,7 @@ static Value castIntNToBool(Location loc, Value srcInt, OpBuilder &builder) {
return srcInt;
auto one = spirv::ConstantOp::getOne(srcInt.getType(), loc, builder);
- return builder.create<spirv::IEqualOp>(loc, srcInt, one);
+ return builder.createOrFold<spirv::IEqualOp>(loc, srcInt, one);
}
//===----------------------------------------------------------------------===//
@@ -597,13 +599,14 @@ IntLoadOpPattern::matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor,
// ____XXXX________ -> ____________XXXX
Value lastDim = accessChainOp->getOperand(accessChainOp.getNumOperands() - 1);
Value offset = getOffsetForBitwidth(loc, lastDim, srcBits, dstBits, rewriter);
- Value result = rewriter.create<spirv::ShiftRightArithmeticOp>(
+ Value result = rewriter.createOrFold<spirv::ShiftRightArithmeticOp>(
loc, spvLoadOp.getType(), spvLoadOp, offset);
// Apply the mask to extract corresponding bits.
- Value mask = rewriter.create<spirv::ConstantOp>(
+ Value mask = rewriter.createOrFold<spirv::ConstantOp>(
loc, dstType, rewriter.getIntegerAttr(dstType, (1 << srcBits) - 1));
- result = rewriter.create<spirv::BitwiseAndOp>(loc, dstType, result, mask);
+ result =
+ rewriter.createOrFold<spirv::BitwiseAndOp>(loc, dstType, result, mask);
// Apply sign extension on the loading value unconditionally. The signedness
// semantic is carried in the operator itself, we relies other pattern to
@@ -611,11 +614,11 @@ IntLoadOpPattern::matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor,
IntegerAttr shiftValueAttr =
rewriter.getIntegerAttr(dstType, dstBits - srcBits);
Value shiftValue =
- rewriter.create<spirv::ConstantOp>(loc, dstType, shiftValueAttr);
- result = rewriter.create<spirv::ShiftLeftLogicalOp>(loc, dstType, result,
- shiftValue);
- result = rewriter.create<spirv::ShiftRightArithmeticOp>(loc, dstType, result,
- shiftValue);
+ rewriter.createOrFold<spirv::ConstantOp>(loc, dstType, shiftValueAttr);
+ result = rewriter.createOrFold<spirv::ShiftLeftLogicalOp>(loc, dstType,
+ result, shiftValue);
+ result = rewriter.createOrFold<spirv::ShiftRightArithmeticOp>(
+ loc, dstType, result, shiftValue);
rewriter.replaceOp(loadOp, result);
@@ -744,11 +747,12 @@ IntStoreOpPattern::matchAndRewrite(memref::StoreOp storeOp, OpAdaptor adaptor,
// Create a mask to clear the destination. E.g., if it is the second i8 in
// i32, 0xFFFF00FF is created.
- Value mask = rewriter.create<spirv::ConstantOp>(
+ Value mask = rewriter.createOrFold<spirv::ConstantOp>(
loc, dstType, rewriter.getIntegerAttr(dstType, (1 << srcBits) - 1));
- Value clearBitsMask =
- rewriter.create<spirv::ShiftLeftLogicalOp>(loc, dstType, mask, offset);
- clearBitsMask = rewriter.create<spirv::NotOp>(loc, dstType, clearBitsMask);
+ Value clearBitsMask = rewriter.createOrFold<spirv::ShiftLeftLogicalOp>(
+ loc, dstType, mask, offset);
+ clearBitsMask =
+ rewriter.createOrFold<spirv::NotOp>(loc, dstType, clearBitsMask);
Value storeVal = shiftValue(loc, adaptor.getValue(), offset, mask, rewriter);
Value adjustedPtr = adjustAccessChainForBitwidth(typeConverter, accessChainOp,
@@ -910,7 +914,7 @@ LogicalResult ReinterpretCastPattern::matchAndRewrite(
int64_t attrVal = cast<IntegerAttr>(offset.get<Attribute>()).getInt();
Attribute attr = rewriter.getIntegerAttr(intType, attrVal);
- return rewriter.create<spirv::ConstantOp>(loc, intType, attr);
+ return rewriter.createOrFold<spirv::ConstantOp>(loc, intType, attr);
}();
rewriter.replaceOpWithNewOp<spirv::InBoundsPtrAccessChainOp>(
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 505d85f21111..cd6da3558246 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -19,24 +19,99 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/DialectConversion.h"
+#include <numeric>
+
using namespace mlir;
using namespace tosa;
-static bool findIntermediateShape(ArrayRef<int64_t> lhsShape,
- ArrayRef<int64_t> rhsShape,
- SmallVector<int64_t> &intermediateShape,
- bool isDynamic) {
- if (isDynamic) {
- // TODO (natashaknk): Make dynamic intermediate shape not always be rank-1
- intermediateShape = {ShapedType::kDynamic};
- return true;
- }
+namespace {
- if (lhsShape.empty() || rhsShape.empty()) {
- intermediateShape = {};
- return true;
- }
+// Infer the type to which the input of a 'tosa.reshape' op must be cast when
+// lowered.
+TensorType inferReshapeInputType(TypedValue<TensorType> input,
+ ArrayRef<int64_t> newShape) {
+ // No need to cast input for non-empty target shape
+ if (!newShape.empty())
+ return input.getType();
+
+ // The input type must be cast into a tensor with the same rank and all static
+ // dimensions set to 1. This prevents the generation of a tensor.collapse_shape
+ // op that converts a dynamically shaped tensor into a 0D tensor. While such
+ // construct is not incorrect on its own, bufferization cannot properly handle
+ // it at the moment, so we avoid it.
+ SmallVector<int64_t> shape(input.getType().getRank(), 1);
+ return input.getType().clone(shape);
+}
+
+// Infer the result type of 'tensor.expand_shape' in the collapse-expand
+// pair emitted for a 'tosa.reshape' op.
+TensorType inferReshapeExpandedType(TensorType inputType,
+ ArrayRef<int64_t> newShape) {
+ // Special case for 0D output tensor. Note: Watch out when using Type::clone()
+ // with just '{}', as it will invoke the incorrect overload.
+ if (newShape.empty())
+ return inputType.clone(ArrayRef<int64_t>{});
+
+ // Check if the input is static, and if so, get its total size
+ bool inputIsStatic = inputType.hasStaticShape();
+ int64_t totalSize = inputIsStatic ? inputType.getNumElements() : -1;
+
+ // Compute result shape
+ bool resultIsStatic = true;
+ auto resultShape = llvm::map_to_vector(newShape, [&](int64_t size) -> int64_t {
+ // If this is not a placeholder, do not change it
+ if (size >= 0)
+ return size;
+
+ // If we do not know the total size of the tensor, keep this dimension
+ // dynamic in the result shape.
+ if (!inputIsStatic) {
+ resultIsStatic = false;
+ return ShapedType::kDynamic;
+ }
+ // Calculate the product of all elements in 'newShape' except for the -1
+ // placeholder, which we discard by negating the result.
+ int64_t totalSizeNoPlaceholder = -std::accumulate(
+ newShape.begin(), newShape.end(), 1, std::multiplies<int64_t>());
+
+ // If there is a 0 component in 'newShape', resolve the placeholder as 0.
+ if (totalSizeNoPlaceholder == 0)
+ return 0;
+
+ // Resolve the placeholder as the quotient between the total tensor size and
+ // the product of all other sizes.
+ return totalSize / totalSizeNoPlaceholder;
+ });
+
+ // A syntactic restriction in 'tensor.expand_shape' forbids a dynamically
+ // shaped input from being reshaped into a statically shaped result. We may
+ // simply turn the first result dimension dynamic to address this.
+ if (!inputIsStatic && resultIsStatic)
+ resultShape[0] = ShapedType::kDynamic;
+
+ // The 'tensor.expand_shape' op also forbids a statically shaped input from
+ // being reshaped into a dynamically shaped result, but the placeholder
+ // inference algorithm above guarantees that this will never be the case.
+ assert(!inputIsStatic || resultIsStatic);
+
+ // Create result type
+ return inputType.clone(resultShape);
+}
+
+// Infer the result type of 'tensor.collapse_shape' in the collapse-expand
+// pair emitted for a 'tosa.reshape' op.
+TensorType inferReshapeCollapsedType(TensorType lhsType, TensorType rhsType) {
+ auto lhsShape = lhsType.getShape();
+ auto rhsShape = rhsType.getShape();
+
+ if (lhsShape.empty() || rhsShape.empty())
+ return lhsType.clone(ArrayRef<int64_t>{});
+
+ if (ShapedType::isDynamicShape(lhsShape) || ShapedType::isDynamicShape(rhsShape))
+ return lhsType.clone({ShapedType::kDynamic});
+
+ SmallVector<int64_t> intermediateShape;
unsigned currLhsDim = 0, currRhsDim = 0;
while (currLhsDim < lhsShape.size() && currRhsDim < rhsShape.size()) {
int64_t rhsSize = rhsShape[currRhsDim];
@@ -62,174 +137,113 @@ static bool findIntermediateShape(ArrayRef<int64_t> lhsShape,
currLhsDim++;
}
- // If the iterators didn't reach the end and their leftover dimensions are not
- // equal to 1 an intermediate shape was not found.
- while (currLhsDim < lhsShape.size()) {
- if (lhsShape[currLhsDim++] != 1) {
- return false;
- }
+ // Static shapes are guaranteed to be compatible by the op verifier, so all
+ // leftover dimensions should be 1.
+ for (; currLhsDim < lhsShape.size(); currLhsDim++) {
+ assert(lhsShape[currLhsDim] == 1);
}
-
- while (currRhsDim < rhsShape.size()) {
- if (rhsShape[currRhsDim++] != 1) {
- return false;
- }
+ for (; currRhsDim < rhsShape.size(); currRhsDim++) {
+ assert(rhsShape[currRhsDim] == 1);
}
-
- return true;
+
+ return lhsType.clone(intermediateShape);
}
-static bool createReassociationMapsForCollapse(
- PatternRewriter &rewriter, ArrayRef<int64_t> srcShape,
- ArrayRef<int64_t> dstShape,
- SmallVector<ReassociationExprs, 4> &reassociationMap, bool isDynamic) {
+SmallVector<ReassociationExprs>
+createReassociationMapForCollapse(OpBuilder &builder, Type srcType, Type dstType) {
+ auto srcShape = cast<TensorType>(srcType).getShape();
+ auto dstShape = cast<TensorType>(dstType).getShape();
- // If the shape is dynamic, create a map for collapsing into one dimension.
- if (isDynamic) {
- SmallVector<AffineExpr, 2> exprs;
- for (int i = 0, s = srcShape.size(); i < s; ++i)
- exprs.push_back(rewriter.getAffineDimExpr(i));
- reassociationMap = {exprs};
- return true;
- }
+ if (srcShape.empty() || dstShape.empty())
+ return {};
- if (dstShape.empty()) {
- reassociationMap = {};
- return true;
+ if (ShapedType::isDynamicShape(srcShape) || ShapedType::isDynamicShape(dstShape)) {
+ assert(dstShape.size() == 1);
+ SmallVector<AffineExpr, 2> exprs;
+ for (auto i : llvm::seq<int64_t>(srcShape.size()))
+ exprs.push_back(builder.getAffineDimExpr(i));
+ return {exprs};
}
- reassociationMap.resize(dstShape.size());
+ SmallVector<ReassociationExprs> reassociationMap(dstShape.size());
unsigned currSrcDim = 0, currDstDim = 0;
while (currSrcDim < srcShape.size() && currDstDim < dstShape.size()) {
int64_t dstSize = dstShape[currDstDim];
int64_t srcSize = srcShape[currSrcDim];
while (srcSize < dstSize && currSrcDim < srcShape.size()) {
reassociationMap[currDstDim].push_back(
- rewriter.getAffineDimExpr(currSrcDim++));
+ builder.getAffineDimExpr(currSrcDim++));
srcSize *= srcShape[currSrcDim];
}
if (srcSize == dstSize) {
reassociationMap[currDstDim].push_back(
- rewriter.getAffineDimExpr(currSrcDim++));
+ builder.getAffineDimExpr(currSrcDim++));
// If the next dim in collapsedShape is not 1, treat subsequent dims in
// expandedShape which are 1 to be collapsed.
if (currDstDim == dstShape.size() - 1 || dstShape[currDstDim + 1] != 1) {
while (currSrcDim < srcShape.size() && srcShape[currSrcDim] == 1) {
reassociationMap[currDstDim].push_back(
- rewriter.getAffineDimExpr(currSrcDim++));
+ builder.getAffineDimExpr(currSrcDim++));
}
}
}
currDstDim++;
}
- // If both iterators didn't reach the end, we have leftover dimentions which
- // implies that we have a mismatch in shape.
- return currSrcDim == srcShape.size() && currDstDim == dstShape.size();
+ // If the source and target shapes are compatible, both iterators must have
+ // reached the end. This condition is guaranteed by the op verifier for
+ // static shapes.
+ assert(currSrcDim == srcShape.size() && currDstDim == dstShape.size());
+ return reassociationMap;
}
-namespace {
-Value createCollapse(ConversionPatternRewriter &rewriter, Location loc,
- ShapedType resultTy, Value operand) {
- ShapedType operandTy = cast<ShapedType>(operand.getType());
- if (resultTy == operandTy)
- return operand;
-
- bool isDynamic = !operandTy.hasStaticShape();
-
- if (isDynamic && resultTy.getRank() != 1) {
- (void)rewriter.notifyMatchFailure(
- loc, "Cannot collapse dynamic dims to more than one dimension");
- return {};
- }
-
- SmallVector<ReassociationExprs, 4> reassociationMap;
- if (!createReassociationMapsForCollapse(rewriter, operandTy.getShape(),
- resultTy.getShape(),
- reassociationMap, isDynamic)) {
- (void)rewriter.notifyMatchFailure(
- loc, "tosa.reshape Attempting to collapse into an incompatible shape");
- return {};
- }
-
- SmallVector<int64_t> intermediateShape;
- if (!findIntermediateShape(operandTy.getShape(), resultTy.getShape(),
- intermediateShape, isDynamic)) {
- (void)rewriter.notifyMatchFailure(
- loc, "tosa.reshape Cannot collapse into given shape");
- return {};
- }
- return rewriter.create<tensor::CollapseShapeOp>(loc, resultTy, operand,
- reassociationMap);
+// Create a tensor.collapse_shape op that reshapes the input into the given
+// result type.
+Value createCollapse(OpBuilder &builder, Location loc, TensorType resultType,
+ Value input) {
+ auto reassociationMap =
+ createReassociationMapForCollapse(builder, input.getType(), resultType);
+ return builder.createOrFold<tensor::CollapseShapeOp>(loc, resultType, input,
+ reassociationMap);
}
-Value createExpand(ConversionPatternRewriter &rewriter, Location loc,
- ShapedType resultTy, Value operand) {
- ShapedType operandTy = cast<ShapedType>(operand.getType());
- if (resultTy == operandTy)
- return operand;
-
- bool isDynamic = !operandTy.hasStaticShape();
-
- if (isDynamic && operandTy.getRank() != 1) {
- (void)rewriter.notifyMatchFailure(
- loc, "Cannot expand dynamic dims from more than one dimension");
- return {};
- }
-
- SmallVector<ReassociationExprs, 4> reassociationMap;
- if (!createReassociationMapsForCollapse(rewriter, resultTy.getShape(),
- operandTy.getShape(),
- reassociationMap, isDynamic)) {
- (void)rewriter.notifyMatchFailure(
- loc, "tosa.reshape Attempting to expand into an incompatible shape");
- return {};
- }
-
- SmallVector<int64_t> intermediateShape;
- if (!findIntermediateShape(operandTy.getShape(), resultTy.getShape(),
- intermediateShape, isDynamic) ||
- intermediateShape != operandTy.getShape()) {
- (void)rewriter.notifyMatchFailure(
- loc, "tosa.reshape Cannot expand into given shape");
- return {};
- }
- return rewriter.create<tensor::ExpandShapeOp>(loc, resultTy, operand,
- reassociationMap);
+// Create a tensor.expand_shape op that reshapes the input into the given result
+// type.
+Value createExpand(OpBuilder &builder, Location loc, TensorType resultType,
+ Value input) {
+ auto reassociationMap =
+ createReassociationMapForCollapse(builder, resultType, input.getType());
+ return builder.createOrFold<tensor::ExpandShapeOp>(loc, resultType, input,
+ reassociationMap);
}
-class ReshapeConverterCollapseExpand
- : public OpConversionPattern<tosa::ReshapeOp> {
+class ReshapeConverter : public OpConversionPattern<tosa::ReshapeOp> {
public:
using OpConversionPattern<tosa::ReshapeOp>::OpConversionPattern;
LogicalResult
matchAndRewrite(tosa::ReshapeOp reshape, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
- ShapedType operandTy = cast<ShapedType>(adaptor.getInput1().getType());
- ShapedType resultTy = cast<ShapedType>(reshape.getType());
- bool isDynamic = !operandTy.hasStaticShape();
-
- SmallVector<int64_t> intermediateShape;
- if (!findIntermediateShape(resultTy.getShape(), operandTy.getShape(),
- intermediateShape, isDynamic)) {
- return rewriter.notifyMatchFailure(
- reshape, "tosa.reshape Cannot identify an intermediate shape between "
- "the given two shapes");
- }
- auto intermediateTy = RankedTensorType::get(
- intermediateShape, reshape.getType().getElementType());
-
- Value collapse = createCollapse(rewriter, reshape.getLoc(), intermediateTy,
- adaptor.getInput1());
- if (!collapse)
- return failure();
-
- Value expand = createExpand(rewriter, reshape.getLoc(), resultTy, collapse);
- if (!expand)
- return failure();
-
- rewriter.replaceOp(reshape, expand);
+ auto loc = reshape.getLoc();
+ auto resultType = reshape.getResult().getType();
+ auto input = reshape.getInput1();
+ auto newShape = reshape.getNewShape();
+
+ // Infer all intermediate types
+ auto inputType = inferReshapeInputType(input, newShape);
+ auto expandedType = inferReshapeExpandedType(inputType, newShape);
+ auto collapsedType = inferReshapeCollapsedType(inputType, expandedType);
+
+ // Cast input if needed
+ auto castInput = rewriter.createOrFold<tensor::CastOp>(loc, inputType, input);
+
+    // Emit collapse-expand pair
+ auto collapsed = createCollapse(rewriter, loc, collapsedType, castInput);
+ auto expanded = createExpand(rewriter, loc, expandedType, collapsed);
+
+ // Cast to final result type if needed
+ auto result = rewriter.createOrFold<tensor::CastOp>(loc, resultType, expanded);
+ rewriter.replaceOp(reshape, result);
return success();
}
};
@@ -416,8 +430,10 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {
void mlir::tosa::populateTosaToTensorConversionPatterns(
RewritePatternSet *patterns) {
- patterns->add<SliceConverter, PadConverter, ConcatConverter>(
- patterns->getContext());
-
- patterns->add<ReshapeConverterCollapseExpand>(patterns->getContext());
+ patterns->add<
+ ConcatConverter,
+ PadConverter,
+ ReshapeConverter,
+ SliceConverter
+ >(patterns->getContext());
}
diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
index 3dc5539cde3d..8b8ed2578ca5 100644
--- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp
@@ -1792,8 +1792,7 @@ MemRefType mlir::affine::normalizeMemRefType(MemRefType memrefType) {
MLIRContext *context = memrefType.getContext();
for (unsigned d = 0; d < newRank; ++d) {
// Check if this dimension is dynamic.
- if (bool isDynDim =
- isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims)) {
+ if (isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims)) {
newShape[d] = ShapedType::kDynamic;
continue;
}
diff --git a/mlir/lib/Dialect/Arith/IR/ArithDialect.cpp b/mlir/lib/Dialect/Arith/IR/ArithDialect.cpp
index 6a593185cced..042acf610090 100644
--- a/mlir/lib/Dialect/Arith/IR/ArithDialect.cpp
+++ b/mlir/lib/Dialect/Arith/IR/ArithDialect.cpp
@@ -48,9 +48,9 @@ void arith::ArithDialect::initialize() {
#include "mlir/Dialect/Arith/IR/ArithOpsAttributes.cpp.inc"
>();
addInterfaces<ArithInlinerInterface>();
- declarePromisedInterface<ArithDialect, ConvertToLLVMPatternInterface>();
- declarePromisedInterface<SelectOp,
- bufferization::BufferDeallocationOpInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, ArithDialect>();
+ declarePromisedInterface<bufferization::BufferDeallocationOpInterface,
+ SelectOp>();
declarePromisedInterfaces<bufferization::BufferizableOpInterface, ConstantOp,
IndexCastOp, SelectOp>();
declarePromisedInterfaces<ValueBoundsOpInterface, AddIOp, ConstantOp, SubIOp,
diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
index 9f64a07f31e3..2f32d9a26e77 100644
--- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
+++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp
@@ -689,43 +689,17 @@ OpFoldResult arith::FloorDivSIOp::fold(FoldAdaptor adaptor) {
return getLhs();
// Don't fold if it would overflow or if it requires a division by zero.
- bool overflowOrDiv0 = false;
+ bool overflowOrDiv = false;
auto result = constFoldBinaryOp<IntegerAttr>(
adaptor.getOperands(), [&](APInt a, const APInt &b) {
- if (overflowOrDiv0 || !b) {
- overflowOrDiv0 = true;
+ if (b.isZero()) {
+ overflowOrDiv = true;
return a;
}
- if (!a)
- return a;
- // After this point we know that neither a or b are zero.
- unsigned bits = a.getBitWidth();
- APInt zero = APInt::getZero(bits);
- bool aGtZero = a.sgt(zero);
- bool bGtZero = b.sgt(zero);
- if (aGtZero && bGtZero) {
- // Both positive, return a / b.
- return a.sdiv_ov(b, overflowOrDiv0);
- }
- if (!aGtZero && !bGtZero) {
- // Both negative, return -a / -b.
- APInt posA = zero.ssub_ov(a, overflowOrDiv0);
- APInt posB = zero.ssub_ov(b, overflowOrDiv0);
- return posA.sdiv_ov(posB, overflowOrDiv0);
- }
- if (!aGtZero && bGtZero) {
- // A is negative, b is positive, return - ceil(-a, b).
- APInt posA = zero.ssub_ov(a, overflowOrDiv0);
- APInt ceil = signedCeilNonnegInputs(posA, b, overflowOrDiv0);
- return zero.ssub_ov(ceil, overflowOrDiv0);
- }
- // A is positive, b is negative, return - ceil(a, -b).
- APInt posB = zero.ssub_ov(b, overflowOrDiv0);
- APInt ceil = signedCeilNonnegInputs(a, posB, overflowOrDiv0);
- return zero.ssub_ov(ceil, overflowOrDiv0);
+ return a.sfloordiv_ov(b, overflowOrDiv);
});
- return overflowOrDiv0 ? Attribute() : result;
+ return overflowOrDiv ? Attribute() : result;
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.cpp
new file mode 100644
index 000000000000..9df9df86b64f
--- /dev/null
+++ b/mlir/lib/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.cpp
@@ -0,0 +1,44 @@
+//===- BufferViewFlowOpInterfaceImpl.cpp - Buffer View Flow Analysis ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Arith/Transforms/BufferViewFlowOpInterfaceImpl.h"
+
+#include "mlir/Dialect/Arith/IR/Arith.h"
+#include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h"
+
+using namespace mlir;
+using namespace mlir::bufferization;
+
+namespace mlir {
+namespace arith {
+namespace {
+
+struct SelectOpInterface
+ : public BufferViewFlowOpInterface::ExternalModel<SelectOpInterface,
+ SelectOp> {
+ void
+ populateDependencies(Operation *op,
+ RegisterDependenciesFn registerDependenciesFn) const {
+ auto selectOp = cast<SelectOp>(op);
+
+ // Either one of the true/false values may be selected at runtime.
+ registerDependenciesFn(selectOp.getTrueValue(), selectOp.getResult());
+ registerDependenciesFn(selectOp.getFalseValue(), selectOp.getResult());
+ }
+};
+
+} // namespace
+} // namespace arith
+} // namespace mlir
+
+void arith::registerBufferViewFlowOpInterfaceExternalModels(
+ DialectRegistry &registry) {
+ registry.addExtension(+[](MLIRContext *ctx, arith::ArithDialect *dialect) {
+ SelectOp::attachInterface<SelectOpInterface>(*ctx);
+ });
+}
diff --git a/mlir/lib/Dialect/Arith/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Arith/Transforms/CMakeLists.txt
index 02240601bcd3..12659eaba1fa 100644
--- a/mlir/lib/Dialect/Arith/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Arith/Transforms/CMakeLists.txt
@@ -2,6 +2,7 @@ add_mlir_dialect_library(MLIRArithTransforms
BufferDeallocationOpInterfaceImpl.cpp
BufferizableOpInterfaceImpl.cpp
Bufferize.cpp
+ BufferViewFlowOpInterfaceImpl.cpp
EmulateUnsupportedFloats.cpp
EmulateWideInt.cpp
EmulateNarrowType.cpp
diff --git a/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp b/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
index 7f246daf99ff..71e14a153cfd 100644
--- a/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
+++ b/mlir/lib/Dialect/Arith/Transforms/ExpandOps.cpp
@@ -110,9 +110,13 @@ struct CeilDivSIOpConverter : public OpRewritePattern<arith::CeilDivSIOp> {
}
};
-/// Expands FloorDivSIOp (n, m) into
-/// 1) x = (m<0) ? 1 : -1
-/// 2) return (n*m<0) ? - ((-n+x) / m) -1 : n / m
+/// Expands FloorDivSIOp (x, y) into
+/// z = x / y
+/// if (z * y != x && (x < 0) != (y < 0)) {
+/// return z - 1;
+/// } else {
+/// return z;
+/// }
struct FloorDivSIOpConverter : public OpRewritePattern<arith::FloorDivSIOp> {
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(arith::FloorDivSIOp op,
@@ -121,41 +125,29 @@ struct FloorDivSIOpConverter : public OpRewritePattern<arith::FloorDivSIOp> {
Type type = op.getType();
Value a = op.getLhs();
Value b = op.getRhs();
- Value plusOne = createConst(loc, type, 1, rewriter);
+
+ Value quotient = rewriter.create<arith::DivSIOp>(loc, a, b);
+ Value product = rewriter.create<arith::MulIOp>(loc, quotient, b);
+ Value notEqualDivisor = rewriter.create<arith::CmpIOp>(
+ loc, arith::CmpIPredicate::ne, a, product);
Value zero = createConst(loc, type, 0, rewriter);
- Value minusOne = createConst(loc, type, -1, rewriter);
- // Compute x = (b<0) ? 1 : -1.
- Value compare =
- rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, b, zero);
- Value x = rewriter.create<arith::SelectOp>(loc, compare, plusOne, minusOne);
- // Compute negative res: -1 - ((x-a)/b).
- Value xMinusA = rewriter.create<arith::SubIOp>(loc, x, a);
- Value xMinusADivB = rewriter.create<arith::DivSIOp>(loc, xMinusA, b);
- Value negRes = rewriter.create<arith::SubIOp>(loc, minusOne, xMinusADivB);
- // Compute positive res: a/b.
- Value posRes = rewriter.create<arith::DivSIOp>(loc, a, b);
- // Result is (a*b<0) ? negative result : positive result.
- // Note, we want to avoid using a*b because of possible overflow.
- // The case that matters are a>0, a==0, a<0, b>0 and b<0. We do
- // not particuliarly care if a*b<0 is true or false when b is zero
- // as this will result in an illegal divide. So `a*b<0` can be reformulated
- // as `(a>0 && b<0) || (a>0 && b<0)' or `(a>0 && b<0) || (a>0 && b<=0)'.
- // We pick the first expression here.
+
Value aNeg =
rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, a, zero);
- Value aPos =
- rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt, a, zero);
Value bNeg =
rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, b, zero);
- Value bPos =
- rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::sgt, b, zero);
- Value firstTerm = rewriter.create<arith::AndIOp>(loc, aNeg, bPos);
- Value secondTerm = rewriter.create<arith::AndIOp>(loc, aPos, bNeg);
- Value compareRes =
- rewriter.create<arith::OrIOp>(loc, firstTerm, secondTerm);
- // Perform substitution and return success.
- rewriter.replaceOpWithNewOp<arith::SelectOp>(op, compareRes, negRes,
- posRes);
+
+ Value signOpposite = rewriter.create<arith::CmpIOp>(
+ loc, arith::CmpIPredicate::ne, aNeg, bNeg);
+ Value cond =
+ rewriter.create<arith::AndIOp>(loc, notEqualDivisor, signOpposite);
+
+ Value minusOne = createConst(loc, type, -1, rewriter);
+ Value quotientMinusOne =
+ rewriter.create<arith::AddIOp>(loc, quotient, minusOne);
+
+ rewriter.replaceOpWithNewOp<arith::SelectOp>(op, cond, quotientMinusOne,
+ quotient);
return success();
}
};
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp
new file mode 100644
index 000000000000..ea726a4bfc3f
--- /dev/null
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp
@@ -0,0 +1,18 @@
+//===- BufferViewFlowOpInterface.cpp - Buffer View Flow Analysis ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h"
+#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+
+namespace mlir {
+namespace bufferization {
+
+#include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp.inc"
+
+} // namespace bufferization
+} // namespace mlir
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 2b226c7a1207..a656c812a59f 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -333,6 +333,9 @@ struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
auto allocTensorOp = dimOp.getSource().getDefiningOp<AllocTensorOp>();
if (!allocTensorOp || !maybeConstantIndex)
return failure();
+ if (*maybeConstantIndex < 0 ||
+ *maybeConstantIndex >= allocTensorOp.getType().getRank())
+ return failure();
if (!allocTensorOp.getType().isDynamicDim(*maybeConstantIndex))
return failure();
rewriter.replaceOp(
diff --git a/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt b/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt
index 9895db9d93ce..63dcc1eb233e 100644
--- a/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Bufferization/IR/CMakeLists.txt
@@ -4,6 +4,7 @@ add_mlir_dialect_library(MLIRBufferizationDialect
BufferDeallocationOpInterface.cpp
BufferizationOps.cpp
BufferizationDialect.cpp
+ BufferViewFlowOpInterface.cpp
UnstructuredControlFlow.cpp
ADDITIONAL_HEADER_DIRS
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
index e30779868b47..954485cfede3 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/Analysis/AliasAnalysis.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
@@ -34,6 +34,14 @@ using namespace mlir::bufferization;
// Helpers
//===----------------------------------------------------------------------===//
+/// Given a memref value, return the "base" value by skipping over all
+/// ViewLikeOpInterface ops (if any) in the reverse use-def chain.
+static Value getViewBase(Value value) {
+ while (auto viewLikeOp = value.getDefiningOp<ViewLikeOpInterface>())
+ value = viewLikeOp.getViewSource();
+ return value;
+}
+
static LogicalResult updateDeallocIfChanged(DeallocOp deallocOp,
ValueRange memrefs,
ValueRange conditions,
@@ -49,14 +57,6 @@ static LogicalResult updateDeallocIfChanged(DeallocOp deallocOp,
return success();
}
-/// Given a memref value, return the "base" value by skipping over all
-/// ViewLikeOpInterface ops (if any) in the reverse use-def chain.
-static Value getViewBase(Value value) {
- while (auto viewLikeOp = value.getDefiningOp<ViewLikeOpInterface>())
- value = viewLikeOp.getViewSource();
- return value;
-}
-
/// Return "true" if the given values are guaranteed to be different (and
/// non-aliasing) allocations based on the fact that one value is the result
/// of an allocation and the other value is a block argument of a parent block.
@@ -80,12 +80,14 @@ static bool distinctAllocAndBlockArgument(Value v1, Value v2) {
/// Checks if `memref` may potentially alias a MemRef in `otherList`. It is
/// often a requirement of optimization patterns that there cannot be any
/// aliasing memref in order to perform the desired simplification.
-static bool potentiallyAliasesMemref(AliasAnalysis &analysis,
+static bool potentiallyAliasesMemref(BufferOriginAnalysis &analysis,
ValueRange otherList, Value memref) {
for (auto other : otherList) {
if (distinctAllocAndBlockArgument(other, memref))
continue;
- if (!analysis.alias(other, memref).isNo())
+ std::optional<bool> analysisResult =
+ analysis.isSameAllocation(other, memref);
+ if (!analysisResult.has_value() || analysisResult == true)
return true;
}
return false;
@@ -129,8 +131,8 @@ namespace {
struct RemoveDeallocMemrefsContainedInRetained
: public OpRewritePattern<DeallocOp> {
RemoveDeallocMemrefsContainedInRetained(MLIRContext *context,
- AliasAnalysis &aliasAnalysis)
- : OpRewritePattern<DeallocOp>(context), aliasAnalysis(aliasAnalysis) {}
+ BufferOriginAnalysis &analysis)
+ : OpRewritePattern<DeallocOp>(context), analysis(analysis) {}
/// The passed 'memref' must not have a may-alias relation to any retained
/// memref, and at least one must-alias relation. If there is no must-aliasing
@@ -147,10 +149,11 @@ struct RemoveDeallocMemrefsContainedInRetained
// deallocated in some situations and can thus not be dropped).
bool atLeastOneMustAlias = false;
for (Value retained : deallocOp.getRetained()) {
- AliasResult analysisResult = aliasAnalysis.alias(retained, memref);
- if (analysisResult.isMay())
+ std::optional<bool> analysisResult =
+ analysis.isSameAllocation(retained, memref);
+ if (!analysisResult.has_value())
return failure();
- if (analysisResult.isMust() || analysisResult.isPartial())
+ if (analysisResult == true)
atLeastOneMustAlias = true;
}
if (!atLeastOneMustAlias)
@@ -161,8 +164,9 @@ struct RemoveDeallocMemrefsContainedInRetained
// we can remove that operand later on.
for (auto [i, retained] : llvm::enumerate(deallocOp.getRetained())) {
Value updatedCondition = deallocOp.getUpdatedConditions()[i];
- AliasResult analysisResult = aliasAnalysis.alias(retained, memref);
- if (analysisResult.isMust() || analysisResult.isPartial()) {
+ std::optional<bool> analysisResult =
+ analysis.isSameAllocation(retained, memref);
+ if (analysisResult == true) {
auto disjunction = rewriter.create<arith::OrIOp>(
deallocOp.getLoc(), updatedCondition, cond);
rewriter.replaceAllUsesExcept(updatedCondition, disjunction.getResult(),
@@ -206,7 +210,7 @@ struct RemoveDeallocMemrefsContainedInRetained
}
private:
- AliasAnalysis &aliasAnalysis;
+ BufferOriginAnalysis &analysis;
};
/// Remove memrefs from the `retained` list which are guaranteed to not alias
@@ -228,15 +232,15 @@ private:
struct RemoveRetainedMemrefsGuaranteedToNotAlias
: public OpRewritePattern<DeallocOp> {
RemoveRetainedMemrefsGuaranteedToNotAlias(MLIRContext *context,
- AliasAnalysis &aliasAnalysis)
- : OpRewritePattern<DeallocOp>(context), aliasAnalysis(aliasAnalysis) {}
+ BufferOriginAnalysis &analysis)
+ : OpRewritePattern<DeallocOp>(context), analysis(analysis) {}
LogicalResult matchAndRewrite(DeallocOp deallocOp,
PatternRewriter &rewriter) const override {
SmallVector<Value> newRetainedMemrefs, replacements;
for (auto retainedMemref : deallocOp.getRetained()) {
- if (potentiallyAliasesMemref(aliasAnalysis, deallocOp.getMemrefs(),
+ if (potentiallyAliasesMemref(analysis, deallocOp.getMemrefs(),
retainedMemref)) {
newRetainedMemrefs.push_back(retainedMemref);
replacements.push_back({});
@@ -264,7 +268,7 @@ struct RemoveRetainedMemrefsGuaranteedToNotAlias
}
private:
- AliasAnalysis &aliasAnalysis;
+ BufferOriginAnalysis &analysis;
};
/// Split off memrefs to separate dealloc operations to reduce the number of
@@ -297,8 +301,8 @@ private:
struct SplitDeallocWhenNotAliasingAnyOther
: public OpRewritePattern<DeallocOp> {
SplitDeallocWhenNotAliasingAnyOther(MLIRContext *context,
- AliasAnalysis &aliasAnalysis)
- : OpRewritePattern<DeallocOp>(context), aliasAnalysis(aliasAnalysis) {}
+ BufferOriginAnalysis &analysis)
+ : OpRewritePattern<DeallocOp>(context), analysis(analysis) {}
LogicalResult matchAndRewrite(DeallocOp deallocOp,
PatternRewriter &rewriter) const override {
@@ -314,7 +318,7 @@ struct SplitDeallocWhenNotAliasingAnyOther
SmallVector<Value> otherMemrefs(deallocOp.getMemrefs());
otherMemrefs.erase(otherMemrefs.begin() + i);
// Check if `memref` can split off into a separate bufferization.dealloc.
- if (potentiallyAliasesMemref(aliasAnalysis, otherMemrefs, memref)) {
+ if (potentiallyAliasesMemref(analysis, otherMemrefs, memref)) {
// `memref` alias with other memrefs, do not split off.
remainingMemrefs.push_back(memref);
remainingConditions.push_back(cond);
@@ -352,7 +356,7 @@ struct SplitDeallocWhenNotAliasingAnyOther
}
private:
- AliasAnalysis &aliasAnalysis;
+ BufferOriginAnalysis &analysis;
};
/// Check for every retained memref if a must-aliasing memref exists in the
@@ -381,8 +385,8 @@ private:
struct RetainedMemrefAliasingAlwaysDeallocatedMemref
: public OpRewritePattern<DeallocOp> {
RetainedMemrefAliasingAlwaysDeallocatedMemref(MLIRContext *context,
- AliasAnalysis &aliasAnalysis)
- : OpRewritePattern<DeallocOp>(context), aliasAnalysis(aliasAnalysis) {}
+ BufferOriginAnalysis &analysis)
+ : OpRewritePattern<DeallocOp>(context), analysis(analysis) {}
LogicalResult matchAndRewrite(DeallocOp deallocOp,
PatternRewriter &rewriter) const override {
@@ -396,8 +400,9 @@ struct RetainedMemrefAliasingAlwaysDeallocatedMemref
if (!matchPattern(cond, m_One()))
continue;
- AliasResult analysisResult = aliasAnalysis.alias(retained, memref);
- if (analysisResult.isMust() || analysisResult.isPartial()) {
+ std::optional<bool> analysisResult =
+ analysis.isSameAllocation(retained, memref);
+ if (analysisResult == true) {
rewriter.replaceAllUsesWith(res, cond);
aliasesWithConstTrueMemref[i] = true;
canDropMemref = true;
@@ -411,10 +416,9 @@ struct RetainedMemrefAliasingAlwaysDeallocatedMemref
if (!extractOp)
continue;
- AliasResult extractAnalysisResult =
- aliasAnalysis.alias(retained, extractOp.getOperand());
- if (extractAnalysisResult.isMust() ||
- extractAnalysisResult.isPartial()) {
+ std::optional<bool> extractAnalysisResult =
+ analysis.isSameAllocation(retained, extractOp.getOperand());
+ if (extractAnalysisResult == true) {
rewriter.replaceAllUsesWith(res, cond);
aliasesWithConstTrueMemref[i] = true;
canDropMemref = true;
@@ -434,7 +438,7 @@ struct RetainedMemrefAliasingAlwaysDeallocatedMemref
}
private:
- AliasAnalysis &aliasAnalysis;
+ BufferOriginAnalysis &analysis;
};
} // namespace
@@ -452,13 +456,13 @@ struct BufferDeallocationSimplificationPass
: public bufferization::impl::BufferDeallocationSimplificationBase<
BufferDeallocationSimplificationPass> {
void runOnOperation() override {
- AliasAnalysis &aliasAnalysis = getAnalysis<AliasAnalysis>();
+ BufferOriginAnalysis analysis(getOperation());
RewritePatternSet patterns(&getContext());
patterns.add<RemoveDeallocMemrefsContainedInRetained,
RemoveRetainedMemrefsGuaranteedToNotAlias,
SplitDeallocWhenNotAliasingAnyOther,
RetainedMemrefAliasingAlwaysDeallocatedMemref>(&getContext(),
- aliasAnalysis);
+ analysis);
populateDeallocOpCanonicalizationPatterns(patterns, &getContext());
if (failed(
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
index 88ef1b639fc5..72f47b8b468e 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
@@ -8,29 +8,34 @@
#include "mlir/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.h"
+#include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h"
+#include "mlir/Interfaces/CallInterfaces.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
+#include "mlir/Interfaces/FunctionInterfaces.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
using namespace mlir;
+using namespace mlir::bufferization;
+
+//===----------------------------------------------------------------------===//
+// BufferViewFlowAnalysis
+//===----------------------------------------------------------------------===//
/// Constructs a new alias analysis using the op provided.
BufferViewFlowAnalysis::BufferViewFlowAnalysis(Operation *op) { build(op); }
-/// Find all immediate and indirect dependent buffers this value could
-/// potentially have. Note that the resulting set will also contain the value
-/// provided as it is a dependent alias of itself.
-BufferViewFlowAnalysis::ValueSetT
-BufferViewFlowAnalysis::resolve(Value rootValue) const {
- ValueSetT result;
+static BufferViewFlowAnalysis::ValueSetT
+resolveValues(const BufferViewFlowAnalysis::ValueMapT &map, Value value) {
+ BufferViewFlowAnalysis::ValueSetT result;
SmallVector<Value, 8> queue;
- queue.push_back(rootValue);
+ queue.push_back(value);
while (!queue.empty()) {
Value currentValue = queue.pop_back_val();
if (result.insert(currentValue).second) {
- auto it = dependencies.find(currentValue);
- if (it != dependencies.end()) {
+ auto it = map.find(currentValue);
+ if (it != map.end()) {
for (Value aliasValue : it->second)
queue.push_back(aliasValue);
}
@@ -39,6 +44,19 @@ BufferViewFlowAnalysis::resolve(Value rootValue) const {
return result;
}
+/// Find all immediate and indirect dependent buffers this value could
+/// potentially have. Note that the resulting set will also contain the value
+/// provided as it is a dependent alias of itself.
+BufferViewFlowAnalysis::ValueSetT
+BufferViewFlowAnalysis::resolve(Value rootValue) const {
+ return resolveValues(dependencies, rootValue);
+}
+
+BufferViewFlowAnalysis::ValueSetT
+BufferViewFlowAnalysis::resolveReverse(Value rootValue) const {
+ return resolveValues(reverseDependencies, rootValue);
+}
+
/// Removes the given values from all alias sets.
void BufferViewFlowAnalysis::remove(const SetVector<Value> &aliasValues) {
for (auto &entry : dependencies)
@@ -65,18 +83,46 @@ void BufferViewFlowAnalysis::rename(Value from, Value to) {
void BufferViewFlowAnalysis::build(Operation *op) {
// Registers all dependencies of the given values.
auto registerDependencies = [&](ValueRange values, ValueRange dependencies) {
- for (auto [value, dep] : llvm::zip(values, dependencies))
+ for (auto [value, dep] : llvm::zip_equal(values, dependencies)) {
this->dependencies[value].insert(dep);
+ this->reverseDependencies[dep].insert(value);
+ }
+ };
+
+ // Mark all buffer results and buffer region entry block arguments of the
+ // given op as terminals.
+ auto populateTerminalValues = [&](Operation *op) {
+ for (Value v : op->getResults())
+ if (isa<BaseMemRefType>(v.getType()))
+ this->terminals.insert(v);
+ for (Region &r : op->getRegions())
+ for (BlockArgument v : r.getArguments())
+ if (isa<BaseMemRefType>(v.getType()))
+ this->terminals.insert(v);
};
op->walk([&](Operation *op) {
- // TODO: We should have an op interface instead of a hard-coded list of
- // interfaces/ops.
+ // Query BufferViewFlowOpInterface. If the op does not implement that
+ // interface, try to infer the dependencies from other interfaces that the
+ // op may implement.
+ if (auto bufferViewFlowOp = dyn_cast<BufferViewFlowOpInterface>(op)) {
+ bufferViewFlowOp.populateDependencies(registerDependencies);
+ for (Value v : op->getResults())
+ if (isa<BaseMemRefType>(v.getType()) &&
+ bufferViewFlowOp.mayBeTerminalBuffer(v))
+ this->terminals.insert(v);
+ for (Region &r : op->getRegions())
+ for (BlockArgument v : r.getArguments())
+ if (isa<BaseMemRefType>(v.getType()) &&
+ bufferViewFlowOp.mayBeTerminalBuffer(v))
+ this->terminals.insert(v);
+ return WalkResult::advance();
+ }
// Add additional dependencies created by view changes to the alias list.
if (auto viewInterface = dyn_cast<ViewLikeOpInterface>(op)) {
- dependencies[viewInterface.getViewSource()].insert(
- viewInterface->getResult(0));
+ registerDependencies(viewInterface.getViewSource(),
+ viewInterface->getResult(0));
return WalkResult::advance();
}
@@ -131,16 +177,154 @@ void BufferViewFlowAnalysis::build(Operation *op) {
return WalkResult::advance();
}
- // Unknown op: Assume that all operands alias with all results.
- for (Value operand : op->getOperands()) {
- if (!isa<BaseMemRefType>(operand.getType()))
- continue;
- for (Value result : op->getResults()) {
- if (!isa<BaseMemRefType>(result.getType()))
- continue;
- registerDependencies({operand}, {result});
- }
+ // Region terminators are handled together with RegionBranchOpInterface.
+ if (isa<RegionBranchTerminatorOpInterface>(op))
+ return WalkResult::advance();
+
+ if (isa<CallOpInterface>(op)) {
+ // This is an intra-function analysis. We have no information about other
+ // functions. Conservatively assume that each operand may alias with each
+ // result. Also mark the results as terminals because the function could
+ // return newly allocated buffers.
+ populateTerminalValues(op);
+ for (Value operand : op->getOperands())
+ for (Value result : op->getResults())
+ registerDependencies({operand}, {result});
+ return WalkResult::advance();
}
+
+ // We have no information about unknown ops.
+ populateTerminalValues(op);
+
return WalkResult::advance();
});
}
+
+bool BufferViewFlowAnalysis::mayBeTerminalBuffer(Value value) const {
+ assert(isa<BaseMemRefType>(value.getType()) && "expected memref");
+ return terminals.contains(value);
+}
+
+//===----------------------------------------------------------------------===//
+// BufferOriginAnalysis
+//===----------------------------------------------------------------------===//
+
+/// Return "true" if the given value is the result of a memory allocation.
+static bool hasAllocateSideEffect(Value v) {
+ Operation *op = v.getDefiningOp();
+ if (!op)
+ return false;
+ return hasEffect<MemoryEffects::Allocate>(op, v);
+}
+
+/// Return "true" if the given value is a function block argument.
+static bool isFunctionArgument(Value v) {
+ auto bbArg = dyn_cast<BlockArgument>(v);
+ if (!bbArg)
+ return false;
+ Block *b = bbArg.getOwner();
+ auto funcOp = dyn_cast<FunctionOpInterface>(b->getParentOp());
+ if (!funcOp)
+ return false;
+ return bbArg.getOwner() == &funcOp.getFunctionBody().front();
+}
+
+/// Given a memref value, return the "base" value by skipping over all
+/// ViewLikeOpInterface ops (if any) in the reverse use-def chain.
+static Value getViewBase(Value value) {
+ while (auto viewLikeOp = value.getDefiningOp<ViewLikeOpInterface>())
+ value = viewLikeOp.getViewSource();
+ return value;
+}
+
+BufferOriginAnalysis::BufferOriginAnalysis(Operation *op) : analysis(op) {}
+
+std::optional<bool> BufferOriginAnalysis::isSameAllocation(Value v1, Value v2) {
+ assert(isa<BaseMemRefType>(v1.getType()) && "expected buffer");
+ assert(isa<BaseMemRefType>(v2.getType()) && "expected buffer");
+
+ // Skip over all view-like ops.
+ v1 = getViewBase(v1);
+ v2 = getViewBase(v2);
+
+ // Fast path: If both buffers are the same SSA value, we can be sure that
+ // they originate from the same allocation.
+ if (v1 == v2)
+ return true;
+
+ // Compute the SSA values from which the buffers `v1` and `v2` originate.
+ SmallPtrSet<Value, 16> origin1 = analysis.resolveReverse(v1);
+ SmallPtrSet<Value, 16> origin2 = analysis.resolveReverse(v2);
+
+ // Originating buffers are "terminal" if they could not be traced back any
+ // further by the `BufferViewFlowAnalysis`. Examples of terminal buffers:
+ // - function block arguments
+ // - values defined by allocation ops such as "memref.alloc"
+ // - values defined by ops that are unknown to the buffer view flow analysis
+ // - values that are marked as "terminal" in the `BufferViewFlowOpInterface`
+ SmallPtrSet<Value, 16> terminal1, terminal2;
+
+ // While gathering terminal buffers, keep track of whether all terminal
+ // buffers are newly allocated buffers or function entry arguments.
+ bool allAllocs1 = true, allAllocs2 = true;
+ bool allAllocsOrFuncEntryArgs1 = true, allAllocsOrFuncEntryArgs2 = true;
+
+ // Helper function that gathers terminal buffers among `origin`.
+ auto gatherTerminalBuffers = [this](const SmallPtrSet<Value, 16> &origin,
+ SmallPtrSet<Value, 16> &terminal,
+ bool &allAllocs,
+ bool &allAllocsOrFuncEntryArgs) {
+ for (Value v : origin) {
+ if (isa<BaseMemRefType>(v.getType()) && analysis.mayBeTerminalBuffer(v)) {
+ terminal.insert(v);
+ allAllocs &= hasAllocateSideEffect(v);
+ allAllocsOrFuncEntryArgs &=
+ isFunctionArgument(v) || hasAllocateSideEffect(v);
+ }
+ }
+ assert(!terminal.empty() && "expected non-empty terminal set");
+ };
+
+ // Gather terminal buffers for `v1` and `v2`.
+ gatherTerminalBuffers(origin1, terminal1, allAllocs1,
+ allAllocsOrFuncEntryArgs1);
+ gatherTerminalBuffers(origin2, terminal2, allAllocs2,
+ allAllocsOrFuncEntryArgs2);
+
+ // If both `v1` and `v2` have a single matching terminal buffer, they are
+ // guaranteed to originate from the same buffer allocation.
+ if (llvm::hasSingleElement(terminal1) && llvm::hasSingleElement(terminal2) &&
+ *terminal1.begin() == *terminal2.begin())
+ return true;
+
+ // At least one of the two values has multiple terminals.
+
+ // Check if there is overlap between the terminal buffers of `v1` and `v2`.
+ bool distinctTerminalSets = true;
+ for (Value v : terminal1)
+ distinctTerminalSets &= !terminal2.contains(v);
+ // If there is overlap between the terminal buffers of `v1` and `v2`, we
+ // cannot make an accurate decision without further analysis.
+ if (!distinctTerminalSets)
+ return std::nullopt;
+
+ // If `v1` originates from only allocs, and `v2` is guaranteed to originate
+ // from different allocations (that is guaranteed if `v2` originates from
+ // only distinct allocs or function entry arguments), we can be sure that
+ // `v1` and `v2` originate from different allocations. The same argument can
+ // be made when swapping `v1` and `v2`.
+ bool isolatedAlloc1 = allAllocs1 && (allAllocs2 || allAllocsOrFuncEntryArgs2);
+ bool isolatedAlloc2 = (allAllocs1 || allAllocsOrFuncEntryArgs1) && allAllocs2;
+ if (isolatedAlloc1 || isolatedAlloc2)
+ return false;
+
+ // Otherwise: We do not know whether `v1` and `v2` originate from the same
+ // allocation or not.
+ // TODO: Function arguments are currently handled conservatively. We assume
+ // that they could be the same allocation.
+ // TODO: Terminals other than allocations and function arguments are
+ // currently handled conservatively. We assume that they could be the same
+ // allocation. E.g., we currently return "nullopt" for values that originate
+ // from different "memref.get_global" ops (with different symbols).
+ return std::nullopt;
+}
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index 8dbf70162012..32f4e6a0fe89 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -182,6 +182,11 @@ parseHeuristicOption(const std::string &s) {
return OneShotBufferizationOptions::AnalysisHeuristic::BottomUp;
if (s == "top-down")
return OneShotBufferizationOptions::AnalysisHeuristic::TopDown;
+ if (s == "bottom-up-from-terminators")
+ return OneShotBufferizationOptions::AnalysisHeuristic::
+ BottomUpFromTerminators;
+ if (s == "fuzzer")
+ return OneShotBufferizationOptions::AnalysisHeuristic::Fuzzer;
llvm_unreachable("invalid analysisheuristic option");
}
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
index fba9cd873063..531016130d1d 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -51,6 +51,7 @@
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Dominance.h"
+#include "mlir/IR/Iterators.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
@@ -1094,41 +1095,104 @@ static void equivalenceAnalysis(Operation *op, OneShotAnalysisState &state) {
equivalenceAnalysis(ops, state);
}
-LogicalResult OneShotAnalysisState::analyzeOp(Operation *op,
- const DominanceInfo &domInfo) {
- // Collect ops so we can build our own reverse traversal.
- SmallVector<Operation *> ops;
- op->walk([&](Operation *op) {
- // No tensors => no buffers.
- if (!hasTensorSemantics(op))
+/// "Bottom-up from terminators" heuristic.
+static SmallVector<Operation *>
+bottomUpFromTerminatorsHeuristic(Operation *op,
+ const OneShotAnalysisState &state) {
+ SetVector<Operation *> traversedOps;
+
+ // Find region terminators.
+ op->walk<WalkOrder::PostOrder>([&](RegionBranchTerminatorOpInterface term) {
+ if (!traversedOps.insert(term))
return;
- ops.push_back(op);
+ // Follow the reverse SSA use-def chain from each yielded value as long as
+ // we stay within the same region.
+ SmallVector<OpResult> worklist;
+ for (Value v : term->getOperands()) {
+ if (!isa<TensorType>(v.getType()))
+ continue;
+ auto opResult = dyn_cast<OpResult>(v);
+ if (!opResult)
+ continue;
+ worklist.push_back(opResult);
+ }
+ while (!worklist.empty()) {
+ OpResult opResult = worklist.pop_back_val();
+ Operation *defOp = opResult.getDefiningOp();
+ if (!traversedOps.insert(defOp))
+ continue;
+ if (!term->getParentRegion()->findAncestorOpInRegion(*defOp))
+ continue;
+ AliasingOpOperandList aliases = state.getAliasingOpOperands(opResult);
+ for (auto alias : aliases) {
+ Value v = alias.opOperand->get();
+ if (!isa<TensorType>(v.getType()))
+ continue;
+ auto opResult = dyn_cast<OpResult>(v);
+ if (!opResult)
+ continue;
+ worklist.push_back(opResult);
+ }
+ }
});
- if (getOptions().analysisFuzzerSeed) {
- // This is a fuzzer. For testing purposes only. Randomize the order in which
- // operations are analyzed. The bufferization quality is likely worse, but
- // we want to make sure that no assertions are triggered anywhere.
- std::mt19937 g(getOptions().analysisFuzzerSeed);
- llvm::shuffle(ops.begin(), ops.end(), g);
- }
+ // Analyze traversed ops, then all remaining ops.
+ SmallVector<Operation *> result(traversedOps.begin(), traversedOps.end());
+ op->walk<WalkOrder::PostOrder, ReverseIterator>([&](Operation *op) {
+ if (!traversedOps.contains(op) && hasTensorSemantics(op))
+ result.push_back(op);
+ });
+ return result;
+}
+LogicalResult OneShotAnalysisState::analyzeOp(Operation *op,
+ const DominanceInfo &domInfo) {
OneShotBufferizationOptions::AnalysisHeuristic heuristic =
getOptions().analysisHeuristic;
- if (heuristic == OneShotBufferizationOptions::AnalysisHeuristic::BottomUp) {
- // Default: Walk ops in reverse for better interference analysis.
- for (Operation *op : reverse(ops))
- if (failed(analyzeSingleOp(op, domInfo)))
- return failure();
- } else if (heuristic ==
- OneShotBufferizationOptions::AnalysisHeuristic::TopDown) {
- for (Operation *op : ops)
- if (failed(analyzeSingleOp(op, domInfo)))
- return failure();
+
+ SmallVector<Operation *> orderedOps;
+ if (heuristic ==
+ OneShotBufferizationOptions::AnalysisHeuristic::BottomUpFromTerminators) {
+ orderedOps = bottomUpFromTerminatorsHeuristic(op, *this);
} else {
- llvm_unreachable("unsupported heuristic");
+ op->walk([&](Operation *op) {
+ // No tensors => no buffers.
+ if (!hasTensorSemantics(op))
+ return;
+ orderedOps.push_back(op);
+ });
+ switch (heuristic) {
+ case OneShotBufferizationOptions::AnalysisHeuristic::BottomUp: {
+ // Default: Walk ops in reverse for better interference analysis.
+ std::reverse(orderedOps.begin(), orderedOps.end());
+ break;
+ }
+ case OneShotBufferizationOptions::AnalysisHeuristic::TopDown: {
+ // Ops are already sorted top-down in `orderedOps`.
+ break;
+ }
+ case OneShotBufferizationOptions::AnalysisHeuristic::Fuzzer: {
+ assert(getOptions().analysisFuzzerSeed &&
+           "expected that fuzzer seed is set");
+ // This is a fuzzer. For testing purposes only. Randomize the order in
+ // which operations are analyzed. The bufferization quality is likely
+ // worse, but we want to make sure that no assertions are triggered
+ // anywhere.
+ std::mt19937 g(getOptions().analysisFuzzerSeed);
+ llvm::shuffle(orderedOps.begin(), orderedOps.end(), g);
+ break;
+ }
+ default: {
+ llvm_unreachable("unsupported heuristic");
+ }
+ }
}
+ // Analyze ops in the computed order.
+ for (Operation *op : orderedOps)
+ if (failed(analyzeSingleOp(op, domInfo)))
+ return failure();
+
equivalenceAnalysis(op, *this);
return success();
}
diff --git a/mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp b/mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp
index ca57171af156..0bdcf434e062 100644
--- a/mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp
+++ b/mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp
@@ -40,7 +40,7 @@ void complex::ComplexDialect::initialize() {
#define GET_ATTRDEF_LIST
#include "mlir/Dialect/Complex/IR/ComplexAttributes.cpp.inc"
>();
- declarePromisedInterface<ComplexDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, ComplexDialect>();
addInterfaces<ComplexInlinerInterface>();
}
diff --git a/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp b/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp
index 5529dccaf1de..1c81433bc3e9 100644
--- a/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp
+++ b/mlir/lib/Dialect/Complex/IR/ComplexOps.cpp
@@ -370,6 +370,32 @@ OpFoldResult MulOp::fold(FoldAdaptor adaptor) {
}
//===----------------------------------------------------------------------===//
+// DivOp
+//===----------------------------------------------------------------------===//
+
+OpFoldResult DivOp::fold(FoldAdaptor adaptor) {
+ auto rhs = adaptor.getRhs();
+ if (!rhs)
+ return {};
+
+ ArrayAttr arrayAttr = rhs.dyn_cast<ArrayAttr>();
+ if (!arrayAttr || arrayAttr.size() != 2)
+ return {};
+
+ APFloat real = cast<FloatAttr>(arrayAttr[0]).getValue();
+ APFloat imag = cast<FloatAttr>(arrayAttr[1]).getValue();
+
+ if (!imag.isZero())
+ return {};
+
+ // complex.div(a, complex.constant<1.0, 0.0>) -> a
+ if (real == APFloat(real.getSemantics(), 1))
+ return getLhs();
+
+ return {};
+}
+
+//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
index c6b02b9703e7..5d11f8f6cc45 100644
--- a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
+++ b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp
@@ -70,11 +70,11 @@ void ControlFlowDialect::initialize() {
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.cpp.inc"
>();
addInterfaces<ControlFlowInlinerInterface>();
- declarePromisedInterface<ControlFlowDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, ControlFlowDialect>();
declarePromisedInterfaces<bufferization::BufferizableOpInterface, BranchOp,
CondBranchOp>();
- declarePromisedInterface<CondBranchOp,
- bufferization::BufferDeallocationOpInterface>();
+ declarePromisedInterface<bufferization::BufferDeallocationOpInterface,
+ CondBranchOp>();
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/EmitC/Transforms/FormExpressions.cpp b/mlir/lib/Dialect/EmitC/Transforms/FormExpressions.cpp
index 5b03f81b305f..e7c431f39e3f 100644
--- a/mlir/lib/Dialect/EmitC/Transforms/FormExpressions.cpp
+++ b/mlir/lib/Dialect/EmitC/Transforms/FormExpressions.cpp
@@ -36,7 +36,8 @@ struct FormExpressionsPass
// Wrap each C operator op with an expression op.
OpBuilder builder(context);
auto matchFun = [&](Operation *op) {
- if (op->hasTrait<OpTrait::emitc::CExpression>())
+ if (op->hasTrait<OpTrait::emitc::CExpression>() &&
+ !op->getParentOfType<emitc::ExpressionOp>())
createExpression(op, builder);
};
rootOp->walk(matchFun);
diff --git a/mlir/lib/Dialect/Func/IR/FuncOps.cpp b/mlir/lib/Dialect/Func/IR/FuncOps.cpp
index ed2ecfe9d0fb..95589e8989e2 100644
--- a/mlir/lib/Dialect/Func/IR/FuncOps.cpp
+++ b/mlir/lib/Dialect/Func/IR/FuncOps.cpp
@@ -42,8 +42,8 @@ void FuncDialect::initialize() {
#define GET_OP_LIST
#include "mlir/Dialect/Func/IR/FuncOps.cpp.inc"
>();
- declarePromisedInterface<FuncDialect, DialectInlinerInterface>();
- declarePromisedInterface<FuncDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<DialectInlinerInterface, FuncDialect>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, FuncDialect>();
declarePromisedInterfaces<bufferization::BufferizableOpInterface, CallOp,
FuncOp, ReturnOp>();
}
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index a02eca8b1179..f1b9ca5c5002 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -216,8 +216,8 @@ void GPUDialect::initialize() {
#include "mlir/Dialect/GPU/IR/GPUOpsAttributes.cpp.inc"
>();
addInterfaces<GPUInlinerInterface>();
- declarePromisedInterface<TerminatorOp,
- bufferization::BufferDeallocationOpInterface>();
+ declarePromisedInterface<bufferization::BufferDeallocationOpInterface,
+ TerminatorOp>();
}
static std::string getSparseHandleKeyword(SparseHandleKind kind) {
diff --git a/mlir/lib/Dialect/Index/IR/IndexDialect.cpp b/mlir/lib/Dialect/Index/IR/IndexDialect.cpp
index d631afa63b9a..183d0e33b252 100644
--- a/mlir/lib/Dialect/Index/IR/IndexDialect.cpp
+++ b/mlir/lib/Dialect/Index/IR/IndexDialect.cpp
@@ -19,7 +19,7 @@ using namespace mlir::index;
void IndexDialect::initialize() {
registerAttributes();
registerOperations();
- declarePromisedInterface<IndexDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, IndexDialect>();
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
index 00b4559658fd..f171bf7cc4be 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
@@ -13,10 +13,8 @@
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
-#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
-#include "mlir/IR/ValueRange.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Interfaces/MemorySlotInterfaces.h"
#include "llvm/ADT/STLExtras.h"
@@ -71,12 +69,8 @@ SmallVector<DestructurableMemorySlot> LLVM::AllocaOp::getDestructurableSlots() {
if (!destructuredType)
return {};
- DenseMap<Attribute, Type> allocaTypeMap;
- for (Attribute index : llvm::make_first_range(destructuredType.value()))
- allocaTypeMap.insert({index, LLVM::LLVMPointerType::get(getContext())});
-
- return {
- DestructurableMemorySlot{{getResult(), getElemType()}, {allocaTypeMap}}};
+ return {DestructurableMemorySlot{{getResult(), getElemType()},
+ *destructuredType}};
}
DenseMap<Attribute, MemorySlot>
@@ -182,17 +176,111 @@ DeletionKind LLVM::StoreOp::removeBlockingUses(
return DeletionKind::Delete;
}
+/// Checks if `slot` can be accessed through the provided access type.
+static bool isValidAccessType(const MemorySlot &slot, Type accessType,
+ const DataLayout &dataLayout) {
+ return dataLayout.getTypeSize(accessType) <=
+ dataLayout.getTypeSize(slot.elemType);
+}
+
LogicalResult LLVM::LoadOp::ensureOnlySafeAccesses(
const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
const DataLayout &dataLayout) {
- return success(getAddr() != slot.ptr || getType() == slot.elemType);
+ return success(getAddr() != slot.ptr ||
+ isValidAccessType(slot, getType(), dataLayout));
}
LogicalResult LLVM::StoreOp::ensureOnlySafeAccesses(
const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
const DataLayout &dataLayout) {
return success(getAddr() != slot.ptr ||
- getValue().getType() == slot.elemType);
+ isValidAccessType(slot, getValue().getType(), dataLayout));
+}
+
+/// Returns the subslot's type at the requested index.
+static Type getTypeAtIndex(const DestructurableMemorySlot &slot,
+ Attribute index) {
+ auto subelementIndexMap =
+ slot.elemType.cast<DestructurableTypeInterface>().getSubelementIndexMap();
+ if (!subelementIndexMap)
+ return {};
+ assert(!subelementIndexMap->empty());
+
+ // Note: Returns a null-type when no entry was found.
+ return subelementIndexMap->lookup(index);
+}
+
+bool LLVM::LoadOp::canRewire(const DestructurableMemorySlot &slot,
+ SmallPtrSetImpl<Attribute> &usedIndices,
+ SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
+ const DataLayout &dataLayout) {
+ if (getVolatile_())
+ return false;
+
+ // A load always accesses the first element of the destructured slot.
+ auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
+ Type subslotType = getTypeAtIndex(slot, index);
+ if (!subslotType)
+ return false;
+
+ // The access can only be replaced when the subslot is read within its bounds.
+ if (dataLayout.getTypeSize(getType()) > dataLayout.getTypeSize(subslotType))
+ return false;
+
+ usedIndices.insert(index);
+ return true;
+}
+
+DeletionKind LLVM::LoadOp::rewire(const DestructurableMemorySlot &slot,
+ DenseMap<Attribute, MemorySlot> &subslots,
+ RewriterBase &rewriter,
+ const DataLayout &dataLayout) {
+ auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
+ auto it = subslots.find(index);
+ assert(it != subslots.end());
+
+ rewriter.modifyOpInPlace(
+ *this, [&]() { getAddrMutable().set(it->getSecond().ptr); });
+ return DeletionKind::Keep;
+}
+
+bool LLVM::StoreOp::canRewire(const DestructurableMemorySlot &slot,
+ SmallPtrSetImpl<Attribute> &usedIndices,
+ SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
+ const DataLayout &dataLayout) {
+ if (getVolatile_())
+ return false;
+
+ // Storing the pointer to memory cannot be dealt with.
+ if (getValue() == slot.ptr)
+ return false;
+
+ // A store always accesses the first element of the destructured slot.
+ auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
+ Type subslotType = getTypeAtIndex(slot, index);
+ if (!subslotType)
+ return false;
+
+  // The access can only be replaced when the subslot is written within its
+  // bounds.
+ if (dataLayout.getTypeSize(getValue().getType()) >
+ dataLayout.getTypeSize(subslotType))
+ return false;
+
+ usedIndices.insert(index);
+ return true;
+}
+
+DeletionKind LLVM::StoreOp::rewire(const DestructurableMemorySlot &slot,
+ DenseMap<Attribute, MemorySlot> &subslots,
+ RewriterBase &rewriter,
+ const DataLayout &dataLayout) {
+ auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
+ auto it = subslots.find(index);
+ assert(it != subslots.end());
+
+ rewriter.modifyOpInPlace(
+ *this, [&]() { getAddrMutable().set(it->getSecond().ptr); });
+ return DeletionKind::Keep;
}
//===----------------------------------------------------------------------===//
@@ -390,10 +478,8 @@ bool LLVM::GEPOp::canRewire(const DestructurableMemorySlot &slot,
auto firstLevelIndex = dyn_cast<IntegerAttr>(getIndices()[1]);
if (!firstLevelIndex)
return false;
- assert(slot.elementPtrs.contains(firstLevelIndex));
- if (!llvm::isa<LLVM::LLVMPointerType>(slot.elementPtrs.at(firstLevelIndex)))
- return false;
mustBeSafelyUsed.emplace_back<MemorySlot>({getResult(), reachedType});
+ assert(slot.elementPtrs.contains(firstLevelIndex));
usedIndices.insert(firstLevelIndex);
return true;
}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index 443e245887ea..630187f220a4 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -287,15 +287,22 @@ getPointerDataLayoutEntry(DataLayoutEntryListRef params, LLVMPointerType type,
}
}
if (currentEntry) {
- return *extractPointerSpecValue(currentEntry, pos) /
- (pos == PtrDLEntryPos::Size ? 1 : kBitsInByte);
+ std::optional<uint64_t> value = extractPointerSpecValue(currentEntry, pos);
+ // If the optional `PtrDLEntryPos::Index` entry is not available, use the
+ // pointer size as the index bitwidth.
+ if (!value && pos == PtrDLEntryPos::Index)
+ value = extractPointerSpecValue(currentEntry, PtrDLEntryPos::Size);
+ bool isSizeOrIndex =
+ pos == PtrDLEntryPos::Size || pos == PtrDLEntryPos::Index;
+ return *value / (isSizeOrIndex ? 1 : kBitsInByte);
}
// If not found, and this is the pointer to the default memory space, assume
// 64-bit pointers.
if (type.getAddressSpace() == 0) {
- return pos == PtrDLEntryPos::Size ? kDefaultPointerSizeBits
- : kDefaultPointerAlignment;
+ bool isSizeOrIndex =
+ pos == PtrDLEntryPos::Size || pos == PtrDLEntryPos::Index;
+ return isSizeOrIndex ? kDefaultPointerSizeBits : kDefaultPointerAlignment;
}
return std::nullopt;
@@ -332,6 +339,16 @@ LLVMPointerType::getPreferredAlignment(const DataLayout &dataLayout,
return dataLayout.getTypePreferredAlignment(get(getContext()));
}
+std::optional<uint64_t>
+LLVMPointerType::getIndexBitwidth(const DataLayout &dataLayout,
+ DataLayoutEntryListRef params) const {
+ if (std::optional<uint64_t> indexBitwidth =
+ getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Index))
+ return *indexBitwidth;
+
+ return dataLayout.getTypeIndexBitwidth(get(getContext()));
+}
+
bool LLVMPointerType::areCompatible(DataLayoutEntryListRef oldLayout,
DataLayoutEntryListRef newLayout) const {
for (DataLayoutEntryInterface newEntry : newLayout) {
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 9e8407451a08..94197e473ce0 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -1044,8 +1044,8 @@ void NVVMDialect::initialize() {
// Support unknown operations because not all NVVM operations are
// registered.
allowUnknownOperations();
- declarePromisedInterface<NVVMDialect, ConvertToLLVMPatternInterface>();
- declarePromisedInterface<NVVMTargetAttr, gpu::TargetAttrInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, NVVMDialect>();
+ declarePromisedInterface<gpu::TargetAttrInterface, NVVMTargetAttr>();
}
LogicalResult NVVMDialect::verifyOperationAttribute(Operation *op,
diff --git a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
index 0f2e75cd7e8b..65b770ae3261 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/ROCDLDialect.cpp
@@ -247,7 +247,7 @@ void ROCDLDialect::initialize() {
// Support unknown operations because not all ROCDL operations are registered.
allowUnknownOperations();
- declarePromisedInterface<ROCDLTargetAttr, gpu::TargetAttrInterface>();
+ declarePromisedInterface<gpu::TargetAttrInterface, ROCDLTargetAttr>();
}
LogicalResult ROCDLDialect::verifyOperationAttribute(Operation *op,
diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp b/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp
index b25c831bc717..3d700fe94e3b 100644
--- a/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp
+++ b/mlir/lib/Dialect/LLVMIR/Transforms/TypeConsistency.cpp
@@ -50,104 +50,6 @@ static bool areBitcastCompatible(DataLayout &layout, Type lhs, Type rhs) {
}
//===----------------------------------------------------------------------===//
-// AddFieldGetterToStructDirectUse
-//===----------------------------------------------------------------------===//
-
-/// Gets the type of the first subelement of `type` if `type` is destructurable,
-/// nullptr otherwise.
-static Type getFirstSubelementType(Type type) {
- auto destructurable = dyn_cast<DestructurableTypeInterface>(type);
- if (!destructurable)
- return nullptr;
-
- Type subelementType = destructurable.getTypeAtIndex(
- IntegerAttr::get(IntegerType::get(type.getContext(), 32), 0));
- if (subelementType)
- return subelementType;
-
- return nullptr;
-}
-
-/// Extracts a pointer to the first field of an `elemType` from the address
-/// pointer of the provided MemOp, and rewires the MemOp so it uses that pointer
-/// instead.
-template <class MemOp>
-static void insertFieldIndirection(MemOp op, PatternRewriter &rewriter,
- Type elemType) {
- PatternRewriter::InsertionGuard guard(rewriter);
-
- rewriter.setInsertionPointAfterValue(op.getAddr());
- SmallVector<GEPArg> firstTypeIndices{0, 0};
-
- Value properPtr = rewriter.create<GEPOp>(
- op->getLoc(), LLVM::LLVMPointerType::get(op.getContext()), elemType,
- op.getAddr(), firstTypeIndices);
-
- rewriter.modifyOpInPlace(op,
- [&]() { op.getAddrMutable().assign(properPtr); });
-}
-
-template <>
-LogicalResult AddFieldGetterToStructDirectUse<LoadOp>::matchAndRewrite(
- LoadOp load, PatternRewriter &rewriter) const {
- PatternRewriter::InsertionGuard guard(rewriter);
-
- Type inconsistentElementType =
- isElementTypeInconsistent(load.getAddr(), load.getType());
- if (!inconsistentElementType)
- return failure();
- Type firstType = getFirstSubelementType(inconsistentElementType);
- if (!firstType)
- return failure();
- DataLayout layout = DataLayout::closest(load);
- if (!areBitcastCompatible(layout, firstType, load.getResult().getType()))
- return failure();
-
- insertFieldIndirection<LoadOp>(load, rewriter, inconsistentElementType);
-
- // If the load does not use the first type but a type that can be casted from
- // it, add a bitcast and change the load type.
- if (firstType != load.getResult().getType()) {
- rewriter.setInsertionPointAfterValue(load.getResult());
- BitcastOp bitcast = rewriter.create<BitcastOp>(
- load->getLoc(), load.getResult().getType(), load.getResult());
- rewriter.modifyOpInPlace(load,
- [&]() { load.getResult().setType(firstType); });
- rewriter.replaceAllUsesExcept(load.getResult(), bitcast.getResult(),
- bitcast);
- }
-
- return success();
-}
-
-template <>
-LogicalResult AddFieldGetterToStructDirectUse<StoreOp>::matchAndRewrite(
- StoreOp store, PatternRewriter &rewriter) const {
- PatternRewriter::InsertionGuard guard(rewriter);
-
- Type inconsistentElementType =
- isElementTypeInconsistent(store.getAddr(), store.getValue().getType());
- if (!inconsistentElementType)
- return failure();
- Type firstType = getFirstSubelementType(inconsistentElementType);
- if (!firstType)
- return failure();
-
- DataLayout layout = DataLayout::closest(store);
- // Check that the first field has the right type or can at least be bitcast
- // to the right type.
- if (!areBitcastCompatible(layout, firstType, store.getValue().getType()))
- return failure();
-
- insertFieldIndirection<StoreOp>(store, rewriter, inconsistentElementType);
-
- rewriter.modifyOpInPlace(
- store, [&]() { store.getValueMutable().assign(store.getValue()); });
-
- return success();
-}
-
-//===----------------------------------------------------------------------===//
// CanonicalizeAlignedGep
//===----------------------------------------------------------------------===//
@@ -684,9 +586,6 @@ struct LLVMTypeConsistencyPass
: public LLVM::impl::LLVMTypeConsistencyBase<LLVMTypeConsistencyPass> {
void runOnOperation() override {
RewritePatternSet rewritePatterns(&getContext());
- rewritePatterns.add<AddFieldGetterToStructDirectUse<LoadOp>>(&getContext());
- rewritePatterns.add<AddFieldGetterToStructDirectUse<StoreOp>>(
- &getContext());
rewritePatterns.add<CanonicalizeAlignedGep>(&getContext());
rewritePatterns.add<SplitStores>(&getContext(), maxVectorSplitSize);
rewritePatterns.add<BitcastStores>(&getContext());
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
index a6936fde4370..9e50c355c504 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp
@@ -123,16 +123,16 @@ void mlir::linalg::LinalgDialect::initialize() {
addInterfaces<LinalgInlinerInterface>();
- declarePromisedInterface<GenericOp, mesh::ShardingInterface>();
+ declarePromisedInterface<mesh::ShardingInterface, GenericOp>();
declarePromisedInterfaces<mesh::ShardingInterface,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
>();
- declarePromisedInterface<CopyOp, SubsetOpInterface>();
- declarePromisedInterface<CopyOp, SubsetInsertionOpInterface>();
- declarePromisedInterface<IndexOp, ValueBoundsOpInterface>();
- declarePromisedInterface<linalg::GenericOp, TilingInterface>();
- declarePromisedInterface<linalg::GenericOp, PartialReductionOpInterface>();
+ declarePromisedInterface<SubsetOpInterface, CopyOp>();
+ declarePromisedInterface<SubsetInsertionOpInterface, CopyOp>();
+ declarePromisedInterface<ValueBoundsOpInterface, IndexOp>();
+ declarePromisedInterface<TilingInterface, linalg::GenericOp>();
+ declarePromisedInterface<PartialReductionOpInterface, linalg::GenericOp>();
declarePromisedInterfaces<TilingInterface,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp
index ae2a34bcf3e5..3e85559e1ec0 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgMatchOps.cpp
@@ -12,8 +12,8 @@
#include "mlir/Dialect/Linalg/IR/LinalgInterfaces.h"
#include "mlir/Dialect/Linalg/TransformOps/Syntax.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformTypes.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Interfaces/FunctionImplementation.h"
#include "llvm/Support/Debug.h"
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index d82a6beb1086..88819cd96435 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3269,17 +3269,24 @@ DiagnosedSilenceableFailure transform::FlattenElementwiseLinalgOp::applyToOne(
transform::ApplyToEachResultList &results,
transform::TransformState &state) {
rewriter.setInsertionPoint(target);
- if (target.getNumLoops() <= 1)
+ if (!isElementwise(target))
+ return mlir::emitSilenceableFailure(target->getLoc())
+ << "only elementwise flattening is supported";
+
+ // If rank <= 1, do nothing
+ if (target.getNumLoops() <= 1) {
+ results.push_back(target);
return DiagnosedSilenceableFailure::success();
+ }
+
+ // Attempt to flatten all dims to one.
ReassociationIndices reassociation(target.getNumLoops());
std::iota(reassociation.begin(), reassociation.end(), 0);
auto maybeFlattened =
- (isElementwise(target))
- ? collapseOpIterationDims(target, reassociation, rewriter)
- : FailureOr<CollapseResult>(rewriter.notifyMatchFailure(
- target, "only elementwise flattening is supported"));
+ collapseOpIterationDims(target, reassociation, rewriter);
if (failed(maybeFlattened))
- return emitDefaultSilenceableFailure(target);
+ return mlir::emitSilenceableFailure(target->getLoc())
+ << "attempted to flatten, but failed";
results.push_back(maybeFlattened->collapsedOp);
rewriter.replaceOp(target, maybeFlattened->results);
return DiagnosedSilenceableFailure::success();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
index 5ceb85e7d990..7fd88dec71d4 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp
@@ -17,6 +17,7 @@
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/IR/Dominance.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include <optional>
@@ -552,6 +553,305 @@ private:
ControlPropagationFn controlFn;
};
+/// Project dimsPos to the inner-most non-unit dim pos with reassocIndices.
+///
+/// For example, given dimsPos [0, 2], reassocIndices [[0, 1], [2, 3]], and
+/// targetShape [16, 16, 32, 1], it returns [1, 2]. Because for pos 0, the
+/// inner-most projected dim in pos [0, 1] is 1. And for pos 2, the inner-most
+/// non-unit projected dims in pos [2, 3] is 2.
+///
+/// If all candidates in a reassociation are unit dims, it chooses the
+/// inner-most dim pos.
+static SmallVector<int64_t>
+projectToInnerMostNonUnitDimsPos(ArrayRef<int64_t> dimsPos,
+ ArrayRef<ReassociationIndices> reassocIndices,
+ ArrayRef<int64_t> targetShape) {
+ SmallVector<int64_t> projectedDimsPos;
+ for (auto pos : dimsPos) {
+ // In the case all dims are unit, this will return the inner-most one.
+ int64_t projectedPos = reassocIndices[pos].back();
+ for (auto i : llvm::reverse(reassocIndices[pos])) {
+ int64_t dim = targetShape[i];
+ if (dim > 1 || ShapedType::isDynamic(dim)) {
+ projectedPos = i;
+ break;
+ }
+ }
+ projectedDimsPos.push_back(projectedPos);
+ }
+ return projectedDimsPos;
+}
+
+/// Check if all dims in dimsPos are divisible by the corresponding tile sizes.
+static bool isDimsDivisibleByTileSizes(ArrayRef<int64_t> dimsPos,
+ ArrayRef<int64_t> shape,
+ ArrayRef<int64_t> tileSizes) {
+ for (auto [pos, tileSize] : llvm::zip_equal(dimsPos, tileSizes)) {
+ int64_t dim = shape[pos];
+ if (ShapedType::isDynamic(dim) || (dim % tileSize) != 0)
+ return false;
+ }
+ return true;
+}
+
+/// Permute the reassociation indices and reindex them in the sequence order.
+/// Returns the next dim pos in the sequence.
+///
+/// For example, given reassocIndices [[0, 1], [2]] and permutation [1, 0], it
+/// applies the permutation to get [[2], [0, 1]] and reindexes the indices into
+/// [[0], [1, 2]].
+static int64_t applyPermutationAndReindexReassoc(
+ SmallVector<ReassociationIndices> &reassocIndices,
+ ArrayRef<int64_t> permutation) {
+ applyPermutationToVector<ReassociationIndices>(reassocIndices, permutation);
+ int64_t nextPos = 0;
+ for (ReassociationIndices &indices : reassocIndices) {
+ for (auto &index : indices) {
+ index = nextPos;
+ nextPos += 1;
+ }
+ }
+ return nextPos;
+}
+
+/// Bubble up pack op through collapse shape op when the packed dims can be
+/// projected to the dims before collapsing. This is possible when the inner
+/// tile sizes can divide the projected dims.
+///
+/// For example:
+///
+/// %collapsed = tensor.collapse_shape %in [[0, 1], 2]
+/// : tensor<?x16x4xf32> into tensor<?x4xf32>
+/// %pack = tensor.pack %collapsed outer_dims_perm = [0, 1]
+/// inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %empty
+/// : tensor<?x4xf32> -> tensor<?x4x8x1xf32>
+///
+/// can be transformed into:
+///
+/// %pack = tensor.pack %in outer_dims_perm = [1, 2]
+/// inner_dims_pos = [1, 2] inner_tiles = [8, 1] into %empty
+/// : tensor<?x16x4xf32> -> tensor<?x2x4x8x1xf32>
+/// %collapsed = tensor.collapse_shape %pack [[0, 1], 2, 3, 4]
+///       : tensor<?x2x4x8x1xf32> into tensor<?x4x8x1xf32>
+static LogicalResult
+bubbleUpPackOpThroughCollapseShape(tensor::CollapseShapeOp collapseOp,
+ tensor::PackOp packOp,
+ PatternRewriter &rewriter) {
+ SmallVector<int64_t> innerTileSizes = packOp.getStaticTiles();
+ ArrayRef<int64_t> innerDimsPos = packOp.getInnerDimsPos();
+ ArrayRef<int64_t> outerDimsPerm = packOp.getOuterDimsPerm();
+
+ ArrayRef<int64_t> srcShape = collapseOp.getSrcType().getShape();
+ SmallVector<ReassociationIndices> reassocIndices =
+ collapseOp.getReassociationIndices();
+ // Project inner tile pos to the dim pos before collapsing. For example, if
+ // dims [x, y] is collapsed into [z], packing on dim z can be projected back
+ // to pack on dim y.
+ //
+ // Project to inner-most non-unit dims to increase the chance that they can be
+ // divided by the inner tile sizes. This is correct because for [..., x, 1],
+ // packing on dim 1 is equivalent to packing on dim x.
+ SmallVector<int64_t> projectedInnerDimsPos =
+ projectToInnerMostNonUnitDimsPos(innerDimsPos, reassocIndices, srcShape);
+
+ if (!isDimsDivisibleByTileSizes(projectedInnerDimsPos, srcShape,
+ innerTileSizes)) {
+ return failure();
+ }
+ // Expand the outer dims permutation with the associated source dims for the
+ // new permutation after bubbling. This is because moving a collapsed dim is
+ // equivalent to moving the associated source dims together.
+ SmallVector<int64_t> newOuterDimsPerm;
+ for (auto outerPos : outerDimsPerm) {
+ newOuterDimsPerm.insert(newOuterDimsPerm.end(),
+ reassocIndices[outerPos].begin(),
+ reassocIndices[outerPos].end());
+ }
+
+ auto emptyOp = tensor::PackOp::createDestinationTensor(
+ rewriter, packOp.getLoc(), collapseOp.getSrc(), packOp.getMixedTiles(),
+ projectedInnerDimsPos, newOuterDimsPerm);
+ auto newPackOp = rewriter.create<tensor::PackOp>(
+ packOp.getLoc(), collapseOp.getSrc(), emptyOp, projectedInnerDimsPos,
+ packOp.getMixedTiles(), packOp.getPaddingValue(), newOuterDimsPerm);
+
+ SmallVector<ReassociationIndices> newReassocIndices = reassocIndices;
+ // First apply the permutation on the reassociations of the outer dims.
+ // For example given the permutation [1, 0], the reassociations [[0, 1], [2]]
+ // -> [[0], [1, 2]]
+ int64_t nextPos =
+ applyPermutationAndReindexReassoc(newReassocIndices, outerDimsPerm);
+ // Then add direct mapping for the inner tile dims.
+ for (size_t i = 0; i < innerDimsPos.size(); ++i) {
+ newReassocIndices.push_back({nextPos});
+ nextPos += 1;
+ }
+
+ auto newCollapseOp = rewriter.create<tensor::CollapseShapeOp>(
+ collapseOp.getLoc(), packOp.getType(), newPackOp, newReassocIndices);
+ rewriter.replaceOp(packOp, newCollapseOp);
+
+ return success();
+}
+
+class BubbleUpPackOpThroughReshapeOp final
+ : public OpRewritePattern<tensor::PackOp> {
+public:
+ BubbleUpPackOpThroughReshapeOp(MLIRContext *context, ControlPropagationFn fun)
+ : OpRewritePattern<tensor::PackOp>(context), controlFn(std::move(fun)) {}
+
+ LogicalResult matchAndRewrite(tensor::PackOp packOp,
+ PatternRewriter &rewriter) const override {
+ Operation *srcOp = packOp.getSource().getDefiningOp();
+ // Currently only support when the pack op is the only user.
+ if (!srcOp || !(srcOp->getNumResults() == 1) ||
+ !srcOp->getResult(0).hasOneUse()) {
+ return failure();
+ }
+ // Currently only support static inner tile sizes.
+ if (llvm::any_of(packOp.getStaticTiles(), [](int64_t size) {
+ return ShapedType::isDynamic(size);
+ })) {
+ return failure();
+ }
+
+ // User controlled propagation function.
+ if (!controlFn(srcOp))
+ return failure();
+
+ return TypeSwitch<Operation *, LogicalResult>(srcOp)
+ .Case([&](tensor::CollapseShapeOp op) {
+ return bubbleUpPackOpThroughCollapseShape(op, packOp, rewriter);
+ })
+ .Default([](Operation *) { return failure(); });
+ }
+
+private:
+ ControlPropagationFn controlFn;
+};
+
+/// Push down unpack op through expand shape op when the packed dims can be
+/// projected to the dims after expanding. This is possible when the inner tile
+/// sizes can divide the projected dims.
+///
+/// For example:
+///
+/// %unpack = tensor.unpack %in outer_dims_perm = [0, 1]
+/// inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %empty
+/// : tensor<?x32x8x8xf32> -> tensor<?x256xf32>
+/// %expanded = tensor.expand_shape %unpack [[0, 1], [2]]
+/// : tensor<?x256xf32> into tensor<?x256x256xf32>
+///
+/// can be transformed into:
+///
+///     %expanded = tensor.expand_shape %in [[0, 1], [2], [3], [4]]
+/// : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
+/// %unpack = tensor.unpack %expanded outer_dims_perm = [0, 1, 2]
+/// inner_dims_pos = [1, 2] inner_tiles = [8, 8] into %empty
+/// : tensor<?x32x32x8x8xf32> -> tensor<?x256x256xf32>
+static LogicalResult
+pushDownUnPackOpThroughExpandShape(tensor::UnPackOp unPackOp,
+ tensor::ExpandShapeOp expandOp,
+ PatternRewriter &rewriter) {
+ SmallVector<int64_t> innerTileSizes = unPackOp.getStaticTiles();
+ ArrayRef<int64_t> innerDimsPos = unPackOp.getInnerDimsPos();
+ ArrayRef<int64_t> outerDimsPerm = unPackOp.getOuterDimsPerm();
+
+ ArrayRef<int64_t> dstShape = expandOp.getType().getShape();
+ SmallVector<ReassociationIndices> reassocIndices =
+ expandOp.getReassociationIndices();
+ // Project inner tile pos to the dim pos after expanding. For example, if dims
+ // [z] is expanded into [x, y], unpacking on dim z can be projected to unpack
+ // on dim y.
+ //
+ // Project to inner-most non-unit dims to increase the chance that they can be
+ // divided by the inner tile sizes. This is correct because for [..., x, 1],
+ // unpacking on dim 1 is equivalent to unpacking on dim x.
+ SmallVector<int64_t> projectedInnerDimsPos =
+ projectToInnerMostNonUnitDimsPos(innerDimsPos, reassocIndices, dstShape);
+
+ if (!isDimsDivisibleByTileSizes(projectedInnerDimsPos, dstShape,
+ innerTileSizes)) {
+ return failure();
+ }
+ // Expand the outer dims permutation with the associated expanded dims for the
+ // new permutation after pushing. This is because moving a source dim is
+ // equivalent to moving the associated expanded dims together.
+ SmallVector<int64_t> newOuterDimsPerm;
+ for (auto outerPos : outerDimsPerm) {
+ newOuterDimsPerm.insert(newOuterDimsPerm.end(),
+ reassocIndices[outerPos].begin(),
+ reassocIndices[outerPos].end());
+ }
+
+ SmallVector<ReassociationIndices> newReassocIndices = reassocIndices;
+ // First apply the permutation on the reassociations of the outer dims.
+ // For example given the permutation [1, 0], the reassociations [[0, 1], [2]]
+ // -> [[0], [1, 2]]
+ int64_t nextPos =
+ applyPermutationAndReindexReassoc(newReassocIndices, outerDimsPerm);
+ // Then add direct mapping for the inner tile dims.
+ for (size_t i = 0; i < innerDimsPos.size(); ++i) {
+ newReassocIndices.push_back({nextPos});
+ nextPos += 1;
+ }
+
+ RankedTensorType newExpandType =
+ tensor::PackOp::inferPackedType(expandOp.getType(), innerTileSizes,
+ projectedInnerDimsPos, newOuterDimsPerm);
+ auto newExpandOp = rewriter.create<tensor::ExpandShapeOp>(
+ expandOp.getLoc(), newExpandType, unPackOp.getSource(),
+ newReassocIndices);
+
+ auto emptyOp = tensor::UnPackOp::createDestinationTensor(
+ rewriter, unPackOp.getLoc(), newExpandOp, unPackOp.getMixedTiles(),
+ projectedInnerDimsPos, newOuterDimsPerm);
+ auto newUnPackOp = rewriter.create<tensor::UnPackOp>(
+ unPackOp.getLoc(), newExpandOp.getResult(), emptyOp,
+ projectedInnerDimsPos, unPackOp.getMixedTiles(), newOuterDimsPerm);
+ rewriter.replaceOp(expandOp, newUnPackOp);
+
+ return success();
+}
+
+class PushDownUnPackOpThroughReshapeOp final
+ : public OpRewritePattern<tensor::UnPackOp> {
+public:
+ PushDownUnPackOpThroughReshapeOp(MLIRContext *context,
+ ControlPropagationFn fun)
+ : OpRewritePattern<tensor::UnPackOp>(context), controlFn(std::move(fun)) {
+ }
+
+ LogicalResult matchAndRewrite(tensor::UnPackOp unPackOp,
+ PatternRewriter &rewriter) const override {
+ Value result = unPackOp.getResult();
+ // Currently only support unpack op with the single user.
+ if (!result.hasOneUse()) {
+ return failure();
+ }
+ // Currently only support static inner tile sizes.
+ if (llvm::any_of(unPackOp.getStaticTiles(), [](int64_t size) {
+ return ShapedType::isDynamic(size);
+ })) {
+ return failure();
+ }
+
+ Operation *consumerOp = *result.user_begin();
+ // User controlled propagation function.
+ if (!controlFn(consumerOp))
+ return failure();
+
+ return TypeSwitch<Operation *, LogicalResult>(consumerOp)
+ .Case([&](tensor::ExpandShapeOp op) {
+ return pushDownUnPackOpThroughExpandShape(unPackOp, op, rewriter);
+ })
+ .Default([](Operation *) { return failure(); });
+ }
+
+private:
+ ControlPropagationFn controlFn;
+};
+
// TODO: Relax this restriction. We should unpack a generic op also
// in the presence of multiple unpack ops as producers.
/// Return the unpacked operand, if present, for the current generic op.
@@ -774,6 +1074,7 @@ void mlir::linalg::populateDataLayoutPropagationPatterns(
const ControlPropagationFn &controlPackUnPackPropagation) {
patterns
.insert<BubbleUpPackOpThroughGenericOpPattern, BubbleUpPackThroughPadOp,
- PushDownUnPackOpThroughGenericOp, PushDownUnPackThroughPadOp>(
+ BubbleUpPackOpThroughReshapeOp, PushDownUnPackOpThroughGenericOp,
+ PushDownUnPackThroughPadOp, PushDownUnPackOpThroughReshapeOp>(
patterns.getContext(), controlPackUnPackPropagation);
}
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 30aed850bed8..462f692615fa 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -304,6 +304,28 @@ static void calculateTileOffsetsAndSizes(
}
}
+/// Returns a vector of bools representing if, for each axis, `op` can be tiled
+/// without incurring in a race condition and thus it is thread-safe to do the
+/// tiling. This is checked by iterating over numThreads and ensuring that the
+/// corresponding iterator type is "parallel". If it is not, then we know that
+/// such dimension is unsafe to tile.
+SmallVector<bool> safeToTileToForall(mlir::MLIRContext *ctx, LinalgOp linalgOp,
+ ArrayRef<OpFoldResult> numThreads) {
+ auto iterators = linalgOp.getIteratorTypesArray();
+ SmallVector<bool> safeToTile(numThreads.size(), true);
+
+ for (unsigned i = 0, e = numThreads.size(); i != e; i++) {
+ if (auto attr = llvm::dyn_cast_if_present<Attribute>(numThreads[i])) {
+ if (cast<IntegerAttr>(attr).getValue().getSExtValue() > 1) {
+ safeToTile[i] = iterators[i] == utils::IteratorType::parallel;
+ }
+ } else {
+ safeToTile[i] = iterators[i] == utils::IteratorType::parallel;
+ }
+ }
+ return safeToTile;
+}
+
/// Rewrite a TilingInterface `op` to a tiled `scf.forall`. The
/// tiling is specified by the number of tiles/threads `numThreads` and the
/// optional nominal tile size `nominalTileSizes`. If `nominalTilSizes` is
@@ -314,8 +336,10 @@ static void calculateTileOffsetsAndSizes(
/// size of data.
/// It is the user's responsibility to ensure that `numThreads` is a valid
/// tiling specification (i.e. that only tiles parallel dimensions, e.g. in the
-/// Linalg case). If `omitTileOffsetBoundsCheck` is true, then the function will
-/// assume that `tileSize[i] * (numThread[i] -1) <= dimSize[i]` holds.
+/// Linalg case). If the dimension is not parallelizable, a warning is issued to
+/// notify the user that the generated code is not safe to parallelize. If
+/// `omitTileOffsetBoundsCheck` is true, then the function will assume that
+/// `tileSize[i] * (numThread[i] -1) <= dimSize[i]` holds.
static FailureOr<ForallTilingResult> tileToForallOpImpl(
RewriterBase &b, TilingInterface op, ArrayRef<OpFoldResult> numThreads,
std::optional<ArrayRef<OpFoldResult>> nominalTileSizes,
@@ -344,6 +368,16 @@ static FailureOr<ForallTilingResult> tileToForallOpImpl(
return getValueOrCreateConstantIndexOp(b, loc, ofr);
}));
+ LinalgOp linalgOp = dyn_cast<LinalgOp>(op.getOperation());
+ if (linalgOp) {
+ // Check if tiling is thread safe and print a warning if not.
+ SmallVector<bool> tilingSafety =
+ safeToTileToForall(b.getContext(), linalgOp, numThreads);
+ for (size_t i = 0; i < tilingSafety.size(); i++)
+ if (!tilingSafety[i])
+ op.emitWarning() << "tiling is not thread safe at axis #" << i;
+ }
+
// 1. Create the ForallOp. We don't use the lambda body-builder
// version because we require the use of RewriterBase in the body, so we
// manually move the insertion point to the body below.
diff --git a/mlir/lib/Dialect/Math/IR/MathDialect.cpp b/mlir/lib/Dialect/Math/IR/MathDialect.cpp
index a71b24cb1b97..285b5ca59405 100644
--- a/mlir/lib/Dialect/Math/IR/MathDialect.cpp
+++ b/mlir/lib/Dialect/Math/IR/MathDialect.cpp
@@ -35,5 +35,5 @@ void mlir::math::MathDialect::initialize() {
#include "mlir/Dialect/Math/IR/MathOps.cpp.inc"
>();
addInterfaces<MathInlinerInterface>();
- declarePromisedInterface<MathDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, MathDialect>();
}
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp
index 41082a85a485..3a8bd12ba258 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefDialect.cpp
@@ -47,14 +47,14 @@ void mlir::memref::MemRefDialect::initialize() {
#include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"
>();
addInterfaces<MemRefInlinerInterface>();
- declarePromisedInterface<MemRefDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, MemRefDialect>();
declarePromisedInterfaces<bufferization::AllocationOpInterface, AllocOp,
AllocaOp, ReallocOp>();
declarePromisedInterfaces<RuntimeVerifiableOpInterface, CastOp, ExpandShapeOp,
LoadOp, ReinterpretCastOp, StoreOp, SubViewOp>();
declarePromisedInterfaces<ValueBoundsOpInterface, AllocOp, AllocaOp, CastOp,
DimOp, GetGlobalOp, RankOp, SubViewOp>();
- declarePromisedInterface<MemRefType, DestructurableTypeInterface>();
+ declarePromisedInterface<DestructurableTypeInterface, MemRefType>();
}
/// Finds the unique dealloc operation (if one exists) for `allocValue`.
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp
index 7be4056fb2fc..6c5250d527ad 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefMemorySlot.cpp
@@ -120,11 +120,8 @@ memref::AllocaOp::getDestructurableSlots() {
if (!destructuredType)
return {};
- DenseMap<Attribute, Type> indexMap;
- for (auto const &[index, type] : *destructuredType)
- indexMap.insert({index, MemRefType::get({}, type)});
-
- return {DestructurableMemorySlot{{getMemref(), memrefType}, indexMap}};
+ return {
+ DestructurableMemorySlot{{getMemref(), memrefType}, *destructuredType}};
}
DenseMap<Attribute, MemorySlot>
diff --git a/mlir/lib/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.cpp b/mlir/lib/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.cpp
new file mode 100644
index 000000000000..bbb269bd0016
--- /dev/null
+++ b/mlir/lib/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.cpp
@@ -0,0 +1,48 @@
+//===- BufferViewFlowOpInterfaceImpl.cpp - Buffer View Flow Analysis ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/MemRef/Transforms/BufferViewFlowOpInterfaceImpl.h"
+
+#include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+
+using namespace mlir;
+using namespace mlir::bufferization;
+
+namespace mlir {
+namespace memref {
+namespace {
+
+struct ReallocOpInterface
+ : public BufferViewFlowOpInterface::ExternalModel<ReallocOpInterface,
+ ReallocOp> {
+ void
+ populateDependencies(Operation *op,
+ RegisterDependenciesFn registerDependenciesFn) const {
+ auto reallocOp = cast<ReallocOp>(op);
+ // memref.realloc may return the source operand.
+ registerDependenciesFn(reallocOp.getSource(), reallocOp.getResult());
+ }
+
+ bool mayBeTerminalBuffer(Operation *op, Value value) const {
+ // The return value of memref.realloc is a terminal buffer because the op
+ // may return a newly allocated buffer.
+ return true;
+ }
+};
+
+} // namespace
+} // namespace memref
+} // namespace mlir
+
+void memref::registerBufferViewFlowOpInterfaceExternalModels(
+ DialectRegistry &registry) {
+ registry.addExtension(+[](MLIRContext *ctx, memref::MemRefDialect *dialect) {
+ ReallocOp::attachInterface<ReallocOpInterface>(*ctx);
+ });
+}
diff --git a/mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt b/mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt
index 08b7eab726eb..f150ac7ac2d6 100644
--- a/mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/MemRef/Transforms/CMakeLists.txt
@@ -1,5 +1,6 @@
add_mlir_dialect_library(MLIRMemRefTransforms
AllocationOpInterfaceImpl.cpp
+ BufferViewFlowOpInterfaceImpl.cpp
ComposeSubView.cpp
ExpandOps.cpp
ExpandRealloc.cpp
@@ -27,6 +28,7 @@ add_mlir_dialect_library(MLIRMemRefTransforms
MLIRArithDialect
MLIRArithTransforms
MLIRBufferizationDialect
+ MLIRBufferizationTransforms
MLIRDialectUtils
MLIRFuncDialect
MLIRGPUDialect
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index ddb9676eb4f6..5bca8e85f889 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -79,7 +79,7 @@ void SCFDialect::initialize() {
declarePromisedInterfaces<bufferization::BufferizableOpInterface, ConditionOp,
ExecuteRegionOp, ForOp, IfOp, IndexSwitchOp,
ForallOp, InParallelOp, WhileOp, YieldOp>();
- declarePromisedInterface<ForOp, ValueBoundsOpInterface>();
+ declarePromisedInterface<ValueBoundsOpInterface, ForOp>();
}
/// Default callback for IfOp builders. Inserts a yield without arguments.
diff --git a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
index 4d8d93f7aac7..c09184148208 100644
--- a/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
+++ b/mlir/lib/Dialect/SCF/TransformOps/SCFTransformOps.cpp
@@ -384,7 +384,7 @@ void transform::TakeAssumedBranchOp::getEffects(
}
//===----------------------------------------------------------------------===//
-// LoopFuseSibling
+// LoopFuseSiblingOp
//===----------------------------------------------------------------------===//
/// Check if `target` and `source` are siblings, in the context that `target`
@@ -408,7 +408,7 @@ static DiagnosedSilenceableFailure isOpSibling(Operation *target,
// Check if fusion will violate dominance.
DominanceInfo domInfo(source);
if (target->isBeforeInBlock(source)) {
- // Since, `target` is before `source`, all users of results of `target`
+ // Since `target` is before `source`, all users of results of `target`
// need to be dominated by `source`.
for (Operation *user : target->getUsers()) {
if (!domInfo.properlyDominates(source, user, /*enclosingOpOk=*/false)) {
@@ -424,9 +424,8 @@ static DiagnosedSilenceableFailure isOpSibling(Operation *target,
// Check if operands of `target` are dominated by `source`.
for (Value operand : target->getOperands()) {
Operation *operandOp = operand.getDefiningOp();
- // If operand does not have a defining operation, it is a block arguement,
- // which will always dominate `source`, since `target` and `source` are in
- // the same block and the operand dominated `source` before.
+ // Operands without defining operations are block arguments. When `target`
+ // and `source` occur in the same block, these operands dominate `source`.
if (!operandOp)
continue;
@@ -441,8 +440,11 @@ static DiagnosedSilenceableFailure isOpSibling(Operation *target,
bool failed = false;
OpOperand *failedValue = nullptr;
visitUsedValuesDefinedAbove(target->getRegions(), [&](OpOperand *operand) {
- if (!domInfo.properlyDominates(operand->getOwner(), source,
- /*enclosingOpOk=*/false)) {
+ Operation *operandOp = operand->get().getDefiningOp();
+ if (operandOp && !domInfo.properlyDominates(operandOp, source,
+ /*enclosingOpOk=*/false)) {
+ // `operand` is not an argument of an enclosing block and the defining
+ // op of `operand` is outside `target` but does not dominate `source`.
failed = true;
failedValue = operand;
}
@@ -457,12 +459,11 @@ static DiagnosedSilenceableFailure isOpSibling(Operation *target,
return DiagnosedSilenceableFailure::success();
}
-/// Check if `target` can be fused into `source`.
+/// Check if `target` scf.forall can be fused into `source` scf.forall.
///
-/// This is a simple check that just checks if both loops have same
-/// bounds, steps and mapping. This check does not ensure that the side effects
-/// of `target` are independent of `source` or vice-versa. It is the
-/// responsibility of the caller to ensure that.
+/// This simply checks if both loops have the same bounds, steps and mapping.
+/// No attempt is made at checking that the side effects of `target` and
+/// `source` are independent of each other.
static bool isForallWithIdenticalConfiguration(Operation *target,
Operation *source) {
auto targetOp = dyn_cast<scf::ForallOp>(target);
@@ -476,21 +477,27 @@ static bool isForallWithIdenticalConfiguration(Operation *target,
targetOp.getMapping() == sourceOp.getMapping();
}
-/// Fuse `target` into `source` assuming they are siblings and indepndent.
-/// TODO: Add fusion for more operations. Currently, we handle only scf.forall.
-static Operation *fuseSiblings(Operation *target, Operation *source,
- RewriterBase &rewriter) {
- auto targetOp = dyn_cast<scf::ForallOp>(target);
- auto sourceOp = dyn_cast<scf::ForallOp>(source);
+/// Check if `target` scf.for can be fused into `source` scf.for.
+///
+/// This simply checks if both loops have the same bounds and steps. No attempt
+/// is made at checking that the side effects of `target` and `source` are
+/// independent of each other.
+static bool isForWithIdenticalConfiguration(Operation *target,
+ Operation *source) {
+ auto targetOp = dyn_cast<scf::ForOp>(target);
+ auto sourceOp = dyn_cast<scf::ForOp>(source);
if (!targetOp || !sourceOp)
- return nullptr;
- return fuseIndependentSiblingForallLoops(targetOp, sourceOp, rewriter);
+ return false;
+
+ return targetOp.getLowerBound() == sourceOp.getLowerBound() &&
+ targetOp.getUpperBound() == sourceOp.getUpperBound() &&
+ targetOp.getStep() == sourceOp.getStep();
}
DiagnosedSilenceableFailure
-transform::LoopFuseSibling::apply(transform::TransformRewriter &rewriter,
- transform::TransformResults &results,
- transform::TransformState &state) {
+transform::LoopFuseSiblingOp::apply(transform::TransformRewriter &rewriter,
+ transform::TransformResults &results,
+ transform::TransformState &state) {
auto targetOps = state.getPayloadOps(getTarget());
auto sourceOps = state.getPayloadOps(getSource());
@@ -510,13 +517,18 @@ transform::LoopFuseSibling::apply(transform::TransformRewriter &rewriter,
if (!diag.succeeded())
return diag;
- // Check if the target can be fused into source.
- if (!isForallWithIdenticalConfiguration(target, source)) {
+ Operation *fusedLoop;
+ /// TODO: Support fusion for loop-like ops besides scf.for and scf.forall.
+ if (isForWithIdenticalConfiguration(target, source)) {
+ fusedLoop = fuseIndependentSiblingForLoops(
+ cast<scf::ForOp>(target), cast<scf::ForOp>(source), rewriter);
+ } else if (isForallWithIdenticalConfiguration(target, source)) {
+ fusedLoop = fuseIndependentSiblingForallLoops(
+ cast<scf::ForallOp>(target), cast<scf::ForallOp>(source), rewriter);
+ } else
return emitSilenceableFailure(target->getLoc())
<< "operations cannot be fused";
- }
- Operation *fusedLoop = fuseSiblings(target, source, rewriter);
assert(fusedLoop && "failed to fuse operations");
results.set(cast<OpResult>(getFusedLoop()), {fusedLoop});
diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
index a5bff0a892c3..a30e349d4913 100644
--- a/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
@@ -220,7 +220,7 @@ LogicalResult mlir::scf::peelForLoopFirstIteration(RewriterBase &b, ForOp forOp,
auto stepInt = getConstantIntValue(forOp.getStep());
// Peeling is not needed if there is one or less iteration.
- if (lbInt && ubInt && stepInt && (*ubInt - *lbInt) / *stepInt <= 1)
+ if (lbInt && ubInt && stepInt && ceil(float(*ubInt - *lbInt) / *stepInt) <= 1)
return failure();
AffineExpr lbSymbol, stepSymbol;
diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
index 502d7e197a6f..914aeb4fa79f 100644
--- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp
@@ -910,61 +910,98 @@ scf::ForallOp mlir::fuseIndependentSiblingForallLoops(scf::ForallOp target,
unsigned numTargetOuts = target.getNumResults();
unsigned numSourceOuts = source.getNumResults();
- OperandRange targetOuts = target.getOutputs();
- OperandRange sourceOuts = source.getOutputs();
-
// Create fused shared_outs.
SmallVector<Value> fusedOuts;
- fusedOuts.reserve(numTargetOuts + numSourceOuts);
- fusedOuts.append(targetOuts.begin(), targetOuts.end());
- fusedOuts.append(sourceOuts.begin(), sourceOuts.end());
+ llvm::append_range(fusedOuts, target.getOutputs());
+ llvm::append_range(fusedOuts, source.getOutputs());
- // Create a new scf::forall op after the source loop.
+ // Create a new scf.forall op after the source loop.
rewriter.setInsertionPointAfter(source);
scf::ForallOp fusedLoop = rewriter.create<scf::ForallOp>(
source.getLoc(), source.getMixedLowerBound(), source.getMixedUpperBound(),
source.getMixedStep(), fusedOuts, source.getMapping());
// Map control operands.
- IRMapping fusedMapping;
- fusedMapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
- fusedMapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
+ IRMapping mapping;
+ mapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
+ mapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
// Map shared outs.
- fusedMapping.map(target.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().slice(0, numTargetOuts));
- fusedMapping.map(
- source.getRegionIterArgs(),
- fusedLoop.getRegionIterArgs().slice(numTargetOuts, numSourceOuts));
+ mapping.map(target.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
+ mapping.map(source.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
// Append everything except the terminator into the fused operation.
rewriter.setInsertionPointToStart(fusedLoop.getBody());
for (Operation &op : target.getBody()->without_terminator())
- rewriter.clone(op, fusedMapping);
+ rewriter.clone(op, mapping);
for (Operation &op : source.getBody()->without_terminator())
- rewriter.clone(op, fusedMapping);
+ rewriter.clone(op, mapping);
// Fuse the old terminator in_parallel ops into the new one.
scf::InParallelOp targetTerm = target.getTerminator();
scf::InParallelOp sourceTerm = source.getTerminator();
scf::InParallelOp fusedTerm = fusedLoop.getTerminator();
-
rewriter.setInsertionPointToStart(fusedTerm.getBody());
for (Operation &op : targetTerm.getYieldingOps())
- rewriter.clone(op, fusedMapping);
+ rewriter.clone(op, mapping);
for (Operation &op : sourceTerm.getYieldingOps())
- rewriter.clone(op, fusedMapping);
-
- // Replace all uses of the old loops with the fused loop.
- rewriter.replaceAllUsesWith(target.getResults(),
- fusedLoop.getResults().slice(0, numTargetOuts));
- rewriter.replaceAllUsesWith(
- source.getResults(),
- fusedLoop.getResults().slice(numTargetOuts, numSourceOuts));
-
- // Erase the old loops.
- rewriter.eraseOp(target);
- rewriter.eraseOp(source);
+ rewriter.clone(op, mapping);
+
+ // Replace old loops by substituting their uses by results of the fused loop.
+ rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
+ rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
+
+ return fusedLoop;
+}
+
+scf::ForOp mlir::fuseIndependentSiblingForLoops(scf::ForOp target,
+ scf::ForOp source,
+ RewriterBase &rewriter) {
+ unsigned numTargetOuts = target.getNumResults();
+ unsigned numSourceOuts = source.getNumResults();
+
+ // Create fused init_args, with target's init_args before source's init_args.
+ SmallVector<Value> fusedInitArgs;
+ llvm::append_range(fusedInitArgs, target.getInitArgs());
+ llvm::append_range(fusedInitArgs, source.getInitArgs());
+
+ // Create a new scf.for op after the source loop (with scf.yield terminator
+ // (without arguments) only in case its init_args is empty).
+ rewriter.setInsertionPointAfter(source);
+ scf::ForOp fusedLoop = rewriter.create<scf::ForOp>(
+ source.getLoc(), source.getLowerBound(), source.getUpperBound(),
+ source.getStep(), fusedInitArgs);
+
+ // Map original induction variables and operands to those of the fused loop.
+ IRMapping mapping;
+ mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
+ mapping.map(target.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
+ mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());
+ mapping.map(source.getRegionIterArgs(),
+ fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
+
+ // Merge target's body into the new (fused) for loop and then source's body.
+ rewriter.setInsertionPointToStart(fusedLoop.getBody());
+ for (Operation &op : target.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+ for (Operation &op : source.getBody()->without_terminator())
+ rewriter.clone(op, mapping);
+
+ // Build fused yield results by appropriately mapping original yield operands.
+ SmallVector<Value> yieldResults;
+ for (Value operand : target.getBody()->getTerminator()->getOperands())
+ yieldResults.push_back(mapping.lookupOrDefault(operand));
+ for (Value operand : source.getBody()->getTerminator()->getOperands())
+ yieldResults.push_back(mapping.lookupOrDefault(operand));
+ if (!yieldResults.empty())
+ rewriter.create<scf::YieldOp>(source.getLoc(), yieldResults);
+
+ // Replace old loops by substituting their uses by results of the fused loop.
+ rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
+ rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
return fusedLoop;
}
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
index ff4bace9a4d8..5f47cff71cba 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
@@ -881,6 +881,172 @@ OpFoldResult spirv::INotEqualOp::fold(spirv::INotEqualOp::FoldAdaptor adaptor) {
}
//===----------------------------------------------------------------------===//
+// spirv.SGreaterThan
+//===----------------------------------------------------------------------===//
+
+OpFoldResult
+spirv::SGreaterThanOp::fold(spirv::SGreaterThanOp::FoldAdaptor adaptor) {
+ // x == x -> false
+ if (getOperand1() == getOperand2()) {
+ auto falseAttr = BoolAttr::get(getContext(), false);
+ if (isa<IntegerType>(getType()))
+ return falseAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, falseAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.sgt(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.SGreaterThanEqual
+//===----------------------------------------------------------------------===//
+
+OpFoldResult spirv::SGreaterThanEqualOp::fold(
+ spirv::SGreaterThanEqualOp::FoldAdaptor adaptor) {
+ // x == x -> true
+ if (getOperand1() == getOperand2()) {
+ auto trueAttr = BoolAttr::get(getContext(), true);
+ if (isa<IntegerType>(getType()))
+ return trueAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, trueAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.sge(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.UGreaterThan
+//===----------------------------------------------------------------------===//
+
+OpFoldResult
+spirv::UGreaterThanOp::fold(spirv::UGreaterThanOp::FoldAdaptor adaptor) {
+ // x == x -> false
+ if (getOperand1() == getOperand2()) {
+ auto falseAttr = BoolAttr::get(getContext(), false);
+ if (isa<IntegerType>(getType()))
+ return falseAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, falseAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.ugt(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.UGreaterThanEqual
+//===----------------------------------------------------------------------===//
+
+OpFoldResult spirv::UGreaterThanEqualOp::fold(
+ spirv::UGreaterThanEqualOp::FoldAdaptor adaptor) {
+ // x == x -> true
+ if (getOperand1() == getOperand2()) {
+ auto trueAttr = BoolAttr::get(getContext(), true);
+ if (isa<IntegerType>(getType()))
+ return trueAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, trueAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.uge(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.SLessThan
+//===----------------------------------------------------------------------===//
+
+OpFoldResult spirv::SLessThanOp::fold(spirv::SLessThanOp::FoldAdaptor adaptor) {
+ // x == x -> false
+ if (getOperand1() == getOperand2()) {
+ auto falseAttr = BoolAttr::get(getContext(), false);
+ if (isa<IntegerType>(getType()))
+ return falseAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, falseAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.slt(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.SLessThanEqual
+//===----------------------------------------------------------------------===//
+
+OpFoldResult
+spirv::SLessThanEqualOp::fold(spirv::SLessThanEqualOp::FoldAdaptor adaptor) {
+ // x == x -> true
+ if (getOperand1() == getOperand2()) {
+ auto trueAttr = BoolAttr::get(getContext(), true);
+ if (isa<IntegerType>(getType()))
+ return trueAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, trueAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.sle(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.ULessThan
+//===----------------------------------------------------------------------===//
+
+OpFoldResult spirv::ULessThanOp::fold(spirv::ULessThanOp::FoldAdaptor adaptor) {
+ // x == x -> false
+ if (getOperand1() == getOperand2()) {
+ auto falseAttr = BoolAttr::get(getContext(), false);
+ if (isa<IntegerType>(getType()))
+ return falseAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, falseAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.ult(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
+// spirv.ULessThanEqual
+//===----------------------------------------------------------------------===//
+
+OpFoldResult
+spirv::ULessThanEqualOp::fold(spirv::ULessThanEqualOp::FoldAdaptor adaptor) {
+ // x == x -> true
+ if (getOperand1() == getOperand2()) {
+ auto trueAttr = BoolAttr::get(getContext(), true);
+ if (isa<IntegerType>(getType()))
+ return trueAttr;
+ if (auto vecTy = dyn_cast<VectorType>(getType()))
+ return SplatElementsAttr::get(vecTy, trueAttr);
+ }
+
+ return constFoldBinaryOp<IntegerAttr>(
+ adaptor.getOperands(), getType(), [](const APInt &a, const APInt &b) {
+ return a.ule(b) ? APInt::getAllOnes(1) : APInt::getZero(1);
+ });
+}
+
+//===----------------------------------------------------------------------===//
// spirv.ShiftLeftLogical
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index e914f46bdef6..72488d6e5d0b 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -135,7 +135,7 @@ void SPIRVDialect::initialize() {
// Allow unknown operations because SPIR-V is extensible.
allowUnknownOperations();
- declarePromisedInterface<TargetEnvAttr, gpu::TargetAttrInterface>();
+ declarePromisedInterface<gpu::TargetAttrInterface, TargetEnvAttr>();
}
std::string SPIRVDialect::getAttributeName(Decoration decoration) {
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index 2b79c8022b8e..4072608dc8f8 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -991,15 +991,16 @@ Value mlir::spirv::linearizeIndex(ValueRange indices, ArrayRef<int64_t> strides,
// broken down into progressive small steps so we can have intermediate steps
// using other dialects. At the moment SPIR-V is the final sink.
- Value linearizedIndex = builder.create<spirv::ConstantOp>(
+ Value linearizedIndex = builder.createOrFold<spirv::ConstantOp>(
loc, integerType, IntegerAttr::get(integerType, offset));
for (const auto &index : llvm::enumerate(indices)) {
- Value strideVal = builder.create<spirv::ConstantOp>(
+ Value strideVal = builder.createOrFold<spirv::ConstantOp>(
loc, integerType,
IntegerAttr::get(integerType, strides[index.index()]));
- Value update = builder.create<spirv::IMulOp>(loc, strideVal, index.value());
+ Value update =
+ builder.createOrFold<spirv::IMulOp>(loc, index.value(), strideVal);
linearizedIndex =
- builder.create<spirv::IAddOp>(loc, linearizedIndex, update);
+ builder.createOrFold<spirv::IAddOp>(loc, update, linearizedIndex);
}
return linearizedIndex;
}
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.cpp
index 21de1c9e867c..5d4dd5b3a1e0 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVWebGPUTransforms.cpp
@@ -39,7 +39,7 @@ namespace {
//===----------------------------------------------------------------------===//
// Helpers
//===----------------------------------------------------------------------===//
-Attribute getScalarOrSplatAttr(Type type, int64_t value) {
+static Attribute getScalarOrSplatAttr(Type type, int64_t value) {
APInt sizedValue(getElementTypeOrSelf(type).getIntOrFloatBitWidth(), value);
if (auto intTy = dyn_cast<IntegerType>(type))
return IntegerAttr::get(intTy, sizedValue);
@@ -47,9 +47,9 @@ Attribute getScalarOrSplatAttr(Type type, int64_t value) {
return SplatElementsAttr::get(cast<ShapedType>(type), sizedValue);
}
-Value lowerExtendedMultiplication(Operation *mulOp, PatternRewriter &rewriter,
- Value lhs, Value rhs,
- bool signExtendArguments) {
+static Value lowerExtendedMultiplication(Operation *mulOp,
+ PatternRewriter &rewriter, Value lhs,
+ Value rhs, bool signExtendArguments) {
Location loc = mulOp->getLoc();
Type argTy = lhs.getType();
// Emulate 64-bit multiplication by splitting each input element of type i32
@@ -203,15 +203,39 @@ struct ExpandAddCarryPattern final : OpRewritePattern<IAddCarryOp> {
}
};
+struct ExpandIsInfPattern final : OpRewritePattern<IsInfOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(IsInfOp op,
+ PatternRewriter &rewriter) const override {
+ // We assume values to be finite and turn `IsInf` into `false`.
+ rewriter.replaceOpWithNewOp<spirv::ConstantOp>(
+ op, op.getType(), getScalarOrSplatAttr(op.getType(), 0));
+ return success();
+ }
+};
+
+struct ExpandIsNanPattern final : OpRewritePattern<IsNanOp> {
+ using OpRewritePattern::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(IsNanOp op,
+ PatternRewriter &rewriter) const override {
+ // We assume values to be finite and turn `IsNan` into `false`.
+ rewriter.replaceOpWithNewOp<spirv::ConstantOp>(
+ op, op.getType(), getScalarOrSplatAttr(op.getType(), 0));
+ return success();
+ }
+};
+
//===----------------------------------------------------------------------===//
// Passes
//===----------------------------------------------------------------------===//
-class WebGPUPreparePass
- : public impl::SPIRVWebGPUPreparePassBase<WebGPUPreparePass> {
-public:
+struct WebGPUPreparePass final
+ : impl::SPIRVWebGPUPreparePassBase<WebGPUPreparePass> {
void runOnOperation() override {
RewritePatternSet patterns(&getContext());
populateSPIRVExpandExtendedMultiplicationPatterns(patterns);
+ populateSPIRVExpandNonFiniteArithmeticPatterns(patterns);
if (failed(
applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
@@ -227,12 +251,16 @@ void populateSPIRVExpandExtendedMultiplicationPatterns(
RewritePatternSet &patterns) {
// WGSL currently does not support extended multiplication ops, see:
// https://github.com/gpuweb/gpuweb/issues/1565.
- patterns.add<
- // clang-format off
- ExpandSMulExtendedPattern,
- ExpandUMulExtendedPattern,
- ExpandAddCarryPattern
- >(patterns.getContext());
+ patterns.add<ExpandSMulExtendedPattern, ExpandUMulExtendedPattern,
+ ExpandAddCarryPattern>(patterns.getContext());
}
+
+void populateSPIRVExpandNonFiniteArithmeticPatterns(
+ RewritePatternSet &patterns) {
+ // WGSL currently does not support `isInf` and `isNan`, see:
+ // https://github.com/gpuweb/gpuweb/pull/2311.
+ patterns.add<ExpandIsInfPattern, ExpandIsNanPattern>(patterns.getContext());
+}
+
} // namespace spirv
} // namespace mlir
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index 92c98b34af60..c52fa3751e6b 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -275,7 +275,7 @@ static Value genPositionsCall(OpBuilder &builder, Location loc,
.getResult(0);
}
-/// Generates a call to obtain the coordindates array.
+/// Generates a call to obtain the coordinates array.
static Value genCoordinatesCall(OpBuilder &builder, Location loc,
SparseTensorType stt, Value ptr, Level l) {
Type crdTp = stt.getCrdType();
@@ -287,6 +287,20 @@ static Value genCoordinatesCall(OpBuilder &builder, Location loc,
.getResult(0);
}
+/// Generates a call to obtain the coordinates array (AoS view).
+static Value genCoordinatesBufferCall(OpBuilder &builder, Location loc,
+ SparseTensorType stt, Value ptr,
+ Level l) {
+ Type crdTp = stt.getCrdType();
+ auto resTp = MemRefType::get({ShapedType::kDynamic}, crdTp);
+ Value lvl = constantIndex(builder, loc, l);
+ SmallString<25> name{"sparseCoordinatesBuffer",
+ overheadTypeFunctionSuffix(crdTp)};
+ return createFuncCall(builder, loc, name, resTp, {ptr, lvl},
+ EmitCInterface::On)
+ .getResult(0);
+}
+
//===----------------------------------------------------------------------===//
// Conversion rules.
//===----------------------------------------------------------------------===//
@@ -518,13 +532,35 @@ public:
LogicalResult
matchAndRewrite(ToCoordinatesOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
+ const Location loc = op.getLoc();
+ auto stt = getSparseTensorType(op.getTensor());
+ auto crds = genCoordinatesCall(rewriter, loc, stt, adaptor.getTensor(),
+ op.getLevel());
+ // Cast the MemRef type to the type expected by the users, though these
+ // two types should be compatible at runtime.
+ if (op.getType() != crds.getType())
+ crds = rewriter.create<memref::CastOp>(loc, op.getType(), crds);
+ rewriter.replaceOp(op, crds);
+ return success();
+ }
+};
+
+/// Sparse conversion rule for coordinate accesses (AoS style).
+class SparseToCoordinatesBufferConverter
+ : public OpConversionPattern<ToCoordinatesBufferOp> {
+public:
+ using OpConversionPattern::OpConversionPattern;
+ LogicalResult
+ matchAndRewrite(ToCoordinatesBufferOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ const Location loc = op.getLoc();
auto stt = getSparseTensorType(op.getTensor());
- auto crds = genCoordinatesCall(rewriter, op.getLoc(), stt,
- adaptor.getTensor(), op.getLevel());
+ auto crds = genCoordinatesBufferCall(
+ rewriter, loc, stt, adaptor.getTensor(), stt.getAoSCOOStart());
// Cast the MemRef type to the type expected by the users, though these
// two types should be compatible at runtime.
if (op.getType() != crds.getType())
- crds = rewriter.create<memref::CastOp>(op.getLoc(), op.getType(), crds);
+ crds = rewriter.create<memref::CastOp>(loc, op.getType(), crds);
rewriter.replaceOp(op, crds);
return success();
}
@@ -878,10 +914,10 @@ void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter,
SparseTensorAllocConverter, SparseTensorEmptyConverter,
SparseTensorDeallocConverter, SparseTensorReorderCOOConverter,
SparseTensorToPositionsConverter, SparseTensorToCoordinatesConverter,
- SparseTensorToValuesConverter, SparseNumberOfEntriesConverter,
- SparseTensorLoadConverter, SparseTensorInsertConverter,
- SparseTensorExpandConverter, SparseTensorCompressConverter,
- SparseTensorAssembleConverter, SparseTensorDisassembleConverter,
- SparseHasRuntimeLibraryConverter>(typeConverter,
- patterns.getContext());
+ SparseToCoordinatesBufferConverter, SparseTensorToValuesConverter,
+ SparseNumberOfEntriesConverter, SparseTensorLoadConverter,
+ SparseTensorInsertConverter, SparseTensorExpandConverter,
+ SparseTensorCompressConverter, SparseTensorAssembleConverter,
+ SparseTensorDisassembleConverter, SparseHasRuntimeLibraryConverter>(
+ typeConverter, patterns.getContext());
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index 17f70d0796cc..b117c1694e45 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -648,7 +648,9 @@ public:
loc, lvl, vector::PrintPunctuation::NoPunctuation);
rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
Value crd = nullptr;
- // TODO: eliminates ToCoordinateBufferOp!
+ // For COO AoS storage, we want to print a single, linear view of
+ // the full coordinate storage at this level. For any other storage,
+ // we show the coordinate storage for every individual level.
if (stt.getAoSCOOStart() == l)
crd = rewriter.create<ToCoordinatesBufferOp>(loc, tensor);
else
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp b/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp
index 4b3156728cc9..002077753b13 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorDialect.cpp
@@ -62,7 +62,7 @@ void TensorDialect::initialize() {
ParallelInsertSliceOp>();
declarePromisedInterfaces<SubsetInsertionOpInterface, InsertSliceOp,
ParallelInsertSliceOp>();
- declarePromisedInterface<ExtractSliceOp, SubsetExtractionOpInterface>();
+ declarePromisedInterface<SubsetExtractionOpInterface, ExtractSliceOp>();
declarePromisedInterfaces<TilingInterface, PadOp, PackOp, UnPackOp>();
declarePromisedInterfaces<ValueBoundsOpInterface, CastOp, DimOp, EmptyOp,
ExtractSliceOp, PadOp, RankOp>();
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index dc8843aa4e1e..38a9ad60bb79 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -276,6 +276,10 @@ bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
if (sourceType.getRank() != targetType.getRank())
return false;
+ // Requires same encoding.
+ if (sourceType.getEncoding() != targetType.getEncoding())
+ return false;
+
// If cast is towards more static sizes along any dimension, don't fold.
for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
if (!ShapedType::isDynamic(std::get<0>(t)) &&
diff --git a/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
index 5257310f5b00..ff003e486d21 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/MergeConsecutiveInsertExtractSlicePatterns.cpp
@@ -78,12 +78,12 @@ struct MergeConsecutiveInsertSlice : public OpRewritePattern<OpTy> {
}
};
-/// Drop redundant rank expansion. I.e., rank expansions that are directly
-/// followed by rank reductions. E.g.:
+/// Drop redundant rank expansion of insert_slice that are directly followed
+/// by extract_slice. E.g.:
/// %0 = tensor.insert_slice ... : tensor<5x10xf32> into tensor<1x1x5x10xf32>
/// %1 = tensor.extract_slice %0[0, 0, 2, 3] [1, 1, 2, 2] [1, 1, 1, 1]
/// : tensor<1x1x5x10xf32> to tensor<2x2xf32>
-struct DropRedundantInsertSliceRankExpansion
+struct DropRedundantRankExpansionOnExtractSliceOfInsertSlice
: public OpRewritePattern<ExtractSliceOp> {
using OpRewritePattern::OpRewritePattern;
@@ -134,6 +134,97 @@ struct DropRedundantInsertSliceRankExpansion
return success();
}
};
+
+/// Drop redundant rank expansion of insert_slice that directly follows
+/// extract_slice.
+///
+/// This can be done when the insert_slice op purely expands ranks (adds unit
+/// dims) and the extract_slice drops corresponding unit dims. For example:
+///
+/// %extracted_slice = tensor.extract_slice %in[0, 0] [1, 8] [1, 1]
+/// : tensor<2x8xf32> to tensor<8xf32>
+/// %inserted_slice = tensor.insert_slice %extracted_slice
+/// into %dest[0, 0] [1, 8] [1, 1]
+/// : tensor<8xf32> into tensor<1x8xf32>
+///
+/// can be folded into:
+///
+/// %extracted_slice = tensor.extract_slice %in[0, 0] [1, 8] [1, 1]
+/// : tensor<2x8xf32> to tensor<1x8xf32>
+struct DropRedundantRankExpansionOnInsertSliceOfExtractSlice final
+ : public OpRewritePattern<tensor::InsertSliceOp> {
+ using OpRewritePattern<tensor::InsertSliceOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(tensor::InsertSliceOp insertSliceOp,
+ PatternRewriter &rewriter) const override {
+ auto extractSliceOp =
+ insertSliceOp.getSource().getDefiningOp<tensor::ExtractSliceOp>();
+ if (!extractSliceOp) {
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "source is not extract_slice");
+ }
+
+ // Can't fold if the extract_slice op has other users.
+ if (!extractSliceOp->hasOneUse()) {
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "source has multi-uses");
+ }
+
+ // Check if the insert_slice op purely expands ranks (add unit dims).
+ if (!isCastLikeInsertSliceOp(insertSliceOp)) {
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "insert_slice is not cast-like");
+ }
+
+ llvm::SmallBitVector extractDroppedDims = extractSliceOp.getDroppedDims();
+ llvm::SmallBitVector insertDroppedDims = insertSliceOp.getDroppedDims();
+ // Can't fold if the insert_slice op expands to more dims.
+ if (extractDroppedDims.size() < insertDroppedDims.size()) {
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "insert_slice expands more dims");
+ }
+
+ // Try to match the extract dropped dims to the insert dropped dims. This is
+ // done by scanning the dims of extract_slice and finding the left-most one
+ // that can match the dim of insert_slice. If a match is found, advance the
+ // dim of insert_slice to match the next one.
+ unsigned insertDimPos = 0;
+ for (unsigned extractDimPos = 0; extractDimPos < extractDroppedDims.size();
+ ++extractDimPos) {
+ // Matched all dims.
+ if (insertDimPos == insertDroppedDims.size())
+ break;
+
+ bool isExtractDropped = extractDroppedDims[extractDimPos];
+ bool isInsertDropped = insertDroppedDims[insertDimPos];
+ // Match if both sides drop/keep the dim. Advance and match the next dim
+ // of insert_slice.
+ if (isExtractDropped == isInsertDropped) {
+ insertDimPos += 1;
+ } else if (!isExtractDropped && isInsertDropped) {
+ // Not enough extract dropped dims to match the insert dropped dims.
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "insert_slice drops more unit dims");
+ }
+ // If the dim is dropped by extract_slice and not by insert_slice, look at
+ // the next dim of extract_slice to see if it can match the current dim of
+ // insert_slice.
+ }
+ // Can't match some insert dims.
+ if (insertDimPos != insertDroppedDims.size()) {
+ return rewriter.notifyMatchFailure(insertSliceOp,
+ "insert_slice has unmatched dims");
+ }
+
+ rewriter.replaceOpWithNewOp<tensor::ExtractSliceOp>(
+ insertSliceOp, insertSliceOp.getType(), extractSliceOp.getSource(),
+ extractSliceOp.getMixedOffsets(), extractSliceOp.getMixedSizes(),
+ extractSliceOp.getMixedStrides());
+ rewriter.eraseOp(extractSliceOp);
+
+ return success();
+ }
+};
} // namespace
void mlir::tensor::populateMergeConsecutiveInsertExtractSlicePatterns(
@@ -146,5 +237,7 @@ void mlir::tensor::populateMergeConsecutiveInsertExtractSlicePatterns(
void mlir::tensor::populateDropRedundantInsertSliceRankExpansionPatterns(
RewritePatternSet &patterns) {
- patterns.add<DropRedundantInsertSliceRankExpansion>(patterns.getContext());
+ patterns.add<DropRedundantRankExpansionOnExtractSliceOfInsertSlice,
+ DropRedundantRankExpansionOnInsertSliceOfExtractSlice>(
+ patterns.getContext());
}
diff --git a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
index 55fd38749669..666ac56c6cd5 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
@@ -11,7 +11,6 @@
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/IR/PatternMatch.h"
-#include "llvm/Support/Debug.h"
namespace mlir {
namespace tensor {
@@ -224,6 +223,33 @@ struct FoldUnpackWithExtractSliceOp : public OpRewritePattern<ExtractSliceOp> {
}
};
+// Applies 'permutation' on 'inVec' and stores the result in resVec.
+// 'inVec' may be empty, in that case it's one-to-one mapping with permutation.
+// `rank` sets the boundary for permutation i.e., the permutation dim can't be
+// greater than the rank specified. If it's so then return false.
+// E.g., permutation {1, 0, 3, 2} with rank 2 is allowed since the values in
+// permutation[:rank] don't exceed rank, whereas permutation {1, 3, 0, 2} is
+// not allowed since `3` exceeds the value of the rank in the given range.
+static bool checkAndPermute(ArrayRef<int64_t> permutation,
+ ArrayRef<int64_t> inVec,
+ SmallVectorImpl<int64_t> &resVec, int64_t rank) {
+
+ for (unsigned int i = 0; i < rank; ++i) {
+ int64_t remappedPosition = permutation[i];
+
+ if (!inVec.empty()) {
+ if (remappedPosition >= rank) {
+ return false;
+ }
+ remappedPosition = inVec[remappedPosition];
+ }
+
+ resVec.push_back(remappedPosition);
+ }
+
+ return true;
+}
+
/// Fold 'pack' -> 'transpose' into 'pack' since 'pack' already has transpose
/// semantics.
struct FoldProducerPackWithConsumerLinalgTransposeOp
@@ -246,24 +272,12 @@ struct FoldProducerPackWithConsumerLinalgTransposeOp
SmallVector<OpFoldResult> newMixedInnerTilesVec;
int64_t srcRank = packOp.getSourceRank();
- // Process transpose operation for non-tiled outer dimensions
- for (unsigned int i = 0; i < srcRank; ++i) {
- int64_t remappedPosition = transposePerm[i];
-
- // If tensor.pack has outer_dims_perm attribute, then consider it during
- // index remapping.
- if (!outerDimsPerm.empty()) {
- if (transposePerm[i] >= srcRank) {
- return rewriter.notifyMatchFailure(
- transposeOp,
- "Cannot fold in tensor.pack if a tile dimension was transposed "
- "with a non-tile dimension in linalg.transpose.");
- }
- remappedPosition = outerDimsPerm[remappedPosition];
- }
-
- newOuterDimsPermVec.push_back(remappedPosition);
- }
+ if (!checkAndPermute(transposePerm, outerDimsPerm, newOuterDimsPermVec,
+ srcRank))
+ return rewriter.notifyMatchFailure(
+ transposeOp,
+ "Cannot fold in tensor.pack if a tile dimension was transposed "
+ "with a non-tile dimension in linalg.transpose.");
// Process transpose operation for tiled inner dimensions
for (unsigned int i = srcRank; i < transposePerm.size(); ++i) {
@@ -323,12 +337,103 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
return success();
}
};
+
+/// Fold 'unpack' -> 'transpose' into 'unpack' since 'unpack' already has
+/// transpose semantics.
+struct FoldProducerUnPackWithConsumerLinalgTransposeOp
+ : public OpRewritePattern<linalg::TransposeOp> {
+ using OpRewritePattern<linalg::TransposeOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(linalg::TransposeOp transposeOp,
+ PatternRewriter &rewriter) const override {
+ auto unPackOp = transposeOp.getOperand(0).getDefiningOp<UnPackOp>();
+
+ if (!unPackOp)
+ return failure();
+
+ auto transposePermutation = transposeOp.getPermutation();
+ auto outerDimsPerm = unPackOp.getOuterDimsPerm();
+ auto innerDimsPos = unPackOp.getInnerDimsPos();
+ SmallVector<int64_t> newInnerDimsPosVec;
+ SmallVector<int64_t> newOuterDimsPermVec =
+ llvm::to_vector(transposePermutation);
+
+ if (!outerDimsPerm.empty())
+ applyPermutationToVector(newOuterDimsPermVec, outerDimsPerm);
+
+ // Can't use applyPermutationToVector for newInnerDimsPosVec since input and
+ // permutation rank won't necessarily be equal in all cases.
+ for (auto dim : innerDimsPos)
+ newInnerDimsPosVec.push_back(transposePermutation[dim]);
+
+ Value output = unPackOp.createDestinationTensor(
+ rewriter, transposeOp.getLoc(), unPackOp.getSource(),
+ unPackOp.getMixedTiles(), newInnerDimsPosVec, newOuterDimsPermVec);
+
+ rewriter.replaceOpWithNewOp<UnPackOp>(
+ transposeOp, unPackOp.getSource(), output, newInnerDimsPosVec,
+ unPackOp.getMixedTiles(), newOuterDimsPermVec);
+
+ return success();
+ }
+};
+
+/// Fold 'transpose' -> 'unpack' into 'unpack' since 'unpack' already has
+/// transpose semantics.
+struct FoldConsumerUnPackWithProducerLinalgTransposeOp
+ : public OpRewritePattern<UnPackOp> {
+ using OpRewritePattern<UnPackOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(UnPackOp unPackOp,
+ PatternRewriter &rewriter) const override {
+ auto transposeOp =
+ unPackOp.getSource().getDefiningOp<linalg::TransposeOp>();
+
+ if (!transposeOp)
+ return failure();
+
+ auto transposePermutation = transposeOp.getPermutation();
+ auto outerDimsPerm = unPackOp.getOuterDimsPerm();
+ auto innerDimsPos = unPackOp.getInnerDimsPos();
+ int64_t destRank = unPackOp.getSourceRank() - innerDimsPos.size();
+ auto mixedInnerTilesVec = unPackOp.getMixedTiles();
+ SmallVector<int64_t> newOuterDimsPermVec;
+ SmallVector<int64_t> newInnerDimsPosVec;
+ SmallVector<OpFoldResult> newMixedInnerTilesVec;
+
+ if (!checkAndPermute(transposePermutation, outerDimsPerm,
+ newOuterDimsPermVec, destRank))
+ return rewriter.notifyMatchFailure(
+ unPackOp,
+ "Cannot fold in tensor.unpack if a tile dimension was transposed "
+ "with a non-tile dimension in linalg.transpose.");
+
+ // Process transpose operation for tiled inner dimensions
+ for (unsigned int i = destRank; i < transposePermutation.size(); ++i) {
+ int64_t remappedPosition = transposePermutation[i] - destRank;
+ newMixedInnerTilesVec.push_back(mixedInnerTilesVec[remappedPosition]);
+ newInnerDimsPosVec.push_back(innerDimsPos[remappedPosition]);
+ }
+
+ Value output = unPackOp.createDestinationTensor(
+ rewriter, unPackOp.getLoc(), transposeOp.getOperand(0),
+ newMixedInnerTilesVec, newInnerDimsPosVec, newOuterDimsPermVec);
+
+ rewriter.replaceOpWithNewOp<UnPackOp>(
+ unPackOp, transposeOp.getOperand(0), output, newInnerDimsPosVec,
+ newMixedInnerTilesVec, newOuterDimsPermVec);
+
+ return success();
+ }
+};
} // namespace
void populateFoldIntoPackAndUnpackPatterns(RewritePatternSet &patterns) {
patterns.insert<FoldUnpackWithExtractSliceOp, FoldPadWithPackOp,
FoldProducerPackWithConsumerLinalgTransposeOp,
- FoldConsumerPackWithProducerLinalgTransposeOp>(
+ FoldConsumerPackWithProducerLinalgTransposeOp,
+ FoldConsumerUnPackWithProducerLinalgTransposeOp,
+ FoldProducerUnPackWithConsumerLinalgTransposeOp>(
patterns.getContext());
}
diff --git a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
index 186f85d2ce20..2dd91e2f7a17 100644
--- a/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Tensor/Utils/Utils.cpp
@@ -142,11 +142,15 @@ mlir::tensor::getUnPackInverseSrcPerm(UnPackOp unpackOp,
bool mlir::tensor::isCastLikeInsertSliceOp(InsertSliceOp op) {
llvm::SmallBitVector droppedDims = op.getDroppedDims();
int64_t srcDim = 0;
+ RankedTensorType resultType = op.getDestType();
// Source dims and destination dims (apart from dropped dims) must have the
// same size.
- for (int64_t resultDim = 0; resultDim < op.getDestType().getRank();
- ++resultDim) {
+ for (int64_t resultDim = 0; resultDim < resultType.getRank(); ++resultDim) {
if (droppedDims.test(resultDim)) {
+ // InsertSlice may expand unit dimensions that result from inserting a
+ // size-1 slice into a non-size-1 result dimension.
+ if (resultType.getDimSize(resultDim) != 1)
+ return false;
continue;
}
FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 4c50aaecfe94..d23c9fe824c9 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -795,7 +795,10 @@ OpFoldResult ReshapeOp::fold(FoldAdaptor adaptor) {
if (!inputTy || !outputTy)
return {};
- if (inputTy == outputTy)
+ // Fold when the input and output types are the same. This is only safe when
+ // there is at most 1 dynamic dimension. For 2 or more dynamic dimensions,
+ // there may still be a productive reshape.
+ if (inputTy == outputTy && inputTy.getNumDynamicDims() < 2)
return getInput1();
// reshape(reshape(x)) -> reshape(x)
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index f461e7e1a555..6e6e84350738 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -970,6 +970,11 @@ mlir::LogicalResult tosa::ReshapeOp::verify() {
<< " elements into " << outputElementsNum;
}
}
+
+ int missingDims = llvm::count(getNewShape(), -1);
+ if (missingDims > 1)
+ return emitOpError() << "At most one target dimension can be -1";
+
return mlir::success();
}
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
index 050f8ca3f32a..6575b39fd45a 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
@@ -132,14 +132,17 @@ bool constantUnaryOpShouldBeFolded(TosaOp unaryOp, DenseElementsAttr values) {
return inputOp.hasOneUse();
}
-template <typename BaseType>
-DenseElementsAttr transposeType(ElementsAttr attr, ShapedType inputType,
+template <typename RangeType>
+DenseElementsAttr transposeType(const RangeType &data, ShapedType inputType,
ShapedType outputType,
llvm::ArrayRef<int64_t> permValues) {
+ using ElementType = std::decay_t<decltype(*std::begin(data))>;
+
+ assert(inputType.getElementType() == outputType.getElementType());
+
if (inputType.getNumElements() == 0)
- return DenseElementsAttr::get(outputType, llvm::ArrayRef<BaseType>{});
+ return DenseElementsAttr::get(outputType, llvm::ArrayRef<ElementType>{});
- auto attrValues = attr.getValues<BaseType>();
auto inputShape = inputType.getShape();
// The inverted permutation map and strides of the output are used to compute
@@ -148,10 +151,11 @@ DenseElementsAttr transposeType(ElementsAttr attr, ShapedType inputType,
auto outputStrides = computeStrides(outputType.getShape());
auto invertedPermValues = invertPermutationVector(permValues);
- auto initialValue = *std::begin(attrValues);
- SmallVector<BaseType> outputValues(inputType.getNumElements(), initialValue);
+ auto initialValue = *std::begin(data);
+ SmallVector<ElementType> outputValues(inputType.getNumElements(),
+ initialValue);
- for (const auto &it : llvm::enumerate(attrValues)) {
+ for (const auto &it : llvm::enumerate(data)) {
auto srcLinearIndex = it.index();
uint64_t dstLinearIndex = 0;
@@ -170,7 +174,7 @@ DenseElementsAttr transposeType(ElementsAttr attr, ShapedType inputType,
}
return DenseElementsAttr::get(outputType,
- llvm::ArrayRef<BaseType>(outputValues));
+ llvm::ArrayRef<ElementType>(outputValues));
}
// A type specialized transposition of an ElementsAttr.
@@ -180,32 +184,28 @@ DenseElementsAttr transposeType(ElementsAttr attr, ShapedType inputType,
DenseElementsAttr transpose(ElementsAttr attr, ShapedType inputType,
ShapedType outputType,
llvm::ArrayRef<int64_t> permValues) {
- auto baseType = inputType.getElementType();
-
- // Handle possible integer types
- if (auto intType = dyn_cast<IntegerType>(baseType)) {
- switch (intType.getWidth()) {
- case 1:
- return transposeType<bool>(attr, inputType, outputType, permValues);
- case 8:
- return transposeType<int8_t>(attr, inputType, outputType, permValues);
- case 16:
- return transposeType<int16_t>(attr, inputType, outputType, permValues);
- case 32:
- return transposeType<int32_t>(attr, inputType, outputType, permValues);
- case 64:
- return transposeType<int64_t>(attr, inputType, outputType, permValues);
- default:
- return transposeType<APInt>(attr, inputType, outputType, permValues);
- }
- }
+ if (auto data = attr.tryGetValues<bool>())
+ return transposeType(*data, inputType, outputType, permValues);
- // Handle possible float types
- if (baseType.isF32()) {
- return transposeType<float>(attr, inputType, outputType, permValues);
- }
+ if (auto data = attr.tryGetValues<int8_t>())
+ return transposeType(*data, inputType, outputType, permValues);
+
+ if (auto data = attr.tryGetValues<int16_t>())
+ return transposeType(*data, inputType, outputType, permValues);
+
+ if (auto data = attr.tryGetValues<int32_t>())
+ return transposeType(*data, inputType, outputType, permValues);
- return transposeType<APFloat>(attr, inputType, outputType, permValues);
+ if (auto data = attr.tryGetValues<int64_t>())
+ return transposeType(*data, inputType, outputType, permValues);
+
+ if (auto data = attr.tryGetValues<float>())
+ return transposeType(*data, inputType, outputType, permValues);
+
+ if (auto data = attr.tryGetValues<APFloat>())
+ return transposeType(*data, inputType, outputType, permValues);
+
+ return nullptr;
}
struct TosaFoldConstantTranspose : public OpRewritePattern<tosa::TransposeOp> {
@@ -228,14 +228,19 @@ struct TosaFoldConstantTranspose : public OpRewritePattern<tosa::TransposeOp> {
DenseIntElementsAttr permAttr;
if (!matchPattern(op.getPerms(), m_Constant(&permAttr)))
return failure();
- auto permValues = llvm::to_vector<6>(llvm::map_range(
+ auto permValues = llvm::map_to_vector(
// TOSA allows both 32- and 64-bit integer tensors here.
permAttr.getValues<APInt>(),
- [](const APInt &val) { return val.getSExtValue(); }));
+ [](const APInt &val) { return val.getSExtValue(); });
auto inputType = cast<ShapedType>(op.getInput1().getType());
auto resultAttr = transpose(inputValues, inputType, outputType, permValues);
+ if (!resultAttr) {
+ return rewriter.notifyMatchFailure(
+ op, "unsupported attribute or element type");
+ }
+
rewriter.replaceOpWithNewOp<tosa::ConstOp>(op, outputType, resultAttr);
return success();
}
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index 967775281ad9..74ef6381f3d7 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -410,6 +410,8 @@ private:
bool CheckVariable(Operation *op);
bool CheckVariableReadOrWrite(Operation *op);
+ bool isValidElementType(Type type);
+
SmallVector<std::function<LogicalResult(Operation *)>> constCheckers;
TosaLevel tosaLevel;
DenseMap<StringAttr, mlir::Type> variablesMap;
@@ -503,15 +505,58 @@ LogicalResult TosaValidation::applyVariableCheck(Operation *op) {
return success();
}
+bool TosaValidation::isValidElementType(Type type) {
+ if ((profile == TosaProfileEnum::BaseInference) && isa<FloatType>(type)) {
+ return false;
+ }
+ if (type.isF64()) {
+ return false;
+ }
+ if (auto intTy = dyn_cast<IntegerType>(type)) {
+ if (intTy.isUnsigned()) {
+ switch (intTy.getWidth()) {
+ case 8:
+ case 16:
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ // Signless - treated as signed.
+ switch (intTy.getWidth()) {
+ case 1:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 48:
+ case 64:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+ }
+ return true;
+}
+
void TosaValidation::runOnOperation() {
configLevelAndProfile();
getOperation().walk([&](Operation *op) {
for (Value operand : op->getOperands()) {
- if ((profile == TosaProfileEnum::BaseInference) &&
- isa<FloatType>(getElementTypeOrSelf(operand))) {
+ auto elementTy = getElementTypeOrSelf(operand);
+ if (!isValidElementType(elementTy)) {
+ op->emitOpError() << "is not profile-aligned: element type "
+ << elementTy << " is not legal";
return signalPassFailure();
}
- if (getElementTypeOrSelf(operand).isF64()) {
+ }
+ for (Type resultTy : op->getResultTypes()) {
+ auto elementTy = getElementTypeOrSelf(resultTy);
+ if (!isValidElementType(elementTy)) {
+ op->emitOpError() << "is not profile-aligned: element type "
+ << elementTy << " is not legal";
return signalPassFailure();
}
}
diff --git a/mlir/lib/Dialect/Transform/IR/CMakeLists.txt b/mlir/lib/Dialect/Transform/IR/CMakeLists.txt
index f90ac089adaa..5b4989f328e6 100644
--- a/mlir/lib/Dialect/Transform/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Transform/IR/CMakeLists.txt
@@ -1,15 +1,10 @@
add_mlir_dialect_library(MLIRTransformDialect
- MatchInterfaces.cpp
TransformAttrs.cpp
TransformDialect.cpp
TransformOps.cpp
TransformTypes.cpp
Utils.cpp
- DEPENDS
- MLIRMatchInterfacesIncGen
- MLIRTransformDialectIncGen
-
LINK_LIBS PUBLIC
MLIRCastInterfaces
MLIRFunctionInterfaces
diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
index 8d2ed8f6d737..578b2492bbab 100644
--- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
+++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp
@@ -11,10 +11,10 @@
#include "mlir/Conversion/ConvertToLLVM/ToLLVMInterface.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformAttrs.h"
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
#include "mlir/Dialect/Transform/IR/TransformTypes.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
@@ -1020,6 +1020,8 @@ transform::ForeachMatchOp::apply(transform::TransformRewriter &rewriter,
matchActionPairs.emplace_back(matcherSymbol, actionSymbol);
}
+ DiagnosedSilenceableFailure overallDiag =
+ DiagnosedSilenceableFailure::success();
for (Operation *root : state.getPayloadOps(getRoot())) {
WalkResult walkResult = root->walk([&](Operation *op) {
// If getRestrictRoot is not present, skip over the root op itself so we
@@ -1058,8 +1060,19 @@ transform::ForeachMatchOp::apply(transform::TransformRewriter &rewriter,
action.getFunctionBody().front().without_terminator()) {
DiagnosedSilenceableFailure result =
state.applyTransform(cast<TransformOpInterface>(transform));
- if (failed(result.checkAndReport()))
+ if (result.isDefiniteFailure())
return WalkResult::interrupt();
+ if (result.isSilenceableFailure()) {
+ if (overallDiag.succeeded()) {
+ overallDiag = emitSilenceableError() << "actions failed";
+ }
+ overallDiag.attachNote(action->getLoc())
+ << "failed action: " << result.getMessage();
+ overallDiag.attachNote(op->getLoc())
+ << "when applied to this matching payload";
+ (void)result.silence();
+ continue;
+ }
}
break;
}
@@ -1075,7 +1088,7 @@ transform::ForeachMatchOp::apply(transform::TransformRewriter &rewriter,
// by actions, are invalidated.
results.set(llvm::cast<OpResult>(getUpdated()),
state.getPayloadOps(getRoot()));
- return DiagnosedSilenceableFailure::success();
+ return overallDiag;
}
void transform::ForeachMatchOp::getEffects(
@@ -2628,7 +2641,11 @@ transform::PrintOp::apply(transform::TransformRewriter &rewriter,
void transform::PrintOp::getEffects(
SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
- onlyReadsHandle(getTarget(), effects);
+ // We don't really care about mutability here, but `getTarget` now
+ // unconditionally casts to a specific type before verification could run
+ // here.
+ if (!getTargetMutable().empty())
+ onlyReadsHandle(getTargetMutable()[0].get(), effects);
onlyReadsPayload(effects);
// There is no resource for stderr file descriptor, so just declare print
diff --git a/mlir/lib/Dialect/Transform/Interfaces/CMakeLists.txt b/mlir/lib/Dialect/Transform/Interfaces/CMakeLists.txt
index 7b837bde0625..fc9cbfdc9a5b 100644
--- a/mlir/lib/Dialect/Transform/Interfaces/CMakeLists.txt
+++ b/mlir/lib/Dialect/Transform/Interfaces/CMakeLists.txt
@@ -1,7 +1,9 @@
add_mlir_library(MLIRTransformDialectInterfaces
+ MatchInterfaces.cpp
TransformInterfaces.cpp
DEPENDS
+ MLIRMatchInterfacesIncGen
MLIRTransformInterfacesIncGen
LINK_LIBS PUBLIC
diff --git a/mlir/lib/Dialect/Transform/IR/MatchInterfaces.cpp b/mlir/lib/Dialect/Transform/Interfaces/MatchInterfaces.cpp
index b9b6dabc2621..4151d0ea5bee 100644
--- a/mlir/lib/Dialect/Transform/IR/MatchInterfaces.cpp
+++ b/mlir/lib/Dialect/Transform/Interfaces/MatchInterfaces.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
using namespace mlir;
@@ -149,4 +149,4 @@ DiagnosedSilenceableFailure transform::expandTargetSpecification(
// Generated interface implementation.
//===----------------------------------------------------------------------===//
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.cpp.inc"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.cpp.inc"
diff --git a/mlir/lib/Dialect/UB/IR/UBOps.cpp b/mlir/lib/Dialect/UB/IR/UBOps.cpp
index 3a2010cdcb5c..5b2cfe7bf426 100644
--- a/mlir/lib/Dialect/UB/IR/UBOps.cpp
+++ b/mlir/lib/Dialect/UB/IR/UBOps.cpp
@@ -46,7 +46,7 @@ void UBDialect::initialize() {
#include "mlir/Dialect/UB/IR/UBOpsAttributes.cpp.inc"
>();
addInterfaces<UBInlinerInterface>();
- declarePromisedInterface<UBDialect, ConvertToLLVMPatternInterface>();
+ declarePromisedInterface<ConvertToLLVMPatternInterface, UBDialect>();
}
Operation *UBDialect::materializeConstant(OpBuilder &builder, Attribute value,
diff --git a/mlir/lib/Dialect/Vector/IR/CMakeLists.txt b/mlir/lib/Dialect/Vector/IR/CMakeLists.txt
index 70f3fa8c297d..204462ffd047 100644
--- a/mlir/lib/Dialect/Vector/IR/CMakeLists.txt
+++ b/mlir/lib/Dialect/Vector/IR/CMakeLists.txt
@@ -1,5 +1,7 @@
add_mlir_dialect_library(MLIRVectorDialect
VectorOps.cpp
+ ValueBoundsOpInterfaceImpl.cpp
+ ScalableValueBoundsConstraintSet.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/Vector/IR
diff --git a/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp b/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp
new file mode 100644
index 000000000000..6d7e3bc70f59
--- /dev/null
+++ b/mlir/lib/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.cpp
@@ -0,0 +1,103 @@
+//===- ScalableValueBoundsConstraintSet.cpp - Scalable Value Bounds -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h"
+
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+
+namespace mlir::vector {
+
+FailureOr<ConstantOrScalableBound::BoundSize>
+ConstantOrScalableBound::getSize() const {
+ if (map.isSingleConstant())
+ return BoundSize{map.getSingleConstantResult(), /*scalable=*/false};
+ if (map.getNumResults() != 1 || map.getNumInputs() != 1)
+ return failure();
+ auto binop = dyn_cast<AffineBinaryOpExpr>(map.getResult(0));
+ if (!binop || binop.getKind() != AffineExprKind::Mul)
+ return failure();
+ auto matchConstant = [&](AffineExpr expr, int64_t &constant) -> bool {
+ if (auto cst = dyn_cast<AffineConstantExpr>(expr)) {
+ constant = cst.getValue();
+ return true;
+ }
+ return false;
+ };
+ // Match `s0 * cst` or `cst * s0`:
+ int64_t cst = 0;
+ auto lhs = binop.getLHS();
+ auto rhs = binop.getRHS();
+ if ((matchConstant(lhs, cst) && isa<AffineSymbolExpr>(rhs)) ||
+ (matchConstant(rhs, cst) && isa<AffineSymbolExpr>(lhs))) {
+ return BoundSize{cst, /*scalable=*/true};
+ }
+ return failure();
+}
+
+char ScalableValueBoundsConstraintSet::ID = 0;
+
+FailureOr<ConstantOrScalableBound>
+ScalableValueBoundsConstraintSet::computeScalableBound(
+ Value value, std::optional<int64_t> dim, unsigned vscaleMin,
+ unsigned vscaleMax, presburger::BoundType boundType, bool closedUB,
+ StopConditionFn stopCondition) {
+ using namespace presburger;
+
+ assert(vscaleMin <= vscaleMax);
+ ScalableValueBoundsConstraintSet scalableCstr(value.getContext(), vscaleMin,
+ vscaleMax);
+
+ int64_t pos = scalableCstr.populateConstraintsSet(value, dim, stopCondition);
+
+ // Project out all variables apart from vscale.
+ // This should result in constraints in terms of vscale only.
+ scalableCstr.projectOut(
+ [&](ValueDim p) { return p.first != scalableCstr.getVscaleValue(); });
+
+ assert(scalableCstr.cstr.getNumDimAndSymbolVars() ==
+ scalableCstr.positionToValueDim.size() &&
+ "inconsistent mapping state");
+
+ // Check that the only symbols left are vscale.
+ for (int64_t i = 0; i < scalableCstr.cstr.getNumDimAndSymbolVars(); ++i) {
+ if (i == pos)
+ continue;
+ if (scalableCstr.positionToValueDim[i] !=
+ ValueDim(scalableCstr.getVscaleValue(),
+ ValueBoundsConstraintSet::kIndexValue)) {
+ return failure();
+ }
+ }
+
+ SmallVector<AffineMap, 1> lowerBound(1), upperBound(1);
+ scalableCstr.cstr.getSliceBounds(pos, 1, value.getContext(), &lowerBound,
+ &upperBound, closedUB);
+
+ auto invalidBound = [](auto &bound) {
+ return !bound[0] || bound[0].getNumResults() != 1;
+ };
+
+  AffineMap bound = [&] {
+    if (boundType == BoundType::EQ && !invalidBound(lowerBound) &&
+        lowerBound[0] == upperBound[0]) {
+ return lowerBound[0];
+ } else if (boundType == BoundType::LB && !invalidBound(lowerBound)) {
+ return lowerBound[0];
+ } else if (boundType == BoundType::UB && !invalidBound(upperBound)) {
+ return upperBound[0];
+ }
+ return AffineMap{};
+ }();
+
+ if (!bound)
+ return failure();
+
+ return ConstantOrScalableBound{bound};
+}
+
+} // namespace mlir::vector
diff --git a/mlir/lib/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.cpp b/mlir/lib/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.cpp
new file mode 100644
index 000000000000..ca95072d9bb0
--- /dev/null
+++ b/mlir/lib/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.cpp
@@ -0,0 +1,51 @@
+//===- ValueBoundsOpInterfaceImpl.cpp - Impl. of ValueBoundsOpInterface ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Vector/IR/ValueBoundsOpInterfaceImpl.h"
+
+#include "mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/Interfaces/ValueBoundsOpInterface.h"
+
+using namespace mlir;
+
+namespace mlir::vector {
+namespace {
+
+struct VectorScaleOpInterface
+ : public ValueBoundsOpInterface::ExternalModel<VectorScaleOpInterface,
+ VectorScaleOp> {
+ void populateBoundsForIndexValue(Operation *op, Value value,
+ ValueBoundsConstraintSet &cstr) const {
+ auto *scalableCstr = dyn_cast<ScalableValueBoundsConstraintSet>(&cstr);
+ if (!scalableCstr)
+ return;
+ auto vscaleOp = cast<VectorScaleOp>(op);
+ assert(value == vscaleOp.getResult() && "invalid value");
+ if (auto vscale = scalableCstr->getVscaleValue()) {
+ // All copies of vscale are equivalent.
+ scalableCstr->bound(value) == cstr.getExpr(vscale);
+ } else {
+ // We know vscale is confined to [vscaleMin, vscaleMax].
+ scalableCstr->bound(value) >= scalableCstr->getVscaleMin();
+ scalableCstr->bound(value) <= scalableCstr->getVscaleMax();
+ scalableCstr->setVscale(vscaleOp);
+ }
+ }
+};
+
+} // namespace
+} // namespace mlir::vector
+
+void mlir::vector::registerValueBoundsOpInterfaceExternalModels(
+ DialectRegistry &registry) {
+ registry.addExtension(+[](MLIRContext *ctx, vector::VectorDialect *dialect) {
+ vector::VectorScaleOp::attachInterface<vector::VectorScaleOpInterface>(
+ *ctx);
+ });
+}
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 35296824246e..e566bfacf379 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -382,8 +382,8 @@ void VectorDialect::initialize() {
YieldOp>();
declarePromisedInterfaces<SubsetOpInterface, TransferReadOp,
TransferWriteOp>();
- declarePromisedInterface<TransferReadOp, SubsetExtractionOpInterface>();
- declarePromisedInterface<TransferWriteOp, SubsetInsertionOpInterface>();
+ declarePromisedInterface<SubsetExtractionOpInterface, TransferReadOp>();
+ declarePromisedInterface<SubsetInsertionOpInterface, TransferWriteOp>();
}
/// Materialize a single constant operation from a given attribute value with
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index 4a5e8fcfb6ed..0693aa596cb2 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -41,8 +41,12 @@ static Value extendVectorRank(OpBuilder &builder, Location loc, Value vec,
SmallVector<int64_t> newShape(addedRank, 1);
newShape.append(originalVecType.getShape().begin(),
originalVecType.getShape().end());
- VectorType newVecType =
- VectorType::get(newShape, originalVecType.getElementType());
+
+ SmallVector<bool> newScalableDims(addedRank, false);
+ newScalableDims.append(originalVecType.getScalableDims().begin(),
+ originalVecType.getScalableDims().end());
+ VectorType newVecType = VectorType::get(
+ newShape, originalVecType.getElementType(), newScalableDims);
return builder.create<vector::BroadcastOp>(loc, newVecType, vec);
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index 74382b027c2f..593c1e53557a 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -329,12 +329,10 @@ struct CastAwayTransferWriteLeadingOneDim
} // namespace
-LogicalResult
+FailureOr<Value>
mlir::vector::castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
+ MaskingOpInterface maskingOp,
RewriterBase &rewriter) {
- // TODO(#78787): Not supported masked op yet.
- if (cast<MaskableOpInterface>(contractOp.getOperation()).isMasked())
- return failure();
VectorType oldAccType = dyn_cast<VectorType>(contractOp.getAccType());
if (oldAccType == nullptr)
return failure();
@@ -368,6 +366,7 @@ mlir::vector::castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
SmallVector<Value> operands = {contractOp.getLhs(), contractOp.getRhs(),
contractOp.getAcc()};
SmallVector<Value> newOperands;
+ auto loc = contractOp.getLoc();
for (const auto &it : llvm::enumerate(oldIndexingMaps)) {
// Check if the dim to be dropped exists as a leading dim in the operand
@@ -405,7 +404,7 @@ mlir::vector::castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
map = AffineMap::get(map.getNumDims(), 0, transposeResults,
contractOp.getContext());
operands[it.index()] = rewriter.create<vector::TransposeOp>(
- contractOp.getLoc(), operands[it.index()], perm);
+ loc, operands[it.index()], perm);
}
}
// We have taken care to have the dim to be dropped be
@@ -429,18 +428,29 @@ mlir::vector::castAwayContractionLeadingOneDim(vector::ContractionOp contractOp,
// Extract if its a valid extraction, otherwise use the operand
// without extraction.
newOperands.push_back(
- validExtract ? rewriter.create<vector::ExtractOp>(contractOp.getLoc(),
- operands[it.index()],
- splatZero(dropDim))
+ validExtract ? rewriter.create<vector::ExtractOp>(
+ loc, operands[it.index()], splatZero(dropDim))
: operands[it.index()]);
}
- auto newContractOp = rewriter.create<vector::ContractionOp>(
- contractOp.getLoc(), newOperands[0], newOperands[1], newOperands[2],
+
+ // Depending on whether this vector.contract is masked, the replacing Op
+ // should either be a new vector.contract Op or vector.mask Op.
+ Operation *newOp = rewriter.create<vector::ContractionOp>(
+ loc, newOperands[0], newOperands[1], newOperands[2],
rewriter.getAffineMapArrayAttr(newIndexingMaps),
rewriter.getArrayAttr(newIteratorTypes), contractOp.getKind());
- rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
- contractOp, contractOp->getResultTypes()[0], newContractOp);
- return success();
+
+ if (maskingOp) {
+ auto newMask = rewriter.create<vector::ExtractOp>(loc, maskingOp.getMask(),
+ splatZero(dropDim));
+
+ newOp = mlir::vector::maskOperation(rewriter, newOp, newMask);
+ }
+
+ return rewriter
+ .create<vector::BroadcastOp>(loc, contractOp->getResultTypes()[0],
+ newOp->getResults()[0])
+ .getResult();
}
namespace {
@@ -450,12 +460,14 @@ namespace {
/// 1 dimensions. Also performs tranpose of lhs and rhs operands if required
/// prior to extract.
struct CastAwayContractionLeadingOneDim
- : public OpRewritePattern<vector::ContractionOp> {
- using OpRewritePattern::OpRewritePattern;
-
- LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
- PatternRewriter &rewriter) const override {
- return castAwayContractionLeadingOneDim(contractOp, rewriter);
+ : public MaskableOpRewritePattern<vector::ContractionOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
+
+ FailureOr<Value>
+ matchAndRewriteMaskableOp(vector::ContractionOp contractOp,
+ MaskingOpInterface maskingOp,
+ PatternRewriter &rewriter) const override {
+ return castAwayContractionLeadingOneDim(contractOp, maskingOp, rewriter);
}
};
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
index 7ca035370498..4fa5b8a4865b 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
@@ -22,9 +22,9 @@ using namespace mlir;
static bool isLessThanTargetBitWidth(Operation *op, unsigned targetBitWidth) {
auto resultTypes = op->getResultTypes();
for (auto resType : resultTypes) {
- VectorType vecType = cast<VectorType>(resType);
+ VectorType vecType = dyn_cast<VectorType>(resType);
// Reject index since getElementTypeBitWidth will abort for Index types.
- if (vecType.getElementType().isIndex())
+ if (!vecType || vecType.getElementType().isIndex())
return false;
unsigned trailingVecDimBitWidth =
vecType.getShape().back() * vecType.getElementTypeBitWidth();
@@ -49,6 +49,12 @@ struct LinearizeConstant final : OpConversionPattern<arith::ConstantOp> {
Location loc = constOp.getLoc();
auto resType =
getTypeConverter()->convertType<VectorType>(constOp.getType());
     if (!resType)
       return rewriter.notifyMatchFailure(loc, "can't convert return type");
+
+    if (resType.isScalable() && !isa<SplatElementsAttr>(constOp.getValue()))
+      return rewriter.notifyMatchFailure(
+          loc,
+          "Cannot linearize a constant scalable vector that's not a splat");
+
if (!isLessThanTargetBitWidth(constOp, targetVectorBitWidth))
@@ -104,11 +110,11 @@ void mlir::vector::populateVectorLinearizeTypeConversionsAndLegality(
ConversionTarget &target, unsigned targetBitWidth) {
typeConverter.addConversion([](VectorType type) -> std::optional<Type> {
- // Ignore scalable vectors for now.
- if (type.getRank() <= 1 || type.isScalable())
+ if (!isLinearizableVector(type))
return type;
- return VectorType::get(type.getNumElements(), type.getElementType());
+ return VectorType::get(type.getNumElements(), type.getElementType(),
+ type.isScalable());
});
auto materializeCast = [](OpBuilder &builder, Type type, ValueRange inputs,
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 6f6b6dcdad20..69c497264fd1 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1643,10 +1643,12 @@ struct DropUnitDimFromElementwiseOps final
if (!resultVectorType)
return failure();
- // Check the pre-conditions. For `Elementwise` Ops all operands are
- // guaranteed to have identical shapes and it suffices to only check the
- // first one.
- auto sourceVectorType = cast<VectorType>(op->getOperands()[0].getType());
+ // Check the operand pre-conditions. For `Elementwise` ops all operands are
+ // guaranteed to have identical shapes (with some exceptions such as
+ // `arith.select`) and it suffices to only check one of them.
+ auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
+ if (!sourceVectorType)
+ return failure();
if (sourceVectorType.getRank() < 2)
return failure();
diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
index 63ed0947cf6c..ebc6f5cbcaa9 100644
--- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
+++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp
@@ -317,3 +317,8 @@ SmallVector<OpFoldResult> vector::getMixedSizesXfer(bool hasTensorSemantics,
: memref::getMixedSizes(rewriter, loc, base);
return mixedSourceDims;
}
+
+bool vector::isLinearizableVector(VectorType type) {
+ auto numScalableDims = llvm::count(type.getScalableDims(), true);
+ return (type.getRank() > 1) && (numScalableDims <= 1);
+}
diff --git a/mlir/lib/ExecutionEngine/CMakeLists.txt b/mlir/lib/ExecutionEngine/CMakeLists.txt
index b7e448d5417e..a091944b9ee7 100644
--- a/mlir/lib/ExecutionEngine/CMakeLists.txt
+++ b/mlir/lib/ExecutionEngine/CMakeLists.txt
@@ -97,6 +97,29 @@ add_mlir_library(MLIRExecutionEngine
MLIRTargetLLVMIRExport
)
+if(LLVM_BUILD_LLVM_DYLIB)
+ # Build a shared library for the execution engine. Some downstream projects
+ # use this library to build their own CPU runners while preserving dynamic
+ # linkage.
+ add_mlir_library(MLIRExecutionEngineShared
+ ExecutionEngine.cpp
+ SHARED
+
+ EXCLUDE_FROM_LIBMLIR
+
+ ADDITIONAL_HEADER_DIRS
+ ${MLIR_MAIN_INCLUDE_DIR}/mlir/ExecutionEngine
+
+ # Ensures that all necessary dependencies are resolved.
+ DEPENDS
+ MLIRExecutionEngine
+
+ LINK_LIBS PUBLIC
+ LLVM
+ MLIR
+ )
+endif()
+
get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
add_mlir_library(MLIRJitRunner
JitRunner.cpp
diff --git a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
index 48e4b8cd88b5..41c619566b55 100644
--- a/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
+++ b/mlir/lib/ExecutionEngine/CRunnerUtils.cpp
@@ -51,8 +51,20 @@ void stdSort(uint64_t n, V *p) {
// details of our vectors. Also useful for direct LLVM IR output.
extern "C" void printI64(int64_t i) { fprintf(stdout, "%" PRId64, i); }
extern "C" void printU64(uint64_t u) { fprintf(stdout, "%" PRIu64, u); }
-extern "C" void printF32(float f) { fprintf(stdout, "%g", f); }
-extern "C" void printF64(double d) { fprintf(stdout, "%lg", d); }
+extern "C" void printF32(float f) {
+ if (std::isnan(f) && std::signbit(f)) {
+ fprintf(stdout, "-nan");
+ } else {
+ fprintf(stdout, "%g", f);
+ }
+}
+extern "C" void printF64(double d) {
+ if (std::isnan(d) && std::signbit(d)) {
+ fprintf(stdout, "-nan");
+ } else {
+ fprintf(stdout, "%lg", d);
+ }
+}
extern "C" void printString(char const *s) { fputs(s, stdout); }
extern "C" void printOpen() { fputs("( ", stdout); }
extern "C" void printClose() { fputs(" )", stdout); }
diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
index aaa42a7e3a31..acb2d1bb5bed 100644
--- a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
@@ -68,6 +68,14 @@ MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETPOSITIONS)
MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETCOORDINATES)
#undef IMPL_GETCOORDINATES
+#define IMPL_GETCOORDINATESBUFFER(CNAME, C) \
+ void SparseTensorStorageBase::getCoordinatesBuffer(std::vector<C> **, \
+ uint64_t) { \
+ FATAL_PIV("getCoordinatesBuffer" #CNAME); \
+ }
+MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETCOORDINATESBUFFER)
+#undef IMPL_GETCOORDINATESBUFFER
+
#define IMPL_GETVALUES(VNAME, V) \
void SparseTensorStorageBase::getValues(std::vector<V> **) { \
FATAL_PIV("getValues" #VNAME); \
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index 8835056099d2..f160b0f40fb0 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -311,6 +311,7 @@ MLIR_SPARSETENSOR_FOREVERY_V(IMPL_SPARSEVALUES)
assert(v); \
aliasIntoMemref(v->size(), v->data(), *ref); \
}
+
#define IMPL_SPARSEPOSITIONS(PNAME, P) \
IMPL_GETOVERHEAD(sparsePositions##PNAME, P, getPositions)
MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEPOSITIONS)
@@ -320,6 +321,12 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSEPOSITIONS)
IMPL_GETOVERHEAD(sparseCoordinates##CNAME, C, getCoordinates)
MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES)
#undef IMPL_SPARSECOORDINATES
+
+#define IMPL_SPARSECOORDINATESBUFFER(CNAME, C) \
+ IMPL_GETOVERHEAD(sparseCoordinatesBuffer##CNAME, C, getCoordinatesBuffer)
+MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATESBUFFER)
+#undef IMPL_SPARSECOORDINATESBUFFER
+
#undef IMPL_GETOVERHEAD
#define IMPL_LEXINSERT(VNAME, V) \
diff --git a/mlir/lib/IR/MLIRContext.cpp b/mlir/lib/IR/MLIRContext.cpp
index e1e6d14231d9..214b354c5347 100644
--- a/mlir/lib/IR/MLIRContext.cpp
+++ b/mlir/lib/IR/MLIRContext.cpp
@@ -183,7 +183,8 @@ public:
llvm::StringMap<std::unique_ptr<OperationName::Impl>> operations;
/// A vector of operation info specifically for registered operations.
- llvm::StringMap<RegisteredOperationName> registeredOperations;
+ llvm::DenseMap<TypeID, RegisteredOperationName> registeredOperations;
+ llvm::StringMap<RegisteredOperationName> registeredOperationsByName;
/// This is a sorted container of registered operations for a deterministic
/// and efficient `getRegisteredOperations` implementation.
@@ -780,8 +781,8 @@ OperationName::OperationName(StringRef name, MLIRContext *context) {
// Check the registered info map first. In the overwhelmingly common case,
// the entry will be in here and it also removes the need to acquire any
// locks.
- auto registeredIt = ctxImpl.registeredOperations.find(name);
- if (LLVM_LIKELY(registeredIt != ctxImpl.registeredOperations.end())) {
+ auto registeredIt = ctxImpl.registeredOperationsByName.find(name);
+ if (LLVM_LIKELY(registeredIt != ctxImpl.registeredOperationsByName.end())) {
impl = registeredIt->second.impl;
return;
}
@@ -909,10 +910,19 @@ OperationName::UnregisteredOpModel::hashProperties(OpaqueProperties prop) {
//===----------------------------------------------------------------------===//
std::optional<RegisteredOperationName>
-RegisteredOperationName::lookup(StringRef name, MLIRContext *ctx) {
+RegisteredOperationName::lookup(TypeID typeID, MLIRContext *ctx) {
auto &impl = ctx->getImpl();
- auto it = impl.registeredOperations.find(name);
+ auto it = impl.registeredOperations.find(typeID);
if (it != impl.registeredOperations.end())
+ return it->second;
+ return std::nullopt;
+}
+
+std::optional<RegisteredOperationName>
+RegisteredOperationName::lookup(StringRef name, MLIRContext *ctx) {
+ auto &impl = ctx->getImpl();
+ auto it = impl.registeredOperationsByName.find(name);
+ if (it != impl.registeredOperationsByName.end())
return it->getValue();
return std::nullopt;
}
@@ -945,11 +955,16 @@ void RegisteredOperationName::insert(
// Update the registered info for this operation.
auto emplaced = ctxImpl.registeredOperations.try_emplace(
- name, RegisteredOperationName(impl));
+ impl->getTypeID(), RegisteredOperationName(impl));
assert(emplaced.second && "operation name registration must be successful");
+ auto emplacedByName = ctxImpl.registeredOperationsByName.try_emplace(
+ name, RegisteredOperationName(impl));
+ (void)emplacedByName;
+ assert(emplacedByName.second &&
+ "operation name registration must be successful");
// Add emplaced operation name to the sorted operations container.
- RegisteredOperationName &value = emplaced.first->getValue();
+ RegisteredOperationName &value = emplaced.first->second;
ctxImpl.sortedRegisteredOperations.insert(
llvm::upper_bound(ctxImpl.sortedRegisteredOperations, value,
[](auto &lhs, auto &rhs) {
diff --git a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
index 65c41f44192a..b5b7d78cfeff 100644
--- a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
+++ b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
@@ -218,7 +218,23 @@ uint64_t mlir::detail::getDefaultPreferredAlignment(
reportMissingDataLayout(type);
}
-// Returns the memory space used for allocal operations if specified in the
+std::optional<uint64_t> mlir::detail::getDefaultIndexBitwidth(
+ Type type, const DataLayout &dataLayout,
+ ArrayRef<DataLayoutEntryInterface> params) {
+ if (isa<IndexType>(type))
+ return getIndexBitwidth(params);
+
+ if (auto typeInterface = dyn_cast<DataLayoutTypeInterface>(type))
+ if (std::optional<uint64_t> indexBitwidth =
+ typeInterface.getIndexBitwidth(dataLayout, params))
+ return *indexBitwidth;
+
+ // Return std::nullopt for all other types, which are assumed to be non
+ // pointer-like types.
+ return std::nullopt;
+}
+
+// Returns the memory space used for alloca operations if specified in the
// given entry. If the entry is empty the default memory space represented by
// an empty attribute is returned.
Attribute
@@ -520,6 +536,18 @@ uint64_t mlir::DataLayout::getTypePreferredAlignment(Type t) const {
});
}
+std::optional<uint64_t> mlir::DataLayout::getTypeIndexBitwidth(Type t) const {
+ checkValid();
+ return cachedLookup<std::optional<uint64_t>>(t, indexBitwidths, [&](Type ty) {
+ DataLayoutEntryList list;
+ if (originalLayout)
+ list = originalLayout.getSpecForType(ty.getTypeID());
+ if (auto iface = dyn_cast_or_null<DataLayoutOpInterface>(scope))
+ return iface.getIndexBitwidth(ty, *this, list);
+ return detail::getDefaultIndexBitwidth(ty, *this, list);
+ });
+}
+
mlir::Attribute mlir::DataLayout::getAllocaMemorySpace() const {
checkValid();
if (allocaMemorySpace)
diff --git a/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp b/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp
index 85abc2df8947..99598f2e89d9 100644
--- a/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp
+++ b/mlir/lib/Interfaces/ValueBoundsOpInterface.cpp
@@ -70,6 +70,8 @@ static std::optional<int64_t> getConstantIntValue(OpFoldResult ofr) {
ValueBoundsConstraintSet::ValueBoundsConstraintSet(MLIRContext *ctx)
: builder(ctx) {}
+char ValueBoundsConstraintSet::ID = 0;
+
#ifndef NDEBUG
static void assertValidValueDim(Value value, std::optional<int64_t> dim) {
if (value.getType().isIndex()) {
@@ -472,54 +474,86 @@ FailureOr<int64_t> ValueBoundsConstraintSet::computeConstantBound(
}
FailureOr<int64_t> ValueBoundsConstraintSet::computeConstantBound(
+ presburger::BoundType type, AffineMap map, ArrayRef<Value> operands,
+ StopConditionFn stopCondition, bool closedUB) {
+ ValueDimList valueDims;
+ for (Value v : operands) {
+ assert(v.getType().isIndex() && "expected index type");
+ valueDims.emplace_back(v, std::nullopt);
+ }
+ return computeConstantBound(type, map, valueDims, stopCondition, closedUB);
+}
+
+FailureOr<int64_t> ValueBoundsConstraintSet::computeConstantBound(
presburger::BoundType type, AffineMap map, ValueDimList operands,
StopConditionFn stopCondition, bool closedUB) {
assert(map.getNumResults() == 1 && "expected affine map with one result");
ValueBoundsConstraintSet cstr(map.getContext());
- int64_t pos = cstr.insert(/*isSymbol=*/false);
+
+ int64_t pos = 0;
+ if (stopCondition) {
+ cstr.populateConstraintsSet(map, operands, stopCondition, &pos);
+ } else {
+ // No stop condition specified: Keep adding constraints until a bound could
+ // be computed.
+ cstr.populateConstraintsSet(
+ map, operands,
+ [&](Value v, std::optional<int64_t> dim) {
+ return cstr.cstr.getConstantBound64(type, pos).has_value();
+ },
+ &pos);
+ }
+ // Compute constant bound for `valueDim`.
+ int64_t ubAdjustment = closedUB ? 0 : 1;
+ if (auto bound = cstr.cstr.getConstantBound64(type, pos))
+ return type == BoundType::UB ? *bound + ubAdjustment : *bound;
+ return failure();
+}
+
+int64_t ValueBoundsConstraintSet::populateConstraintsSet(
+ Value value, std::optional<int64_t> dim, StopConditionFn stopCondition) {
+#ifndef NDEBUG
+ assertValidValueDim(value, dim);
+#endif // NDEBUG
+
+ AffineMap map =
+ AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0,
+ Builder(value.getContext()).getAffineDimExpr(0));
+ return populateConstraintsSet(map, {{value, dim}}, stopCondition);
+}
+
+int64_t ValueBoundsConstraintSet::populateConstraintsSet(
+ AffineMap map, ValueDimList operands, StopConditionFn stopCondition,
+ int64_t *posOut) {
+ assert(map.getNumResults() == 1 && "expected affine map with one result");
+ int64_t pos = insert(/*isSymbol=*/false);
+ if (posOut)
+ *posOut = pos;
// Add map and operands to the constraint set. Dimensions are converted to
// symbols. All operands are added to the worklist.
auto mapper = [&](std::pair<Value, std::optional<int64_t>> v) {
- return cstr.getExpr(v.first, v.second);
+ return getExpr(v.first, v.second);
};
SmallVector<AffineExpr> dimReplacements = llvm::to_vector(
llvm::map_range(ArrayRef(operands).take_front(map.getNumDims()), mapper));
SmallVector<AffineExpr> symReplacements = llvm::to_vector(
llvm::map_range(ArrayRef(operands).drop_front(map.getNumDims()), mapper));
- cstr.addBound(
+ addBound(
presburger::BoundType::EQ, pos,
map.getResult(0).replaceDimsAndSymbols(dimReplacements, symReplacements));
// Process the backward slice of `operands` (i.e., reverse use-def chain)
// until `stopCondition` is met.
if (stopCondition) {
- cstr.processWorklist(stopCondition);
+ processWorklist(stopCondition);
} else {
- // No stop condition specified: Keep adding constraints until a bound could
- // be computed.
- cstr.processWorklist(
- /*stopCondition=*/[&](Value v, std::optional<int64_t> dim) {
- return cstr.cstr.getConstantBound64(type, pos).has_value();
- });
+ // No stop condition specified: Keep adding constraints until the worklist
+ // is empty.
+ processWorklist([](Value v, std::optional<int64_t> dim) { return false; });
}
- // Compute constant bound for `valueDim`.
- int64_t ubAdjustment = closedUB ? 0 : 1;
- if (auto bound = cstr.cstr.getConstantBound64(type, pos))
- return type == BoundType::UB ? *bound + ubAdjustment : *bound;
- return failure();
-}
-
-FailureOr<int64_t> ValueBoundsConstraintSet::computeConstantBound(
- presburger::BoundType type, AffineMap map, ArrayRef<Value> operands,
- StopConditionFn stopCondition, bool closedUB) {
- ValueDimList valueDims;
- for (Value v : operands) {
- assert(v.getType().isIndex() && "expected index type");
- valueDims.emplace_back(v, std::nullopt);
- }
- return computeConstantBound(type, map, valueDims, stopCondition, closedUB);
+ return pos;
}
FailureOr<int64_t>
@@ -681,6 +715,35 @@ ValueBoundsConstraintSet::areEquivalentSlices(MLIRContext *ctx,
return true;
}
+void ValueBoundsConstraintSet::dump() const {
+ llvm::errs() << "==========\nColumns:\n";
+ llvm::errs() << "(column\tdim\tvalue)\n";
+ for (auto [index, valueDim] : llvm::enumerate(positionToValueDim)) {
+ llvm::errs() << " " << index << "\t";
+ if (valueDim) {
+ if (valueDim->second == kIndexValue) {
+ llvm::errs() << "n/a\t";
+ } else {
+ llvm::errs() << valueDim->second << "\t";
+ }
+ llvm::errs() << getOwnerOfValue(valueDim->first)->getName() << " ";
+ if (OpResult result = dyn_cast<OpResult>(valueDim->first)) {
+ llvm::errs() << "(result " << result.getResultNumber() << ")";
+ } else {
+ llvm::errs() << "(bbarg "
+ << cast<BlockArgument>(valueDim->first).getArgNumber()
+ << ")";
+ }
+ llvm::errs() << "\n";
+ } else {
+ llvm::errs() << "n/a\tn/a\n";
+ }
+ }
+ llvm::errs() << "\nConstraint set:\n";
+ cstr.dump();
+ llvm::errs() << "==========\n";
+}
+
ValueBoundsConstraintSet::BoundBuilder &
ValueBoundsConstraintSet::BoundBuilder::operator[](int64_t dim) {
assert(!this->dim.has_value() && "dim was already set");
diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
index 4bdc03a3e282..779ad26fc847 100644
--- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp
+++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
@@ -179,12 +179,13 @@ DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) {
mlir::DistinctAttr id;
if (node->isDistinct())
id = getOrCreateDistinctID(node);
- std::optional<DISubprogramFlags> subprogramFlags =
- symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags());
// Return nullptr if the scope or type is invalid.
DIScopeAttr scope = translate(node->getScope());
if (node->getScope() && !scope)
return nullptr;
+ std::optional<DISubprogramFlags> subprogramFlags =
+ symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags());
+ assert(subprogramFlags && "expected valid subprogram flags");
DISubroutineTypeAttr type = translate(node->getType());
if (node->getType() && !type)
return nullptr;
@@ -192,8 +193,7 @@ DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) {
getStringAttrOrNull(node->getRawName()),
getStringAttrOrNull(node->getRawLinkageName()),
translate(node->getFile()), node->getLine(),
- node->getScopeLine(), subprogramFlags.value(),
- type);
+ node->getScopeLine(), *subprogramFlags, type);
}
DISubrangeAttr DebugImporter::translateImpl(llvm::DISubrange *node) {
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 7df33470ea06..646d0ed73084 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -1787,6 +1787,20 @@ getDeclareTargetRefPtrSuffix(LLVM::GlobalOp globalOp,
return suffix;
}
+static bool isDeclareTargetLink(mlir::Value value) {
+ if (auto addressOfOp =
+ llvm::dyn_cast_if_present<LLVM::AddressOfOp>(value.getDefiningOp())) {
+ auto modOp = addressOfOp->getParentOfType<mlir::ModuleOp>();
+ Operation *gOp = modOp.lookupSymbol(addressOfOp.getGlobalName());
+ if (auto declareTargetGlobal =
+ llvm::dyn_cast<mlir::omp::DeclareTargetInterface>(gOp))
+ if (declareTargetGlobal.getDeclareTargetCaptureClause() ==
+ mlir::omp::DeclareTargetCaptureClause::link)
+ return true;
+ }
+ return false;
+}
+
// Returns the reference pointer generated by the lowering of the declare target
// operation in cases where the link clause is used or the to clause is used in
// USM mode.
@@ -1982,6 +1996,99 @@ void collectMapDataFromMapOperands(MapInfoData &mapData,
}
}
+/// This function calculates the array/pointer offset for map data provided
+/// with bounds operations, e.g. when provided something like the following:
+///
+/// Fortran
+/// map(tofrom: array(2:5, 3:2))
+/// or
+/// C++
+/// map(tofrom: array[1:4][2:3])
+/// We must calculate the initial pointer offset to pass across, this function
+/// performs this using bounds.
+///
+/// NOTE: while specified in row-major order it currently needs to be
+/// flipped for Fortran's column order array allocation and access (as
+/// opposed to C++'s row-major, hence the backwards processing where order is
+/// important). This is likely important to keep in mind for the future when
+/// we incorporate a C++ frontend, both frontends will need to agree on the
+/// ordering of generated bounds operations (one may have to flip them) to
+/// make the below lowering frontend agnostic. The offload size
+/// calculation may also have to be adjusted for C++.
+std::vector<llvm::Value *>
+calculateBoundsOffset(LLVM::ModuleTranslation &moduleTranslation,
+ llvm::IRBuilderBase &builder, bool isArrayTy,
+ mlir::OperandRange bounds) {
+ std::vector<llvm::Value *> idx;
+ // There's no bounds to calculate an offset from, we can safely
+ // ignore and return no indices.
+ if (bounds.empty())
+ return idx;
+
+ // If we have an array type, then we have its type so can treat it as a
+ // normal GEP instruction where the bounds operations are simply indexes
+ // into the array. We currently do reverse order of the bounds, which
+ // I believe leans more towards Fortran's column-major in memory.
+ if (isArrayTy) {
+ idx.push_back(builder.getInt64(0));
+ for (int i = bounds.size() - 1; i >= 0; --i) {
+ if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
+ bounds[i].getDefiningOp())) {
+ idx.push_back(moduleTranslation.lookupValue(boundOp.getLowerBound()));
+ }
+ }
+ } else {
+ // If we do not have an array type, but we have bounds, then we're dealing
+ // with a pointer that's being treated like an array and we have the
+ // underlying type e.g. an i32, or f64 etc, e.g. a fortran descriptor base
+ // address (pointer pointing to the actual data) so we must calculate the
+ // offset using a single index which the following two loops attempts to
+ // compute.
+
+ // Calculates the size offset we need to make per row e.g. first row or
+ // column only needs to be offset by one, but the next would have to be
+ // the previous row/column offset multiplied by the extent of current row.
+ //
+ // For example ([1][10][100]):
+ //
+ // - First row/column we move by 1 for each index increment
+ // - Second row/column we move by 1 (first row/column) * 10 (extent/size of
+ // current) for 10 for each index increment
+ // - Third row/column we would move by 10 (second row/column) *
+ // (extent/size of current) 100 for 1000 for each index increment
+ std::vector<llvm::Value *> dimensionIndexSizeOffset{builder.getInt64(1)};
+ for (size_t i = 1; i < bounds.size(); ++i) {
+ if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
+ bounds[i].getDefiningOp())) {
+ dimensionIndexSizeOffset.push_back(builder.CreateMul(
+ moduleTranslation.lookupValue(boundOp.getExtent()),
+ dimensionIndexSizeOffset[i - 1]));
+ }
+ }
+
+ // Now that we have calculated how much we move by per index, we must
+ // multiply each lower bound offset in indexes by the size offset we
+ // have calculated in the previous and accumulate the results to get
+ // our final resulting offset.
+ for (int i = bounds.size() - 1; i >= 0; --i) {
+ if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
+ bounds[i].getDefiningOp())) {
+ if (idx.empty())
+ idx.emplace_back(builder.CreateMul(
+ moduleTranslation.lookupValue(boundOp.getLowerBound()),
+ dimensionIndexSizeOffset[i]));
+ else
+ idx.back() = builder.CreateAdd(
+ idx.back(), builder.CreateMul(moduleTranslation.lookupValue(
+ boundOp.getLowerBound()),
+ dimensionIndexSizeOffset[i]));
+ }
+ }
+ }
+
+ return idx;
+}
+
// This creates two insertions into the MapInfosTy data structure for the
// "parent" of a set of members, (usually a container e.g.
// class/structure/derived type) when subsequent members have also been
@@ -2057,6 +2164,27 @@ static llvm::omp::OpenMPOffloadMappingFlags mapParentWithMembers(
return memberOfFlag;
}
+// The intent is to verify if the mapped data being passed is a
+// pointer -> pointee that requires special handling in certain cases,
+// e.g. applying the OMP_MAP_PTR_AND_OBJ map type.
+//
+// There may be a better way to verify this, but unfortunately with
+// opaque pointers we lose the ability to easily check if something is
+// a pointer whilst maintaining access to the underlying type.
+static bool checkIfPointerMap(mlir::omp::MapInfoOp mapOp) {
+ // If we have a varPtrPtr field assigned then the underlying type is a pointer
+ if (mapOp.getVarPtrPtr())
+ return true;
+
+ // If the map data is declare target with a link clause, then it's represented
+ // as a pointer when we lower it to LLVM-IR even if at the MLIR level it has
+ // no relation to pointers.
+ if (isDeclareTargetLink(mapOp.getVarPtr()))
+ return true;
+
+ return false;
+}
+
// This function is intended to add explicit mappings of members
static void processMapMembersWithParent(
LLVM::ModuleTranslation &moduleTranslation, llvm::IRBuilderBase &builder,
@@ -2083,8 +2211,11 @@ static void processMapMembersWithParent(
auto mapFlag =
llvm::omp::OpenMPOffloadMappingFlags(memberClause.getMapType().value());
mapFlag &= ~llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF;
ompBuilder.setCorrectMemberOfFlag(mapFlag, memberOfFlag);
- mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
+ if (checkIfPointerMap(memberClause))
+ mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
+
combinedInfo.Types.emplace_back(mapFlag);
combinedInfo.DevicePointers.emplace_back(
llvm::OpenMPIRBuilder::DeviceInfoTy::None);
@@ -2092,55 +2223,7 @@ static void processMapMembersWithParent(
LLVM::createMappingInformation(memberClause.getLoc(), ompBuilder));
combinedInfo.BasePointers.emplace_back(mapData.BasePointers[memberDataIdx]);
-
- std::vector<llvm::Value *> idx{builder.getInt64(0)};
- llvm::Value *offsetAddress = nullptr;
- if (!memberClause.getBounds().empty()) {
- if (mapData.BaseType[memberDataIdx]->isArrayTy()) {
- for (int i = memberClause.getBounds().size() - 1; i >= 0; --i) {
- if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
- memberClause.getBounds()[i].getDefiningOp())) {
- idx.push_back(
- moduleTranslation.lookupValue(boundOp.getLowerBound()));
- }
- }
- } else {
- std::vector<llvm::Value *> dimensionIndexSizeOffset{
- builder.getInt64(1)};
- for (size_t i = 1; i < memberClause.getBounds().size(); ++i) {
- if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
- memberClause.getBounds()[i].getDefiningOp())) {
- dimensionIndexSizeOffset.push_back(builder.CreateMul(
- moduleTranslation.lookupValue(boundOp.getExtent()),
- dimensionIndexSizeOffset[i - 1]));
- }
- }
-
- for (int i = memberClause.getBounds().size() - 1; i >= 0; --i) {
- if (auto boundOp = mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
- memberClause.getBounds()[i].getDefiningOp())) {
- if (!offsetAddress)
- offsetAddress = builder.CreateMul(
- moduleTranslation.lookupValue(boundOp.getLowerBound()),
- dimensionIndexSizeOffset[i]);
- else
- offsetAddress = builder.CreateAdd(
- offsetAddress,
- builder.CreateMul(
- moduleTranslation.lookupValue(boundOp.getLowerBound()),
- dimensionIndexSizeOffset[i]));
- }
- }
- }
- }
-
- llvm::Value *memberIdx =
- builder.CreateLoad(builder.getPtrTy(), mapData.Pointers[memberDataIdx]);
- memberIdx = builder.CreateInBoundsGEP(
- mapData.BaseType[memberDataIdx], memberIdx,
- offsetAddress ? std::vector<llvm::Value *>{offsetAddress} : idx,
- "member_idx");
- combinedInfo.Pointers.emplace_back(memberIdx);
+ combinedInfo.Pointers.emplace_back(mapData.Pointers[memberDataIdx]);
combinedInfo.Sizes.emplace_back(mapData.Sizes[memberDataIdx]);
}
}
@@ -2158,6 +2241,76 @@ static void processMapWithMembersOf(
memberOfParentFlag);
}
+// This is a variation on Clang's GenerateOpenMPCapturedVars, which
+// generates different operation (e.g. load/store) combinations for
+// arguments to the kernel, based on map capture kinds which are then
+// utilised in the combinedInfo in place of the original Map value.
+static void
+createAlteredByCaptureMap(MapInfoData &mapData,
+ LLVM::ModuleTranslation &moduleTranslation,
+ llvm::IRBuilderBase &builder) {
+ for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
+ // if it's declare target, skip it, it's handled separately.
+ if (!mapData.IsDeclareTarget[i]) {
+ auto mapOp =
+ mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(mapData.MapClause[i]);
+ mlir::omp::VariableCaptureKind captureKind =
+ mapOp.getMapCaptureType().value_or(
+ mlir::omp::VariableCaptureKind::ByRef);
+ bool isPtrTy = checkIfPointerMap(mapOp);
+
+ // Currently handles array sectioning lowerbound case, but more
+ // logic may be required in the future. Clang invokes EmitLValue,
+ // which has specialised logic for special Clang types such as user
+ // defines, so it is possible we will have to extend this for
+ // structures or other complex types. As the general idea is that this
+ // function mimics some of the logic from Clang that we require for
+ // kernel argument passing from host -> device.
+ switch (captureKind) {
+ case mlir::omp::VariableCaptureKind::ByRef: {
+ llvm::Value *newV = mapData.Pointers[i];
+ std::vector<llvm::Value *> offsetIdx = calculateBoundsOffset(
+ moduleTranslation, builder, mapData.BaseType[i]->isArrayTy(),
+ mapOp.getBounds());
+ if (isPtrTy)
+ newV = builder.CreateLoad(builder.getPtrTy(), newV);
+
+ if (!offsetIdx.empty())
+ newV = builder.CreateInBoundsGEP(mapData.BaseType[i], newV, offsetIdx,
+ "array_offset");
+ mapData.Pointers[i] = newV;
+ } break;
+ case mlir::omp::VariableCaptureKind::ByCopy: {
+ llvm::Type *type = mapData.BaseType[i];
+ llvm::Value *newV;
+ if (mapData.Pointers[i]->getType()->isPointerTy())
+ newV = builder.CreateLoad(type, mapData.Pointers[i]);
+ else
+ newV = mapData.Pointers[i];
+
+ if (!isPtrTy) {
+ auto curInsert = builder.saveIP();
+ builder.restoreIP(findAllocaInsertPoint(builder, moduleTranslation));
+ auto *memTempAlloc =
+ builder.CreateAlloca(builder.getPtrTy(), nullptr, ".casted");
+ builder.restoreIP(curInsert);
+
+ builder.CreateStore(newV, memTempAlloc);
+ newV = builder.CreateLoad(builder.getPtrTy(), memTempAlloc);
+ }
+
+ mapData.Pointers[i] = newV;
+ mapData.BasePointers[i] = newV;
+ } break;
+ case mlir::omp::VariableCaptureKind::This:
+ case mlir::omp::VariableCaptureKind::VLAType:
+ mapData.MapClause[i]->emitOpError("Unhandled capture kind");
+ break;
+ }
+ }
+ }
+}
+
// Generate all map related information and fill the combinedInfo.
static void genMapInfos(llvm::IRBuilderBase &builder,
LLVM::ModuleTranslation &moduleTranslation,
@@ -2167,6 +2320,20 @@ static void genMapInfos(llvm::IRBuilderBase &builder,
const SmallVector<Value> &devPtrOperands = {},
const SmallVector<Value> &devAddrOperands = {},
bool isTargetParams = false) {
+ // We wish to modify some of the methods in which arguments are
+ // passed based on their capture type by the target region, this can
+ // involve generating new loads and stores, which changes the
+ // MLIR value to LLVM value mapping, however, we only wish to do this
+ // locally for the current function/target and also avoid altering
+ // ModuleTranslation, so we remap the base pointer or pointer stored
+ // in the map infos corresponding MapInfoData, which is later accessed
+ // by genMapInfos and createTarget to help generate the kernel and
+ // kernel arg structure. It primarily becomes relevant in cases like
+ // bycopy, or byref range'd arrays. In the default case, we simply
+ // pass the pointer byref as both basePointer and pointer.
+ if (!moduleTranslation.getOpenMPBuilder()->Config.isTargetDevice())
+ createAlteredByCaptureMap(mapData, moduleTranslation, builder);
+
llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder();
auto fail = [&combinedInfo]() -> void {
@@ -2200,19 +2367,20 @@ static void genMapInfos(llvm::IRBuilderBase &builder,
continue;
}
- // Declare Target Mappings are excluded from being marked as
- // OMP_MAP_TARGET_PARAM as they are not passed as parameters, they're
- // marked with OMP_MAP_PTR_AND_OBJ instead.
auto mapFlag = mapData.Types[i];
- if (mapData.IsDeclareTarget[i])
+ bool isPtrTy = checkIfPointerMap(mapInfoOp);
+ if (isPtrTy)
mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
- else if (isTargetParams)
+
+ // Declare Target Mappings are excluded from being marked as
+ // OMP_MAP_TARGET_PARAM as they are not passed as parameters.
+ if (isTargetParams && !mapData.IsDeclareTarget[i])
mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
if (auto mapInfoOp = dyn_cast<mlir::omp::MapInfoOp>(mapData.MapClause[i]))
if (mapInfoOp.getMapCaptureType().value() ==
mlir::omp::VariableCaptureKind::ByCopy &&
- !mapInfoOp.getVarType().isa<LLVM::LLVMPointerType>())
+ !isPtrTy)
mapFlag |= llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_LITERAL;
combinedInfo.BasePointers.emplace_back(mapData.BasePointers[i]);
@@ -2662,86 +2830,6 @@ createDeviceArgumentAccessor(MapInfoData &mapData, llvm::Argument &arg,
return builder.saveIP();
}
-// This is a variation on Clang's GenerateOpenMPCapturedVars, which
-// generates different operation (e.g. load/store) combinations for
-// arguments to the kernel, based on map capture kinds which are then
-// utilised in the combinedInfo in place of the original Map value.
-static void
-createAlteredByCaptureMap(MapInfoData &mapData,
- LLVM::ModuleTranslation &moduleTranslation,
- llvm::IRBuilderBase &builder) {
- for (size_t i = 0; i < mapData.MapClause.size(); ++i) {
- // if it's declare target, skip it, it's handled seperately.
- if (!mapData.IsDeclareTarget[i]) {
- mlir::omp::VariableCaptureKind captureKind =
- mlir::omp::VariableCaptureKind::ByRef;
-
- if (auto mapOp = mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(
- mapData.MapClause[i])) {
- captureKind = mapOp.getMapCaptureType().value_or(
- mlir::omp::VariableCaptureKind::ByRef);
- }
-
- switch (captureKind) {
- case mlir::omp::VariableCaptureKind::ByRef: {
- // Currently handles array sectioning lowerbound case, but more
- // logic may be required in the future. Clang invokes EmitLValue,
- // which has specialised logic for special Clang types such as user
- // defines, so it is possible we will have to extend this for
- // structures or other complex types. As the general idea is that this
- // function mimics some of the logic from Clang that we require for
- // kernel argument passing from host -> device.
- if (auto mapOp = mlir::dyn_cast_if_present<mlir::omp::MapInfoOp>(
- mapData.MapClause[i])) {
- if (!mapOp.getBounds().empty() && mapData.BaseType[i]->isArrayTy()) {
-
- std::vector<llvm::Value *> idx =
- std::vector<llvm::Value *>{builder.getInt64(0)};
- for (int i = mapOp.getBounds().size() - 1; i >= 0; --i) {
- if (auto boundOp =
- mlir::dyn_cast_if_present<mlir::omp::MapBoundsOp>(
- mapOp.getBounds()[i].getDefiningOp())) {
- idx.push_back(
- moduleTranslation.lookupValue(boundOp.getLowerBound()));
- }
- }
-
- mapData.Pointers[i] = builder.CreateInBoundsGEP(
- mapData.BaseType[i], mapData.Pointers[i], idx);
- }
- }
- } break;
- case mlir::omp::VariableCaptureKind::ByCopy: {
- llvm::Type *type = mapData.BaseType[i];
- llvm::Value *newV;
- if (mapData.Pointers[i]->getType()->isPointerTy())
- newV = builder.CreateLoad(type, mapData.Pointers[i]);
- else
- newV = mapData.Pointers[i];
-
- if (!type->isPointerTy()) {
- auto curInsert = builder.saveIP();
- builder.restoreIP(findAllocaInsertPoint(builder, moduleTranslation));
- auto *memTempAlloc =
- builder.CreateAlloca(builder.getPtrTy(), nullptr, ".casted");
- builder.restoreIP(curInsert);
-
- builder.CreateStore(newV, memTempAlloc);
- newV = builder.CreateLoad(builder.getPtrTy(), memTempAlloc);
- }
-
- mapData.Pointers[i] = newV;
- mapData.BasePointers[i] = newV;
- } break;
- case mlir::omp::VariableCaptureKind::This:
- case mlir::omp::VariableCaptureKind::VLAType:
- mapData.MapClause[i]->emitOpError("Unhandled capture kind");
- break;
- }
- }
- }
-}
-
static LogicalResult
convertOmpTarget(Operation &opInst, llvm::IRBuilderBase &builder,
LLVM::ModuleTranslation &moduleTranslation) {
@@ -2810,20 +2898,6 @@ convertOmpTarget(Operation &opInst, llvm::IRBuilderBase &builder,
collectMapDataFromMapOperands(mapData, mapOperands, moduleTranslation, dl,
builder);
- // We wish to modify some of the methods in which kernel arguments are
- // passed based on their capture type by the target region, this can
- // involve generating new loads and stores, which changes the
- // MLIR value to LLVM value mapping, however, we only wish to do this
- // locally for the current function/target and also avoid altering
- // ModuleTranslation, so we remap the base pointer or pointer stored
- // in the map infos corresponding MapInfoData, which is later accessed
- // by genMapInfos and createTarget to help generate the kernel and
- // kernel arg structure. It primarily becomes relevant in cases like
- // bycopy, or byref range'd arrays. In the default case, we simply
- // pass thee pointer byref as both basePointer and pointer.
- if (!moduleTranslation.getOpenMPBuilder()->Config.isTargetDevice())
- createAlteredByCaptureMap(mapData, moduleTranslation, builder);
-
llvm::OpenMPIRBuilder::MapInfosTy combinedInfos;
auto genMapInfoCB = [&](llvm::OpenMPIRBuilder::InsertPointTy codeGenIP)
-> llvm::OpenMPIRBuilder::MapInfosTy & {
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index d63ea12ecd49..6e70d52fa760 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -1290,6 +1290,27 @@ DILabelAttr ModuleImport::matchLabelAttr(llvm::Value *value) {
return debugImporter->translate(node);
}
+FPExceptionBehaviorAttr
+ModuleImport::matchFPExceptionBehaviorAttr(llvm::Value *value) {
+ auto *metadata = cast<llvm::MetadataAsValue>(value);
+ auto *mdstr = cast<llvm::MDString>(metadata->getMetadata());
+ std::optional<llvm::fp::ExceptionBehavior> optLLVM =
+ llvm::convertStrToExceptionBehavior(mdstr->getString());
+ assert(optLLVM && "Expecting FP exception behavior");
+ return builder.getAttr<FPExceptionBehaviorAttr>(
+ convertFPExceptionBehaviorFromLLVM(*optLLVM));
+}
+
+RoundingModeAttr ModuleImport::matchRoundingModeAttr(llvm::Value *value) {
+ auto *metadata = cast<llvm::MetadataAsValue>(value);
+ auto *mdstr = cast<llvm::MDString>(metadata->getMetadata());
+ std::optional<llvm::RoundingMode> optLLVM =
+ llvm::convertStrToRoundingMode(mdstr->getString());
+ assert(optLLVM && "Expecting rounding mode");
+ return builder.getAttr<RoundingModeAttr>(
+ convertRoundingModeFromLLVM(*optLLVM));
+}
+
FailureOr<SmallVector<AliasScopeAttr>>
ModuleImport::matchAliasScopeAttrs(llvm::Value *value) {
auto *nodeAsVal = cast<llvm::MetadataAsValue>(value);
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 995544238e4a..669b95a9c6a5 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -274,16 +274,15 @@ translateDataLayout(DataLayoutSpecInterface attribute,
layoutStream << ":" << preferred;
return success();
})
- .Case([&](LLVMPointerType ptrType) {
- layoutStream << "p" << ptrType.getAddressSpace() << ":";
+ .Case([&](LLVMPointerType type) {
+ layoutStream << "p" << type.getAddressSpace() << ":";
uint64_t size = dataLayout.getTypeSizeInBits(type);
uint64_t abi = dataLayout.getTypeABIAlignment(type) * 8u;
uint64_t preferred =
dataLayout.getTypePreferredAlignment(type) * 8u;
- layoutStream << size << ":" << abi << ":" << preferred;
- if (std::optional<uint64_t> index = extractPointerSpecValue(
- entry.getValue(), PtrDLEntryPos::Index))
- layoutStream << ":" << *index;
+ uint64_t index = *dataLayout.getTypeIndexBitwidth(type);
+ layoutStream << size << ":" << abi << ":" << preferred << ":"
+ << index;
return success();
})
.Default([loc](Type type) {
@@ -1722,6 +1721,16 @@ llvm::Metadata *ModuleTranslation::translateDebugInfo(LLVM::DINodeAttr attr) {
return debugTranslation->translate(attr);
}
+llvm::RoundingMode
+ModuleTranslation::translateRoundingMode(LLVM::RoundingMode rounding) {
+ return convertRoundingModeToLLVM(rounding);
+}
+
+llvm::fp::ExceptionBehavior ModuleTranslation::translateFPExceptionBehavior(
+ LLVM::FPExceptionBehavior exceptionBehavior) {
+ return convertFPExceptionBehaviorToLLVM(exceptionBehavior);
+}
+
llvm::NamedMDNode *
ModuleTranslation::getOrInsertNamedModuleMetadata(StringRef name) {
return llvmModule->getOrInsertNamedMetadata(name);
diff --git a/mlir/lib/Transforms/InlinerPass.cpp b/mlir/lib/Transforms/InlinerPass.cpp
index 08d8dbf73a6a..9a7d5403a95d 100644
--- a/mlir/lib/Transforms/InlinerPass.cpp
+++ b/mlir/lib/Transforms/InlinerPass.cpp
@@ -93,12 +93,19 @@ InlinerPass::InlinerPass(std::function<void(OpPassManager &)> defaultPipeline,
// Return true if the inlining ratio does not exceed the threshold.
static bool isProfitableToInline(const Inliner::ResolvedCall &resolvedCall,
unsigned inliningThreshold) {
+ // Return early, ratio <= 0U will always be false.
+ if (inliningThreshold == 0U)
+ return false;
+ // Return early, ratio <= -1U will always be true.
+ if (inliningThreshold == -1U)
+ return true;
+
Region *callerRegion = resolvedCall.sourceNode->getCallableRegion();
Region *calleeRegion = resolvedCall.targetNode->getCallableRegion();
// We should not get external nodes here, but just return true
// for now to preserve the original behavior of the inliner pass.
- if (!calleeRegion || !calleeRegion)
+ if (!callerRegion || !calleeRegion)
return true;
auto countOps = [](Region *region) {
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index 6cb5635e68c9..bbecbdb85669 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -377,7 +377,7 @@ private:
/// be re-added to the worklist. This function should be called when an
/// operation is modified or removed, as it may trigger further
/// simplifications.
- void addOperandsToWorklist(ValueRange operands);
+ void addOperandsToWorklist(Operation *op);
/// Notify the driver that the given block was inserted.
void notifyBlockInserted(Block *block, Region *previous,
@@ -688,17 +688,36 @@ void GreedyPatternRewriteDriver::notifyOperationModified(Operation *op) {
addToWorklist(op);
}
-void GreedyPatternRewriteDriver::addOperandsToWorklist(ValueRange operands) {
- for (Value operand : operands) {
- // If the use count of this operand is now < 2, we re-add the defining
- // operation to the worklist.
- // TODO: This is based on the fact that zero use operations
- // may be deleted, and that single use values often have more
- // canonicalization opportunities.
- if (!operand || (!operand.use_empty() && !operand.hasOneUse()))
+void GreedyPatternRewriteDriver::addOperandsToWorklist(Operation *op) {
+ for (Value operand : op->getOperands()) {
+ // If this operand currently has at most 2 users, add its defining op to the
+ // worklist. Indeed, after the op is deleted, then the operand will have at
+ // most 1 user left. If it has 0 users left, it can be deleted too,
+ // and if it has 1 user left, there may be further canonicalization
+ // opportunities.
+ if (!operand)
continue;
- if (auto *defOp = operand.getDefiningOp())
- addToWorklist(defOp);
+
+ auto *defOp = operand.getDefiningOp();
+ if (!defOp)
+ continue;
+
+ Operation *otherUser = nullptr;
+ bool hasMoreThanTwoUses = false;
+ for (auto user : operand.getUsers()) {
+ if (user == op || user == otherUser)
+ continue;
+ if (!otherUser) {
+ otherUser = user;
+ continue;
+ }
+ hasMoreThanTwoUses = true;
+ break;
+ }
+ if (hasMoreThanTwoUses)
+ continue;
+
+ addToWorklist(defOp);
}
}
@@ -722,7 +741,7 @@ void GreedyPatternRewriteDriver::notifyOperationErased(Operation *op) {
if (config.listener)
config.listener->notifyOperationErased(op);
- addOperandsToWorklist(op->getOperands());
+ addOperandsToWorklist(op);
worklist.remove(op);
if (config.strictMode != GreedyRewriteStrictness::AnyOp)
diff --git a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-failed.mlir b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-failed.mlir
new file mode 100644
index 000000000000..30abd81f3d44
--- /dev/null
+++ b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc-failed.mlir
@@ -0,0 +1,15 @@
+// RUN: mlir-opt -convert-arith-to-emitc %s -split-input-file -verify-diagnostics
+
+func.func @bool(%arg0: i1, %arg1: i1) {
+ // expected-error@+1 {{failed to legalize operation 'arith.addi'}}
+ %0 = arith.addi %arg0, %arg1 : i1
+ return
+}
+
+// -----
+
+func.func @vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) {
+ // expected-error@+1 {{failed to legalize operation 'arith.addi'}}
+ %0 = arith.addi %arg0, %arg1 : vector<4xi32>
+ return
+}
diff --git a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
index 022530ef4db8..76ba518577ab 100644
--- a/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
+++ b/mlir/test/Conversion/ArithToEmitC/arith-to-emitc.mlir
@@ -37,6 +37,57 @@ func.func @arith_ops(%arg0: f32, %arg1: f32) {
// -----
+// CHECK-LABEL: arith_integer_ops
+func.func @arith_integer_ops(%arg0: i32, %arg1: i32) {
+ // CHECK: %[[C1:[^ ]*]] = emitc.cast %arg0 : i32 to ui32
+ // CHECK: %[[C2:[^ ]*]] = emitc.cast %arg1 : i32 to ui32
+ // CHECK: %[[ADD:[^ ]*]] = emitc.add %[[C1]], %[[C2]] : (ui32, ui32) -> ui32
+ // CHECK: %[[C3:[^ ]*]] = emitc.cast %[[ADD]] : ui32 to i32
+ %0 = arith.addi %arg0, %arg1 : i32
+ // CHECK: %[[C1:[^ ]*]] = emitc.cast %arg0 : i32 to ui32
+ // CHECK: %[[C2:[^ ]*]] = emitc.cast %arg1 : i32 to ui32
+ // CHECK: %[[SUB:[^ ]*]] = emitc.sub %[[C1]], %[[C2]] : (ui32, ui32) -> ui32
+ // CHECK: %[[C3:[^ ]*]] = emitc.cast %[[SUB]] : ui32 to i32
+ %1 = arith.subi %arg0, %arg1 : i32
+ // CHECK: %[[C1:[^ ]*]] = emitc.cast %arg0 : i32 to ui32
+ // CHECK: %[[C2:[^ ]*]] = emitc.cast %arg1 : i32 to ui32
+ // CHECK: %[[MUL:[^ ]*]] = emitc.mul %[[C1]], %[[C2]] : (ui32, ui32) -> ui32
+ // CHECK: %[[C3:[^ ]*]] = emitc.cast %[[MUL]] : ui32 to i32
+ %2 = arith.muli %arg0, %arg1 : i32
+
+ return
+}
+
+// -----
+
+// CHECK-LABEL: arith_integer_ops_signed_nsw
+func.func @arith_integer_ops_signed_nsw(%arg0: i32, %arg1: i32) {
+ // CHECK: emitc.add %arg0, %arg1 : (i32, i32) -> i32
+ %0 = arith.addi %arg0, %arg1 overflow<nsw> : i32
+ // CHECK: emitc.sub %arg0, %arg1 : (i32, i32) -> i32
+ %1 = arith.subi %arg0, %arg1 overflow<nsw> : i32
+ // CHECK: emitc.mul %arg0, %arg1 : (i32, i32) -> i32
+ %2 = arith.muli %arg0, %arg1 overflow<nsw> : i32
+
+ return
+}
+
+// -----
+
+// CHECK-LABEL: arith_index
+func.func @arith_index(%arg0: index, %arg1: index) {
+ // CHECK: emitc.add %arg0, %arg1 : (index, index) -> index
+ %0 = arith.addi %arg0, %arg1 : index
+ // CHECK: emitc.sub %arg0, %arg1 : (index, index) -> index
+ %1 = arith.subi %arg0, %arg1 : index
+ // CHECK: emitc.mul %arg0, %arg1 : (index, index) -> index
+ %2 = arith.muli %arg0, %arg1 : index
+
+ return
+}
+
+// -----
+
func.func @arith_select(%arg0: i1, %arg1: tensor<8xi32>, %arg2: tensor<8xi32>) -> () {
// CHECK: [[V0:[^ ]*]] = emitc.conditional %arg0, %arg1, %arg2 : tensor<8xi32>
%0 = arith.select %arg0, %arg1, %arg2 : i1, tensor<8xi32>
diff --git a/mlir/test/Conversion/ArithToSPIRV/fast-math.mlir b/mlir/test/Conversion/ArithToSPIRV/fast-math.mlir
index dbf0361c2ab3..9bbe28fb127a 100644
--- a/mlir/test/Conversion/ArithToSPIRV/fast-math.mlir
+++ b/mlir/test/Conversion/ArithToSPIRV/fast-math.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file -convert-arith-to-spirv=enable-fast-math -verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt -split-input-file -convert-arith-to-spirv -verify-diagnostics %s | FileCheck %s
module attributes {
spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
@@ -8,7 +8,7 @@ module attributes {
// CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
func.func @cmpf_ordered(%arg0 : f32, %arg1 : f32) -> i1 {
// CHECK: %[[T:.+]] = spirv.Constant true
- %0 = arith.cmpf ord, %arg0, %arg1 : f32
+ %0 = arith.cmpf ord, %arg0, %arg1 fastmath<fast> : f32
// CHECK: return %[[T]]
return %0: i1
}
@@ -17,7 +17,7 @@ func.func @cmpf_ordered(%arg0 : f32, %arg1 : f32) -> i1 {
// CHECK-SAME: %[[LHS:.+]]: vector<4xf32>, %[[RHS:.+]]: vector<4xf32>
func.func @cmpf_unordered(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vector<4xi1> {
// CHECK: %[[F:.+]] = spirv.Constant dense<false>
- %0 = arith.cmpf uno, %arg0, %arg1 : vector<4xf32>
+ %0 = arith.cmpf uno, %arg0, %arg1 fastmath<nnan> : vector<4xf32>
// CHECK: return %[[F]]
return %0: vector<4xi1>
}
@@ -34,7 +34,7 @@ module attributes {
// CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
func.func @minimumf(%arg0 : f32, %arg1 : f32) -> f32 {
// CHECK: %[[F:.+]] = spirv.GL.FMin %[[LHS]], %[[RHS]]
- %0 = arith.minimumf %arg0, %arg1 : f32
+ %0 = arith.minimumf %arg0, %arg1 fastmath<fast> : f32
// CHECK: return %[[F]]
return %0: f32
}
@@ -43,7 +43,7 @@ func.func @minimumf(%arg0 : f32, %arg1 : f32) -> f32 {
// CHECK-SAME: %[[LHS:.+]]: vector<4xf32>, %[[RHS:.+]]: vector<4xf32>
func.func @maximumf(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vector<4xf32> {
// CHECK: %[[F:.+]] = spirv.GL.FMax %[[LHS]], %[[RHS]]
- %0 = arith.maximumf %arg0, %arg1 : vector<4xf32>
+ %0 = arith.maximumf %arg0, %arg1 fastmath<fast> : vector<4xf32>
// CHECK: return %[[F]]
return %0: vector<4xf32>
}
@@ -52,7 +52,7 @@ func.func @maximumf(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vector<4xf3
// CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
func.func @minnumf(%arg0 : f32, %arg1 : f32) -> f32 {
// CHECK: %[[F:.+]] = spirv.GL.FMin %[[LHS]], %[[RHS]]
- %0 = arith.minnumf %arg0, %arg1 : f32
+ %0 = arith.minnumf %arg0, %arg1 fastmath<fast> : f32
// CHECK: return %[[F]]
return %0: f32
}
@@ -61,7 +61,7 @@ func.func @minnumf(%arg0 : f32, %arg1 : f32) -> f32 {
// CHECK-SAME: %[[LHS:.+]]: vector<4xf32>, %[[RHS:.+]]: vector<4xf32>
func.func @maxnumf(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vector<4xf32> {
// CHECK: %[[F:.+]] = spirv.GL.FMax %[[LHS]], %[[RHS]]
- %0 = arith.maxnumf %arg0, %arg1 : vector<4xf32>
+ %0 = arith.maxnumf %arg0, %arg1 fastmath<fast> : vector<4xf32>
// CHECK: return %[[F]]
return %0: vector<4xf32>
}
diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
index 5918ff2e0f36..bac94aae6b74 100644
--- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
@@ -1834,3 +1834,49 @@ func.func @complex_sqrt_with_fmf(%arg: complex<f32>) -> complex<f32> {
// CHECK: %[[VAR40:.*]] = arith.select %[[VAR38]], %cst, %[[VAR32]] : f32
// CHECK: %[[VAR41:.*]] = complex.create %[[VAR39]], %[[VAR40]] : complex<f32>
// CHECK: return %[[VAR41]] : complex<f32>
+
+// -----
+
+// CHECK-LABEL: func @complex_cos_with_fmf
+// CHECK-SAME: %[[ARG:.*]]: complex<f32>
+func.func @complex_cos_with_fmf(%arg: complex<f32>) -> complex<f32> {
+ %cos = complex.cos %arg fastmath<nnan,contract> : complex<f32>
+ return %cos : complex<f32>
+}
+// CHECK-DAG: %[[REAL:.*]] = complex.re %[[ARG]]
+// CHECK-DAG: %[[IMAG:.*]] = complex.im %[[ARG]]
+// CHECK-DAG: %[[HALF:.*]] = arith.constant 5.000000e-01 : f32
+// CHECK-DAG: %[[EXP:.*]] = math.exp %[[IMAG]] fastmath<nnan,contract> : f32
+// CHECK-DAG: %[[HALF_EXP:.*]] = arith.mulf %[[HALF]], %[[EXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[HALF_REXP:.*]] = arith.divf %[[HALF]], %[[EXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[SIN:.*]] = math.sin %[[REAL]] fastmath<nnan,contract> : f32
+// CHECK-DAG: %[[COS:.*]] = math.cos %[[REAL]] fastmath<nnan,contract> : f32
+// CHECK-DAG: %[[EXP_SUM:.*]] = arith.addf %[[HALF_REXP]], %[[HALF_EXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[RESULT_REAL:.*]] = arith.mulf %[[EXP_SUM]], %[[COS]] fastmath<nnan,contract>
+// CHECK-DAG: %[[EXP_DIFF:.*]] = arith.subf %[[HALF_REXP]], %[[HALF_EXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[RESULT_IMAG:.*]] = arith.mulf %[[EXP_DIFF]], %[[SIN]] fastmath<nnan,contract>
+// CHECK-DAG: %[[RESULT:.*]] = complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : complex<f32>
+// CHECK: return %[[RESULT]]
+
+// -----
+
+// CHECK-LABEL: func @complex_sin_with_fmf
+// CHECK-SAME: %[[ARG:.*]]: complex<f32>
+func.func @complex_sin_with_fmf(%arg: complex<f32>) -> complex<f32> {
+ %cos = complex.sin %arg fastmath<nnan,contract> : complex<f32>
+ return %cos : complex<f32>
+}
+// CHECK-DAG: %[[REAL:.*]] = complex.re %[[ARG]]
+// CHECK-DAG: %[[IMAG:.*]] = complex.im %[[ARG]]
+// CHECK-DAG: %[[HALF:.*]] = arith.constant 5.000000e-01 : f32
+// CHECK-DAG: %[[EXP:.*]] = math.exp %[[IMAG]] fastmath<nnan,contract> : f32
+// CHECK-DAG: %[[HALF_EXP:.*]] = arith.mulf %[[HALF]], %[[EXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[HALF_REXP:.*]] = arith.divf %[[HALF]], %[[EXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[SIN:.*]] = math.sin %[[REAL]] fastmath<nnan,contract> : f32
+// CHECK-DAG: %[[COS:.*]] = math.cos %[[REAL]] fastmath<nnan,contract> : f32
+// CHECK-DAG: %[[EXP_SUM:.*]] = arith.addf %[[HALF_EXP]], %[[HALF_REXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[RESULT_REAL:.*]] = arith.mulf %[[EXP_SUM]], %[[SIN]] fastmath<nnan,contract>
+// CHECK-DAG: %[[EXP_DIFF:.*]] = arith.subf %[[HALF_EXP]], %[[HALF_REXP]] fastmath<nnan,contract>
+// CHECK-DAG: %[[RESULT_IMAG:.*]] = arith.mulf %[[EXP_DIFF]], %[[COS]] fastmath<nnan,contract>
+// CHECK-DAG: %[[RESULT:.*]] = complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : complex<f32>
+// CHECK: return %[[RESULT]]
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index dd3b6c2080aa..8877ee083286 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -112,7 +112,7 @@ gpu.module @test_module_3 {
gpu.module @test_module_4 {
// CHECK-LABEL: func @gpu_shuffle()
- func.func @gpu_shuffle() -> (f32, f32, f32, f32) {
+ func.func @gpu_shuffle() -> (f32, f32, f32, f32, i1, i1, i1, i1) {
// CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
%arg0 = arith.constant 1.0 : f32
// CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : i32
@@ -143,11 +143,41 @@ gpu.module @test_module_4 {
// CHECK: nvvm.shfl.sync idx {{.*}} {return_value_and_is_valid} : f32 -> !llvm.struct<(f32, i1)>
%shfli, %predi = gpu.shuffle idx %arg0, %arg1, %arg2 : f32
- func.return %shfl, %shflu, %shfld, %shfli : f32, f32,f32, f32
+ func.return %shfl, %shflu, %shfld, %shfli, %pred, %predu, %predd, %predi
+ : f32, f32,f32, f32, i1, i1, i1, i1
}
-}
+ // CHECK-LABEL: func @gpu_shuffle_unused_pred()
+ func.func @gpu_shuffle_unused_pred() -> (f32, f32, f32, f32) {
+ // CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
+ %arg0 = arith.constant 1.0 : f32
+ // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : i32
+ %arg1 = arith.constant 4 : i32
+ // CHECK: %[[#WIDTH:]] = llvm.mlir.constant(23 : i32) : i32
+ %arg2 = arith.constant 23 : i32
+ // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[#MINUS_ONE:]] = llvm.mlir.constant(-1 : i32) : i32
+ // CHECK: %[[#THIRTY_TWO:]] = llvm.mlir.constant(32 : i32) : i32
+ // CHECK: %[[#NUM_LANES:]] = llvm.sub %[[#THIRTY_TWO]], %[[#WIDTH]] : i32
+ // CHECK: %[[#MASK:]] = llvm.lshr %[[#MINUS_ONE]], %[[#NUM_LANES]] : i32
+ // CHECK: %[[#CLAMP:]] = llvm.sub %[[#WIDTH]], %[[#ONE]] : i32
+ // CHECK: %[[#SHFL:]] = nvvm.shfl.sync bfly %[[#MASK]], %[[#VALUE]], %[[#OFFSET]], %[[#CLAMP]] : f32 -> f32
+ %shfl, %pred = gpu.shuffle xor %arg0, %arg1, %arg2 : f32
+ // CHECK: %[[#ONE:]] = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[#MINUS_ONE:]] = llvm.mlir.constant(-1 : i32) : i32
+ // CHECK: %[[#THIRTY_TWO:]] = llvm.mlir.constant(32 : i32) : i32
+ // CHECK: %[[#NUM_LANES:]] = llvm.sub %[[#THIRTY_TWO]], %[[#WIDTH]] : i32
+ // CHECK: %[[#MASK:]] = llvm.lshr %[[#MINUS_ONE]], %[[#NUM_LANES]] : i32
+ // CHECK: %[[#SHFL:]] = nvvm.shfl.sync up %[[#MASK]], %[[#VALUE]], %[[#OFFSET]], %[[#NUM_LANES]] : f32 -> f32
+ %shflu, %predu = gpu.shuffle up %arg0, %arg1, %arg2 : f32
+ // CHECK: nvvm.shfl.sync down {{.*}} : f32 -> f32
+ %shfld, %predd = gpu.shuffle down %arg0, %arg1, %arg2 : f32
+ // CHECK: nvvm.shfl.sync idx {{.*}} : f32 -> f32
+ %shfli, %predi = gpu.shuffle idx %arg0, %arg1, %arg2 : f32
+ func.return %shfl, %shflu, %shfld, %shfli : f32, f32,f32, f32
+ }
+}
gpu.module @test_module_5 {
// CHECK-LABEL: func @gpu_sync()
diff --git a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
index fa12da8ef9d4..4339799ccd5e 100644
--- a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
@@ -60,13 +60,9 @@ module attributes {
// CHECK: %[[INDEX2:.*]] = spirv.IAdd %[[ARG4]], %[[LOCALINVOCATIONIDX]]
%13 = arith.addi %arg4, %3 : index
// CHECK: %[[ZERO:.*]] = spirv.Constant 0 : i32
- // CHECK: %[[OFFSET1_0:.*]] = spirv.Constant 0 : i32
// CHECK: %[[STRIDE1_1:.*]] = spirv.Constant 4 : i32
- // CHECK: %[[UPDATE1_1:.*]] = spirv.IMul %[[STRIDE1_1]], %[[INDEX1]] : i32
- // CHECK: %[[OFFSET1_1:.*]] = spirv.IAdd %[[OFFSET1_0]], %[[UPDATE1_1]] : i32
- // CHECK: %[[STRIDE1_2:.*]] = spirv.Constant 1 : i32
- // CHECK: %[[UPDATE1_2:.*]] = spirv.IMul %[[STRIDE1_2]], %[[INDEX2]] : i32
- // CHECK: %[[OFFSET1_2:.*]] = spirv.IAdd %[[OFFSET1_1]], %[[UPDATE1_2]] : i32
+ // CHECK: %[[UPDATE1_1:.*]] = spirv.IMul %[[INDEX1]], %[[STRIDE1_1]] : i32
+ // CHECK: %[[OFFSET1_2:.*]] = spirv.IAdd %[[INDEX2]], %[[UPDATE1_1]] : i32
// CHECK: %[[PTR1:.*]] = spirv.AccessChain %[[ARG0]]{{\[}}%[[ZERO]], %[[OFFSET1_2]]{{\]}}
// CHECK-NEXT: %[[VAL1:.*]] = spirv.Load "StorageBuffer" %[[PTR1]]
%14 = memref.load %arg0[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
diff --git a/mlir/test/Conversion/MemRefToEmitC/memref-to-emitc-failed.mlir b/mlir/test/Conversion/MemRefToEmitC/memref-to-emitc-failed.mlir
new file mode 100644
index 000000000000..390190d341e5
--- /dev/null
+++ b/mlir/test/Conversion/MemRefToEmitC/memref-to-emitc-failed.mlir
@@ -0,0 +1,40 @@
+// RUN: mlir-opt -convert-memref-to-emitc %s -split-input-file -verify-diagnostics
+
+func.func @memref_op(%arg0 : memref<2x4xf32>) {
+ // expected-error@+1 {{failed to legalize operation 'memref.copy'}}
+ memref.copy %arg0, %arg0 : memref<2x4xf32> to memref<2x4xf32>
+ return
+}
+
+// -----
+
+func.func @alloca_with_dynamic_shape() {
+ %0 = index.constant 1
+ // expected-error@+1 {{failed to legalize operation 'memref.alloca'}}
+ %1 = memref.alloca(%0) : memref<4x?xf32>
+ return
+}
+
+// -----
+
+func.func @alloca_with_alignment() {
+ // expected-error@+1 {{failed to legalize operation 'memref.alloca'}}
+ %0 = memref.alloca() {alignment = 64 : i64}: memref<4xf32>
+ return
+}
+
+// -----
+
+func.func @non_identity_layout() {
+ // expected-error@+1 {{failed to legalize operation 'memref.alloca'}}
+ %0 = memref.alloca() : memref<4x3xf32, affine_map<(d0, d1) -> (d1, d0)>>
+ return
+}
+
+// -----
+
+func.func @zero_rank() {
+ // expected-error@+1 {{failed to legalize operation 'memref.alloca'}}
+ %0 = memref.alloca() : memref<f32>
+ return
+}
diff --git a/mlir/test/Conversion/MemRefToEmitC/memref-to-emitc.mlir b/mlir/test/Conversion/MemRefToEmitC/memref-to-emitc.mlir
new file mode 100644
index 000000000000..9793b2d6d783
--- /dev/null
+++ b/mlir/test/Conversion/MemRefToEmitC/memref-to-emitc.mlir
@@ -0,0 +1,28 @@
+// RUN: mlir-opt -convert-memref-to-emitc %s -split-input-file | FileCheck %s
+
+// CHECK-LABEL: memref_store
+// CHECK-SAME: %[[v:.*]]: f32, %[[i:.*]]: index, %[[j:.*]]: index
+func.func @memref_store(%v : f32, %i: index, %j: index) {
+ // CHECK: %[[ALLOCA:.*]] = "emitc.variable"() <{value = #emitc.opaque<"">}> : () -> !emitc.array<4x8xf32>
+ %0 = memref.alloca() : memref<4x8xf32>
+
+ // CHECK: %[[SUBSCRIPT:.*]] = emitc.subscript %[[ALLOCA]][%[[i]], %[[j]]] : <4x8xf32>
+ // CHECK: emitc.assign %[[v]] : f32 to %[[SUBSCRIPT:.*]] : f32
+ memref.store %v, %0[%i, %j] : memref<4x8xf32>
+ return
+}
+// -----
+
+// CHECK-LABEL: memref_load
+// CHECK-SAME: %[[i:.*]]: index, %[[j:.*]]: index
+func.func @memref_load(%i: index, %j: index) -> f32 {
+ // CHECK: %[[ALLOCA:.*]] = "emitc.variable"() <{value = #emitc.opaque<"">}> : () -> !emitc.array<4x8xf32>
+ %0 = memref.alloca() : memref<4x8xf32>
+
+ // CHECK: %[[LOAD:.*]] = emitc.subscript %[[ALLOCA]][%[[i]], %[[j]]] : <4x8xf32>
+ // CHECK: %[[VAR:.*]] = "emitc.variable"() <{value = #emitc.opaque<"">}> : () -> f32
+ // CHECK: emitc.assign %[[LOAD]] : f32 to %[[VAR]] : f32
+ %1 = memref.load %0[%i, %j] : memref<4x8xf32>
+ // CHECK: return %[[VAR]] : f32
+ return %1 : f32
+}
diff --git a/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir b/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir
index 470c8531e2e0..52ed14e8cce2 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir
@@ -12,16 +12,10 @@ module attributes {
// CHECK-LABEL: @load_i1
func.func @load_i1(%arg0: memref<i1, #spirv.storage_class<StorageBuffer>>) -> i1 {
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+ // CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ZERO]]]
// CHECK: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]]
- // CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
- // CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
// CHECK: %[[MASK:.+]] = spirv.Constant 255 : i32
- // CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+ // CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[LOAD]], %[[MASK]] : i32
// CHECK: %[[T2:.+]] = spirv.Constant 24 : i32
// CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
// CHECK: %[[T4:.+]] = spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
@@ -37,32 +31,20 @@ func.func @load_i1(%arg0: memref<i1, #spirv.storage_class<StorageBuffer>>) -> i1
// INDEX64-LABEL: @load_i8
func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) -> i8 {
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+ // CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ZERO]]]
// CHECK: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]]
- // CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
- // CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
// CHECK: %[[MASK:.+]] = spirv.Constant 255 : i32
- // CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+ // CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[LOAD]], %[[MASK]] : i32
// CHECK: %[[T2:.+]] = spirv.Constant 24 : i32
// CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
// CHECK: %[[SR:.+]] = spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
// CHECK: builtin.unrealized_conversion_cast %[[SR]]
// INDEX64: %[[ZERO:.+]] = spirv.Constant 0 : i64
- // INDEX64: %[[FOUR:.+]] = spirv.Constant 4 : i64
- // INDEX64: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]] : {{.+}}, i64, i64
+ // INDEX64: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ZERO]]] : {{.+}}, i64, i64
// INDEX64: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]] : i32
- // INDEX64: %[[EIGHT:.+]] = spirv.Constant 8 : i64
- // INDEX64: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i64
- // INDEX64: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i64
// INDEX64: %[[MASK:.+]] = spirv.Constant 255 : i32
- // INDEX64: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+ // INDEX64: %[[T1:.+]] = spirv.BitwiseAnd %[[LOAD]], %[[MASK]] : i32
// INDEX64: %[[T2:.+]] = spirv.Constant 24 : i32
// INDEX64: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
// INDEX64: %[[SR:.+]] = spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
@@ -76,15 +58,12 @@ func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) -> i8
func.func @load_i16(%arg0: memref<10xi16, #spirv.storage_class<StorageBuffer>>, %index : index) -> i16 {
// CHECK: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : index to i32
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[UPDATE:.+]] = spirv.IMul %[[ONE]], %[[ARG1_CAST]] : i32
- // CHECK: %[[FLAT_IDX:.+]] = spirv.IAdd %[[ZERO]], %[[UPDATE]] : i32
// CHECK: %[[TWO:.+]] = spirv.Constant 2 : i32
- // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[FLAT_IDX]], %[[TWO]] : i32
+ // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ARG1_CAST]], %[[TWO]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
// CHECK: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]]
// CHECK: %[[SIXTEEN:.+]] = spirv.Constant 16 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[FLAT_IDX]], %[[TWO]] : i32
+ // CHECK: %[[IDX:.+]] = spirv.UMod %[[ARG1_CAST]], %[[TWO]] : i32
// CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[SIXTEEN]] : i32
// CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
// CHECK: %[[MASK:.+]] = spirv.Constant 65535 : i32
@@ -110,20 +89,12 @@ func.func @load_f32(%arg0: memref<f32, #spirv.storage_class<StorageBuffer>>) {
func.func @store_i1(%arg0: memref<i1, #spirv.storage_class<StorageBuffer>>, %value: i1) {
// CHECK: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
- // CHECK: %[[MASK1:.+]] = spirv.Constant 255 : i32
- // CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
- // CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+ // CHECK: %[[MASK:.+]] = spirv.Constant -256 : i32
// CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
// CHECK: %[[CASTED_ARG1:.+]] = spirv.Select %[[ARG1]], %[[ONE]], %[[ZERO]] : i1, i32
- // CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CASTED_ARG1]], %[[OFFSET]] : i32, i32
- // CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
+ // CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ZERO]]]
// CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[CASTED_ARG1]]
memref.store %value, %arg0[] : memref<i1, #spirv.storage_class<StorageBuffer>>
return
}
@@ -136,36 +107,22 @@ func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %val
// CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
// CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
// CHECK: %[[MASK1:.+]] = spirv.Constant 255 : i32
- // CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
- // CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+ // CHECK: %[[MASK2:.+]] = spirv.Constant -256 : i32
// CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
- // CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
- // CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
- // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
+ // CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ZERO]]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK2]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[CLAMPED_VAL]]
// INDEX64-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
// INDEX64-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// INDEX64: %[[ZERO:.+]] = spirv.Constant 0 : i64
- // INDEX64: %[[FOUR:.+]] = spirv.Constant 4 : i64
- // INDEX64: %[[EIGHT:.+]] = spirv.Constant 8 : i64
- // INDEX64: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i64
// INDEX64: %[[MASK1:.+]] = spirv.Constant 255 : i32
- // INDEX64: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i64
- // INDEX64: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+ // INDEX64: %[[MASK2:.+]] = spirv.Constant -256 : i32
// INDEX64: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
- // INDEX64: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i64
- // INDEX64: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]] : {{.+}}, i64, i64
- // INDEX64: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
- // INDEX64: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
+ // INDEX64: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ZERO]]] : {{.+}}, i64, i64
+ // INDEX64: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK2]]
+ // INDEX64: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[CLAMPED_VAL]]
memref.store %value, %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
return
}
@@ -177,19 +134,16 @@ func.func @store_i16(%arg0: memref<10xi16, #spirv.storage_class<StorageBuffer>>,
// CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : index to i32
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[UPDATE:.+]] = spirv.IMul %[[ONE]], %[[ARG1_CAST]] : i32
- // CHECK: %[[FLAT_IDX:.+]] = spirv.IAdd %[[ZERO]], %[[UPDATE]] : i32
// CHECK: %[[TWO:.+]] = spirv.Constant 2 : i32
// CHECK: %[[SIXTEEN:.+]] = spirv.Constant 16 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[FLAT_IDX]], %[[TWO]] : i32
+ // CHECK: %[[IDX:.+]] = spirv.UMod %[[ARG1_CAST]], %[[TWO]] : i32
// CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[SIXTEEN]] : i32
// CHECK: %[[MASK1:.+]] = spirv.Constant 65535 : i32
// CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
// CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
// CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG2_CAST]], %[[MASK1]] : i32
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
- // CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[FLAT_IDX]], %[[TWO]] : i32
+ // CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ARG1_CAST]], %[[TWO]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
// CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
// CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
@@ -222,15 +176,12 @@ module attributes {
func.func @load_i4(%arg0: memref<?xi4, #spirv.storage_class<StorageBuffer>>, %i: index) -> i4 {
// CHECK: %[[INDEX:.+]] = builtin.unrealized_conversion_cast %{{.+}} : index to i32
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[INDEX]] : i32
- // CHECK: %[[OFFSET:.+]] = spirv.IAdd %[[ZERO]], %[[MUL]] : i32
// CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[OFFSET]], %[[EIGHT]] : i32
+ // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[INDEX]], %[[EIGHT]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
// CHECK: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]] : i32
// CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[OFFSET]], %[[EIGHT]] : i32
+ // CHECK: %[[IDX:.+]] = spirv.UMod %[[INDEX]], %[[EIGHT]] : i32
// CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[FOUR]] : i32
// CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
// CHECK: %[[MASK:.+]] = spirv.Constant 15 : i32
@@ -248,19 +199,16 @@ func.func @store_i4(%arg0: memref<?xi4, #spirv.storage_class<StorageBuffer>>, %v
// CHECK: %[[VAL:.+]] = builtin.unrealized_conversion_cast %{{.+}} : i4 to i32
// CHECK: %[[INDEX:.+]] = builtin.unrealized_conversion_cast %{{.+}} : index to i32
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[INDEX]] : i32
- // CHECK: %[[OFFSET:.+]] = spirv.IAdd %[[ZERO]], %[[MUL]] : i32
// CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant [[OFFSET]] : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[OFFSET]], %[[EIGHT]] : i32
+ // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
+ // CHECK: %[[IDX:.+]] = spirv.UMod %[[INDEX]], %[[EIGHT]] : i32
// CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[FOUR]] : i32
// CHECK: %[[MASK1:.+]] = spirv.Constant 15 : i32
// CHECK: %[[SL:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[BITS]] : i32, i32
// CHECK: %[[MASK2:.+]] = spirv.Not %[[SL]] : i32
// CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[VAL]], %[[MASK1]] : i32
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[BITS]] : i32, i32
- // CHECK: %[[ACCESS_INDEX:.+]] = spirv.SDiv %[[OFFSET]], %[[EIGHT]] : i32
+ // CHECK: %[[ACCESS_INDEX:.+]] = spirv.SDiv %[[INDEX]], %[[EIGHT]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ACCESS_INDEX]]]
// CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK2]]
// CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
@@ -283,16 +231,10 @@ module attributes {
// INDEX64-LABEL: @load_i8
func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) -> i8 {
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+ // CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ZERO]]]
// CHECK: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]]
- // CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
- // CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
// CHECK: %[[MASK:.+]] = spirv.Constant 255 : i32
- // CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+ // CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[LOAD]], %[[MASK]] : i32
// CHECK: %[[T2:.+]] = spirv.Constant 24 : i32
// CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
// CHECK: %[[SR:.+]] = spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
@@ -300,16 +242,10 @@ func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) -> i8
// CHECK: return %[[CAST]] : i8
// INDEX64: %[[ZERO:.+]] = spirv.Constant 0 : i64
- // INDEX64: %[[FOUR:.+]] = spirv.Constant 4 : i64
- // INDEX64: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]] : {{.+}}, i64, i64
+ // INDEX64: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ZERO]]] : {{.+}}, i64, i64
// INDEX64: %[[LOAD:.+]] = spirv.Load "StorageBuffer" %[[PTR]] : i32
- // INDEX64: %[[EIGHT:.+]] = spirv.Constant 8 : i64
- // INDEX64: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i64
- // INDEX64: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i64
// INDEX64: %[[MASK:.+]] = spirv.Constant 255 : i32
- // INDEX64: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+ // INDEX64: %[[T1:.+]] = spirv.BitwiseAnd %[[LOAD]], %[[MASK]] : i32
// INDEX64: %[[T2:.+]] = spirv.Constant 24 : i32
// INDEX64: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
// INDEX64: %[[SR:.+]] = spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
@@ -326,37 +262,19 @@ func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) -> i8
func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %value: i8) {
// CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
- // CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
- // CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
- // CHECK: %[[MASK1:.+]] = spirv.Constant 255 : i32
- // CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
- // CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+ // CHECK: %[[MASK1:.+]] = spirv.Constant -256 : i32
// CHECK: %[[ARG1_CAST:.+]] = spirv.UConvert %[[ARG1]] : i8 to i32
- // CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
- // CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
- // CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
- // CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
- // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
+ // CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ZERO]]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK1]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[ARG1_CAST]]
// INDEX64-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// INDEX64: %[[ZERO:.+]] = spirv.Constant 0 : i64
- // INDEX64: %[[FOUR:.+]] = spirv.Constant 4 : i64
- // INDEX64: %[[EIGHT:.+]] = spirv.Constant 8 : i64
- // INDEX64: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i64
- // INDEX64: %[[MASK1:.+]] = spirv.Constant 255 : i32
- // INDEX64: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i64
- // INDEX64: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+ // INDEX64: %[[MASK1:.+]] = spirv.Constant -256 : i32
// INDEX64: %[[ARG1_CAST:.+]] = spirv.UConvert %[[ARG1]] : i8 to i32
- // INDEX64: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
- // INDEX64: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i64
- // INDEX64: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i64
- // INDEX64: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]] : {{.+}}, i64, i64
- // INDEX64: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
- // INDEX64: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
+ // INDEX64: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ZERO]]] : {{.+}}, i64, i64
+ // INDEX64: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK1]]
+ // INDEX64: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[ARG1_CAST]]
memref.store %value, %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
return
}
diff --git a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
index feb6d4e92401..10c03a270005 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
@@ -70,11 +70,8 @@ func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32, #spirv.stora
func.func @load_i1(%src: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i : index) -> i1 {
// CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<4 x i8, stride=1> [0])>, StorageBuffer>
// CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
- // CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
- // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO]], %[[MUL]] : i32
- // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[SRC_CAST]][%[[ZERO]], %[[ADD]]]
+ // CHECK: %[[ZERO:.*]] = spirv.Constant 0 : i32
+ // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[SRC_CAST]][%[[ZERO]], %[[IDX_CAST]]]
// CHECK: %[[VAL:.+]] = spirv.Load "StorageBuffer" %[[ADDR]] : i8
// CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
// CHECK: %[[BOOL:.+]] = spirv.IEqual %[[VAL]], %[[ONE_I8]] : i8
@@ -90,15 +87,10 @@ func.func @store_i1(%dst: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i:
%true = arith.constant true
// CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<4 x i8, stride=1> [0])>, StorageBuffer>
// CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
- // CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
- // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO]], %[[MUL]] : i32
- // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[DST_CAST]][%[[ZERO]], %[[ADD]]]
- // CHECK: %[[ZERO_I8:.+]] = spirv.Constant 0 : i8
+ // CHECK: %[[ZERO:.*]] = spirv.Constant 0 : i32
+ // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[DST_CAST]][%[[ZERO]], %[[IDX_CAST]]]
// CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
- // CHECK: %[[RES:.+]] = spirv.Select %{{.+}}, %[[ONE_I8]], %[[ZERO_I8]] : i1, i8
- // CHECK: spirv.Store "StorageBuffer" %[[ADDR]], %[[RES]] : i8
+ // CHECK: spirv.Store "StorageBuffer" %[[ADDR]], %[[ONE_I8]] : i8
memref.store %true, %dst[%i]: memref<4xi1, #spirv.storage_class<StorageBuffer>>
return
}
@@ -234,11 +226,7 @@ func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32, #spirv.stora
func.func @load_i1(%src: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>, %i : index) -> i1 {
// CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<!spirv.array<4 x i8>, CrossWorkgroup>
// CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
- // CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
- // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO]], %[[MUL]] : i32
- // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[SRC_CAST]][%[[ADD]]]
+ // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[SRC_CAST]][%[[IDX_CAST]]]
// CHECK: %[[VAL:.+]] = spirv.Load "CrossWorkgroup" %[[ADDR]] : i8
// CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
// CHECK: %[[BOOL:.+]] = spirv.IEqual %[[VAL]], %[[ONE_I8]] : i8
@@ -254,15 +242,9 @@ func.func @store_i1(%dst: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>, %i
%true = arith.constant true
// CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<!spirv.array<4 x i8>, CrossWorkgroup>
// CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
- // CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
- // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
- // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO]], %[[MUL]] : i32
- // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[DST_CAST]][%[[ADD]]]
- // CHECK: %[[ZERO_I8:.+]] = spirv.Constant 0 : i8
+ // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[DST_CAST]][%[[IDX_CAST]]]
// CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
- // CHECK: %[[RES:.+]] = spirv.Select %{{.+}}, %[[ONE_I8]], %[[ZERO_I8]] : i1, i8
- // CHECK: spirv.Store "CrossWorkgroup" %[[ADDR]], %[[RES]] : i8
+ // CHECK: spirv.Store "CrossWorkgroup" %[[ADDR]], %[[ONE_I8]] : i8
memref.store %true, %dst[%i]: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>
return
}
diff --git a/mlir/test/Conversion/SCFToSPIRV/for.mlir b/mlir/test/Conversion/SCFToSPIRV/for.mlir
index 02558463b866..81661ec7a3a0 100644
--- a/mlir/test/Conversion/SCFToSPIRV/for.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/for.mlir
@@ -19,17 +19,9 @@ func.func @loop_kernel(%arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer
// CHECK: spirv.BranchConditional %[[CMP]], ^[[BODY:.*]], ^[[MERGE:.*]]
// CHECK: ^[[BODY]]:
// CHECK: %[[ZERO1:.*]] = spirv.Constant 0 : i32
- // CHECK: %[[OFFSET1:.*]] = spirv.Constant 0 : i32
- // CHECK: %[[STRIDE1:.*]] = spirv.Constant 1 : i32
- // CHECK: %[[UPDATE1:.*]] = spirv.IMul %[[STRIDE1]], %[[INDVAR]] : i32
- // CHECK: %[[INDEX1:.*]] = spirv.IAdd %[[OFFSET1]], %[[UPDATE1]] : i32
- // CHECK: spirv.AccessChain {{%.*}}{{\[}}%[[ZERO1]], %[[INDEX1]]{{\]}}
+ // CHECK: spirv.AccessChain {{%.*}}{{\[}}%[[ZERO1]], %[[INDVAR]]{{\]}}
// CHECK: %[[ZERO2:.*]] = spirv.Constant 0 : i32
- // CHECK: %[[OFFSET2:.*]] = spirv.Constant 0 : i32
- // CHECK: %[[STRIDE2:.*]] = spirv.Constant 1 : i32
- // CHECK: %[[UPDATE2:.*]] = spirv.IMul %[[STRIDE2]], %[[INDVAR]] : i32
- // CHECK: %[[INDEX2:.*]] = spirv.IAdd %[[OFFSET2]], %[[UPDATE2]] : i32
- // CHECK: spirv.AccessChain {{%.*}}[%[[ZERO2]], %[[INDEX2]]]
+ // CHECK: spirv.AccessChain {{%.*}}[%[[ZERO2]], %[[INDVAR]]]
// CHECK: %[[INCREMENT:.*]] = spirv.IAdd %[[INDVAR]], %[[STEP]] : i32
// CHECK: spirv.Branch ^[[HEADER]](%[[INCREMENT]] : i32)
// CHECK: ^[[MERGE]]
diff --git a/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir b/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
index 19de613bf5b0..32d0fbea65b1 100644
--- a/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
@@ -14,14 +14,12 @@ func.func @tensor_extract_constant(%a : index, %b: index, %c: index) -> i32 {
// CHECK: spirv.Store "Function" %[[VAR]], %[[CST]] : !spirv.array<12 x i32>
// CHECK: %[[C0:.+]] = spirv.Constant 0 : i32
// CHECK: %[[C6:.+]] = spirv.Constant 6 : i32
- // CHECK: %[[MUL0:.+]] = spirv.IMul %[[C6]], %[[A]] : i32
- // CHECK: %[[ADD0:.+]] = spirv.IAdd %[[C0]], %[[MUL0]] : i32
+ // CHECK: %[[MUL0:.+]] = spirv.IMul %[[A]], %[[C6]] : i32
// CHECK: %[[C3:.+]] = spirv.Constant 3 : i32
- // CHECK: %[[MUL1:.+]] = spirv.IMul %[[C3]], %[[B]] : i32
- // CHECK: %[[ADD1:.+]] = spirv.IAdd %[[ADD0]], %[[MUL1]] : i32
+ // CHECK: %[[MUL1:.+]] = spirv.IMul %[[B]], %[[C3]] : i32
+ // CHECK: %[[ADD1:.+]] = spirv.IAdd %[[MUL1]], %[[MUL0]] : i32
// CHECK: %[[C1:.+]] = spirv.Constant 1 : i32
- // CHECK: %[[MUL2:.+]] = spirv.IMul %[[C1]], %[[C]] : i32
- // CHECK: %[[ADD2:.+]] = spirv.IAdd %[[ADD1]], %[[MUL2]] : i32
+ // CHECK: %[[ADD2:.+]] = spirv.IAdd %[[C]], %[[ADD1]] : i32
// CHECK: %[[AC:.+]] = spirv.AccessChain %[[VAR]][%[[ADD2]]]
// CHECK: %[[VAL:.+]] = spirv.Load "Function" %[[AC]] : i32
%extract = tensor.extract %cst[%a, %b, %c] : tensor<2x2x3xi32>
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
index daaa68a7260b..a8a3c42e1684 100644
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -1,95 +1,363 @@
// RUN: mlir-opt --split-input-file --tosa-to-tensor %s -o -| FileCheck %s
-// CHECK-LABEL: @test_reshape_downrank
-// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
-func.func @test_reshape_downrank(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
- // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
- %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6>} : (tensor<2x3xf32>) -> tensor<6xf32>
- // CHECK: return [[RESHAPE]]
- return %0 : tensor<6xf32>
+// -----
+
+// CHECK-LABEL: test_reshape_0d_same_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<f32>
+// CHECK: return %[[ARG_0]] : tensor<f32>
+func.func @test_reshape_0d_same_s2s_explicit(%arg0: tensor<f32>) -> tensor<f32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64>} : (tensor<f32>) -> tensor<f32>
+ return %0 : tensor<f32>
}
// -----
-// CHECK-LABEL: @test_reshape_downrank_dyn
-// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
-func.func @test_reshape_downrank_dyn(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
- // CHECK: [[RESHAPE:%.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
- %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<2x?xf32>) -> tensor<?xf32>
- // CHECK: return [[RESHAPE]]
+// CHECK-LABEL: test_reshape_0d_up_s2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<f32>
+// CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] : tensor<f32> into tensor<1xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.cast %[[VAL_0]] : tensor<1xf32> to tensor<?xf32>
+// CHECK: return %[[VAL_1]] : tensor<?xf32>
+func.func @test_reshape_0d_up_s2d_auto(%arg0: tensor<f32>) -> tensor<?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<f32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}
// -----
-// CHECK-LABEL: @test_reshape_uprank
-// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
-func.func @test_reshape_uprank(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
- // CHECK: [[RESHAPE:%.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1]]
- %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<6xf32>) -> tensor<2x3xf32>
- // CHECK: return [[RESHAPE]]
- return %0 : tensor<2x3xf32>
+// CHECK-LABEL: test_reshape_0d_up_s2d_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<f32>
+// CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] : tensor<f32> into tensor<1xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.cast %[[VAL_0]] : tensor<1xf32> to tensor<?xf32>
+// CHECK: return %[[VAL_1]] : tensor<?xf32>
+func.func @test_reshape_0d_up_s2d_explicit(%arg0: tensor<f32>) -> tensor<?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 1>} : (tensor<f32>) -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_0d_up_s2s_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<f32>
+// CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] : tensor<f32> into tensor<1xf32>
+// CHECK: return %[[VAL_0]] : tensor<1xf32>
+func.func @test_reshape_0d_up_s2s_auto(%arg0: tensor<f32>) -> tensor<1xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<f32>) -> tensor<1xf32>
+ return %0 : tensor<1xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_0d_up_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<f32>
+// CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] [] : tensor<f32> into tensor<1xf32>
+// CHECK: return %[[VAL_0]] : tensor<1xf32>
+func.func @test_reshape_0d_up_s2s_explicit(%arg0: tensor<f32>) -> tensor<1xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 1>} : (tensor<f32>) -> tensor<1xf32>
+ return %0 : tensor<1xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_1d_down_d2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.cast %[[ARG_0]] : tensor<?xf32> to tensor<1xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.collapse_shape %[[VAL_0]] [] : tensor<1xf32> into tensor<f32>
+// CHECK: return %[[VAL_1]] : tensor<f32>
+func.func @test_reshape_1d_down_d2s_explicit(%arg0: tensor<?xf32>) -> tensor<f32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64>} : (tensor<?xf32>) -> tensor<f32>
+ return %0 : tensor<f32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_1d_down_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<1xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] [] : tensor<1xf32> into tensor<f32>
+// CHECK: return %[[VAL_0]] : tensor<f32>
+func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor<f32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64>} : (tensor<1xf32>) -> tensor<f32>
+ return %0 : tensor<f32>
}
// -----
-// CHECK-LABEL: @test_reshape_uprank_dyn
-// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
-func.func @test_reshape_uprank_dyn(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
- // CHECK: [[RESHAPE:%.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1]]
+// CHECK-LABEL: test_reshape_1d_up_d2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<?xf32> into tensor<2x?xf32>
+// CHECK: return %[[VAL_0]] : tensor<2x?xf32>
+func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
%0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<?xf32>) -> tensor<2x?xf32>
- // CHECK: return [[RESHAPE]]
return %0 : tensor<2x?xf32>
}
// -----
-// CHECK-LABEL: @test_reshape_samerank
-// CHECK-SAME: (%[[ARG0:.*]]: tensor<3x2xf32>)
-func.func @test_reshape_samerank(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
- // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
- // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]]
- %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<3x2xf32>) -> tensor<2x3xf32>
- // CHECK-NEXT: return %[[RESHAPE2]]
+// CHECK-LABEL: test_reshape_1d_up_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<6xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<6xf32> into tensor<2x3xf32>
+// CHECK: return %[[VAL_0]] : tensor<2x3xf32>
+func.func @test_reshape_1d_up_s2s_explicit(%arg0: tensor<6xf32>) -> tensor<2x3xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<6xf32>) -> tensor<2x3xf32>
return %0 : tensor<2x3xf32>
}
// -----
-// CHECK-LABEL: @test_reshape_samerank_dyn
-// CHECK-SAME: (%[[ARG0:.*]]: tensor<?x2xf32>)
-func.func @test_reshape_samerank_dyn(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
- // CHECK-NEXT: %[[RESHAPE1:.*]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]]
- // CHECK-NEXT: %[[RESHAPE2:.*]] = tensor.expand_shape %[[RESHAPE1]] {{\[}}[0, 1]]
+// CHECK-LABEL: test_reshape_2d_down_d2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x?xf32> into tensor<?xf32>
+// CHECK: return %[[VAL_0]] : tensor<?xf32>
+func.func @test_reshape_2d_down_d2d_auto(%arg0: tensor<2x?xf32>) -> tensor<?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1>} : (tensor<2x?xf32>) -> tensor<?xf32>
+ return %0 : tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_2d_down_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x3xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x3xf32> into tensor<6xf32>
+// CHECK: return %[[VAL_0]] : tensor<6xf32>
+func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6>} : (tensor<2x3xf32>) -> tensor<6xf32>
+ return %0 : tensor<6xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_2d_same_d2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x2xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<?x2xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] : tensor<?xf32> into tensor<2x?xf32>
+// CHECK: return %[[VAL_1]] : tensor<2x?xf32>
+func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
%0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1>} : (tensor<?x2xf32>) -> tensor<2x?xf32>
- // CHECK-NEXT: return %[[RESHAPE2]]
return %0 : tensor<2x?xf32>
}
// -----
-// CHECK-LABEL: @test_reshape_downrank_6D
-// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
-func.func @test_reshape_downrank_6D(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
- // CHECK: tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1, 2], [3], [4, 5]]
- %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6, 5, 77>} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
- return %0 : tensor<6x5x77xf32>
+// CHECK-LABEL: test_reshape_2d_same_s2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x4xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x4xf32> into tensor<8xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] : tensor<8xf32> into tensor<4x2xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<4x2xf32> to tensor<?x2xf32>
+// CHECK: return %[[VAL_2]] : tensor<?x2xf32>
+func.func @test_reshape_2d_same_s2d_auto(%arg0: tensor<2x4xf32>) -> tensor<?x2xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 2>} : (tensor<2x4xf32>) -> tensor<?x2xf32>
+ return %0 : tensor<?x2xf32>
}
// -----
-// CHECK-LABEL: @test_reshape_downrank_6D_dyn
-// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
-func.func @test_reshape_downrank_6D_dyn(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
- // CHECK: tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1, 2, 3, 4, 5]]
- // CHECK: tensor.expand_shape %{{.*}} {{\[}}[0, 1, 2]]
+// CHECK-LABEL: test_reshape_2d_same_s2d_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x4xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<2x4xf32> into tensor<8xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] : tensor<8xf32> into tensor<4x2xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<4x2xf32> to tensor<?x2xf32>
+// CHECK: return %[[VAL_2]] : tensor<?x2xf32>
+func.func @test_reshape_2d_same_s2d_explicit(%arg0: tensor<2x4xf32>) -> tensor<?x2xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 4, 2>} : (tensor<2x4xf32>) -> tensor<?x2xf32>
+ return %0 : tensor<?x2xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_2d_same_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<3x2xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1]] : tensor<3x2xf32> into tensor<6xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] : tensor<6xf32> into tensor<2x3xf32>
+// CHECK: return %[[VAL_1]] : tensor<2x3xf32>
+func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2x3xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3>} : (tensor<3x2xf32>) -> tensor<2x3xf32>
+ return %0 : tensor<2x3xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2d_auto_empty
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<3x2x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<3x2x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<0x3x?xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<0x3x?xf32> to tensor<?x?x?xf32>
+// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
+func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tensor<?x?x?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 0, 3, -1>} : (tensor<3x2x?xf32>) -> tensor<?x?x?xf32>
+ return %0 : tensor<?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x?x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<2x?x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<2x?x4xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor<?x?x?xf32>
+// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
+func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor<?x?x?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1, 4>} : (tensor<2x?x?xf32>) -> tensor<?x?x?xf32>
+ return %0 : tensor<?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2d_auto_identity
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x3x4xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<?x3x4xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<2x3x?xf32>
+// CHECK: return %[[VAL_1]] : tensor<2x3x?xf32>
+func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> tensor<2x3x?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, -1>} : (tensor<?x3x4xf32>) -> tensor<2x3x?xf32>
+ return %0 : tensor<2x3x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2d_explicit_empty
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<3x2x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<3x2x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<?x3x2xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<?x3x2xf32> to tensor<?x?x?xf32>
+// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
+func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) -> tensor<?x?x?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 0, 3, 2>} : (tensor<3x2x?xf32>) -> tensor<?x?x?xf32>
+ return %0 : tensor<?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2d_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<?x?x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<?x3x4xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<?x3x4xf32> to tensor<?x?x?xf32>
+// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
+func.func @test_reshape_3d_same_d2d_explicit(%arg0: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+ return %0 : tensor<?x?x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2d_explicit_identity
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x3x4xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.cast %[[ARG_0]] : tensor<?x3x4xf32> to tensor<2x3x?xf32>
+// CHECK: return %[[VAL_0]] : tensor<2x3x?xf32>
+func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor<?x3x4xf32>) -> tensor<2x3x?xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<?x3x4xf32>) -> tensor<2x3x?xf32>
+ return %0 : tensor<2x3x?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2s_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<?x?x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<2x?x4xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor<2x3x4xf32>
+// CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
+func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, -1, 4>} : (tensor<?x?x?xf32>) -> tensor<2x3x4xf32>
+ return %0 : tensor<2x3x4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_d2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<?x?x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<?x3x4xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<?x3x4xf32> to tensor<2x3x4xf32>
+// CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
+func.func @test_reshape_3d_same_d2s_explicit(%arg0: tensor<?x?x?xf32>) -> tensor<2x3x4xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<?x?x?xf32>) -> tensor<2x3x4xf32>
+ return %0 : tensor<2x3x4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_same_s2s_explicit_identity
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<2x3x4xf32>
+// CHECK: return %[[ARG_0]] : tensor<2x3x4xf32>
+func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 2, 3, 4>} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf32>
+ return %0 : tensor<2x3x4xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_3d_up_d2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x?x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2]] : tensor<?x?x?xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2, 3]] : tensor<?xf32> into tensor<?x3x2x1xf32>
+// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<?x3x2x1xf32> to tensor<1x3x2x1xf32>
+// CHECK: return %[[VAL_2]] : tensor<1x3x2x1xf32>
+func.func @test_reshape_3d_up_d2s_explicit(%input: tensor<?x?x?xf32>) -> tensor<1x3x2x1xf32> {
+ %0 = tosa.reshape %input {new_shape = array<i64: 1, 3, 2, 1>} : (tensor<?x?x?xf32>) -> tensor<1x3x2x1xf32>
+ return %0 : tensor<1x3x2x1xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_4d_down_d2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x?x?x?xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.cast %[[ARG_0]] : tensor<?x?x?x?xf32> to tensor<1x1x1x1xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.collapse_shape %[[VAL_0]] [] : tensor<1x1x1x1xf32> into tensor<f32>
+// CHECK: return %[[VAL_1]] : tensor<f32>
+func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor<?x?x?x?xf32>) -> tensor<f32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64>} : (tensor<?x?x?x?xf32>) -> tensor<f32>
+ return %0 : tensor<f32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_5d_down_d2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<?x?x?x2x3xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2, 3, 4]] : tensor<?x?x?x2x3xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<?x2x3xf32>
+// CHECK: return %[[VAL_1]] : tensor<?x2x3xf32>
+func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 2, 3>} : (tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32>
+ return %0 : tensor<?x2x3xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_6d_down_d2d_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<1x2x?x5x7x11xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2, 3, 4, 5]] : tensor<1x2x?x5x7x11xf32> into tensor<?xf32>
+// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] : tensor<?xf32> into tensor<?x5x77xf32>
+// CHECK: return %[[VAL_1]] : tensor<?x5x77xf32>
+func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {
%0 = "tosa.reshape"(%arg0) {new_shape = array<i64: -1, 5, 77>} : (tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32>
return %0 : tensor<?x5x77xf32>
}
// -----
-// CHECK-LABLE: func @slice
+// CHECK-LABEL: test_reshape_6d_down_s2s_auto
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<1x2x3x5x7x11xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32>
+// CHECK: return %[[VAL_0]] : tensor<6x5x77xf32>
+func.func @test_reshape_6d_down_s2s_auto(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6, 5, -1>} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
+ return %0 : tensor<6x5x77xf32>
+}
+
+// -----
+
+// CHECK-LABEL: test_reshape_6d_down_s2s_explicit
+// CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<1x2x3x5x7x11xf32>
+// CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32>
+// CHECK: return %[[VAL_0]] : tensor<6x5x77xf32>
+func.func @test_reshape_6d_down_s2s_explicit(%arg0: tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32> {
+ %0 = "tosa.reshape"(%arg0) {new_shape = array<i64: 6, 5, 77>} : (tensor<1x2x3x5x7x11xf32>) -> tensor<6x5x77xf32>
+ return %0 : tensor<6x5x77xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @slice
func.func @slice(%arg0: tensor<6xf32>) ->() {
// CHECK: [[SLICE:%.+]] = tensor.extract_slice %arg0[2] [1] [1]
%0 = "tosa.slice"(%arg0) {start = array<i64: 2>, size = array<i64: 1>} : (tensor<6xf32>) -> (tensor<1xf32>)
diff --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
index c9984091d5ac..cddc4ee38535 100644
--- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
+++ b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
@@ -720,9 +720,7 @@ module attributes {
// CHECK: %[[CST1:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST2:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST3:.+]] = spirv.Constant 1 : i32
-// CHECK: %[[S2:.+]] = spirv.IMul %[[CST3]], %[[S1]] : i32
-// CHECK: %[[S3:.+]] = spirv.IAdd %[[CST2]], %[[S2]] : i32
-// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S3]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S1]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
// CHECK: %[[S5:.+]] = spirv.Bitcast %[[S4]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
// CHECK: %[[R0:.+]] = spirv.Load "StorageBuffer" %[[S5]] : vector<4xf32>
// CHECK: return %[[R0]] : vector<4xf32>
@@ -743,11 +741,9 @@ func.func @vector_load(%arg0 : memref<4xf32, #spirv.storage_class<StorageBuffer>
// CHECK: %[[CST0_1:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST0_2:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST4:.+]] = spirv.Constant 4 : i32
-// CHECK: %[[S3:.+]] = spirv.IMul %[[CST4]], %[[S1]] : i32
-// CHECK: %[[S4:.+]] = spirv.IAdd %[[CST0_2]], %[[S3]] : i32
+// CHECK: %[[S3:.+]] = spirv.IMul %[[S1]], %[[CST4]] : i32
// CHECK: %[[CST1:.+]] = spirv.Constant 1 : i32
-// CHECK: %[[S5:.+]] = spirv.IMul %[[CST1]], %[[S2]] : i32
-// CHECK: %[[S6:.+]] = spirv.IAdd %[[S4]], %[[S5]] : i32
+// CHECK: %[[S6:.+]] = spirv.IAdd %[[S2]], %[[S3]] : i32
// CHECK: %[[S7:.+]] = spirv.AccessChain %[[S0]][%[[CST0_1]], %[[S6]]] : !spirv.ptr<!spirv.struct<(!spirv.array<16 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
// CHECK: %[[S8:.+]] = spirv.Bitcast %[[S7]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
// CHECK: %[[R0:.+]] = spirv.Load "StorageBuffer" %[[S8]] : vector<4xf32>
@@ -768,9 +764,7 @@ func.func @vector_load_2d(%arg0 : memref<4x4xf32, #spirv.storage_class<StorageBu
// CHECK: %[[CST1:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST2:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST3:.+]] = spirv.Constant 1 : i32
-// CHECK: %[[S2:.+]] = spirv.IMul %[[CST3]], %[[S1]] : i32
-// CHECK: %[[S3:.+]] = spirv.IAdd %[[CST2]], %[[S2]] : i32
-// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S3]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+// CHECK: %[[S4:.+]] = spirv.AccessChain %[[S0]][%[[CST1]], %[[S1]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
// CHECK: %[[S5:.+]] = spirv.Bitcast %[[S4]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
// CHECK: spirv.Store "StorageBuffer" %[[S5]], %[[ARG1]] : vector<4xf32>
func.func @vector_store(%arg0 : memref<4xf32, #spirv.storage_class<StorageBuffer>>, %arg1 : vector<4xf32>) {
@@ -790,11 +784,9 @@ func.func @vector_store(%arg0 : memref<4xf32, #spirv.storage_class<StorageBuffer
// CHECK: %[[CST0_1:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST0_2:.+]] = spirv.Constant 0 : i32
// CHECK: %[[CST4:.+]] = spirv.Constant 4 : i32
-// CHECK: %[[S3:.+]] = spirv.IMul %[[CST4]], %[[S1]] : i32
-// CHECK: %[[S4:.+]] = spirv.IAdd %[[CST0_2]], %[[S3]] : i32
+// CHECK: %[[S3:.+]] = spirv.IMul %[[S1]], %[[CST4]] : i32
// CHECK: %[[CST1:.+]] = spirv.Constant 1 : i32
-// CHECK: %[[S5:.+]] = spirv.IMul %[[CST1]], %[[S2]] : i32
-// CHECK: %[[S6:.+]] = spirv.IAdd %[[S4]], %[[S5]] : i32
+// CHECK: %[[S6:.+]] = spirv.IAdd %[[S2]], %[[S3]] : i32
// CHECK: %[[S7:.+]] = spirv.AccessChain %[[S0]][%[[CST0_1]], %[[S6]]] : !spirv.ptr<!spirv.struct<(!spirv.array<16 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
// CHECK: %[[S8:.+]] = spirv.Bitcast %[[S7]] : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<vector<4xf32>, StorageBuffer>
// CHECK: spirv.Store "StorageBuffer" %[[S8]], %[[ARG1]] : vector<4xf32>
diff --git a/mlir/test/Dialect/Arith/expand-ops.mlir b/mlir/test/Dialect/Arith/expand-ops.mlir
index 91f652e5a270..6bed93e4c969 100644
--- a/mlir/test/Dialect/Arith/expand-ops.mlir
+++ b/mlir/test/Dialect/Arith/expand-ops.mlir
@@ -66,23 +66,17 @@ func.func @ceildivi_index(%arg0: index, %arg1: index) -> (index) {
func.func @floordivi(%arg0: i32, %arg1: i32) -> (i32) {
%res = arith.floordivsi %arg0, %arg1 : i32
return %res : i32
-// CHECK: [[ONE:%.+]] = arith.constant 1 : i32
-// CHECK: [[ZERO:%.+]] = arith.constant 0 : i32
-// CHECK: [[MIN1:%.+]] = arith.constant -1 : i32
-// CHECK: [[CMP1:%.+]] = arith.cmpi slt, [[ARG1]], [[ZERO]] : i32
-// CHECK: [[X:%.+]] = arith.select [[CMP1]], [[ONE]], [[MIN1]] : i32
-// CHECK: [[TRUE1:%.+]] = arith.subi [[X]], [[ARG0]] : i32
-// CHECK: [[TRUE2:%.+]] = arith.divsi [[TRUE1]], [[ARG1]] : i32
-// CHECK: [[TRUE3:%.+]] = arith.subi [[MIN1]], [[TRUE2]] : i32
-// CHECK: [[FALSE:%.+]] = arith.divsi [[ARG0]], [[ARG1]] : i32
-// CHECK: [[NNEG:%.+]] = arith.cmpi slt, [[ARG0]], [[ZERO]] : i32
-// CHECK: [[NPOS:%.+]] = arith.cmpi sgt, [[ARG0]], [[ZERO]] : i32
-// CHECK: [[MNEG:%.+]] = arith.cmpi slt, [[ARG1]], [[ZERO]] : i32
-// CHECK: [[MPOS:%.+]] = arith.cmpi sgt, [[ARG1]], [[ZERO]] : i32
-// CHECK: [[TERM1:%.+]] = arith.andi [[NNEG]], [[MPOS]] : i1
-// CHECK: [[TERM2:%.+]] = arith.andi [[NPOS]], [[MNEG]] : i1
-// CHECK: [[CMP2:%.+]] = arith.ori [[TERM1]], [[TERM2]] : i1
-// CHECK: [[RES:%.+]] = arith.select [[CMP2]], [[TRUE3]], [[FALSE]] : i32
+// CHECK: %[[QUOTIENT:.*]] = arith.divsi %arg0, %arg1 : i32
+// CHECK: %[[PRODUCT:.*]] = arith.muli %[[QUOTIENT]], %arg1 : i32
+// CHECK: %[[NOT_EQ_PRODUCT:.*]] = arith.cmpi ne, %arg0, %[[PRODUCT]] : i32
+// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0 : i32
+// CHECK: %[[NEG_DIVISOR:.*]] = arith.cmpi slt, %arg0, %[[ZERO]] : i32
+// CHECK: %[[NEG_DIVIDEND:.*]] = arith.cmpi slt, %arg1, %[[ZERO]] : i32
+// CHECK: %[[OPPOSITE_SIGN:.*]] = arith.cmpi ne, %[[NEG_DIVISOR]], %[[NEG_DIVIDEND]] : i1
+// CHECK: %[[CONDITION:.*]] = arith.andi %[[NOT_EQ_PRODUCT]], %[[OPPOSITE_SIGN]] : i1
+// CHECK-DAG: %[[NEG_ONE:.*]] = arith.constant -1 : i32
+// CHECK: %[[MINUS_ONE:.*]] = arith.addi %[[QUOTIENT]], %[[NEG_ONE]] : i32
+// CHECK: %[[RES:.*]] = arith.select %[[CONDITION]], %[[MINUS_ONE]], %[[QUOTIENT]] : i32
}
// -----
@@ -93,23 +87,17 @@ func.func @floordivi(%arg0: i32, %arg1: i32) -> (i32) {
func.func @floordivi_index(%arg0: index, %arg1: index) -> (index) {
%res = arith.floordivsi %arg0, %arg1 : index
return %res : index
-// CHECK: [[ONE:%.+]] = arith.constant 1 : index
-// CHECK: [[ZERO:%.+]] = arith.constant 0 : index
-// CHECK: [[MIN1:%.+]] = arith.constant -1 : index
-// CHECK: [[CMP1:%.+]] = arith.cmpi slt, [[ARG1]], [[ZERO]] : index
-// CHECK: [[X:%.+]] = arith.select [[CMP1]], [[ONE]], [[MIN1]] : index
-// CHECK: [[TRUE1:%.+]] = arith.subi [[X]], [[ARG0]] : index
-// CHECK: [[TRUE2:%.+]] = arith.divsi [[TRUE1]], [[ARG1]] : index
-// CHECK: [[TRUE3:%.+]] = arith.subi [[MIN1]], [[TRUE2]] : index
-// CHECK: [[FALSE:%.+]] = arith.divsi [[ARG0]], [[ARG1]] : index
-// CHECK: [[NNEG:%.+]] = arith.cmpi slt, [[ARG0]], [[ZERO]] : index
-// CHECK: [[NPOS:%.+]] = arith.cmpi sgt, [[ARG0]], [[ZERO]] : index
-// CHECK: [[MNEG:%.+]] = arith.cmpi slt, [[ARG1]], [[ZERO]] : index
-// CHECK: [[MPOS:%.+]] = arith.cmpi sgt, [[ARG1]], [[ZERO]] : index
-// CHECK: [[TERM1:%.+]] = arith.andi [[NNEG]], [[MPOS]] : i1
-// CHECK: [[TERM2:%.+]] = arith.andi [[NPOS]], [[MNEG]] : i1
-// CHECK: [[CMP2:%.+]] = arith.ori [[TERM1]], [[TERM2]] : i1
-// CHECK: [[RES:%.+]] = arith.select [[CMP2]], [[TRUE3]], [[FALSE]] : index
+// CHECK: %[[QUOTIENT:.*]] = arith.divsi %arg0, %arg1 : index
+// CHECK: %[[PRODUCT:.*]] = arith.muli %[[QUOTIENT]], %arg1 : index
+// CHECK: %[[NOT_EQ_PRODUCT:.*]] = arith.cmpi ne, %arg0, %[[PRODUCT]] : index
+// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0 : index
+// CHECK: %[[NEG_DIVISOR:.*]] = arith.cmpi slt, %arg0, %[[ZERO]] : index
+// CHECK: %[[NEG_DIVIDEND:.*]] = arith.cmpi slt, %arg1, %[[ZERO]] : index
+// CHECK: %[[OPPOSITE_SIGN:.*]] = arith.cmpi ne, %[[NEG_DIVISOR]], %[[NEG_DIVIDEND]] : i1
+// CHECK: %[[CONDITION:.*]] = arith.andi %[[NOT_EQ_PRODUCT]], %[[OPPOSITE_SIGN]] : i1
+// CHECK: %[[NEG_ONE:.*]] = arith.constant -1 : index
+// CHECK-DAG: %[[MINUS_ONE:.*]] = arith.addi %[[QUOTIENT]], %[[NEG_ONE]] : index
+// CHECK: %[[RES:.*]] = arith.select %[[CONDITION]], %[[MINUS_ONE]], %[[QUOTIENT]] : index
}
// -----
@@ -121,23 +109,17 @@ func.func @floordivi_index(%arg0: index, %arg1: index) -> (index) {
func.func @floordivi_vec(%arg0: vector<4xi32>, %arg1: vector<4xi32>) -> (vector<4xi32>) {
%res = arith.floordivsi %arg0, %arg1 : vector<4xi32>
return %res : vector<4xi32>
-// CHECK: %[[VAL_2:.*]] = arith.constant dense<1> : vector<4xi32>
-// CHECK: %[[VAL_3:.*]] = arith.constant dense<0> : vector<4xi32>
-// CHECK: %[[VAL_4:.*]] = arith.constant dense<-1> : vector<4xi32>
-// CHECK: %[[VAL_5:.*]] = arith.cmpi slt, %[[VAL_1]], %[[VAL_3]] : vector<4xi32>
-// CHECK: %[[VAL_6:.*]] = arith.select %[[VAL_5]], %[[VAL_2]], %[[VAL_4]] : vector<4xi1>, vector<4xi32>
-// CHECK: %[[VAL_7:.*]] = arith.subi %[[VAL_6]], %[[VAL_0]] : vector<4xi32>
-// CHECK: %[[VAL_8:.*]] = arith.divsi %[[VAL_7]], %[[VAL_1]] : vector<4xi32>
-// CHECK: %[[VAL_9:.*]] = arith.subi %[[VAL_4]], %[[VAL_8]] : vector<4xi32>
-// CHECK: %[[VAL_10:.*]] = arith.divsi %[[VAL_0]], %[[VAL_1]] : vector<4xi32>
-// CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_0]], %[[VAL_3]] : vector<4xi32>
-// CHECK: %[[VAL_12:.*]] = arith.cmpi sgt, %[[VAL_0]], %[[VAL_3]] : vector<4xi32>
-// CHECK: %[[VAL_13:.*]] = arith.cmpi slt, %[[VAL_1]], %[[VAL_3]] : vector<4xi32>
-// CHECK: %[[VAL_14:.*]] = arith.cmpi sgt, %[[VAL_1]], %[[VAL_3]] : vector<4xi32>
-// CHECK: %[[VAL_15:.*]] = arith.andi %[[VAL_11]], %[[VAL_14]] : vector<4xi1>
-// CHECK: %[[VAL_16:.*]] = arith.andi %[[VAL_12]], %[[VAL_13]] : vector<4xi1>
-// CHECK: %[[VAL_17:.*]] = arith.ori %[[VAL_15]], %[[VAL_16]] : vector<4xi1>
-// CHECK: %[[VAL_18:.*]] = arith.select %[[VAL_17]], %[[VAL_9]], %[[VAL_10]] : vector<4xi1>, vector<4xi32>
+// CHECK: %[[QUOTIENT:.*]] = arith.divsi %arg0, %arg1 : vector<4xi32>
+// CHECK: %[[PRODUCT:.*]] = arith.muli %[[QUOTIENT]], %arg1 : vector<4xi32>
+// CHECK: %[[NOT_EQ_PRODUCT:.*]] = arith.cmpi ne, %arg0, %[[PRODUCT]] : vector<4xi32>
+// CHECK-DAG: %[[ZERO:.*]] = arith.constant dense<0> : vector<4xi32>
+// CHECK: %[[NEG_DIVISOR:.*]] = arith.cmpi slt, %arg0, %[[ZERO]] : vector<4xi32>
+// CHECK: %[[NEG_DIVIDEND:.*]] = arith.cmpi slt, %arg1, %[[ZERO]] : vector<4xi32>
+// CHECK: %[[OPPOSITE_SIGN:.*]] = arith.cmpi ne, %[[NEG_DIVISOR]], %[[NEG_DIVIDEND]] : vector<4xi1>
+// CHECK: %[[CONDITION:.*]] = arith.andi %[[NOT_EQ_PRODUCT]], %[[OPPOSITE_SIGN]] : vector<4xi1>
+// CHECK-DAG: %[[NEG_ONE:.*]] = arith.constant dense<-1> : vector<4xi32>
+// CHECK: %[[MINUS_ONE:.*]] = arith.addi %[[QUOTIENT]], %[[NEG_ONE]] : vector<4xi32>
+// CHECK: %[[RES:.*]] = arith.select %[[CONDITION]], %[[MINUS_ONE]], %[[QUOTIENT]] : vector<4xi1>, vector<4xi32>
}
// -----
diff --git a/mlir/test/Dialect/Arith/one-shot-bufferize.mlir b/mlir/test/Dialect/Arith/one-shot-bufferize.mlir
index 174bf2fc8e4b..f6bdca7f4d9e 100644
--- a/mlir/test/Dialect/Arith/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Arith/one-shot-bufferize.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries" -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
// RUN: mlir-opt %s -one-shot-bufferize="unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -split-input-file -o /dev/null
diff --git a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-loops.mlir b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-loops.mlir
new file mode 100644
index 000000000000..53b28c3aab6f
--- /dev/null
+++ b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-loops.mlir
@@ -0,0 +1,86 @@
+// RUN: mlir-opt %s -expand-realloc="emit-deallocs=false" -ownership-based-buffer-deallocation="private-function-dynamic-ownership=true" -canonicalize -buffer-deallocation-simplification | FileCheck %s
+
+// A function that reallocates two buffers inside a loop. The simplification
+// pass should be able to figure out that the iter_args are always originating
+// from different allocations. IR like this one appears in the sparse compiler.
+
+// CHECK-LABEL: func private @loop_with_realloc(
+func.func private @loop_with_realloc(%lb: index, %ub: index, %step: index, %c: i1, %s1: index, %s2: index) -> (memref<?xf32>, memref<?xf32>) {
+ // CHECK-DAG: %[[false:.*]] = arith.constant false
+ // CHECK-DAG: %[[true:.*]] = arith.constant true
+
+ // CHECK: %[[m0:.*]] = memref.alloc
+ %m0 = memref.alloc(%s1) : memref<?xf32>
+ // CHECK: %[[m1:.*]] = memref.alloc
+ %m1 = memref.alloc(%s1) : memref<?xf32>
+
+ // CHECK: %[[r:.*]]:4 = scf.for {{.*}} iter_args(%[[arg0:.*]] = %[[m0]], %[[arg1:.*]] = %[[m1]], %[[o0:.*]] = %[[false]], %[[o1:.*]] = %[[false]])
+ %r0, %r1 = scf.for %iv = %lb to %ub step %step iter_args(%arg0 = %m0, %arg1 = %m1) -> (memref<?xf32>, memref<?xf32>) {
+ // CHECK: %[[m2:.*]]:2 = scf.if %{{.*}} -> (memref<?xf32>, i1) {
+ // CHECK-NEXT: memref.alloc
+ // CHECK-NEXT: memref.subview
+ // CHECK-NEXT: memref.copy
+ // CHECK-NEXT: scf.yield %{{.*}}, %[[true]]
+ // CHECK-NEXT: } else {
+ // CHECK-NEXT: memref.reinterpret_cast
+ // CHECK-NEXT: scf.yield %{{.*}}, %[[false]]
+ // CHECK-NEXT: }
+ %m2 = memref.realloc %arg0(%s2) : memref<?xf32> to memref<?xf32>
+ // CHECK: %[[m3:.*]]:2 = scf.if %{{.*}} -> (memref<?xf32>, i1) {
+ // CHECK-NEXT: memref.alloc
+ // CHECK-NEXT: memref.subview
+ // CHECK-NEXT: memref.copy
+ // CHECK-NEXT: scf.yield %{{.*}}, %[[true]]
+ // CHECK-NEXT: } else {
+ // CHECK-NEXT: memref.reinterpret_cast
+ // CHECK-NEXT: scf.yield %{{.*}}, %[[false]]
+ // CHECK-NEXT: }
+ %m3 = memref.realloc %arg1(%s2) : memref<?xf32> to memref<?xf32>
+
+ // CHECK: %[[base0:.*]], %{{.*}}, %{{.*}}, %{{.*}} = memref.extract_strided_metadata %[[arg0]]
+ // CHECK: %[[base1:.*]], %{{.*}}, %{{.*}}, %{{.*}} = memref.extract_strided_metadata %[[arg1]]
+ // CHECK: %[[d0:.*]] = bufferization.dealloc (%[[base0]] : memref<f32>) if (%[[o0]]) retain (%[[m2]]#0 : memref<?xf32>)
+ // CHECK: %[[d1:.*]] = bufferization.dealloc (%[[base1]] : memref<f32>) if (%[[o1]]) retain (%[[m3]]#0 : memref<?xf32>)
+ // CHECK-DAG: %[[o2:.*]] = arith.ori %[[d0]], %[[m2]]#1
+ // CHECK-DAG: %[[o3:.*]] = arith.ori %[[d1]], %[[m3]]#1
+ // CHECK: scf.yield %[[m2]]#0, %[[m3]]#0, %[[o2]], %[[o3]]
+ scf.yield %m2, %m3 : memref<?xf32>, memref<?xf32>
+ }
+
+ // CHECK: %[[d2:.*]] = bufferization.dealloc (%[[m0]] : memref<?xf32>) if (%[[true]]) retain (%[[r]]#0 : memref<?xf32>)
+ // CHECK: %[[d3:.*]] = bufferization.dealloc (%[[m1]] : memref<?xf32>) if (%[[true]]) retain (%[[r]]#1 : memref<?xf32>)
+ // CHECK-DAG: %[[or0:.*]] = arith.ori %[[d2]], %[[r]]#2
+ // CHECK-DAG: %[[or1:.*]] = arith.ori %[[d3]], %[[r]]#3
+ // CHECK: return %[[r]]#0, %[[r]]#1, %[[or0]], %[[or1]]
+ return %r0, %r1 : memref<?xf32>, memref<?xf32>
+}
+
+// -----
+
+// The yielded values of the loop are swapped. Therefore, the
+// bufferization.dealloc before the func.return can no longer be split,
+// because %r0 could originate from either %m0 or %m1 (same for %r1).
+
+// CHECK-LABEL: func private @swapping_loop_with_realloc(
+func.func private @swapping_loop_with_realloc(%lb: index, %ub: index, %step: index, %c: i1, %s1: index, %s2: index) -> (memref<?xf32>, memref<?xf32>) {
+ // CHECK-DAG: %[[false:.*]] = arith.constant false
+ // CHECK-DAG: %[[true:.*]] = arith.constant true
+
+ // CHECK: %[[m0:.*]] = memref.alloc
+ %m0 = memref.alloc(%s1) : memref<?xf32>
+ // CHECK: %[[m1:.*]] = memref.alloc
+ %m1 = memref.alloc(%s1) : memref<?xf32>
+
+ // CHECK: %[[r:.*]]:4 = scf.for {{.*}} iter_args(%[[arg0:.*]] = %[[m0]], %[[arg1:.*]] = %[[m1]], %[[o0:.*]] = %[[false]], %[[o1:.*]] = %[[false]])
+ %r0, %r1 = scf.for %iv = %lb to %ub step %step iter_args(%arg0 = %m0, %arg1 = %m1) -> (memref<?xf32>, memref<?xf32>) {
+ %m2 = memref.realloc %arg0(%s2) : memref<?xf32> to memref<?xf32>
+ %m3 = memref.realloc %arg1(%s2) : memref<?xf32> to memref<?xf32>
+ scf.yield %m3, %m2 : memref<?xf32>, memref<?xf32>
+ }
+
+ // CHECK: %[[base0:.*]], %{{.*}}, %{{.*}}, %{{.*}} = memref.extract_strided_metadata %[[r]]#0
+ // CHECK: %[[base1:.*]], %{{.*}}, %{{.*}}, %{{.*}} = memref.extract_strided_metadata %[[r]]#1
+ // CHECK: %[[d:.*]]:2 = bufferization.dealloc (%[[m0]], %[[m1]], %[[base0]], %[[base1]] : {{.*}}) if (%[[true]], %[[true]], %[[r]]#2, %[[r]]#3) retain (%[[r]]#0, %[[r]]#1 : {{.*}})
+ // CHECK: return %[[r]]#0, %[[r]]#1, %[[d]]#0, %[[d]]#1
+ return %r0, %r1 : memref<?xf32>, memref<?xf32>
+}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation-simplification.mlir b/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation-simplification.mlir
index eee69acbe821..b40a17cf800b 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation-simplification.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/buffer-deallocation-simplification.mlir
@@ -92,15 +92,13 @@ func.func @dealloc_split_when_no_other_aliasing(%arg0: i1, %arg1: memref<2xi32>,
// CHECK-NEXT: [[ALLOC0:%.+]] = memref.alloc(
// CHECK-NEXT: [[ALLOC1:%.+]] = memref.alloc(
// CHECK-NEXT: [[V0:%.+]] = arith.select{{.*}}[[ALLOC0]], [[ALLOC1]] :
-// COM: there is only one value in the retained list because the
-// COM: RemoveRetainedMemrefsGuaranteedToNotAlias pattern also applies here and
-// COM: removes %arg1 from the list. In the second dealloc, this does not apply
-// COM: because function arguments are assumed potentially alias (even if the
-// COM: types don't exactly match).
+// COM: there is only one value in the retained lists because the
+// COM: RemoveRetainedMemrefsGuaranteedToNotAlias pattern also applies here:
+// COM: - %alloc is guaranteed to not alias with %arg1.
+// COM: - %arg2 is guaranteed to not alias with %0.
// CHECK-NEXT: [[V1:%.+]] = bufferization.dealloc ([[ALLOC0]] : memref<2xi32>) if ([[ARG0]]) retain ([[V0]] : memref<2xi32>)
-// CHECK-NEXT: [[V2:%.+]]:2 = bufferization.dealloc ([[ARG2]] : memref<2xi32>) if ([[ARG3]]) retain ([[ARG1]], [[V0]] : memref<2xi32>, memref<2xi32>)
-// CHECK-NEXT: [[V3:%.+]] = arith.ori [[V1]], [[V2]]#1
-// CHECK-NEXT: return [[V2]]#0, [[V3]] :
+// CHECK-NEXT: [[V2:%.+]] = bufferization.dealloc ([[ARG2]] : memref<2xi32>) if ([[ARG3]]) retain ([[ARG1]] : memref<2xi32>)
+// CHECK-NEXT: return [[V2]], [[V1]] :
// -----
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
index e4375950d336..8f0170b17381 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null
// CHECK-LABEL: func @buffer_not_deallocated(
// CHECK-SAME: %[[t:.*]]: tensor<?xf32>
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis-bottom-up-from-terminators.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis-bottom-up-from-terminators.mlir
new file mode 100644
index 000000000000..1b75edc4c157
--- /dev/null
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis-bottom-up-from-terminators.mlir
@@ -0,0 +1,36 @@
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=bottom-up-from-terminators" -split-input-file | FileCheck %s
+
+// CHECK-LABEL: func @simple_test(
+func.func @simple_test(%lb: index, %ub: index, %step: index, %f1: f32, %f2: f32) -> (tensor<5xf32>, tensor<5xf32>) {
+ %c0 = arith.constant 0 : index
+ %p = arith.constant 0.0 : f32
+
+ // Make sure that ops that feed into region terminators bufferize in-place
+ // (if possible).
+ // Note: This test case fails to bufferize with a "top-down" or "bottom-up"
+ // heuristic.
+
+ %0 = tensor.empty() : tensor<5xf32>
+ %1 = scf.for %iv = %lb to %ub step %step iter_args(%t = %0) -> (tensor<5xf32>) {
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "false"]}
+ %2 = linalg.fill ins(%f1 : f32) outs(%t : tensor<5xf32>) -> tensor<5xf32>
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+ %3 = linalg.fill ins(%f2 : f32) outs(%t : tensor<5xf32>) -> tensor<5xf32>
+ %4 = vector.transfer_read %2[%c0], %p : tensor<5xf32>, vector<5xf32>
+ vector.print %4 : vector<5xf32>
+ scf.yield %3 : tensor<5xf32>
+ }
+
+ %5 = tensor.empty() : tensor<5xf32>
+ %6 = scf.for %iv = %lb to %ub step %step iter_args(%t = %0) -> (tensor<5xf32>) {
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+ %7 = linalg.fill ins(%f1 : f32) outs(%t : tensor<5xf32>) -> tensor<5xf32>
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "false"]}
+ %8 = linalg.fill ins(%f2 : f32) outs(%t : tensor<5xf32>) -> tensor<5xf32>
+ %9 = vector.transfer_read %8[%c0], %p : tensor<5xf32>, vector<5xf32>
+ vector.print %9 : vector<5xf32>
+ scf.yield %7 : tensor<5xf32>
+ }
+
+ return %1, %6 : tensor<5xf32>, tensor<5xf32>
+}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
index 2c5f2083f589..9380c81ce235 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
@@ -4,9 +4,9 @@
// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-unknown-ops unknown-type-conversion=identity-layout-map" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null
// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="dialect-filter=tensor,bufferization allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s --check-prefix=CHECK-TENSOR
// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="dialect-filter=scf,bufferization allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s --check-prefix=CHECK-SCF
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
index 611b67e198c0..0ed3a9f077ce 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="allow-unknown-ops" -verify-diagnostics -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23" -verify-diagnostics -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59" -verify-diagnostics -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91" -verify-diagnostics -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -verify-diagnostics -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -verify-diagnostics -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -verify-diagnostics -split-input-file -o /dev/null
// Run with top-down analysis.
// RUN: mlir-opt %s -one-shot-bufferize="allow-unknown-ops analysis-heuristic=top-down" -verify-diagnostics -split-input-file | FileCheck %s --check-prefix=CHECK-TOP-DOWN-ANALYSIS
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
index 9319ac61d928..c58b153d438c 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
@@ -2,9 +2,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 " -split-input-file | FileCheck %s --check-prefix=NO-DROP
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map" -split-input-file -o /dev/null
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
index 6e7b113aa35c..42d9cc00d3ff 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
@@ -1,9 +1,14 @@
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only" -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+
+// Try different heuristics. Not checking the result, just make sure that we do
+// not crash.
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-heuristic=bottom-up-from-terminators" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries test-analysis-only analysis-heuristic=top-down" -split-input-file -o /dev/null
// TODO: Extract op-specific test cases and move them to their respective
// dialects.
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
index 39f4835b28ff..429c9e4dea9e 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
@@ -2,9 +2,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1" -canonicalize -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
diff --git a/mlir/test/Dialect/Complex/canonicalize.mlir b/mlir/test/Dialect/Complex/canonicalize.mlir
index 64c3f313dda9..16fa0fdf56a1 100644
--- a/mlir/test/Dialect/Complex/canonicalize.mlir
+++ b/mlir/test/Dialect/Complex/canonicalize.mlir
@@ -281,3 +281,59 @@ func.func @double_reverse_bitcast(%arg0 : complex<f32>) -> f64 {
// CHECK: return %[[R0]] : f64
func.return %1 : f64
}
+
+
+// CHECK-LABEL: func @div_one_f16
+// CHECK-SAME: (%[[ARG0:.*]]: f16, %[[ARG1:.*]]: f16) -> complex<f16>
+func.func @div_one_f16(%arg0: f16, %arg1: f16) -> complex<f16> {
+ %create = complex.create %arg0, %arg1: complex<f16>
+ %one = complex.constant [1.0 : f16, 0.0 : f16] : complex<f16>
+ %div = complex.div %create, %one : complex<f16>
+ // CHECK: %[[CREATE:.*]] = complex.create %[[ARG0]], %[[ARG1]] : complex<f16>
+ // CHECK-NEXT: return %[[CREATE]]
+ return %div : complex<f16>
+}
+
+// CHECK-LABEL: func @div_one_f32
+// CHECK-SAME: (%[[ARG0:.*]]: f32, %[[ARG1:.*]]: f32) -> complex<f32>
+func.func @div_one_f32(%arg0: f32, %arg1: f32) -> complex<f32> {
+ %create = complex.create %arg0, %arg1: complex<f32>
+ %one = complex.constant [1.0 : f32, 0.0 : f32] : complex<f32>
+ %div = complex.div %create, %one : complex<f32>
+ // CHECK: %[[CREATE:.*]] = complex.create %[[ARG0]], %[[ARG1]] : complex<f32>
+ // CHECK-NEXT: return %[[CREATE]]
+ return %div : complex<f32>
+}
+
+// CHECK-LABEL: func @div_one_f64
+// CHECK-SAME: (%[[ARG0:.*]]: f64, %[[ARG1:.*]]: f64) -> complex<f64>
+func.func @div_one_f64(%arg0: f64, %arg1: f64) -> complex<f64> {
+ %create = complex.create %arg0, %arg1: complex<f64>
+ %one = complex.constant [1.0 : f64, 0.0 : f64] : complex<f64>
+ %div = complex.div %create, %one : complex<f64>
+ // CHECK: %[[CREATE:.*]] = complex.create %[[ARG0]], %[[ARG1]] : complex<f64>
+ // CHECK-NEXT: return %[[CREATE]]
+ return %div : complex<f64>
+}
+
+// CHECK-LABEL: func @div_one_f80
+// CHECK-SAME: (%[[ARG0:.*]]: f80, %[[ARG1:.*]]: f80) -> complex<f80>
+func.func @div_one_f80(%arg0: f80, %arg1: f80) -> complex<f80> {
+ %create = complex.create %arg0, %arg1: complex<f80>
+ %one = complex.constant [1.0 : f80, 0.0 : f80] : complex<f80>
+ %div = complex.div %create, %one : complex<f80>
+ // CHECK: %[[CREATE:.*]] = complex.create %[[ARG0]], %[[ARG1]] : complex<f80>
+ // CHECK-NEXT: return %[[CREATE]]
+ return %div : complex<f80>
+}
+
+// CHECK-LABEL: func @div_one_f128
+// CHECK-SAME: (%[[ARG0:.*]]: f128, %[[ARG1:.*]]: f128) -> complex<f128>
+func.func @div_one_f128(%arg0: f128, %arg1: f128) -> complex<f128> {
+ %create = complex.create %arg0, %arg1: complex<f128>
+ %one = complex.constant [1.0 : f128, 0.0 : f128] : complex<f128>
+ %div = complex.div %create, %one : complex<f128>
+ // CHECK: %[[CREATE:.*]] = complex.create %[[ARG0]], %[[ARG1]] : complex<f128>
+ // CHECK-NEXT: return %[[CREATE]]
+ return %div : complex<f128>
+}
diff --git a/mlir/test/Dialect/EmitC/transforms.mlir b/mlir/test/Dialect/EmitC/transforms.mlir
index ad167fa455a1..8ac606a2c8c0 100644
--- a/mlir/test/Dialect/EmitC/transforms.mlir
+++ b/mlir/test/Dialect/EmitC/transforms.mlir
@@ -107,3 +107,20 @@ func.func @expression_with_address_taken(%arg0: i32, %arg1: i32, %arg2: !emitc.p
%d = emitc.cmp lt, %c, %arg2 :(!emitc.ptr<i32>, !emitc.ptr<i32>) -> i1
return %d : i1
}
+
+// CHECK-LABEL: func.func @no_nested_expression(
+// CHECK-SAME: %[[VAL_0:.*]]: i32, %[[VAL_1:.*]]: i32) -> i1 {
+// CHECK: %[[VAL_2:.*]] = emitc.expression : i1 {
+// CHECK: %[[VAL_3:.*]] = emitc.cmp lt, %[[VAL_0]], %[[VAL_1]] : (i32, i32) -> i1
+// CHECK: emitc.yield %[[VAL_3]] : i1
+// CHECK: }
+// CHECK: return %[[VAL_2]] : i1
+// CHECK: }
+
+func.func @no_nested_expression(%arg0: i32, %arg1: i32) -> i1 {
+ %a = emitc.expression : i1 {
+ %b = emitc.cmp lt, %arg0, %arg1 :(i32, i32) -> i1
+ emitc.yield %b : i1
+ }
+ return %a : i1
+}
diff --git a/mlir/test/Dialect/LLVMIR/debuginfo.mlir b/mlir/test/Dialect/LLVMIR/debuginfo.mlir
index 4c2de0aa4c22..94bb2bb06229 100644
--- a/mlir/test/Dialect/LLVMIR/debuginfo.mlir
+++ b/mlir/test/Dialect/LLVMIR/debuginfo.mlir
@@ -96,11 +96,10 @@
file = #file, line = 3, scopeLine = 3, subprogramFlags = "Definition|Optimized", type = #spType0
>
-// CHECK-DAG: #[[SP1:.*]] = #llvm.di_subprogram<compileUnit = #[[CU]], scope = #[[COMP2]], name = "value", file = #[[FILE]], subprogramFlags = Definition, type = #[[SPTYPE1]]>
+// CHECK-DAG: #[[SP1:.*]] = #llvm.di_subprogram<scope = #[[COMP2]], file = #[[FILE]], type = #[[SPTYPE1]]>
#sp1 = #llvm.di_subprogram<
- // Omit the optional linkageName parameter.
- compileUnit = #cu, scope = #comp2, name = "value",
- file = #file, subprogramFlags = "Definition", type = #spType1
+ // Omit the optional parameters.
+ scope = #comp2, file = #file, type = #spType1
>
// CHECK-DAG: #[[MODULE:.*]] = #llvm.di_module<file = #[[FILE]], scope = #[[FILE]], name = "module", configMacros = "bar", includePath = "/", apinotes = "/", line = 42, isDecl = true>
@@ -112,7 +111,6 @@
// CHECK-DAG: #[[SP2:.*]] = #llvm.di_subprogram<compileUnit = #[[CU]], scope = #[[MODULE]], name = "value", file = #[[FILE]], subprogramFlags = Definition, type = #[[SPTYPE2]]>
#sp2 = #llvm.di_subprogram<
- // Omit the optional linkageName parameter.
compileUnit = #cu, scope = #module, name = "value",
file = #file, subprogramFlags = "Definition", type = #spType2
>
diff --git a/mlir/test/Dialect/LLVMIR/layout.mlir b/mlir/test/Dialect/LLVMIR/layout.mlir
index 2868e1740f86..a78fb771242e 100644
--- a/mlir/test/Dialect/LLVMIR/layout.mlir
+++ b/mlir/test/Dialect/LLVMIR/layout.mlir
@@ -7,6 +7,7 @@ module {
// CHECK: alloca_memory_space = 0
// CHECK: bitsize = 64
// CHECK: global_memory_space = 0
+ // CHECK: index = 64
// CHECK: preferred = 8
// CHECK: program_memory_space = 0
// CHECK: size = 8
@@ -16,6 +17,7 @@ module {
// CHECK: alloca_memory_space = 0
// CHECK: bitsize = 64
// CHECK: global_memory_space = 0
+ // CHECK: index = 64
// CHECK: preferred = 8
// CHECK: program_memory_space = 0
// CHECK: size = 8
@@ -25,6 +27,7 @@ module {
// CHECK: alloca_memory_space = 0
// CHECK: bitsize = 64
// CHECK: global_memory_space = 0
+ // CHECK: index = 64
// CHECK: preferred = 8
// CHECK: program_memory_space = 0
// CHECK: size = 8
@@ -39,7 +42,7 @@ module {
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!llvm.ptr, dense<[32, 32, 64]> : vector<3xi64>>,
#dlti.dl_entry<!llvm.ptr<5>, dense<[64, 64, 64]> : vector<3xi64>>,
- #dlti.dl_entry<!llvm.ptr<4>, dense<[32, 64, 64]> : vector<3xi64>>,
+ #dlti.dl_entry<!llvm.ptr<4>, dense<[32, 64, 64, 24]> : vector<4xi64>>,
#dlti.dl_entry<"dlti.alloca_memory_space", 5 : ui64>,
#dlti.dl_entry<"dlti.global_memory_space", 2 : ui64>,
#dlti.dl_entry<"dlti.program_memory_space", 3 : ui64>,
@@ -51,6 +54,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// CHECK: alloca_memory_space = 5
// CHECK: bitsize = 32
// CHECK: global_memory_space = 2
+ // CHECK: index = 32
// CHECK: preferred = 8
// CHECK: program_memory_space = 3
// CHECK: size = 4
@@ -60,6 +64,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// CHECK: alloca_memory_space = 5
// CHECK: bitsize = 32
// CHECK: global_memory_space = 2
+ // CHECK: index = 32
// CHECK: preferred = 8
// CHECK: program_memory_space = 3
// CHECK: size = 4
@@ -69,24 +74,17 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// CHECK: alloca_memory_space = 5
// CHECK: bitsize = 64
// CHECK: global_memory_space = 2
+ // CHECK: index = 64
// CHECK: preferred = 8
// CHECK: program_memory_space = 3
// CHECK: size = 8
// CHECK: stack_alignment = 128
"test.data_layout_query"() : () -> !llvm.ptr<5>
- // CHECK: alignment = 4
- // CHECK: alloca_memory_space = 5
- // CHECK: bitsize = 32
- // CHECK: global_memory_space = 2
- // CHECK: preferred = 8
- // CHECK: program_memory_space = 3
- // CHECK: size = 4
- // CHECK: stack_alignment = 128
- "test.data_layout_query"() : () -> !llvm.ptr<3>
// CHECK: alignment = 8
// CHECK: alloca_memory_space = 5
// CHECK: bitsize = 32
// CHECK: global_memory_space = 2
+ // CHECK: index = 24
// CHECK: preferred = 8
// CHECK: program_memory_space = 3
// CHECK: size = 4
@@ -134,6 +132,7 @@ module {
// simple case
// CHECK: alignment = 4
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 4
"test.data_layout_query"() : () -> !llvm.struct<(i32)>
@@ -141,6 +140,7 @@ module {
// padding inbetween
// CHECK: alignment = 8
// CHECK: bitsize = 128
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 16
"test.data_layout_query"() : () -> !llvm.struct<(i32, f64)>
@@ -148,6 +148,7 @@ module {
// padding at end of struct
// CHECK: alignment = 8
// CHECK: bitsize = 128
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 16
"test.data_layout_query"() : () -> !llvm.struct<(f64, i32)>
@@ -155,6 +156,7 @@ module {
// packed
// CHECK: alignment = 1
// CHECK: bitsize = 96
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 12
"test.data_layout_query"() : () -> !llvm.struct<packed (f64, i32)>
@@ -162,6 +164,7 @@ module {
// empty
// CHECK: alignment = 1
// CHECK: bitsize = 0
+ // CHECK: index = 0
// CHECK: preferred = 1
// CHECK: size = 0
"test.data_layout_query"() : () -> !llvm.struct<()>
@@ -179,6 +182,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// Strict alignment is applied
// CHECK: alignment = 4
// CHECK: bitsize = 16
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 2
"test.data_layout_query"() : () -> !llvm.struct<(i16)>
@@ -186,6 +190,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// No impact on structs that have stricter requirements
// CHECK: alignment = 8
// CHECK: bitsize = 128
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 16
"test.data_layout_query"() : () -> !llvm.struct<(i32, f64)>
@@ -193,6 +198,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// Only the preferred alignment of structs is affected
// CHECK: alignment = 1
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 4
"test.data_layout_query"() : () -> !llvm.struct<packed (i16, i16)>
@@ -200,6 +206,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// empty
// CHECK: alignment = 4
// CHECK: bitsize = 0
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 0
"test.data_layout_query"() : () -> !llvm.struct<()>
@@ -265,6 +272,7 @@ module {
// simple case
// CHECK: alignment = 4
// CHECK: bitsize = 64
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 8
"test.data_layout_query"() : () -> !llvm.array<2 x i32>
@@ -272,6 +280,7 @@ module {
// size 0
// CHECK: alignment = 8
// CHECK: bitsize = 0
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 0
"test.data_layout_query"() : () -> !llvm.array<0 x f64>
@@ -279,6 +288,7 @@ module {
// alignment info matches element type
// CHECK: alignment = 4
// CHECK: bitsize = 64
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 8
"test.data_layout_query"() : () -> !llvm.array<1 x i64>
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index b157cf001418..31acf2b95e46 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -647,3 +647,18 @@ llvm.func @experimental_noalias_scope_decl() {
llvm.intr.experimental.noalias.scope.decl #alias_scope
llvm.return
}
+
+// CHECK-LABEL: @experimental_constrained_fptrunc
+llvm.func @experimental_constrained_fptrunc(%in: f64) {
+ // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} towardzero ignore : f64 to f32
+ %0 = llvm.intr.experimental.constrained.fptrunc %in towardzero ignore : f64 to f32
+ // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearest maytrap : f64 to f32
+ %1 = llvm.intr.experimental.constrained.fptrunc %in tonearest maytrap : f64 to f32
+ // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} upward strict : f64 to f32
+ %2 = llvm.intr.experimental.constrained.fptrunc %in upward strict : f64 to f32
+ // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} downward ignore : f64 to f32
+ %3 = llvm.intr.experimental.constrained.fptrunc %in downward ignore : f64 to f32
+ // CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearestaway ignore : f64 to f32
+ %4 = llvm.intr.experimental.constrained.fptrunc %in tonearestaway ignore : f64 to f32
+ llvm.return
+}
diff --git a/mlir/test/Dialect/LLVMIR/sroa.mlir b/mlir/test/Dialect/LLVMIR/sroa.mlir
index 02d25f27f978..3f4d17c6a43f 100644
--- a/mlir/test/Dialect/LLVMIR/sroa.mlir
+++ b/mlir/test/Dialect/LLVMIR/sroa.mlir
@@ -215,3 +215,106 @@ llvm.func @no_nested_dynamic_indexing(%arg: i32) -> i32 {
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
+
+// -----
+
+// CHECK-LABEL: llvm.func @store_first_field
+llvm.func @store_first_field(%arg: i32) {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: llvm.store %{{.*}}, %[[ALLOCA]] : i32
+ llvm.store %arg, %1 : i32, !llvm.ptr
+ llvm.return
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @store_first_field_different_type
+// CHECK-SAME: (%[[ARG:.*]]: f32)
+llvm.func @store_first_field_different_type(%arg: f32) {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]] : f32
+ llvm.store %arg, %1 : f32, !llvm.ptr
+ llvm.return
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @store_sub_field
+// CHECK-SAME: (%[[ARG:.*]]: f32)
+llvm.func @store_sub_field(%arg: f32) {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i64
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (i64, i32)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]] : f32
+ llvm.store %arg, %1 : f32, !llvm.ptr
+ llvm.return
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @load_first_field
+llvm.func @load_first_field() -> i32 {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: %[[RES:.*]] = llvm.load %[[ALLOCA]] : !llvm.ptr -> i32
+ %2 = llvm.load %1 : !llvm.ptr -> i32
+ // CHECK: llvm.return %[[RES]] : i32
+ llvm.return %2 : i32
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @load_first_field_different_type
+llvm.func @load_first_field_different_type() -> f32 {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i32
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: %[[RES:.*]] = llvm.load %[[ALLOCA]] : !llvm.ptr -> f32
+ %2 = llvm.load %1 : !llvm.ptr -> f32
+ // CHECK: llvm.return %[[RES]] : f32
+ llvm.return %2 : f32
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @load_sub_field
+llvm.func @load_sub_field() -> i32 {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x i64 : (i32) -> !llvm.ptr
+ %1 = llvm.alloca %0 x !llvm.struct<(i64, i32)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: %[[RES:.*]] = llvm.load %[[ALLOCA]]
+ %res = llvm.load %1 : !llvm.ptr -> i32
+ // CHECK: llvm.return %[[RES]] : i32
+ llvm.return %res : i32
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @vector_store_type_mismatch
+// CHECK-SAME: %[[ARG:.*]]: vector<4xi32>
+llvm.func @vector_store_type_mismatch(%arg: vector<4xi32>) {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x vector<4xf32>
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xf32>)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: llvm.store %[[ARG]], %[[ALLOCA]]
+ llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
+ llvm.return
+}
+
+// -----
+
+// CHECK-LABEL: llvm.func @store_to_memory
+// CHECK-SAME: %[[ARG:.*]]: !llvm.ptr
+llvm.func @store_to_memory(%arg: !llvm.ptr) {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<
+ %1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xf32>)> : (i32) -> !llvm.ptr
+ // CHECK-NEXT: llvm.store %[[ALLOCA]], %[[ARG]]
+ llvm.store %1, %arg : !llvm.ptr, !llvm.ptr
+ llvm.return
+}
diff --git a/mlir/test/Dialect/LLVMIR/type-consistency.mlir b/mlir/test/Dialect/LLVMIR/type-consistency.mlir
index 021151b929d8..a6176142f174 100644
--- a/mlir/test/Dialect/LLVMIR/type-consistency.mlir
+++ b/mlir/test/Dialect/LLVMIR/type-consistency.mlir
@@ -26,63 +26,6 @@ llvm.func @same_address_keep_inbounds(%arg: i32) {
// -----
-// CHECK-LABEL: llvm.func @struct_store_instead_of_first_field
-llvm.func @struct_store_instead_of_first_field(%arg: i32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]] : i32
- llvm.store %arg, %1 : i32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @struct_store_instead_of_first_field_same_size
-// CHECK-SAME: (%[[ARG:.*]]: f32)
-llvm.func @struct_store_instead_of_first_field_same_size(%arg: f32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK-DAG: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK-DAG: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK-DAG: %[[BITCAST:.*]] = llvm.bitcast %[[ARG]] : f32 to i32
- // CHECK: llvm.store %[[BITCAST]], %[[GEP]] : i32
- llvm.store %arg, %1 : f32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @struct_load_instead_of_first_field
-llvm.func @struct_load_instead_of_first_field() -> i32 {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK: %[[RES:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
- %2 = llvm.load %1 : !llvm.ptr -> i32
- // CHECK: llvm.return %[[RES]] : i32
- llvm.return %2 : i32
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @struct_load_instead_of_first_field_same_size
-llvm.func @struct_load_instead_of_first_field_same_size() -> f32 {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK: %[[LOADED:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
- // CHECK: %[[RES:.*]] = llvm.bitcast %[[LOADED]] : i32 to f32
- %2 = llvm.load %1 : !llvm.ptr -> f32
- // CHECK: llvm.return %[[RES]] : f32
- llvm.return %2 : f32
-}
-
-// -----
-
// CHECK-LABEL: llvm.func @index_in_final_padding
llvm.func @index_in_final_padding(%arg: i32) {
%0 = llvm.mlir.constant(1 : i32) : i32
@@ -135,22 +78,6 @@ llvm.func @index_not_in_padding_because_packed(%arg: i16) {
// -----
-// CHECK-LABEL: llvm.func @index_to_struct
-// CHECK-SAME: (%[[ARG:.*]]: i32)
-llvm.func @index_to_struct(%arg: i32) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, struct<"bar", (i32, i32)>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, struct<"bar", (i32, i32)>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, struct<"bar", (i32, i32)>)>
- // CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"bar", (i32, i32)>
- %7 = llvm.getelementptr %1[4] : (!llvm.ptr) -> !llvm.ptr, i8
- // CHECK: llvm.store %[[ARG]], %[[GEP1]]
- llvm.store %arg, %7 : i32, !llvm.ptr
- llvm.return
-}
-
-// -----
-
// CHECK-LABEL: llvm.func @no_crash_on_negative_gep_index
llvm.func @no_crash_on_negative_gep_index() {
%0 = llvm.mlir.constant(1.000000e+00 : f16) : f16
@@ -175,10 +102,9 @@ llvm.func @coalesced_store_ints(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
@@ -225,11 +151,9 @@ llvm.func @coalesced_store_floats(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (f32, f32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (f32, f32)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (f32, f32)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: %[[BIT_CAST:.*]] = llvm.bitcast %[[TRUNC]] : i32 to f32
- // CHECK: llvm.store %[[BIT_CAST]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (f32, f32)>
@@ -298,10 +222,9 @@ llvm.func @coalesced_store_packed_struct(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", packed (i16, i32, i16)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", packed (i16, i32, i16)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
@@ -328,9 +251,8 @@ llvm.func @vector_write_split(%arg: vector<4xi32>) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32] : vector<4xi32>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: llvm.store %[[EXTRACT]], %[[ALLOCA]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
@@ -405,36 +327,6 @@ llvm.func @vector_write_split_struct(%arg: vector<2xi64>) {
// -----
-// CHECK-LABEL: llvm.func @type_consistent_vector_store
-// CHECK-SAME: %[[ARG:.*]]: vector<4xi32>
-llvm.func @type_consistent_vector_store(%arg: vector<4xi32>) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (vector<4xi32>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xi32>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (vector<4xi32>)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
- llvm.return
-}
-
-// -----
-
-// CHECK-LABEL: llvm.func @type_consistent_vector_store_other_type
-// CHECK-SAME: %[[ARG:.*]]: vector<4xi32>
-llvm.func @type_consistent_vector_store_other_type(%arg: vector<4xi32>) {
- %0 = llvm.mlir.constant(1 : i32) : i32
- // CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (vector<4xf32>)>
- %1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xf32>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (vector<4xf32>)>
- // CHECK: %[[BIT_CAST:.*]] = llvm.bitcast %[[ARG]] : vector<4xi32> to vector<4xf32>
- // CHECK: llvm.store %[[BIT_CAST]], %[[GEP]]
- llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.return
-}
-
-// -----
-
// CHECK-LABEL: llvm.func @bitcast_insertion
// CHECK-SAME: %[[ARG:.*]]: i32
llvm.func @bitcast_insertion(%arg: i32) {
@@ -478,10 +370,9 @@ llvm.func @coalesced_store_ints_subaggregate(%arg: i64) {
%3 = llvm.getelementptr %1[0, 1, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, struct<(i32, i32)>)>
// CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, struct<(i32, i32)>)>
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, i32)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[TOP_GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, i32)>
@@ -520,10 +411,9 @@ llvm.func @overlapping_int_aggregate_store(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]] : i64
// CHECK: [[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i48
@@ -531,8 +421,7 @@ llvm.func @overlapping_int_aggregate_store(%arg: i64) {
// Normal integer splitting of [[TRUNC]] follows:
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: llvm.store %{{.*}}, %[[TOP_GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
// CHECK: llvm.store %{{.*}}, %[[GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
@@ -557,14 +446,12 @@ llvm.func @overlapping_vector_aggregate_store(%arg: vector<4 x i16>) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32]
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]]
+ // CHECK: llvm.store %[[EXTRACT]], %[[ALLOCA]]
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32]
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
- // CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP1]]
+ // CHECK: llvm.store %[[EXTRACT]], %[[GEP0]]
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32]
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
@@ -593,10 +480,9 @@ llvm.func @partially_overlapping_aggregate_store(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]] : i64
// CHECK: [[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i48
@@ -604,8 +490,7 @@ llvm.func @partially_overlapping_aggregate_store(%arg: i64) {
// Normal integer splitting of [[TRUNC]] follows:
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: llvm.store %{{.*}}, %[[TOP_GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
// CHECK: llvm.store %{{.*}}, %[[GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
@@ -651,10 +536,9 @@ llvm.func @coalesced_store_ints_array(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.array<2 x i32>
%1 = llvm.alloca %0 x !llvm.array<2 x i32> : (i32) -> !llvm.ptr
- // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x i32>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: llvm.store %[[TRUNC]], %[[ALLOCA]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x i32>
diff --git a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
index e036695a2ac9..79d61ab757e3 100644
--- a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
+++ b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir
@@ -905,3 +905,163 @@ func.func @unpack_different_destination_shape(%arg0: tensor<1x1x1080x1920x16xi32
// CHECK-SAME: inner_dims_pos = [0] inner_tiles = [16]
// CHECK-SAME: into %[[UNPACK_NEW_DEST]]
// CHECK: return %[[UNPACK]] : tensor<16x540x960xi32>
+
+// -----
+
+func.func @bubble_up_pack_through_collapse(%1: tensor<?x16x4xf32>, %dim : index) -> tensor<?x4x8x1xf32> {
+ %collapsed = tensor.collapse_shape %1 [[0, 1], [2]] : tensor<?x16x4xf32> into tensor<?x4xf32>
+ %2 = tensor.empty(%dim) : tensor<?x4x8x1xf32>
+ %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %2 : tensor<?x4xf32> -> tensor<?x4x8x1xf32>
+ func.return %pack : tensor<?x4x8x1xf32>
+}
+// CHECK-LABEL: func.func @bubble_up_pack_through_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x16x4xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x2x4x8x1xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [8, 1] into %[[EMPTY]] : tensor<?x16x4xf32> -> tensor<?x2x4x8x1xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[PACK]] {{\[}}[0, 1], [2], [3], [4]] : tensor<?x2x4x8x1xf32> into tensor<?x4x8x1xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<?x4x8x1xf32>
+
+// -----
+
+func.func @bubble_up_permuted_pack_through_collapse(%1: tensor<4x192x16x256xf32>) -> tensor<4x32x3072x8x1xf32> {
+ %collapsed = tensor.collapse_shape %1 [[0], [1, 2], [3]] : tensor<4x192x16x256xf32> into tensor<4x3072x256xf32>
+ %2 = tensor.empty() : tensor<4x32x3072x8x1xf32>
+ %pack = tensor.pack %collapsed outer_dims_perm = [0, 2, 1] inner_dims_pos = [2, 1] inner_tiles = [8, 1] into %2 : tensor<4x3072x256xf32> -> tensor<4x32x3072x8x1xf32>
+ func.return %pack : tensor<4x32x3072x8x1xf32>
+}
+// CHECK-LABEL: func.func @bubble_up_permuted_pack_through_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<4x32x192x16x8x1xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 3, 1, 2] inner_dims_pos = [3, 2] inner_tiles = [8, 1] into %[[EMPTY]] : tensor<4x192x16x256xf32> -> tensor<4x32x192x16x8x1xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %pack {{\[}}[0], [1], [2, 3], [4], [5]] : tensor<4x32x192x16x8x1xf32> into tensor<4x32x3072x8x1xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<4x32x3072x8x1xf32>
+
+// -----
+
+func.func @bubble_up_pack_through_unit_collapse(%1: tensor<1x64x1x4xf32>) -> tensor<8x4x8x1xf32> {
+ %collapsed = tensor.collapse_shape %1 [[0, 1, 2], [3]] : tensor<1x64x1x4xf32> into tensor<64x4xf32>
+ %2 = tensor.empty() : tensor<8x4x8x1xf32>
+ %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %2 : tensor<64x4xf32> -> tensor<8x4x8x1xf32>
+ func.return %pack : tensor<8x4x8x1xf32>
+}
+// CHECK-LABEL: func.func @bubble_up_pack_through_unit_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<1x8x1x4x8x1xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 1, 2, 3] inner_dims_pos = [1, 3] inner_tiles = [8, 1] into %[[EMPTY]] : tensor<1x64x1x4xf32> -> tensor<1x8x1x4x8x1xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[PACK]] {{\[}}[0, 1, 2], [3], [4], [5]] : tensor<1x8x1x4x8x1xf32> into tensor<8x4x8x1xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<8x4x8x1xf32>
+
+// -----
+
+func.func @bubble_up_pack_through_collapse_on_outer_dims(%1: tensor<?x16x4xf32>, %dim : index) -> tensor<?x1x4xf32> {
+ %collapsed = tensor.collapse_shape %1 [[0, 1], [2]] : tensor<?x16x4xf32> into tensor<?x4xf32>
+ %2 = tensor.empty(%dim) : tensor<?x1x4xf32>
+ %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [1] inner_tiles = [4] into %2 : tensor<?x4xf32> -> tensor<?x1x4xf32>
+ func.return %pack : tensor<?x1x4xf32>
+}
+// CHECK-LABEL: func.func @bubble_up_pack_through_collapse_on_outer_dims
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x16x4xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x16x1x4xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [2] inner_tiles = [4] into %[[EMPTY]] : tensor<?x16x4xf32> -> tensor<?x16x1x4xf32>
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[PACK]] {{\[}}[0, 1], [2], [3]] : tensor<?x16x1x4xf32> into tensor<?x1x4xf32>
+// CHECK: return %[[COLLAPSED]] : tensor<?x1x4xf32>
+
+// -----
+
+func.func @no_bubble_up_pack_through_non_divisible_collapse(%1: tensor<3072x64x4xf32>) -> tensor<384x32x8x8xf32> {
+ %collapsed = tensor.collapse_shape %1 [[0], [1, 2]] : tensor<3072x64x4xf32> into tensor<3072x256xf32>
+ %2 = tensor.empty() : tensor<384x32x8x8xf32>
+ %pack = tensor.pack %collapsed outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %2 : tensor<3072x256xf32> -> tensor<384x32x8x8xf32>
+ func.return %pack : tensor<384x32x8x8xf32>
+}
+// CHECK-LABEL: func.func @no_bubble_up_pack_through_non_divisible_collapse
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0], [1, 2]] : tensor<3072x64x4xf32> into tensor<3072x256xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[COLLAPSED]]
+// CHECK: return %[[PACK]] : tensor<384x32x8x8xf32>
+
+// -----
+
+func.func @push_down_unpack_through_expand(%5: tensor<?x32x8x8xf32>, %dim: index) -> tensor<?x256x256xf32> {
+ %6 = tensor.empty(%dim) : tensor<?x256xf32>
+ %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<?x32x8x8xf32> -> tensor<?x256xf32>
+ %expanded = tensor.expand_shape %unpack [[0, 1], [2]] : tensor<?x256xf32> into tensor<?x256x256xf32>
+ func.return %expanded : tensor<?x256x256xf32>
+}
+// CHECK-LABEL: func.func @push_down_unpack_through_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
+// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED:.+]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [1, 2] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<?x32x32x8x8xf32> -> tensor<?x256x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<?x256x256xf32>
+
+// -----
+
+func.func @push_down_permuted_unpack_through_expand(%5: tensor<4x32x384x8x8xf32>) -> tensor<4x12x256x256xf32> {
+ %6 = tensor.empty() : tensor<4x3072x256xf32>
+ %unpack = tensor.unpack %5 outer_dims_perm = [0, 2, 1] inner_dims_pos = [2, 1] inner_tiles = [8, 8] into %6 : tensor<4x32x384x8x8xf32> -> tensor<4x3072x256xf32>
+ %expanded = tensor.expand_shape %unpack [[0], [1, 2], [3]] : tensor<4x3072x256xf32> into tensor<4x12x256x256xf32>
+ func.return %expanded : tensor<4x12x256x256xf32>
+}
+// CHECK-LABEL: @push_down_permuted_unpack_through_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0], [1], [2, 3], [4], [5]] : tensor<4x32x384x8x8xf32> into tensor<4x32x12x32x8x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<4x12x256x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED]] outer_dims_perm = [0, 3, 1, 2] inner_dims_pos = [3, 2] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<4x32x12x32x8x8xf32> -> tensor<4x12x256x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<4x12x256x256xf32>
+
+// -----
+
+func.func @push_down_unpack_through_unit_expand(%5: tensor<6x32x8x8xf32>) -> tensor<3x16x1x256xf32> {
+ %6 = tensor.empty() : tensor<48x256xf32>
+ %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<6x32x8x8xf32> -> tensor<48x256xf32>
+ %expanded = tensor.expand_shape %unpack [[0, 1, 2], [3]] : tensor<48x256xf32> into tensor<3x16x1x256xf32>
+ func.return %expanded : tensor<3x16x1x256xf32>
+}
+// CHECK-LABEL: func.func @push_down_unpack_through_unit_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1, 2], [3], [4], [5]] : tensor<6x32x8x8xf32> into tensor<3x2x1x32x8x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<3x16x1x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED]] outer_dims_perm = [0, 1, 2, 3] inner_dims_pos = [1, 3] inner_tiles = [8, 8] into %[[EMPTY]] : tensor<3x2x1x32x8x8xf32> -> tensor<3x16x1x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<3x16x1x256xf32>
+
+// -----
+
+func.func @push_down_unpack_through_expand_on_outer_dims(%5: tensor<?x32x8xf32>, %dim: index) -> tensor<?x256x256xf32> {
+ %6 = tensor.empty(%dim) : tensor<?x256xf32>
+ %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [1] inner_tiles = [8] into %6 : tensor<?x32x8xf32> -> tensor<?x256xf32>
+ %expanded = tensor.expand_shape %unpack [[0, 1], [2]] : tensor<?x256xf32> into tensor<?x256x256xf32>
+ func.return %expanded : tensor<?x256x256xf32>
+}
+// CHECK-LABEL: func.func @push_down_unpack_through_expand_on_outer_dims
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]] : tensor<?x32x8xf32> into tensor<?x256x32x8xf32>
+// CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x256x32x8xf32>
+// CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[EXPANDED:.+]] outer_dims_perm = [0, 1, 2] inner_dims_pos = [2] inner_tiles = [8] into %[[EMPTY]] : tensor<?x256x32x8xf32> -> tensor<?x256x256xf32>
+// CHECK: return %[[UNPACK]] : tensor<?x256x256xf32>
+
+// -----
+
+func.func @no_push_down_unpack_through_non_divisible_expand(%5: tensor<384x32x8x8xf32>) -> tensor<256x12x256xf32> {
+ %6 = tensor.empty() : tensor<3072x256xf32>
+ %unpack = tensor.unpack %5 outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %6 : tensor<384x32x8x8xf32> -> tensor<3072x256xf32>
+ %expanded = tensor.expand_shape %unpack [[0, 1], [2]] : tensor<3072x256xf32> into tensor<256x12x256xf32>
+ func.return %expanded : tensor<256x12x256xf32>
+}
+// CHECK-LABEL: func.func @no_push_down_unpack_through_non_divisible_expand
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[UNPACK]] {{\[}}[0, 1], [2]] : tensor<3072x256xf32> into tensor<256x12x256xf32>
+// CHECK: return %[[EXPANDED]] : tensor<256x12x256xf32>
diff --git a/mlir/test/Dialect/Linalg/flatten-elementwise.mlir b/mlir/test/Dialect/Linalg/flatten-elementwise.mlir
index 858c133dd536..5a27fe76b134 100644
--- a/mlir/test/Dialect/Linalg/flatten-elementwise.mlir
+++ b/mlir/test/Dialect/Linalg/flatten-elementwise.mlir
@@ -67,6 +67,27 @@ module attributes {transform.with_named_sequence} {
// -----
+// CHECK-LABEL: func.func @map_already_flat(
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<32xf32>
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]*]]: memref<32xf32>
+// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]*]]: memref<32xf32>
+// CHECK-NEXT: linalg.map { arith.addf } ins(%[[ARG0]], %[[ARG1]] : memref<32xf32>, memref<32xf32>) outs(%[[ARG2]] : memref<32xf32>)
+func.func @map_already_flat(%arg0: memref<32xf32>, %arg1: memref<32xf32>, %arg2: memref<32xf32>) {
+ linalg.map {arith.addf} ins(%arg0, %arg1: memref<32xf32>, memref<32xf32>) outs(%arg2: memref<32xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
// CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
// CHECK-LABEL: func.func @generic
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]: memref<32x7xf32>
diff --git a/mlir/test/Dialect/Linalg/flatten-unsupported.mlir b/mlir/test/Dialect/Linalg/flatten-unsupported.mlir
new file mode 100644
index 000000000000..499db4cfb329
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/flatten-unsupported.mlir
@@ -0,0 +1,33 @@
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics
+
+func.func @non_elementwise(%arg0: memref<2x3xf32>, %arg1: memref<3x4xf32>, %arg2: memref<2x4xf32>) {
+ // expected-error @below {{only elementwise flattening is supported}}
+ linalg.matmul ins(%arg0, %arg1 : memref<2x3xf32>, memref<3x4xf32>) outs(%arg2: memref<2x4xf32>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @unsupported_memref(%arg0: memref<32x7xf32, strided<[7, 2]>>, %arg1: memref<32x7xf32, strided<[7, 2]>>, %arg2: memref<32x7xf32, strided<[7, 2]>>) {
+ // expected-error @below {{attempted to flatten, but failed}}
+ linalg.map {arith.addf} ins(%arg0, %arg1: memref<32x7xf32, strided<[7, 2]>>, memref<32x7xf32, strided<[7, 2]>>) outs(%arg2: memref<32x7xf32, strided<[7, 2]>>)
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %flattened = transform.structured.flatten_elementwise %0
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
index c69701b65e20..9616a3e32a06 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries" -canonicalize -buffer-loop-hoisting -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
// RUN: mlir-opt %s -one-shot-bufferize="unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
diff --git a/mlir/test/Dialect/Linalg/tile-to-forall.mlir b/mlir/test/Dialect/Linalg/tile-to-forall.mlir
index abd807b3e4d3..12e2dea5530b 100644
--- a/mlir/test/Dialect/Linalg/tile-to-forall.mlir
+++ b/mlir/test/Dialect/Linalg/tile-to-forall.mlir
@@ -586,3 +586,144 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+#map1 = affine_map<(d0, d1) -> (d0)>
+
+func.func @tile_thread_safety1(%arg0: tensor<100x300xf32>, %arg1: tensor<100xf32>) -> tensor<100xf32> {
+ // expected-warning@below {{tiling is not thread safe at axis #1}}
+ %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%arg0 : tensor<100x300xf32>) outs(%arg1 : tensor<100xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ %1 = arith.addf %in, %out : f32
+ linalg.yield %1 : f32
+ } -> tensor<100xf32>
+ return %0 : tensor<100xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [4, 2]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
+
+// -----
+
+#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
+
+func.func @tile_thread_safety2(%arg0: tensor<100x300x8xf32>, %arg1: tensor<300x8xf32>) -> tensor<300x8xf32> {
+ // expected-warning@below {{tiling is not thread safe at axis #0}}
+ %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["reduction", "parallel", "parallel"]} ins(%arg0 : tensor<100x300x8xf32>) outs(%arg1 : tensor<300x8xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ %1 = arith.addf %in, %out : f32
+ linalg.yield %1 : f32
+ } -> tensor<300x8xf32>
+ return %0 : tensor<300x8xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [8]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
+
+// -----
+
+#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
+
+func.func @tile_thread_safety3(%arg0: tensor<100x300x8xf32>, %arg1: tensor<100x8xf32>) -> tensor<100x8xf32> {
+ // expected-warning@below {{tiling is not thread safe at axis #1}}
+ %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction", "parallel"]} ins(%arg0 : tensor<100x300x8xf32>) outs(%arg1 : tensor<100x8xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ %1 = arith.addf %in, %out : f32
+ linalg.yield %1 : f32
+ } -> tensor<100x8xf32>
+ return %0 : tensor<100x8xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [8, 4, 2]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
+
+// -----
+
+#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d0, d2)>
+#map2 = affine_map<(d0, d1, d2) -> (d2)>
+
+func.func @tile_thread_safety4(%arg0: tensor<100x300x8xf32>, %arg1: tensor<100x8xf32>, %arg2 : tensor<8xf32>) -> (tensor<100x8xf32>, tensor<8xf32>) {
+ // expected-warning@+2 {{tiling is not thread safe at axis #0}}
+ // expected-warning@below {{tiling is not thread safe at axis #1}}
+ %0:2 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["reduction", "reduction", "parallel"]} ins(%arg0 : tensor<100x300x8xf32>) outs(%arg1, %arg2 : tensor<100x8xf32>, tensor<8xf32>) {
+ ^bb0(%in: f32, %out1: f32, %out2: f32):
+ %1 = arith.addf %in, %out1 : f32
+ %2 = arith.addf %in, %out2 : f32
+ linalg.yield %1, %2 : f32, f32
+ } -> (tensor<100x8xf32>, tensor<8xf32>)
+ return %0#0, %0#1 : tensor<100x8xf32>, tensor<8xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [8, 4, 2]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
+
+// -----
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+#map1 = affine_map<(d0, d1) -> (d0)>
+
+func.func @tile_thread_safety5(%arg0: tensor<100x300xf32>, %arg1: tensor<100xf32>) -> tensor<100xf32> {
+ // expected-warning@below {{tiling is not thread safe at axis #1}}
+ %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "reduction"]} ins(%arg0 : tensor<100x300xf32>) outs(%arg1 : tensor<100xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ %1 = arith.addf %in, %out : f32
+ linalg.yield %1 : f32
+ } -> tensor<100xf32>
+ return %0 : tensor<100xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %forall, %tiled_generic = transform.structured.tile_using_forall %0 tile_sizes [10, 1]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @tile_thread_safety6(%A: tensor<?x?xf32>, %B: tensor<?x?xf32>, %C: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ // expected-warning@below {{tiling is not thread safe at axis #2}}
+ %0 = linalg.matmul ins(%A, %B : tensor<?x?xf32>, tensor<?x?xf32>)
+ outs(%C : tensor<?x?xf32>) -> (tensor<?x?xf32>)
+ return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %forall, %tiled_generic = transform.structured.tile_using_forall %0 num_threads [2, 0, 8]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/SCF/for-loop-peeling-front.mlir b/mlir/test/Dialect/SCF/for-loop-peeling-front.mlir
index 65141ff7623f..fe3b3e686a3e 100644
--- a/mlir/test/Dialect/SCF/for-loop-peeling-front.mlir
+++ b/mlir/test/Dialect/SCF/for-loop-peeling-front.mlir
@@ -13,11 +13,11 @@
// CHECK: %[[INIT:.*]] = arith.addi %[[ACC]], %[[CAST]] : i32
// CHECK: scf.yield %[[INIT]]
// CHECK: }
-// CHECK: %[[RESULT:.*]] = scf.for %[[IV:.*]] = %[[C4]] to %[[C17]]
-// CHECK-SAME: step %[[C4]] iter_args(%[[ACC:.*]] = %[[FIRST]]) -> (i32) {
-// CHECK: %[[MIN2:.*]] = affine.min #[[MAP]](%[[C17]], %[[IV]])[%[[C4]]]
+// CHECK: %[[RESULT:.*]] = scf.for %[[IV2:.*]] = %[[C4]] to %[[C17]]
+// CHECK-SAME: step %[[C4]] iter_args(%[[ACC2:.*]] = %[[FIRST]]) -> (i32) {
+// CHECK: %[[MIN2:.*]] = affine.min #[[MAP]](%[[C17]], %[[IV2]])[%[[C4]]]
// CHECK: %[[CAST2:.*]] = arith.index_cast %[[MIN2]] : index to i32
-// CHECK: %[[ADD:.*]] = arith.addi %[[ACC]], %[[CAST2]] : i32
+// CHECK: %[[ADD:.*]] = arith.addi %[[ACC2]], %[[CAST2]] : i32
// CHECK: scf.yield %[[ADD]]
// CHECK: }
// CHECK: return %[[RESULT]]
@@ -111,6 +111,45 @@ func.func @fully_dynamic_bounds(%lb : index, %ub: index, %step: index) -> i32 {
// -----
// CHECK-DAG: #[[MAP:.*]] = affine_map<(d0, d1)[s0] -> (4, d0 - d1)>
+// CHECK: func @two_iteration_example(
+// CHECK-DAG: %[[C0_I32:.*]] = arith.constant 0 : i32
+// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
+// CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index
+// CHECK-DAG: %[[C6:.*]] = arith.constant 6 : index
+// CHECK: %[[FIRST:.*]] = scf.for %[[IV:.*]] = %[[C2]] to %[[C6]]
+// CHECK-SAME: step %[[C4]] iter_args(%[[ACC:.*]] = %[[C0_I32]]) -> (i32) {
+// CHECK: %[[MIN:.*]] = affine.min #[[MAP]](%[[C6]], %[[IV]])[%[[C4]]]
+// CHECK: %[[CAST:.*]] = arith.index_cast %[[MIN]] : index to i32
+// CHECK: %[[INIT:.*]] = arith.addi %[[ACC]], %[[CAST]] : i32
+// CHECK: scf.yield %[[INIT]]
+// CHECK: }
+// CHECK: %[[RESULT:.*]] = scf.for %[[IV2:.*]] = %[[C6]] to %[[C8]]
+// CHECK-SAME: step %[[C4]] iter_args(%[[ACC2:.*]] = %[[FIRST]]) -> (i32) {
+// CHECK: %[[MIN2:.*]] = affine.min #[[MAP]](%[[C8]], %[[IV2]])[%[[C4]]]
+// CHECK: %[[CAST2:.*]] = arith.index_cast %[[MIN2]] : index to i32
+// CHECK: %[[ADD:.*]] = arith.addi %[[ACC2]], %[[CAST2]] : i32
+// CHECK: scf.yield %[[ADD]]
+// CHECK: }
+// CHECK: return %[[RESULT]]
+#map = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)>
+func.func @two_iteration_example() -> i32 {
+ %c0_i32 = arith.constant 0 : i32
+ %lb = arith.constant 2 : index
+ %step = arith.constant 4 : index
+ %ub = arith.constant 8 : index
+ %r = scf.for %iv = %lb to %ub step %step iter_args(%arg = %c0_i32) -> i32 {
+ %s = affine.min #map(%ub, %iv)[%step]
+ %casted = arith.index_cast %s : index to i32
+ %0 = arith.addi %arg, %casted : i32
+ scf.yield %0 : i32
+ }
+ return %r : i32
+}
+
+// -----
+
+// CHECK-DAG: #[[MAP:.*]] = affine_map<(d0, d1)[s0] -> (4, d0 - d1)>
// CHECK: func @no_peeling_front(
// CHECK-DAG: %[[C0_I32:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
index 7d23498f32e1..4d82021e86f5 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only" -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null
// CHECK-LABEL: func @scf_for_yield_only
func.func @scf_for_yield_only(
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index 24da8d84b18e..485fdd9b0e59 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops bufferize-function-boundaries" -cse -canonicalize -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops test-analysis-only analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops analysis-heuristic=fuzzer test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops analysis-heuristic=fuzzer test-analysis-only analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops analysis-heuristic=fuzzer test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs-from-loops unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -split-input-file -o /dev/null
diff --git a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
index faaa2db3aa57..0f51b1cdbe0c 100644
--- a/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
+++ b/mlir/test/Dialect/SCF/transform-loop-fuse-sibling.mlir
@@ -1,14 +1,113 @@
// RUN: mlir-opt %s -transform-interpreter --cse --canonicalize -split-input-file -verify-diagnostics | FileCheck %s
+// RUN: mlir-opt %s -transform-interpreter -split-input-file -verify-diagnostics | FileCheck %s --check-prefix CHECK-NOCLEANUP
-func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
+// CHECK: func.func @fuse_1st_for_into_2nd([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
+func.func @fuse_1st_for_into_2nd(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
+ // CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
+ // CHECK-DAG: [[C16:%.*]] = arith.constant 16 : index
+ // CHECK-DAG: [[C128:%.*]] = arith.constant 128 : index
+ // CHECK-DAG: [[ZERO:%.*]] = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %c16 = arith.constant 16 : index
+ %c128 = arith.constant 128 : index
+ %cst = arith.constant 0.000000e+00 : f32
+ // CHECK: [[R0:%.*]]:2 = scf.for [[IV:%.*]] = [[C0]] to [[C128]] step [[C16]] iter_args([[IA:%.*]] = [[A]], [[IB:%.*]] = [[B]]) {{.*}}
+ %1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %A) -> (tensor<128xf32>) {
+ // CHECK-DAG: [[ASLICE:%.*]] = vector.transfer_read [[A]][[[IV]]], [[ZERO]]
+ // CHECK-DAG: [[SLICE0:%.*]] = vector.transfer_read [[IA]][[[IV]]], [[ZERO]]
+ // CHECK: [[OUT1:%.*]] = arith.addf [[SLICE0]], [[ASLICE]]
+ // CHECK-NEXT: [[WRT0:%.*]] = vector.transfer_write [[OUT1]], [[IA]][[[IV]]]
+ %2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %5 = arith.addf %3, %2 : vector<16xf32>
+ %6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ scf.yield %6 : tensor<128xf32>
+ }
+ %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
+ // CHECK-DAG: [[SLICE1:%.*]] = vector.transfer_read [[IB]][[[IV]]], [[ZERO]]
+ // CHECK: [[OUT2:%.*]] = arith.addf [[SLICE1]], [[ASLICE]]
+ // CHECK-NEXT: [[WRT1:%.*]] = vector.transfer_write [[OUT2]], [[IB]][[[IV]]]
+ %dup2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
+ %dup6 = vector.transfer_write %dup5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ // CHECK: scf.yield [[WRT0]], [[WRT1]] : {{.*}}
+ scf.yield %dup6 : tensor<128xf32>
+ }
+ return %1, %dup1 : tensor<128xf32>, tensor<128xf32>
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %for:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %for#0 into %for#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK: func.func @fuse_2nd_for_into_1st([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
+func.func @fuse_2nd_for_into_1st(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
+ // CHECK-DAG: [[C0:%.*]] = arith.constant 0 : index
+ // CHECK-DAG: [[C16:%.*]] = arith.constant 16 : index
+ // CHECK-DAG: [[C128:%.*]] = arith.constant 128 : index
+ // CHECK-DAG: [[ZERO:%.*]] = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %c16 = arith.constant 16 : index
+ %c128 = arith.constant 128 : index
+ %cst = arith.constant 0.000000e+00 : f32
+ // CHECK: [[R0:%.*]]:2 = scf.for [[IV:%.*]] = [[C0]] to [[C128]] step [[C16]] iter_args([[IB:%.*]] = [[B]], [[IA:%.*]] = [[A]]) {{.*}}
+ %1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %A) -> (tensor<128xf32>) {
+ // CHECK-DAG: [[ASLICE:%.*]] = vector.transfer_read [[A]][[[IV]]], [[ZERO]]
+ // CHECK-DAG: [[SLICE0:%.*]] = vector.transfer_read [[IB]][[[IV]]], [[ZERO]]
+ // CHECK: [[OUT1:%.*]] = arith.addf [[SLICE0]], [[ASLICE]]
+ // CHECK-NEXT: [[WRT0:%.*]] = vector.transfer_write [[OUT1]], [[IB]][[[IV]]]
+ %2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %5 = arith.addf %3, %2 : vector<16xf32>
+ %6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ scf.yield %6 : tensor<128xf32>
+ }
+ %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
+ // CHECK-DAG: [[SLICE1:%.*]] = vector.transfer_read [[IA]][[[IV]]], [[ZERO]]
+ // CHECK: [[OUT2:%.*]] = arith.addf [[SLICE1]], [[ASLICE]]
+ // CHECK-NEXT: [[WRT1:%.*]] = vector.transfer_write [[OUT2]], [[IA]][[[IV]]]
+ %dup2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ // NB: the dominance check used to fail on the following line,
+ // however the defining op for the value of %arg3 occurs above the source loop and hence is safe
+ // and %arg4 is a block argument of the scope of the loops and hence is safe
+ %dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
+ %dup6 = vector.transfer_write %dup5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ // CHECK: scf.yield [[WRT0]], [[WRT1]] : {{.*}}
+ scf.yield %dup6 : tensor<128xf32>
+ }
+ return %1, %dup1 : tensor<128xf32>, tensor<128xf32>
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %for:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %for#1 into %for#0 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+// CHECK: func.func @matmul_fuse_1st_forall_into_2nd([[A1:%.*]]: {{.*}}, [[A2:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
+func.func @matmul_fuse_1st_forall_into_2nd(%A1 : tensor<128x128xf32>, %A2 : tensor<128x128xf32>, %B : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
%zero = arith.constant 0.0 : f32
%out_alloc = tensor.empty() : tensor<128x128xf32>
%out = linalg.fill ins(%zero : f32) outs(%out_alloc : tensor<128x128xf32>) -> tensor<128x128xf32>
// CHECK: scf.forall ([[I:%.*]]) in (4) shared_outs([[S1:%.*]] = [[IN1:%.*]], [[S2:%.*]] = [[IN2:%.*]]) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
// CHECK: [[T:%.*]] = affine.apply
+ // CHECK: tensor.extract_slice [[A2]][[[T]], 0] [32, 128] [1, 1]
// CHECK: tensor.extract_slice [[S1]][[[T]], 0] [32, 128] [1, 1]
// CHECK: [[OUT1:%.*]] = linalg.matmul
+ // CHECK: tensor.extract_slice [[A1]][[[T]], 0] [32, 128] [1, 1]
// CHECK: tensor.extract_slice [[S2]][[[T]], 0] [32, 128] [1, 1]
// CHECK: [[OUT2:%.*]] = linalg.matmul
// CHECK: scf.forall.in_parallel {
@@ -16,12 +115,11 @@ func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tenso
// CHECK: tensor.parallel_insert_slice [[OUT2]] into [[S2]][[[T]], 0] [32, 128] [1, 1]
// CHECK: }
// CHECK: }
- %out1 = linalg.matmul ins(%A, %B1 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
- %out2 = linalg.matmul ins(%A, %B2 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
+ %out1 = linalg.matmul ins(%A1, %B : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
+ %out2 = linalg.matmul ins(%A2, %B : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
}
-
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
%matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
@@ -31,25 +129,37 @@ module attributes {transform.with_named_sequence} {
%tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused_loop = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+ %fused_loop = transform.loop.fuse_sibling %loop2 into %loop1 : (!transform.any_op, !transform.any_op) -> !transform.any_op
transform.yield
}
}
// -----
-func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
+// CHECK: func.func @matmul_fuse_2nd_forall_into_1st([[A1:%.*]]: {{.*}}, [[A2:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
+func.func @matmul_fuse_2nd_forall_into_1st(%A1 : tensor<128x128xf32>, %A2 : tensor<128x128xf32>, %B : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
%zero = arith.constant 0.0 : f32
%out_alloc = tensor.empty() : tensor<128x128xf32>
%out = linalg.fill ins(%zero : f32) outs(%out_alloc : tensor<128x128xf32>) -> tensor<128x128xf32>
- // expected-error @below {{user of results of target should be properly dominated by source}}
- %out1 = linalg.matmul ins(%A, %B1 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
- %out2 = linalg.matmul ins(%A, %out1 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
+ // CHECK: scf.forall ([[I:%.*]]) in (4) shared_outs([[S1:%.*]] = [[IN1:%.*]], [[S2:%.*]] = [[IN2:%.*]]) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
+ // CHECK: [[T:%.*]] = affine.apply
+ // CHECK: tensor.extract_slice [[A1]][[[T]], 0] [32, 128] [1, 1]
+ // CHECK: tensor.extract_slice [[S1]][[[T]], 0] [32, 128] [1, 1]
+ // CHECK: [[OUT1:%.*]] = linalg.matmul
+ // CHECK: tensor.extract_slice [[A2]][[[T]], 0] [32, 128] [1, 1]
+ // CHECK: tensor.extract_slice [[S2]][[[T]], 0] [32, 128] [1, 1]
+ // CHECK: [[OUT2:%.*]] = linalg.matmul
+ // CHECK: scf.forall.in_parallel {
+ // CHECK: tensor.parallel_insert_slice [[OUT1]] into [[S1]][[[T]], 0] [32, 128] [1, 1]
+ // CHECK: tensor.parallel_insert_slice [[OUT2]] into [[S2]][[[T]], 0] [32, 128] [1, 1]
+ // CHECK: }
+ // CHECK: }
+ %out1 = linalg.matmul ins(%A1, %B : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
+ %out2 = linalg.matmul ins(%A2, %B : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
}
-
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
%matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
@@ -66,18 +176,84 @@ module attributes {transform.with_named_sequence} {
// -----
-func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
+// CHECK-NOCLEANUP: func.func @fuse_no_iter_args([[A:%.*]]: {{.*}}, [[B:%.*]]: {{.*}}
+func.func @fuse_no_iter_args(%A: tensor<128xf32>, %B: tensor<128xf32>) {
+ // CHECK-NOCLEANUP: [[C0:%.*]] = arith.constant 0 : index
+ // CHECK-NOCLEANUP: [[C16:%.*]] = arith.constant 16 : index
+ // CHECK-NOCLEANUP: [[C128:%.*]] = arith.constant 128 : index
+ // CHECK-NOCLEANUP: [[ZERO:%.*]] = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %c16 = arith.constant 16 : index
+ %c128 = arith.constant 128 : index
+ %cst = arith.constant 0.000000e+00 : f32
+ // CHECK-NOCLEANUP: scf.for [[IV:%.*]] = [[C0]] to [[C128]] step [[C16]] {{.*}}
+ scf.for %arg0 = %c0 to %c128 step %c16 {
+ // CHECK-NOCLEANUP: [[ASLICE:%.*]] = vector.transfer_read [[A]][[[IV]]], [[ZERO]]
+ %2 = vector.transfer_read %A[%arg0], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ scf.yield
+ }
+ scf.for %arg0 = %c0 to %c128 step %c16 {
+ // CHECK-NOCLEANUP: [[BSLICE:%.*]] = vector.transfer_read [[B]][[[IV]]], [[ZERO]]
+ %dup2 = vector.transfer_read %B[%arg0], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ scf.yield
+ }
+ return
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %for:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %for#0 into %for#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @source_for_uses_result_of_target_for_err(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
+ %c0 = arith.constant 0 : index
+ %c16 = arith.constant 16 : index
+ %c128 = arith.constant 128 : index
+ %cst = arith.constant 0.000000e+00 : f32
+ // expected-error @below {{user of results of target should be properly dominated by source}}
+ %1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %A) -> (tensor<128xf32>) {
+ %2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %5 = arith.addf %3, %2 : vector<16xf32>
+ %6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ scf.yield %6 : tensor<128xf32>
+ }
+ %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %1) -> (tensor<128xf32>) {
+ %dup2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
+ %dup6 = vector.transfer_write %dup5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ scf.yield %dup6 : tensor<128xf32>
+ }
+ return %1, %dup1 : tensor<128xf32>, tensor<128xf32>
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %for:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %for#0 into %for#1 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @source_forall_uses_result_of_target_forall_err(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
%zero = arith.constant 0.0 : f32
%out_alloc = tensor.empty() : tensor<128x128xf32>
%out = linalg.fill ins(%zero : f32) outs(%out_alloc : tensor<128x128xf32>) -> tensor<128x128xf32>
+ // expected-error @below {{user of results of target should be properly dominated by source}}
%out1 = linalg.matmul ins(%A, %B1 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
- // expected-error @below {{values used inside regions of target should be properly dominated by source}}
%out2 = linalg.matmul ins(%A, %out1 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
}
-
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
%matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
@@ -87,25 +263,58 @@ module attributes {transform.with_named_sequence} {
%tiled_mm1, %loop1 = transform.structured.tile_using_forall %mm1 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%tiled_mm2, %loop2 = transform.structured.tile_using_forall %mm2 tile_sizes [32] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
- %fused_loop = transform.loop.fuse_sibling %loop2 into %loop1 : (!transform.any_op, !transform.any_op) -> !transform.any_op
+ %fused_loop = transform.loop.fuse_sibling %loop1 into %loop2 : (!transform.any_op, !transform.any_op) -> !transform.any_op
transform.yield
}
}
// -----
-func.func @test(%A : tensor<128x128xf32>, %B1 : tensor<128x128xf32>, %B2 : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
- %zero = arith.constant 0.0 : f32
- %out_alloc = tensor.empty() : tensor<128x128xf32>
- %out = linalg.fill ins(%zero : f32) outs(%out_alloc : tensor<128x128xf32>) -> tensor<128x128xf32>
+func.func @target_for_region_uses_result_of_source_for_err(%A: tensor<128xf32>, %B: tensor<128xf32>) -> (tensor<128xf32>, tensor<128xf32>) {
+ %c0 = arith.constant 0 : index
+ %c16 = arith.constant 16 : index
+ %c128 = arith.constant 128 : index
+ %cst = arith.constant 0.000000e+00 : f32
+ %1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %A) -> (tensor<128xf32>) {
+ %2 = vector.transfer_read %A[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %5 = arith.addf %3, %2 : vector<16xf32>
+ %6 = vector.transfer_write %5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ scf.yield %6 : tensor<128xf32>
+ }
+ %dup1 = scf.for %arg3 = %c0 to %c128 step %c16 iter_args(%arg4 = %B) -> (tensor<128xf32>) {
+ // expected-error @below {{values used inside regions of target should be properly dominated by source}}
+ %dup2 = vector.transfer_read %1[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup3 = vector.transfer_read %arg4[%arg3], %cst {in_bounds = [true]} : tensor<128xf32>, vector<16xf32>
+ %dup5 = arith.addf %dup3, %dup2 : vector<16xf32>
+ %dup6 = vector.transfer_write %dup5, %arg4[%arg3] {in_bounds = [true]} : vector<16xf32>, tensor<128xf32>
+ scf.yield %dup6 : tensor<128xf32>
+ }
+ return %1, %dup1 : tensor<128xf32>, tensor<128xf32>
+}
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["scf.for"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %for:2 = transform.split_handle %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %fused = transform.loop.fuse_sibling %for#1 into %for#0 : (!transform.any_op,!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
- %out1 = linalg.matmul ins(%A, %B1 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out : tensor<128x128xf32>) -> tensor<128x128xf32>
+// -----
+
+func.func @target_forall_depends_on_value_not_dominated_by_source_forall_err(%A1 : tensor<128x128xf32>, %A2 : tensor<128x128xf32>, %B : tensor<128x128xf32>) -> (tensor<128x128xf32>, tensor<128x128xf32>) {
+ %zero = arith.constant 0.0 : f32
+ %buf1_alloc = tensor.empty() : tensor<128x128xf32>
+ %buf1 = linalg.fill ins(%zero : f32) outs(%buf1_alloc : tensor<128x128xf32>) -> tensor<128x128xf32>
+ %out1 = linalg.matmul ins(%A1, %B : tensor<128x128xf32>, tensor<128x128xf32>) outs(%buf1 : tensor<128x128xf32>) -> tensor<128x128xf32>
+ %out_alloc2 = tensor.empty() : tensor<128x128xf32>
+ %buf2 = linalg.fill ins(%zero : f32) outs(%buf1_alloc : tensor<128x128xf32>) -> tensor<128x128xf32>
// expected-error @below {{operands of target should be properly dominated by source}}
- %out2 = linalg.matmul ins(%A, %B2 : tensor<128x128xf32>, tensor<128x128xf32>) outs(%out1 : tensor<128x128xf32>) -> tensor<128x128xf32>
+ %out2 = linalg.matmul ins(%A2, %B : tensor<128x128xf32>, tensor<128x128xf32>) outs(%buf2 : tensor<128x128xf32>) -> tensor<128x128xf32>
func.return %out1, %out2 : tensor<128x128xf32>, tensor<128x128xf32>
}
-
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%variant_op : !transform.any_op {transform.readonly}) {
%matched = transform.structured.match ops{["linalg.matmul"]} in %variant_op : (!transform.any_op) -> (!transform.any_op)
diff --git a/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir b/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir
index de21d114e9fc..977d31a6bfe5 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir
@@ -1479,6 +1479,358 @@ func.func @const_fold_vector_inotequal() -> vector<3xi1> {
// -----
//===----------------------------------------------------------------------===//
+// spirv.SGreaterThan
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @sgt_same
+func.func @sgt_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ // CHECK-DAG: %[[CVFALSE:.*]] = spirv.Constant dense<false>
+ %0 = spirv.SGreaterThan %arg0, %arg0 : i32
+ %1 = spirv.SGreaterThan %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CFALSE]], %[[CVFALSE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_sgt
+func.func @const_fold_scalar_sgt() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %c6 = spirv.Constant 6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.SGreaterThan %c5, %c6 : i32
+ %1 = spirv.SGreaterThan %c5, %c4 : i32
+
+ // CHECK: return %[[CFALSE]], %[[CTRUE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_sgt
+func.func @const_fold_vector_sgt() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[false, false, true]>
+ %0 = spirv.SGreaterThan %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.SGreaterThanEqual
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @sge_same
+func.func @sge_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CVTRUE:.*]] = spirv.Constant dense<true>
+ %0 = spirv.SGreaterThanEqual %arg0, %arg0 : i32
+ %1 = spirv.SGreaterThanEqual %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CTRUE]], %[[CVTRUE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_sge
+func.func @const_fold_scalar_sge() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %c6 = spirv.Constant 6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.SGreaterThanEqual %c5, %c6 : i32
+ %1 = spirv.SGreaterThanEqual %c5, %c4 : i32
+
+ // CHECK: return %[[CFALSE]], %[[CTRUE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_sge
+func.func @const_fold_vector_sge() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[true, false, true]>
+ %0 = spirv.SGreaterThanEqual %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.UGreaterThan
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @ugt_same
+func.func @ugt_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ // CHECK-DAG: %[[CVFALSE:.*]] = spirv.Constant dense<false>
+ %0 = spirv.UGreaterThan %arg0, %arg0 : i32
+ %1 = spirv.UGreaterThan %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CFALSE]], %[[CVFALSE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_ugt
+func.func @const_fold_scalar_ugt() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %cn6 = spirv.Constant -6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.UGreaterThan %c5, %cn6 : i32
+ %1 = spirv.UGreaterThan %c5, %c4 : i32
+
+ // CHECK: return %[[CFALSE]], %[[CTRUE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_ugt
+func.func @const_fold_vector_ugt() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[false, false, true]>
+ %0 = spirv.UGreaterThan %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.UGreaterThanEqual
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @uge_same
+func.func @uge_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CVTRUE:.*]] = spirv.Constant dense<true>
+ %0 = spirv.UGreaterThanEqual %arg0, %arg0 : i32
+ %1 = spirv.UGreaterThanEqual %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CTRUE]], %[[CVTRUE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_uge
+func.func @const_fold_scalar_uge() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %cn6 = spirv.Constant -6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.UGreaterThanEqual %c5, %cn6 : i32
+ %1 = spirv.UGreaterThanEqual %c5, %c4 : i32
+
+ // CHECK: return %[[CFALSE]], %[[CTRUE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_uge
+func.func @const_fold_vector_uge() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[true, false, true]>
+ %0 = spirv.UGreaterThanEqual %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.SLessThan
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @slt_same
+func.func @slt_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ // CHECK-DAG: %[[CVFALSE:.*]] = spirv.Constant dense<false>
+ %0 = spirv.SLessThan %arg0, %arg0 : i32
+ %1 = spirv.SLessThan %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CFALSE]], %[[CVFALSE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_slt
+func.func @const_fold_scalar_slt() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %c6 = spirv.Constant 6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.SLessThan %c5, %c6 : i32
+ %1 = spirv.SLessThan %c5, %c4 : i32
+
+ // CHECK: return %[[CTRUE]], %[[CFALSE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_slt
+func.func @const_fold_vector_slt() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[false, true, false]>
+ %0 = spirv.SLessThan %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.SLessThanEqual
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @sle_same
+func.func @sle_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CVTRUE:.*]] = spirv.Constant dense<true>
+ %0 = spirv.SLessThanEqual %arg0, %arg0 : i32
+ %1 = spirv.SLessThanEqual %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CTRUE]], %[[CVTRUE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_sle
+func.func @const_fold_scalar_sle() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %c6 = spirv.Constant 6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.SLessThanEqual %c5, %c6 : i32
+ %1 = spirv.SLessThanEqual %c5, %c4 : i32
+
+ // CHECK: return %[[CTRUE]], %[[CFALSE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_sle
+func.func @const_fold_vector_sle() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[true, true, false]>
+ %0 = spirv.SLessThanEqual %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.ULessThan
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @ult_same
+func.func @ult_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ // CHECK-DAG: %[[CVFALSE:.*]] = spirv.Constant dense<false>
+ %0 = spirv.ULessThan %arg0, %arg0 : i32
+ %1 = spirv.ULessThan %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CFALSE]], %[[CVFALSE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_ult
+func.func @const_fold_scalar_ult() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %cn6 = spirv.Constant -6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.ULessThan %c5, %cn6 : i32
+ %1 = spirv.ULessThan %c5, %c4 : i32
+
+ // CHECK: return %[[CTRUE]], %[[CFALSE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_ult
+func.func @const_fold_vector_ult() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[false, true, false]>
+ %0 = spirv.ULessThan %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
+// spirv.ULessThanEqual
+//===----------------------------------------------------------------------===//
+
+// CHECK-LABEL: @ule_same
+func.func @ule_same(%arg0 : i32, %arg1 : vector<3xi32>) -> (i1, vector<3xi1>) {
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CVTRUE:.*]] = spirv.Constant dense<true>
+ %0 = spirv.ULessThanEqual %arg0, %arg0 : i32
+ %1 = spirv.ULessThanEqual %arg1, %arg1 : vector<3xi32>
+
+ // CHECK: return %[[CTRUE]], %[[CVTRUE]]
+ return %0, %1 : i1, vector<3xi1>
+}
+
+// CHECK-LABEL: @const_fold_scalar_ule
+func.func @const_fold_scalar_ule() -> (i1, i1) {
+ %c4 = spirv.Constant 4 : i32
+ %c5 = spirv.Constant 5 : i32
+ %cn6 = spirv.Constant -6 : i32
+
+ // CHECK-DAG: %[[CTRUE:.*]] = spirv.Constant true
+ // CHECK-DAG: %[[CFALSE:.*]] = spirv.Constant false
+ %0 = spirv.ULessThanEqual %c5, %cn6 : i32
+ %1 = spirv.ULessThanEqual %c5, %c4 : i32
+
+ // CHECK: return %[[CTRUE]], %[[CFALSE]]
+ return %0, %1 : i1, i1
+}
+
+// CHECK-LABEL: @const_fold_vector_ule
+func.func @const_fold_vector_ule() -> vector<3xi1> {
+ %cv0 = spirv.Constant dense<[-1, -4, 3]> : vector<3xi32>
+ %cv1 = spirv.Constant dense<[-1, -3, 2]> : vector<3xi32>
+
+ // CHECK: %[[RET:.*]] = spirv.Constant dense<[true, true, false]>
+ %0 = spirv.ULessThanEqual %cv0, %cv1 : vector<3xi32>
+
+ // CHECK: return %[[RET]]
+ return %0 : vector<3xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
// spirv.LeftShiftLogical
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/SPIRV/Transforms/webgpu-prepare.mlir b/mlir/test/Dialect/SPIRV/Transforms/webgpu-prepare.mlir
index 1ec4e5e4f966..45f188da3815 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/webgpu-prepare.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/webgpu-prepare.mlir
@@ -182,4 +182,36 @@ spirv.func @iaddcarry_i16(%a : i16, %b : i16) -> !spirv.struct<(i16, i16)> "None
spirv.ReturnValue %0 : !spirv.struct<(i16, i16)>
}
+// CHECK-LABEL: func @is_inf_f32
+// CHECK-NEXT: [[FALSE:%.+]] = spirv.Constant false
+// CHECK-NEXT: spirv.ReturnValue [[FALSE]] : i1
+spirv.func @is_inf_f32(%a : f32) -> i1 "None" {
+ %0 = spirv.IsInf %a : f32
+ spirv.ReturnValue %0 : i1
+}
+
+// CHECK-LABEL: func @is_inf_4xf32
+// CHECK-NEXT: [[FALSE:%.+]] = spirv.Constant dense<false> : vector<4xi1>
+// CHECK-NEXT: spirv.ReturnValue [[FALSE]] : vector<4xi1>
+spirv.func @is_inf_4xf32(%a : vector<4xf32>) -> vector<4xi1> "None" {
+ %0 = spirv.IsInf %a : vector<4xf32>
+ spirv.ReturnValue %0 : vector<4xi1>
+}
+
+// CHECK-LABEL: func @is_nan_f32
+// CHECK-NEXT: [[FALSE:%.+]] = spirv.Constant false
+// CHECK-NEXT: spirv.ReturnValue [[FALSE]] : i1
+spirv.func @is_nan_f32(%a : f32) -> i1 "None" {
+ %0 = spirv.IsNan %a : f32
+ spirv.ReturnValue %0 : i1
+}
+
+// CHECK-LABEL: func @is_nan_4xf32
+// CHECK-NEXT: [[FALSE:%.+]] = spirv.Constant dense<false> : vector<4xi1>
+// CHECK-NEXT: spirv.ReturnValue [[FALSE]] : vector<4xi1>
+spirv.func @is_nan_4xf32(%a : vector<4xf32>) -> vector<4xi1> "None" {
+ %0 = spirv.IsNan %a : vector<4xf32>
+ spirv.ReturnValue %0 : vector<4xi1>
+}
+
} // end module
diff --git a/mlir/test/Dialect/SparseTensor/no_fold_into_consumer.mlir b/mlir/test/Dialect/SparseTensor/no_fold_into_consumer.mlir
new file mode 100644
index 000000000000..bbc7f397e793
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/no_fold_into_consumer.mlir
@@ -0,0 +1,47 @@
+// RUN: mlir-opt %s --canonicalize --pre-sparsification-rewrite | FileCheck %s
+
+#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+
+#sparse = #sparse_tensor.encoding<{
+ map = (d0, d1, d2) ->
+ (d0 : compressed(nonunique),
+ d1 : singleton(nonunique, soa),
+ d2 : singleton(soa)),
+ posWidth = 64,
+ crdWidth = 64
+}>
+
+
+module {
+ //
+ // This IR should not end up in an infinite loop trying to fold
+ // the linalg producer into the tensor cast consumer (even though
+ // static sizes can fold, the different encodings cannot). The
+ // cast was sloppy to begin with (but it has been observed by
+ // external sources) and can be easily repaired by the sparsifier.
+ //
+ // CHECK-LABEL: func @avoid_fold
+ // CHECK: arith.constant
+ // CHECK: tensor.empty()
+ // CHECK: linalg.generic
+ // CHECK: sparse_tensor.convert
+ // CHECK: return
+ //
+ func.func @avoid_fold(%0: tensor<10x20x30xf64, #sparse>) -> tensor<10x20x30xf64, #sparse> {
+ %1 = tensor.empty() : tensor<10x20x30xf64>
+ %2 = linalg.generic { indexing_maps = [#map, #map],
+ iterator_types = ["parallel", "parallel", "parallel"]
+ }
+ ins (%0 : tensor<10x20x30xf64, #sparse>)
+ outs(%1 : tensor<10x20x30xf64>) {
+ ^bb0(%in: f64, %out: f64):
+ %cst = arith.constant 0.000000e+00 : f64
+ %4 = arith.cmpf ugt, %in, %cst : f64
+ %5 = arith.select %4, %in, %cst : f64
+ linalg.yield %5 : f64
+ } -> tensor<10x20x30xf64>
+ %cast = tensor.cast %2 : tensor<10x20x30xf64> to tensor<10x20x30xf64, #sparse>
+ return %cast : tensor<10x20x30xf64, #sparse>
+ }
+}
+
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index e5374f031be5..9ab54fe9c133 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -2367,3 +2367,26 @@ func.func @dim_of_reshape_undominated(%arg0: tensor<*xf32>, %arg1: tensor<?xinde
%dim = tensor.dim %reshape, %0 : tensor<*xf32>
return %dim : index
}
+
+// -----
+
+// Test case: This test fails to fold because the index of tensor.dim is out_of_bounds
+// CHECK-LABEL: func @dim_out_of_bounds(
+// CHECK: %[[IDX:.*]] = index.constant 28
+// CHECK-NEXT: bufferization.alloc_tensor
+// CHECK-NEXT: %[[DIM:.*]] = tensor.dim %{{.*}}, %[[IDX]]
+// CHECK-NEXT: memref.alloc
+// CHECK-NEXT: memref.cast
+// CHECK-NEXT: affine.vector_load %{{.*}}[{{.*}}, {{.*}}, symbol(%[[DIM]])]
+// CHECK-NEXT: return
+func.func @dim_out_of_bounds() -> vector<7xi32> {
+ %c1 = arith.constant 1 : index
+ %idx28 = index.constant 28
+ %c29 = arith.constant 29 : index
+ %3 = bufferization.alloc_tensor(%c29) : tensor<?xi16>
+ %dim = tensor.dim %3, %idx28 : tensor<?xi16>
+ %alloc_21 = memref.alloc(%c29) : memref<?x26x2xi32>
+ %16 = affine.vector_load %alloc_21[%c1, %c1, %dim] : memref<?x26x2xi32>, vector<7xi32>
+ return %16 : vector<7xi32>
+}
+
diff --git a/mlir/test/Dialect/Tensor/drop-redundant-insert-slice-rank-expansion.mlir b/mlir/test/Dialect/Tensor/drop-redundant-insert-slice-rank-expansion.mlir
index e337fdd93214..88e55062f477 100644
--- a/mlir/test/Dialect/Tensor/drop-redundant-insert-slice-rank-expansion.mlir
+++ b/mlir/test/Dialect/Tensor/drop-redundant-insert-slice-rank-expansion.mlir
@@ -9,3 +9,68 @@ func.func @test_drop_rank_expansion(%src: tensor<128x480xf32>, %dest: tensor<1x1
%extracted_slice = tensor.extract_slice %inserted_slice[0, 0, 0, 0] [1, 1, 123, 456] [1, 1, 1, 1] : tensor<1x1x128x480xf32> to tensor<123x456xf32>
return %extracted_slice : tensor<123x456xf32>
}
+
+// -----
+
+func.func @fold_casting_insert_slice_of_extract_slice(%in : tensor<?x8x2x8xf32>, %dest : tensor<8x1x8xf32>) -> tensor<8x1x8xf32> {
+ %extracted_slice = tensor.extract_slice %in[0, 0, 0, 0] [1, 8, 1, 8] [1, 1, 1, 1] : tensor<?x8x2x8xf32> to tensor<8x8xf32>
+ %inserted_slice = tensor.insert_slice %extracted_slice into %dest[0, 0, 0] [8, 1, 8] [1, 1, 1] : tensor<8x8xf32> into tensor<8x1x8xf32>
+ return %inserted_slice : tensor<8x1x8xf32>
+}
+// CHECK-LABEL: func.func @fold_casting_insert_slice_of_extract_slice(
+// CHECK-SAME: %[[ARG0:.*]]: tensor<?x8x2x8xf32>
+// CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[ARG0]][0, 0, 0, 0] [1, 8, 1, 8] [1, 1, 1, 1]
+// CHECK-SAME: : tensor<?x8x2x8xf32> to tensor<8x1x8xf32>
+// CHECK: return %[[EXTRACTED_SLICE]] : tensor<8x1x8xf32>
+
+// -----
+
+func.func @fold_casting_insert_slice_of_strided_extract_slice(%in : tensor<?x8x2x8xf32>, %dest : tensor<1x4x8xf32>) -> tensor<1x4x8xf32> {
+ %extracted_slice = tensor.extract_slice %in[0, 0, 0, 0] [1, 4, 1, 8] [1, 2, 1, 1] : tensor<?x8x2x8xf32> to tensor<4x8xf32>
+ %inserted_slice = tensor.insert_slice %extracted_slice into %dest[0, 0, 0] [1, 4, 8] [1, 1, 1] : tensor<4x8xf32> into tensor<1x4x8xf32>
+ return %inserted_slice : tensor<1x4x8xf32>
+}
+// CHECK-LABEL: func.func @fold_casting_insert_slice_of_strided_extract_slice(
+// CHECK-SAME: %[[ARG0:.*]]: tensor<?x8x2x8xf32>
+// CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[ARG0]][0, 0, 0, 0] [1, 4, 1, 8] [1, 2, 1, 1]
+// CHECK-SAME: : tensor<?x8x2x8xf32> to tensor<1x4x8xf32>
+// CHECK: return %[[EXTRACTED_SLICE]] : tensor<1x4x8xf32>
+
+// -----
+
+func.func @no_fold_more_unit_dims_insert_slice_of_extract_slice(%in : tensor<?x8x8xf32>, %dest : tensor<1x1x8x8xf32>) -> tensor<1x1x8x8xf32> {
+ %extracted_slice = tensor.extract_slice %in[0, 0, 0] [1, 8, 8] [1, 1, 1] : tensor<?x8x8xf32> to tensor<8x8xf32>
+ %inserted_slice = tensor.insert_slice %extracted_slice into %dest[0, 0, 0, 0] [1, 1, 8, 8] [1, 1, 1, 1] : tensor<8x8xf32> into tensor<1x1x8x8xf32>
+ return %inserted_slice : tensor<1x1x8x8xf32>
+}
+// CHECK-LABEL: func.func @no_fold_more_unit_dims_insert_slice_of_extract_slice(
+// CHECK-SAME: %[[ARG0:.*]]: tensor<?x8x8xf32>
+// CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[ARG0]]
+// CHECK: %[[INSERTED_SLICE:.*]] = tensor.insert_slice %[[EXTRACTED_SLICE]]
+// CHECK: return %[[INSERTED_SLICE]] : tensor<1x1x8x8xf32>
+
+// -----
+
+func.func @no_fold_strided_insert_slice_of_extract_slice(%in : tensor<?x8x2x8xf32>, %dest : tensor<1x4x4xf32>) -> tensor<1x4x4xf32> {
+ %extracted_slice = tensor.extract_slice %in[0, 0, 0, 0] [1, 8, 1, 8] [1, 1, 1, 1] : tensor<?x8x2x8xf32> to tensor<8x8xf32>
+ %inserted_slice = tensor.insert_slice %extracted_slice into %dest[0, 0, 0] [1, 8, 8] [1, 2, 2] : tensor<8x8xf32> into tensor<1x4x4xf32>
+ return %inserted_slice : tensor<1x4x4xf32>
+}
+// CHECK-LABEL: func.func @no_fold_strided_insert_slice_of_extract_slice(
+// CHECK-SAME: %[[ARG0:.*]]: tensor<?x8x2x8xf32>
+// CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[ARG0]]
+// CHECK: %[[INSERTED_SLICE:.*]] = tensor.insert_slice %[[EXTRACTED_SLICE]]
+// CHECK: return %[[INSERTED_SLICE]] : tensor<1x4x4xf32>
+
+// -----
+
+func.func @no_fold_non_casting_insert_slice_of_extract_slice(%in : tensor<1x1x1x8x8xf32>, %dest : tensor<2x8x8xf32>) -> tensor<2x8x8xf32> {
+ %extracted_slice = tensor.extract_slice %in[0, 0, 0, 0, 0] [1, 1, 1, 8, 8] [1, 1, 1, 1, 1] : tensor<1x1x1x8x8xf32> to tensor<8x8xf32>
+ %inserted_slice = tensor.insert_slice %extracted_slice into %dest[0, 0, 0] [1, 8, 8] [1, 1, 1] : tensor<8x8xf32> into tensor<2x8x8xf32>
+ return %inserted_slice : tensor<2x8x8xf32>
+}
+// CHECK-LABEL: func.func @no_fold_non_casting_insert_slice_of_extract_slice(
+// CHECK-SAME: %[[ARG0:.*]]: tensor<1x1x1x8x8xf32>
+// CHECK: %[[EXTRACTED_SLICE:.*]] = tensor.extract_slice %[[ARG0]]
+// CHECK: %[[INSERTED_SLICE:.*]] = tensor.insert_slice %[[EXTRACTED_SLICE]]
+// CHECK: return %[[INSERTED_SLICE]] : tensor<2x8x8xf32>
diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index 682107dbebbf..aa860e33cf35 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -290,22 +290,21 @@ func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_and_tile_dims(%arg0:
%return_value = tensor.cast %transposed : tensor<12x4x56x9x32x8x2xf32> to tensor<?x?x56x9x32x8x2xf32>
return %return_value : tensor<?x?x56x9x32x8x2xf32>
}
-// CHECK: #[[map:.+]] = affine_map<()[s0] -> (s0 ceildiv 8)>
-// CHECK: #[[map1:.+]] = affine_map<()[s0] -> (s0 ceildiv 2)>
-// CHECK: module {
-// CHECK: func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_and_tile_dims(
-// CHECK-SAME: %[[ARG0:.+]]: tensor<56x?x?x128xf32>)
-// CHECK-DAG: %[[c1:.+]] = arith.constant 1 : index
-// CHECK-DAG: %[[c2:.+]] = arith.constant 2 : index
-// CHECK: %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<56x?x?x128xf32>
-// CHECK: %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<56x?x?x128xf32>
-// CHECK: %[[mapped_dim1:.+]] = affine.apply #[[map:.+]]()[%[[dim]]]
-// CHECK: %[[mapped_dim2:.+]] = affine.apply #[[map1:.+]]()[%[[dim_0]]]
-// CHECK: %[[INIT:.+]] = tensor.empty(%[[mapped_dim2]], %[[mapped_dim1]]) : tensor<?x4x56x?x32x8x2xf32>
-// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [2, 3, 0, 1] inner_dims_pos = [3, 1, 2] inner_tiles = [32, 8, 2] into %[[INIT]] : tensor<56x?x?x128xf32> -> tensor<?x4x56x?x32x8x2xf32>
-// CHECK: %[[CAST:.+]] = tensor.cast %[[PACK]] : tensor<?x4x56x?x32x8x2xf32> to tensor<?x?x56x9x32x8x2xf32>
-// CHECK: return %[[CAST]] : tensor<?x?x56x9x32x8x2xf32>
-// CHECK: }
+// CHECK-DAG: #[[$MAP0:.+]] = affine_map<()[s0] -> (s0 ceildiv 8)>
+// CHECK-DAG: #[[$MAP1:.+]] = affine_map<()[s0] -> (s0 ceildiv 2)>
+// CHECK-LABEL: func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_and_tile_dims(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<56x?x?x128xf32>)
+// CHECK-DAG: %[[c1:.+]] = arith.constant 1 : index
+// CHECK-DAG: %[[c2:.+]] = arith.constant 2 : index
+// CHECK: %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<56x?x?x128xf32>
+// CHECK: %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<56x?x?x128xf32>
+// CHECK: %[[mapped_dim1:.+]] = affine.apply #[[$MAP0]]()[%[[dim]]]
+// CHECK: %[[mapped_dim2:.+]] = affine.apply #[[$MAP1]]()[%[[dim_0]]]
+// CHECK: %[[INIT:.+]] = tensor.empty(%[[mapped_dim2]], %[[mapped_dim1]]) : tensor<?x4x56x?x32x8x2xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [2, 3, 0, 1] inner_dims_pos = [3, 1, 2] inner_tiles = [32, 8, 2] into %[[INIT]] : tensor<56x?x?x128xf32> -> tensor<?x4x56x?x32x8x2xf32>
+// CHECK: %[[CAST:.+]] = tensor.cast %[[PACK]] : tensor<?x4x56x?x32x8x2xf32> to tensor<?x?x56x9x32x8x2xf32>
+// CHECK: return %[[CAST]] : tensor<?x?x56x9x32x8x2xf32>
+// CHECK: }
// -----
@@ -323,7 +322,7 @@ func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims_tile_dims_tile_s
return %transposed : tensor<?x?x?x?x?x?x?xf32>
}
-// CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
// CHECK: module {
// CHECK: func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims_tile_dims_tile_sizes(
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>,
@@ -338,9 +337,9 @@ func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims_tile_dims_tile_s
// CHECK: %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<?x?x?x?xf32>
// CHECK: %[[dim_1:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<?x?x?x?xf32>
// CHECK: %[[dim_2:.+]] = tensor.dim %[[ARG0]], %[[c3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[mapped_dim0:.+]] = affine.apply #[[map:.+]]()[%[[dim_2]], %[[ARG3]]]
-// CHECK: %[[mapped_dim1:.+]] = affine.apply #[[map:.+]]()[%[[dim_0]], %[[ARG1]]]
-// CHECK: %[[mapped_dim2:.+]] = affine.apply #[[map:.+]]()[%[[dim_1]], %[[ARG2]]]
+// CHECK: %[[mapped_dim0:.+]] = affine.apply #[[$MAP]]()[%[[dim_2]], %[[ARG3]]]
+// CHECK: %[[mapped_dim1:.+]] = affine.apply #[[$MAP]]()[%[[dim_0]], %[[ARG1]]]
+// CHECK: %[[mapped_dim2:.+]] = affine.apply #[[$MAP]]()[%[[dim_1]], %[[ARG2]]]
// CHECK: %[[INIT:.+]] = tensor.empty(%[[mapped_dim2]], %[[mapped_dim1]], %[[mapped_dim0]], %[[dim]], %[[ARG3]], %[[ARG1]], %[[ARG2]]) : tensor<?x?x?x?x?x?x?xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [2, 1, 3, 0] inner_dims_pos = [3, 1, 2] inner_tiles = [%[[ARG3]], %[[ARG1]], %[[ARG2]]] into %[[INIT]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
@@ -465,7 +464,7 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
into %pack_dest : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
return %pack : tensor<?x?x?x?x?x?x?xf32>
}
-// CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
//CHECK-LABEL: func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?x?xf32>,
// CHECK-SAME: %[[ARG2:.+]]: tensor<?x?x?x?x?x?x?xf32>, %[[ARG3:.+]]: index, %[[ARG4:.+]]: index, %[[ARG5:.+]]: index) -> tensor<?x?x?x?x?x?x?xf32> {
@@ -477,9 +476,9 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?x?xf32>
// CHECK: %[[DIM1:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?x?xf32>
// CHECK: %[[DIM2:.+]] = tensor.dim %[[ARG0]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[VAL0:.+]] = affine.apply #[[map:.+]]()[%[[DIM2]], %[[ARG3]]]
-// CHECK: %[[VAL1:.+]] = affine.apply #[[map:.+]]()[%[[DIM0]], %[[ARG4]]]
-// CHECK: %[[VAL2:.+]] = affine.apply #[[map:.+]]()[%[[DIM]], %[[ARG5]]]
+// CHECK: %[[VAL0:.+]] = affine.apply #[[$MAP]]()[%[[DIM2]], %[[ARG3]]]
+// CHECK: %[[VAL1:.+]] = affine.apply #[[$MAP]]()[%[[DIM0]], %[[ARG4]]]
+// CHECK: %[[VAL2:.+]] = affine.apply #[[$MAP]]()[%[[DIM]], %[[ARG5]]]
// CHECK: %[[VAL3:.+]] = tensor.empty(%[[VAL1]], %[[DIM1]], %[[VAL2]], %[[VAL0]], %[[ARG3]], %[[ARG4]], %[[ARG5]]) : tensor<?x?x?x?x?x?x?xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [1, 2, 0, 3] inner_dims_pos = [3, 1, 0] inner_tiles = [%[[ARG3]], %[[ARG4]], %[[ARG5]]] into %[[VAL3]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
@@ -506,13 +505,13 @@ func.func @linalg_transpose_tensor_pack_multiple_tiles(%arg0: tensor<?x32x128xbf
into %2 : tensor<32x128x?xbf16> -> tensor<32x?x64x16x2xbf16>
return %pack : tensor<32x?x64x16x2xbf16>
}
-// CHECK: #[[map:.+]] = affine_map<()[s0] -> (s0 ceildiv 16)>
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 ceildiv 16)>
//CHECK-LABEL: func.func @linalg_transpose_tensor_pack_multiple_tiles(
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x32x128xbf16>) -> tensor<32x?x64x16x2xbf16> {
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : bf16
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x128xbf16>
-// CHECK: %[[VAL0:.+]] = affine.apply #[[map:.+]]()[%[[DIM]]]
+// CHECK: %[[VAL0:.+]] = affine.apply #[[$MAP]]()[%[[DIM]]]
// CHECK: %[[VAL1:.+]] = tensor.empty(%[[VAL0]]) : tensor<32x?x64x16x2xbf16>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]]
// CHECK-SAME: padding_value(%[[CST]] : bf16)
@@ -522,3 +521,91 @@ func.func @linalg_transpose_tensor_pack_multiple_tiles(%arg0: tensor<?x32x128xbf
// CHECK-SAME: into %[[VAL1]] : tensor<?x32x128xbf16> -> tensor<32x?x64x16x2xbf16>
// CHECK: return %[[PACK]] : tensor<32x?x64x16x2xbf16>
// CHECK: }
+
+// -----
+
+func.func @linalg_transpose_tensor_unpack_fold(%arg0: tensor<1x1x4x16xi32>) -> tensor<16x4xi32> {
+ %0 = tensor.empty() : tensor<1x1x16x4xi32>
+ %transposed = linalg.transpose ins(%arg0 : tensor<1x1x4x16xi32>)
+ outs(%0 : tensor<1x1x16x4xi32>)
+ permutation = [1, 0, 3, 2]
+ %1 = tensor.empty() : tensor<16x4xi32>
+ %unpack = tensor.unpack %transposed
+ outer_dims_perm = [0, 1]
+ inner_dims_pos = [0, 1]
+ inner_tiles = [16, 4] into
+ %1 : tensor<1x1x16x4xi32> -> tensor<16x4xi32>
+ return %unpack : tensor<16x4xi32>
+}
+//CHECK-LABEL: func.func @linalg_transpose_tensor_unpack_fold(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<1x1x4x16xi32>) -> tensor<16x4xi32> {
+// CHECK: %[[OUT:.+]] = tensor.empty() : tensor<16x4xi32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]]
+// CHECK-SAME: outer_dims_perm = [1, 0]
+// CHECK-SAME: inner_dims_pos = [1, 0]
+// CHECK-SAME: inner_tiles = [4, 16]
+// CHECK-SAME: into %[[OUT]] : tensor<1x1x4x16xi32> -> tensor<16x4xi32>
+// CHECK: return %[[UNPACK]] : tensor<16x4xi32>
+// CHECK: }
+
+// -----
+
+func.func @linalg_transpose_tensor_unpack_fold_dynamic_outer_dims_tile_dims_tile_sizes(%arg0: tensor<?x?x?x?xf32>, %transpose_dest: tensor<?x?x?x?xf32>, %unpack_dest: tensor<?x?xf32>, %tile_p : index, %tile_q : index) -> tensor<?x?xf32> {
+ %transposed = linalg.transpose
+ ins(%arg0 : tensor<?x?x?x?xf32>)
+ outs(%transpose_dest : tensor<?x?x?x?xf32>)
+ permutation = [1, 0, 3, 2]
+
+ %unpack = tensor.unpack %transposed
+ outer_dims_perm = [1, 0]
+ inner_dims_pos = [0, 1]
+ inner_tiles = [%tile_p, %tile_q]
+ into %unpack_dest : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
+ return %unpack : tensor<?x?xf32>
+}
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
+// CHECK-LABEL: func.func @linalg_transpose_tensor_unpack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[IDX1:.+]]: index, %[[IDX2:.+]]: index) -> tensor<?x?xf32> {
+// CHECK-DAG: %[[CST1:.+]] = arith.constant 1 : index
+// CHECK-DAG: %[[CST0:.+]] = arith.constant 0 : index
+// CHECK-DAG: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[CST0]] : tensor<?x?x?x?xf32>
+// CHECK-DAG: %[[DIM1:.+]] = tensor.dim %[[ARG0]], %[[CST1]] : tensor<?x?x?x?xf32>
+// CHECK-DAG: %[[AMAP0:.+]] = affine.apply #[[$MAP]]()[%[[DIM1]], %[[IDX2]]]
+// CHECK-DAG: %[[AMAP1:.+]] = affine.apply #[[$MAP]]()[%[[DIM0]], %[[IDX1]]]
+// CHECK: %[[OUT:.+]] = tensor.empty(%[[AMAP1]], %[[AMAP0]]) : tensor<?x?xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]]
+// CHECK-SAME: outer_dims_perm = [0, 1]
+// CHECK-SAME: inner_dims_pos = [1, 0]
+// CHECK-SAME: inner_tiles = [%[[IDX2]], %[[IDX1]]]
+// CHECK-SAME: into %[[OUT]] : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
+// CHECK: return %[[UNPACK]] : tensor<?x?xf32>
+// CHECK: }
+
+// -----
+
+func.func @tensor_unpack_linalg_transpose_fold(%arg0: tensor<56x57x1x64xf32>) -> tensor<3648x56xf32> {
+ %0 = tensor.empty() : tensor<56x3648xf32>
+ %pack = tensor.unpack %arg0
+ outer_dims_perm = [0, 1]
+ inner_dims_pos = [0, 1]
+ inner_tiles = [1, 64]
+ into %0 : tensor<56x57x1x64xf32> -> tensor<56x3648xf32>
+
+ %1 = tensor.empty() : tensor<3648x56xf32>
+ %transposed = linalg.transpose
+ ins(%pack : tensor<56x3648xf32>)
+ outs(%1 : tensor<3648x56xf32>)
+ permutation = [1,0]
+ return %transposed : tensor<3648x56xf32>
+}
+// CHECK-LABEL: func.func @tensor_unpack_linalg_transpose_fold(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>) -> tensor<3648x56xf32> {
+// CHECK: %[[OUT:.+]] = tensor.empty() : tensor<3648x56xf32>
+// CHECK: %[[UNPACK:.+]] = tensor.unpack %[[ARG0]]
+// CHECK-SAME: outer_dims_perm = [1, 0]
+// CHECK-SAME: inner_dims_pos = [1, 0]
+// CHECK-SAME: inner_tiles = [1, 64]
+// CHECK-SAME: into %[[OUT]] : tensor<56x57x1x64xf32> -> tensor<3648x56xf32>
+// CHECK: return %[[UNPACK]] : tensor<3648x56xf32>
+// CHECK: }
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index 38c3bb8af810..e2169fe1404c 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -1,9 +1,9 @@
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
-// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59 bufferize-function-boundaries" -split-input-file -o /dev/null
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
// RUN: mlir-opt %s -one-shot-bufferize="unknown-type-conversion=identity-layout-map bufferize-function-boundaries" -split-input-file -o /dev/null
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index e7ede2e0ccef..6eac759a0836 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -365,6 +365,14 @@ func.func @reshape_canonicalize(%arg0: tensor<?x10xf32>) -> tensor<?x10xf32> {
return %0 : tensor<?x10xf32>
}
+// CHECK-LABEL: @reshape_canonicalize_dyn_nofold
+func.func @reshape_canonicalize_dyn_nofold(%arg0: tensor<?x?x10xf32>) -> tensor<?x?x10xf32> {
+ // CHECK: %[[VAR0:.+]] = tosa.reshape %arg0 {new_shape = array<i64: -1, 2, 10>} : (tensor<?x?x10xf32>) -> tensor<?x?x10xf32>
+ // CHECK: return %[[VAR0]] : tensor<?x?x10xf32>
+ %0 = tosa.reshape %arg0 {new_shape = array<i64: -1, 2, 10>} : (tensor<?x?x10xf32>) -> tensor<?x?x10xf32>
+ return %0 : tensor<?x?x10xf32>
+}
+
// CHECK-LABEL: @reshape_canonicalize_double
func.func @reshape_canonicalize_double(%arg0: tensor<?x10xf32>) -> tensor<?x5xf32> {
// CHECK: %[[VAL_1:.*]] = tosa.reshape %arg0 {new_shape = array<i64: -1, 5>}
diff --git a/mlir/test/Dialect/Tosa/constant-op-fold.mlir b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
index 27ca3ae3c21b..de752f31fcba 100644
--- a/mlir/test/Dialect/Tosa/constant-op-fold.mlir
+++ b/mlir/test/Dialect/Tosa/constant-op-fold.mlir
@@ -112,6 +112,23 @@ func.func @transpose_nofold_quantized_types() -> tensor<1x1x2x2x!quant.uniform<i
return %0: tensor<1x1x2x2x!quant.uniform<i8<-127:127>:f32:3, {1.000000e-01,1.000000e-01}>>
}
+// CHECK-LABEL: @transpose_nofold_dense_resource
+func.func @transpose_nofold_dense_resource() -> tensor<2x2xf32> {
+ %0 = "tosa.const"() <{value = dense_resource<resource> : tensor<2x2xf32>}> : () -> tensor<2x2xf32>
+ %1 = "tosa.const"() <{value = dense<[1, 0]> : tensor<2xi32>}> : () -> tensor<2xi32>
+
+ // CHECK: tosa.transpose
+ %2 = tosa.transpose %0, %1 : (tensor<2x2xf32>, tensor<2xi32>) -> tensor<2x2xf32>
+ return %2 : tensor<2x2xf32>
+}
+{-#
+ dialect_resources: {
+ builtin: {
+ resource: "0x08000000010000000000000002000000000000000300000000000000"
+ }
+ }
+#-}
+
// -----
// CHECK-LABEL: @fold_add_zero_rhs_f32
diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir
index 35ecbcc799e3..d8dd878051f1 100644
--- a/mlir/test/Dialect/Tosa/level_check.mlir
+++ b/mlir/test/Dialect/Tosa/level_check.mlir
@@ -115,6 +115,22 @@ func.func @test_const(%arg0 : tensor<1x1xi32>) -> tensor<1x1x1x1x1x1x1xi32> {
// -----
+func.func @test_const_i2(%arg0 : tensor<1xi2>) {
+ // expected-error@+1 {{'tosa.const' op is not profile-aligned: element type 'i2' is not legal}}
+ %0 = "tosa.const"() {value = dense<0> : tensor<1xi2>} : () -> tensor<1xi2>
+ return
+}
+
+// -----
+
+func.func @test_const_ui32(%arg0 : tensor<1xui32>) {
+ // expected-error@+1 {{'tosa.const' op is not profile-aligned: element type 'ui32' is not legal}}
+ %0 = "tosa.const"() {value = dense<0> : tensor<1xui32>} : () -> tensor<1xui32>
+ return
+}
+
+// -----
+
func.func @test_avgpool2d_kernel_y(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32x32x8xf32> {
// expected-error@+1 {{'tosa.avg_pool2d' op failed level check: kernel <= MAX_KERNEL}}
%0 = "tosa.avg_pool2d"(%arg0) {kernel = array<i64: 8193, 1>, pad = array<i64: 4, 4, 4, 4>, stride = array<i64: 1, 1>, acc_type = f32} :
diff --git a/mlir/test/Dialect/Transform/foreach-match.mlir b/mlir/test/Dialect/Transform/foreach-match.mlir
new file mode 100644
index 000000000000..206625ae0746
--- /dev/null
+++ b/mlir/test/Dialect/Transform/foreach-match.mlir
@@ -0,0 +1,80 @@
+// RUN: mlir-opt %s --transform-interpreter --split-input-file --verify-diagnostics
+
+// Silenceable diagnostics suppressed.
+module attributes { transform.with_named_sequence } {
+ func.func @test_loop_peeling_not_beneficial() {
+ %lb = arith.constant 0 : index
+ %ub = arith.constant 40 : index
+ %step = arith.constant 5 : index
+ scf.for %i = %lb to %ub step %step {
+ arith.addi %i, %i : index
+ }
+ return
+ }
+
+ transform.named_sequence @peel(%arg0: !transform.op<"scf.for"> {transform.consumed}) {
+ transform.loop.peel %arg0 : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+ transform.named_sequence @match_for(%arg0: !transform.any_op {transform.readonly}) -> !transform.any_op {
+ transform.match.operation_name %arg0 ["scf.for"] : !transform.any_op
+ transform.yield %arg0 : !transform.any_op
+ }
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ transform.sequence %root : !transform.any_op failures(suppress) {
+ ^bb0(%arg0: !transform.any_op):
+ transform.foreach_match in %arg0
+ @match_for -> @peel
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+ transform.yield
+ }
+}
+
+// -----
+
+// Silenceable diagnostics propagated.
+module attributes { transform.with_named_sequence } {
+ func.func @test_loop_peeling_not_beneficial() {
+ %lb = arith.constant 0 : index
+ %ub = arith.constant 40 : index
+ %step = arith.constant 5 : index
+ // expected-note @below {{when applied to this matching payload}}
+ scf.for %i = %lb to %ub step %step {
+ arith.addi %i, %i : index
+ }
+ return
+ }
+
+ // expected-note @below {{failed to peel the last iteration}}
+ transform.named_sequence @peel(%arg0: !transform.op<"scf.for"> {transform.consumed}) {
+ transform.loop.peel %arg0 : (!transform.op<"scf.for">) -> (!transform.any_op, !transform.any_op)
+ transform.yield
+ }
+ transform.named_sequence @match_for(%arg0: !transform.any_op {transform.readonly}) -> !transform.any_op {
+ transform.match.operation_name %arg0 ["scf.for"] : !transform.any_op
+ transform.yield %arg0 : !transform.any_op
+ }
+ transform.named_sequence @main_suppress(%root: !transform.any_op) {
+ transform.sequence %root : !transform.any_op failures(suppress) {
+ ^bb0(%arg0: !transform.any_op):
+ transform.foreach_match in %arg0
+ @match_for -> @peel
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+ transform.yield
+ }
+ transform.named_sequence @__transform_main(%root: !transform.any_op) {
+ transform.sequence %root : !transform.any_op failures(propagate) {
+ ^bb0(%arg0: !transform.any_op):
+ // expected-error @below {{actions failed}}
+ transform.foreach_match in %arg0
+ @match_for -> @peel
+ : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Transform/ops-invalid.mlir b/mlir/test/Dialect/Transform/ops-invalid.mlir
index 73a5f36af929..cc04e65420c5 100644
--- a/mlir/test/Dialect/Transform/ops-invalid.mlir
+++ b/mlir/test/Dialect/Transform/ops-invalid.mlir
@@ -771,3 +771,14 @@ module attributes { transform.with_named_sequence } {
transform.yield %arg0 : !transform.any_op
}
}
+
+// -----
+
+module attributes { transform.with_named_sequence } {
+ transform.named_sequence @match_matmul(%entry: !transform.any_op) -> () {
+ %c3 = transform.param.constant 1 : i64 -> !transform.param<i64>
+ // expected-error @below {{op operand #0 must be TransformHandleTypeInterface instance}}
+ transform.print %c3 : !transform.param<i64>
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Vector/linearize.mlir b/mlir/test/Dialect/Vector/linearize.mlir
index 2cbf9bec7a41..f0e9b3a05c06 100644
--- a/mlir/test/Dialect/Vector/linearize.mlir
+++ b/mlir/test/Dialect/Vector/linearize.mlir
@@ -1,92 +1,156 @@
-// RUN: mlir-opt %s -split-input-file -test-vector-linearize | FileCheck %s
-// RUN: mlir-opt %s -split-input-file -test-vector-linearize=target-vector-bitwidth=128 | FileCheck %s --check-prefix=CHECK128
-// RUN: mlir-opt %s -split-input-file -test-vector-linearize=target-vector-bitwidth=0 | FileCheck %s --check-prefix=CHECK0
-
-// CHECK-LABEL: test_linearize
-// CHECK128-LABEL: test_linearize
-// CHECK0-LABEL: test_linearize
-// CHECK-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>)
-// CHECK128-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>)
-// CHECK: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
-// CHECK128: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
+// RUN: mlir-opt %s -split-input-file -test-vector-linearize -verify-diagnostics | FileCheck %s --check-prefixes=ALL,DEFAULT
+// RUN: mlir-opt %s -split-input-file -test-vector-linearize=target-vector-bitwidth=128 -verify-diagnostics | FileCheck %s --check-prefixes=ALL,BW-128
+// RUN: mlir-opt %s -split-input-file -test-vector-linearize=target-vector-bitwidth=0 | FileCheck %s --check-prefixes=ALL,BW-0
+
+// ALL-LABEL: test_linearize
+// ALL-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>)
func.func @test_linearize(%arg0: vector<2x2xf32>) -> vector<2x2xf32> {
-// CHECK: %[[C1:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
-// CHECK128: %[[C1:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
-// CHECK0: %[[C1:.*]] = arith.constant dense<{{.*}}> : vector<2x2xf32>
+ // DEFAULT: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
+ // DEFAULT: %[[CST:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
+ // DEFAULT: %[[RES:.*]] = vector.shape_cast %[[CST]] : vector<4xf32> to vector<2x2xf32>
+
+ // BW-128: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
+ // BW-128: %[[CST:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
+ // BW-128: %[[RES:.*]] = vector.shape_cast %[[CST]] : vector<4xf32> to vector<2x2xf32>
+ // BW-0: %[[RES:.*]] = arith.constant dense<{{.*}}> : vector<2x2xf32>
%0 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : vector<2x2xf32>
-// CHECK: %[[RES:.*]] = vector.shape_cast %[[C1]] : vector<4xf32> to vector<2x2xf32>
-// CHECK128: %[[RES:.*]] = vector.shape_cast %[[C1]] : vector<4xf32> to vector<2x2xf32>
-// Arith and math ops are handled in generic way, check some of them
-// CHECK: %{{.*}} = math.sin %[[ARG]] : vector<4xf32>
-// CHECK128: %{{.*}} = math.sin %[[ARG]] : vector<4xf32>
-// CHECK0: %{{.*}} = math.sin %{{.*}} : vector<2x2xf32>
+
+ // DEFAULT: %{{.*}} = math.sin %[[ARG]] : vector<4xf32>
+ // BW-128: %{{.*}} = math.sin %[[ARG]] : vector<4xf32>
+ // BW-0: %{{.*}} = math.sin %{{.*}} : vector<2x2xf32>
%1 = math.sin %arg0 : vector<2x2xf32>
-// CHECK: %{{.*}} = arith.addf %[[ARG]], %[[C1]] : vector<4xf32>
-// CHECK128: %{{.*}} = arith.addf %[[ARG]], %[[C1]] : vector<4xf32>
-// CHECK0: %{{.*}} = arith.addf %{{.*}} : vector<2x2xf32>
+ // DEFAULT: %{{.*}} = arith.addf %[[ARG]], %[[CST]] : vector<4xf32>
+ // BW-128: %{{.*}} = arith.addf %[[ARG]], %[[CST]] : vector<4xf32>
+ // BW-0: %{{.*}} = arith.addf %{{.*}} : vector<2x2xf32>
%2 = arith.addf %arg0, %0 : vector<2x2xf32>
-// CHECK: return %[[RES]] : vector<2x2xf32>
-// CHECK128: return %[[RES]] : vector<2x2xf32>
+ // ALL: return %[[RES]] : vector<2x2xf32>
return %0 : vector<2x2xf32>
}
-// CHECK-LABEL: test_partial_linearize
-// CHECK128-LABEL: test_partial_linearize
-// CHECK0-LABEL: test_partial_linearize
-// CHECK-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>, %[[ORIG_ARG2:.*]]: vector<4x4xf32>)
-// CHECK128-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>, %[[ORIG_ARG2:.*]]: vector<4x4xf32>)
-// CHECK0-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>, %[[ORIG_ARG2:.*]]: vector<4x4xf32>)
-// CHECK: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
-// CHECK128: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
-// CHECK: %[[ARG2:.*]] = vector.shape_cast %[[ORIG_ARG2]] : vector<4x4xf32> to vector<16xf32>
+// -----
+
+// ALL-LABEL: test_partial_linearize
+// ALL-SAME: (%[[ORIG_ARG:.*]]: vector<2x2xf32>, %[[ORIG_ARG2:.*]]: vector<4x4xf32>)
func.func @test_partial_linearize(%arg0: vector<2x2xf32>, %arg1: vector<4x4xf32>) -> vector<2x2xf32> {
-// CHECK: %[[C1:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
-// CHECK128: %[[C1:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : vector<4xf32>
-// CHECK0: %[[C1:.*]] = arith.constant dense<{{.*}}> : vector<2x2xf32>
+ // DEFAULT: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
+ // DEFAULT: %[[ARG2:.*]] = vector.shape_cast %[[ORIG_ARG2]] : vector<4x4xf32> to vector<16xf32>
+ // DEFAULT: %[[CST:.*]] = arith.constant dense<{{.*}}> : vector<4xf32>
+ // DEFAULT: %[[RES:.*]] = vector.shape_cast %[[CST]] : vector<4xf32> to vector<2x2xf32>
+ // BW-128: %[[ARG:.*]] = vector.shape_cast %[[ORIG_ARG]] : vector<2x2xf32> to vector<4xf32>
+ // BW-128: %[[CST:.*]] = arith.constant dense<{{.*}}> : vector<4xf32>
+ // BW-128: %[[RES:.*]] = vector.shape_cast %[[CST]] : vector<4xf32> to vector<2x2xf32>
+
+ // BW-0: %[[RES:.*]] = arith.constant dense<{{.*}}> : vector<2x2xf32>
%0 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : vector<2x2xf32>
-// CHECK: %[[RES:.*]] = vector.shape_cast %[[C1]] : vector<4xf32> to vector<2x2xf32>
-// CHECK128: %[[RES:.*]] = vector.shape_cast %[[C1]] : vector<4xf32> to vector<2x2xf32>
- // CHECK: %[[C2:.*]] = arith.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 1.000000e+00, 2.000000e+00, 5.000000e+00, 6.000000e+00]> : vector<16xf32>
- // CHECK128: %[[C2:.*]] = arith.constant dense<{{.*}}> : vector<4x4xf32>
- // CHECK0: %[[C2:.*]] = arith.constant dense<{{.*}}> : vector<4x4xf32>
+ // DEFAULT: %[[C2:.*]] = arith.constant dense<{{.*}}> : vector<16xf32>
+ // BW-128: %[[C2:.*]] = arith.constant dense<{{.*}}> : vector<4x4xf32>
+ // BW-0: %[[C2:.*]] = arith.constant dense<{{.*}}> : vector<4x4xf32>
%5 = arith.constant dense<[[1.0, 2.0, 3.0, 4.0], [1.0, 2.0,3.0, 4.0], [1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 5.0, 6.0]]> : vector<4x4xf32>
-// Arith and math ops are handled in generic way, check some of them
-// CHECK: %[[SIN:.*]] = math.sin %[[ARG]] : vector<4xf32>
-// CHECK128: %[[SIN:.*]] = math.sin %[[ARG]] : vector<4xf32>
-// CHECK0: %[[SIN:.*]] = math.sin %[[ORIG_ARG]] : vector<2x2xf32>
+
+ // Arith and math ops are handled in generic way, check some of them
+ // DEFAULT: %[[SIN:.*]] = math.sin %[[ARG]] : vector<4xf32>
+ // BW-128: %[[SIN:.*]] = math.sin %[[ARG]] : vector<4xf32>
+ // BW-0: %[[SIN:.*]] = math.sin %[[ORIG_ARG]] : vector<2x2xf32>
%1 = math.sin %arg0 : vector<2x2xf32>
- // CHECK: %[[SIN1:.*]] = math.sin %[[ARG2]] : vector<16xf32>
-// CHECK128: %[[SIN1:.*]] = math.sin %[[ORIG_ARG2]] : vector<4x4xf32>
-// CHECK0: %[[SIN1:.*]] = math.sin %[[ORIG_ARG2]] : vector<4x4xf32>
+ // DEFAULT: %[[SIN1:.*]] = math.sin %[[ARG2]] : vector<16xf32>
+ // BW-128: %[[SIN1:.*]] = math.sin %[[ORIG_ARG2]] : vector<4x4xf32>
+ // BW-0: %[[SIN1:.*]] = math.sin %[[ORIG_ARG2]] : vector<4x4xf32>
%6 = math.sin %arg1 : vector<4x4xf32>
-// CHECK: %{{.*}} = arith.addf %[[ARG]], %[[C1]] : vector<4xf32>
-// CHECK128: %{{.*}} = arith.addf %[[ARG]], %[[C1]] : vector<4xf32>
-// CHECK0: %{{.*}} = arith.addf %{{.*}} : vector<2x2xf32>
+ // DEFAULT: %{{.*}} = arith.addf %[[ARG]], %[[CST]] : vector<4xf32>
+ // BW-128: %{{.*}} = arith.addf %[[ARG]], %[[CST]] : vector<4xf32>
+ // BW-0: %{{.*}} = arith.addf %{{.*}} : vector<2x2xf32>
%2 = arith.addf %arg0, %0 : vector<2x2xf32>
- // CHECK: %[[ADD2:.*]] = arith.addf %[[ARG2]], %[[C2]] : vector<16xf32>
- // CHECK128: %[[ADD2:.*]] = arith.addf %[[ORIG_ARG2]], %[[C2]] : vector<4x4xf32>
- // CHECK0: %[[ADD2:.*]] = arith.addf %[[ORIG_ARG2]], %[[C2]] : vector<4x4xf32>
+ // DEFAULT: %[[ADD2:.*]] = arith.addf %[[ARG2]], %[[C2]] : vector<16xf32>
+ // BW-128: %[[ADD2:.*]] = arith.addf %[[ORIG_ARG2]], %[[C2]] : vector<4x4xf32>
+ // BW-0: %[[ADD2:.*]] = arith.addf %[[ORIG_ARG2]], %[[C2]] : vector<4x4xf32>
%7 = arith.addf %arg1, %5 : vector<4x4xf32>
-// CHECK: return %[[RES]] : vector<2x2xf32>
-// CHECK128: return %[[RES]] : vector<2x2xf32>
+
+ // ALL: return %[[RES]] : vector<2x2xf32>
return %0 : vector<2x2xf32>
}
-// CHECK-LABEL: test_index_no_linearize
-// CHECK128-LABEL: test_index_no_linearize
-// CHECK0-LABEL: test_index_no_linearize
+// -----
+
+// ALL-LABEL: test_index_no_linearize
func.func @test_index_no_linearize(%arg0: vector<2x2xindex>, %arg1: vector<2x2xindex>) -> vector<2x2xindex> {
- // CHECK: %[[ADD:.*]] = arith.addi {{.*}} : vector<2x2xindex>
- // CHECK128: %[[ADD:.*]] = arith.addi {{.*}} : vector<2x2xindex>
- // CHECK0: %[[ADD:.*]] = arith.addi {{.*}} : vector<2x2xindex>
+ // ALL: %[[ADD:.*]] = arith.addi {{.*}} : vector<2x2xindex>
%0 = arith.addi %arg0, %arg1 : vector<2x2xindex>
return %0 : vector<2x2xindex>
}
+
+// -----
+
+// vectorizable operation (arith.mulf) with tensor result types.
+
+// ALL-LABEL: test_tensor_no_linearize
+func.func @test_tensor_no_linearize(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<2x2xf32>) {
+ // ALL: %[[MULF:.*]] = arith.mulf %arg0, %arg1 : tensor<2x2xf32>
+ %0 = arith.mulf %arg0, %arg1 : tensor<2x2xf32>
+
+ return %0, %arg0 : tensor<2x2xf32>, tensor<2x2xf32>
+}
+
+// -----
+
+// ALL-LABEL: func.func @test_scalable_linearize(
+// ALL-SAME: %[[ARG_0:.*]]: vector<2x[2]xf32>) -> vector<2x[2]xf32> {
+func.func @test_scalable_linearize(%arg0: vector<2x[2]xf32>) -> vector<2x[2]xf32> {
+ // DEFAULT: %[[SC:.*]] = vector.shape_cast %[[ARG_0]] : vector<2x[2]xf32> to vector<[4]xf32>
+ // DEFAULT: %[[CST:.*]] = arith.constant dense<3.000000e+00> : vector<[4]xf32>
+ // BW-128: %[[SC:.*]] = vector.shape_cast %[[ARG_0]] : vector<2x[2]xf32> to vector<[4]xf32>
+ // BW-128: %[[CST:.*]] = arith.constant dense<3.000000e+00> : vector<[4]xf32>
+ // BW-0: %[[CST:.*]] = arith.constant dense<3.000000e+00> : vector<2x[2]xf32>
+ %0 = arith.constant dense<[[3., 3.], [3., 3.]]> : vector<2x[2]xf32>
+
+ // DEFAULT: %[[SIN:.*]] = math.sin %[[SC]] : vector<[4]xf32>
+ // BW-128: %[[SIN:.*]] = math.sin %[[SC]] : vector<[4]xf32>
+ // BW-0: %[[SIN:.*]] = math.sin %[[ARG_0]] : vector<2x[2]xf32>
+ %1 = math.sin %arg0 : vector<2x[2]xf32>
+
+ // DEFAULT: %[[ADDF:.*]] = arith.addf %[[SIN]], %[[CST]] : vector<[4]xf32>
+ // BW-128: %[[ADDF:.*]] = arith.addf %[[SIN]], %[[CST]] : vector<[4]xf32>
+ // BW-0: %[[RES:.*]] = arith.addf %[[CST]], %[[SIN]] : vector<2x[2]xf32>
+ %2 = arith.addf %0, %1 : vector<2x[2]xf32>
+
+ // DEFAULT: %[[RES:.*]] = vector.shape_cast %[[ADDF]] : vector<[4]xf32> to vector<2x[2]xf32>
+ // BW-128: %[[RES:.*]] = vector.shape_cast %[[ADDF]] : vector<[4]xf32> to vector<2x[2]xf32>
+ // ALL: return %[[RES]] : vector<2x[2]xf32>
+ return %2 : vector<2x[2]xf32>
+}
+
+// -----
+
+// ALL-LABEL: func.func @test_scalable_no_linearize(
+// ALL-SAME: %[[VAL_0:.*]]: vector<[2]x[2]xf32>) -> vector<[2]x[2]xf32> {
+func.func @test_scalable_no_linearize(%arg0: vector<[2]x[2]xf32>) -> vector<[2]x[2]xf32> {
+ // ALL: %[[CST:.*]] = arith.constant dense<2.000000e+00> : vector<[2]x[2]xf32>
+ %0 = arith.constant dense<[[2., 2.], [2., 2.]]> : vector<[2]x[2]xf32>
+
+ // ALL: %[[SIN:.*]] = math.sin %[[VAL_0]] : vector<[2]x[2]xf32>
+ %1 = math.sin %arg0 : vector<[2]x[2]xf32>
+
+ // ALL: %[[RES:.*]] = arith.addf %[[CST]], %[[SIN]] : vector<[2]x[2]xf32>
+ %2 = arith.addf %0, %1 : vector<[2]x[2]xf32>
+
+ // ALL: return %[[RES]] : vector<[2]x[2]xf32>
+ return %2 : vector<[2]x[2]xf32>
+}
+
+// -----
+
+func.func @test_scalable_no_linearize(%arg0: vector<2x[2]xf32>) -> vector<2x[2]xf32> {
+ // expected-error@+1 {{failed to legalize operation 'arith.constant' that was explicitly marked illegal}}
+ %0 = arith.constant dense<[[1., 1.], [3., 3.]]> : vector<2x[2]xf32>
+ %1 = math.sin %arg0 : vector<2x[2]xf32>
+ %2 = arith.addf %0, %1 : vector<2x[2]xf32>
+
+ return %2 : vector<2x[2]xf32>
+}
diff --git a/mlir/test/Dialect/Vector/test-scalable-bounds.mlir b/mlir/test/Dialect/Vector/test-scalable-bounds.mlir
new file mode 100644
index 000000000000..245a6f5c13ac
--- /dev/null
+++ b/mlir/test/Dialect/Vector/test-scalable-bounds.mlir
@@ -0,0 +1,161 @@
+// RUN: mlir-opt %s -test-affine-reify-value-bounds -cse \
+// RUN:   -verify-diagnostics -split-input-file | FileCheck %s
+
+#map_dim_i = affine_map<(d0)[s0] -> (-d0 + 32400, s0)>
+#map_dim_j = affine_map<(d0)[s0] -> (-d0 + 16, s0)>
+
+// Here the upper bound for min_i is 4 x vscale, as we know 4 x vscale is
+// always less than 32400. The bound for min_j is 16, as 16 is always less
+// than 4 x vscale_max (vscale_max is the UB for vscale).
+
+// CHECK: #[[$SCALABLE_BOUND_MAP_0:.*]] = affine_map<()[s0] -> (s0 * 4)>
+
+// CHECK-LABEL: @fixed_size_loop_nest
+// CHECK-DAG: %[[VSCALE:.*]] = vector.vscale
+// CHECK-DAG: %[[UB_i:.*]] = affine.apply #[[$SCALABLE_BOUND_MAP_0]]()[%[[VSCALE]]]
+// CHECK-DAG: %[[UB_j:.*]] = arith.constant 16 : index
+// CHECK: "test.some_use"(%[[UB_i]], %[[UB_j]]) : (index, index) -> ()
+func.func @fixed_size_loop_nest() {
+ %c16 = arith.constant 16 : index
+ %c32400 = arith.constant 32400 : index
+ %c4 = arith.constant 4 : index
+ %c0 = arith.constant 0 : index
+ %vscale = vector.vscale
+ %c4_vscale = arith.muli %vscale, %c4 : index
+ scf.for %i = %c0 to %c32400 step %c4_vscale {
+ %min_i = affine.min #map_dim_i(%i)[%c4_vscale]
+ scf.for %j = %c0 to %c16 step %c4_vscale {
+ %min_j = affine.min #map_dim_j(%j)[%c4_vscale]
+ %bound_i = "test.reify_scalable_bound"(%min_i) {type = "UB", vscale_min = 1, vscale_max = 16} : (index) -> index
+ %bound_j = "test.reify_scalable_bound"(%min_j) {type = "UB", vscale_min = 1, vscale_max = 16} : (index) -> index
+ "test.some_use"(%bound_i, %bound_j) : (index, index) -> ()
+ }
+ }
+ return
+}
+
+// -----
+
+#map_dynamic_dim = affine_map<(d0)[s0, s1] -> (-d0 + s1, s0)>
+
+// Here upper bounds for both min_i and min_j are both (conservatively)
+// 4 x vscale, as we know that is always the largest value they could take. As
+// if `dim < 4 x vscale` then 4 x vscale is an overestimate, and if
+// `dim > 4 x vscale` then the min will be clamped to 4 x vscale.
+
+// CHECK: #[[$SCALABLE_BOUND_MAP_1:.*]] = affine_map<()[s0] -> (s0 * 4)>
+
+// CHECK-LABEL: @dynamic_size_loop_nest
+// CHECK: %[[VSCALE:.*]] = vector.vscale
+// CHECK: %[[UB_ij:.*]] = affine.apply #[[$SCALABLE_BOUND_MAP_1]]()[%[[VSCALE]]]
+// CHECK: "test.some_use"(%[[UB_ij]], %[[UB_ij]]) : (index, index) -> ()
+func.func @dynamic_size_loop_nest(%dim0: index, %dim1: index) {
+ %c4 = arith.constant 4 : index
+ %c0 = arith.constant 0 : index
+ %vscale = vector.vscale
+ %c4_vscale = arith.muli %vscale, %c4 : index
+ scf.for %i = %c0 to %dim0 step %c4_vscale {
+ %min_i = affine.min #map_dynamic_dim(%i)[%c4_vscale, %dim0]
+ scf.for %j = %c0 to %dim1 step %c4_vscale {
+ %min_j = affine.min #map_dynamic_dim(%j)[%c4_vscale, %dim1]
+ %bound_i = "test.reify_scalable_bound"(%min_i) {type = "UB", vscale_min = 1, vscale_max = 16} : (index) -> index
+ %bound_j = "test.reify_scalable_bound"(%min_j) {type = "UB", vscale_min = 1, vscale_max = 16} : (index) -> index
+ "test.some_use"(%bound_i, %bound_j) : (index, index) -> ()
+ }
+ }
+ return
+}
+
+// -----
+
+// Here the bound is just a value + a constant.
+
+// CHECK: #[[$SCALABLE_BOUND_MAP_2:.*]] = affine_map<()[s0] -> (s0 + 8)>
+
+// CHECK-LABEL: @add_to_vscale
+// CHECK: %[[VSCALE:.*]] = vector.vscale
+// CHECK: %[[SCALABLE_BOUND:.*]] = affine.apply #[[$SCALABLE_BOUND_MAP_2]]()[%[[VSCALE]]]
+// CHECK: "test.some_use"(%[[SCALABLE_BOUND]]) : (index) -> ()
+func.func @add_to_vscale() {
+ %vscale = vector.vscale
+ %c8 = arith.constant 8 : index
+ %vscale_plus_c8 = arith.addi %vscale, %c8 : index
+ %bound = "test.reify_scalable_bound"(%vscale_plus_c8) {type = "EQ", vscale_min = 1, vscale_max = 16} : (index) -> index
+ "test.some_use"(%bound) : (index) -> ()
+ return
+}
+
+// -----
+
+// Here we know vscale is always 2 so we get a constant bound.
+
+// CHECK-LABEL: @vscale_fixed_size
+// CHECK: %[[C2:.*]] = arith.constant 2 : index
+// CHECK: "test.some_use"(%[[C2]]) : (index) -> ()
+func.func @vscale_fixed_size() {
+ %vscale = vector.vscale
+ %bound = "test.reify_scalable_bound"(%vscale) {type = "EQ", vscale_min = 2, vscale_max = 2} : (index) -> index
+ "test.some_use"(%bound) : (index) -> ()
+ return
+}
+
+// -----
+
+// Here we don't know the upper bound (%a is underspecified)
+
+func.func @unknown_bound(%a: index) {
+ %vscale = vector.vscale
+ %vscale_plus_a = arith.muli %vscale, %a : index
+ // expected-error @below{{could not reify bound}}
+ %bound = "test.reify_scalable_bound"(%vscale_plus_a) {type = "UB", vscale_min = 1, vscale_max = 16} : (index) -> index
+ "test.some_use"(%bound) : (index) -> ()
+ return
+}
+
+// -----
+
+// Here we have two vscale values (that have not been CSE'd), but they should
+// still be treated as equivalent.
+
+// CHECK: #[[$SCALABLE_BOUND_MAP_3:.*]] = affine_map<()[s0] -> (s0 * 6)>
+
+// CHECK-LABEL: @duplicate_vscale_values
+// CHECK: %[[VSCALE:.*]] = vector.vscale
+// CHECK: %[[SCALABLE_BOUND:.*]] = affine.apply #[[$SCALABLE_BOUND_MAP_3]]()[%[[VSCALE]]]
+// CHECK: "test.some_use"(%[[SCALABLE_BOUND]]) : (index) -> ()
+func.func @duplicate_vscale_values() {
+ %c4 = arith.constant 4 : index
+ %vscale_0 = vector.vscale
+
+ %c2 = arith.constant 2 : index
+ %vscale_1 = vector.vscale
+
+ %c4_vscale = arith.muli %vscale_0, %c4 : index
+ %c2_vscale = arith.muli %vscale_1, %c2 : index
+ %add = arith.addi %c2_vscale, %c4_vscale : index
+
+ %bound = "test.reify_scalable_bound"(%add) {type = "EQ", vscale_min = 1, vscale_max = 16} : (index) -> index
+ "test.some_use"(%bound) : (index) -> ()
+ return
+}
+
+// -----
+
+// Test some non-scalable code to ensure that it works too:
+
+#map_dim_i = affine_map<(d0)[s0] -> (-d0 + 1024, s0)>
+
+// CHECK-LABEL: @non_scalable_code
+// CHECK: %[[C4:.*]] = arith.constant 4 : index
+// CHECK: "test.some_use"(%[[C4]]) : (index) -> ()
+func.func @non_scalable_code() {
+ %c1024 = arith.constant 1024 : index
+ %c4 = arith.constant 4 : index
+ %c0 = arith.constant 0 : index
+ scf.for %i = %c0 to %c1024 step %c4 {
+ %min_i = affine.min #map_dim_i(%i)[%c4]
+ %bound_i = "test.reify_scalable_bound"(%min_i) {type = "UB", vscale_min = 1, vscale_max = 16} : (index) -> index
+ "test.some_use"(%bound_i) : (index) -> ()
+ }
+ return
+}
diff --git a/mlir/test/Dialect/Vector/vector-dropleadunitdim-transforms.mlir b/mlir/test/Dialect/Vector/vector-dropleadunitdim-transforms.mlir
index af6e636245b0..3a120a56056c 100644
--- a/mlir/test/Dialect/Vector/vector-dropleadunitdim-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-dropleadunitdim-transforms.mlir
@@ -30,6 +30,80 @@ func.func @cast_away_contraction_leading_one_dims(%arg0: vector<1x16x8xf32>, %ar
}
// -----
+// CHECK: #[[$MAP_0:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+// CHECK: #[[$MAP_1:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
+// CHECK: #[[$MAP_2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+
+// CHECK-LABEL: func.func @cast_away_contraction_leading_one_dim_under_const_mask
+// CHECK: %[[MASK:.*]] = vector.constant_mask [15, 15, 8] : vector<16x16x8xi1>
+// CHECK: %[[R0:.*]] = vector.extract %{{.*}}[0] : vector<16x8xf32> from vector<1x16x8xf32>
+// CHECK: %[[R1:.*]] = vector.extract %{{.*}}[0] : vector<8x16xf32> from vector<1x8x16xf32>
+// CHECK: %[[R2:.*]] = vector.extract %{{.*}}[0] : vector<16x16xf32> from vector<1x16x16xf32>
+// CHECK: %[[CONTRACT:.*]] = vector.mask %[[MASK]] {
+// CHECK-SAME: vector.contract {indexing_maps = [#[[$MAP_0]], #[[$MAP_1]], #[[$MAP_2]]], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>}
+// CHECK-SAME: %[[R0]], %[[R1]], %[[R2]] : vector<16x8xf32>, vector<8x16xf32> into vector<16x16xf32>
+// CHECK-SAME: } : vector<16x16x8xi1> -> vector<16x16xf32>
+// CHECK: %[[RES:.*]] = vector.broadcast %[[CONTRACT]] : vector<16x16xf32> to vector<1x16x16xf32>
+// CHECK: return %[[RES]] : vector<1x16x16xf32>
+
+#contraction_accesses0 = [
+ affine_map<(l, i, j, k) -> (l, i, k)>,
+ affine_map<(l, i, j, k) -> (l, k, j)>,
+ affine_map<(l, i, j, k) -> (l, i, j)>
+]
+#contraction_trait0 = {
+ indexing_maps = #contraction_accesses0,
+ iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+}
+
+func.func @cast_away_contraction_leading_one_dim_under_const_mask(%arg0: vector<1x16x8xf32>, %arg1: vector<1x8x16xf32>, %arg2: vector<1x16x16xf32>) -> vector<1x16x16xf32> {
+ %mask = vector.constant_mask [1, 15, 15, 8] : vector<1x16x16x8xi1>
+ %0 = vector.mask %mask {
+ vector.contract #contraction_trait0 %arg0, %arg1, %arg2 : vector<1x16x8xf32>, vector<1x8x16xf32> into vector<1x16x16xf32>
+ } : vector<1x16x16x8xi1> -> vector<1x16x16xf32>
+ return %0 : vector<1x16x16xf32>
+}
+
+// -----
+// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
+// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
+// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
+
+// CHECK-LABEL: func.func @cast_away_contraction_leading_one_dim_under_mask
+// CHECK: %[[R0:.*]] = vector.extract %{{.*}} : vector<16x8xf32> from vector<1x16x8xf32>
+// CHECK: %[[R1:.*]] = vector.extract %{{.*}} : vector<8x16xf32> from vector<1x8x16xf32>
+// CHECK: %[[R2:.*]] = vector.extract %{{.*}} : vector<16x16xf32> from vector<1x16x16xf32>
+// CHECK: %[[M:.*]] = vector.extract %{{.*}} : vector<16x16x8xi1> from vector<1x16x16x8xi1>
+// CHECK: %[[CONTRACT:.*]] = vector.mask %[[M]] {
+// CHECK-SAME: vector.contract {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "reduction"], kind = #vector.kind<add>}
+// CHECK-SAME: %[[R0]], %[[R1]], %[[R2]] : vector<16x8xf32>, vector<8x16xf32> into vector<16x16xf32>
+// CHECK-SAME: } : vector<16x16x8xi1> -> vector<16x16xf32>
+// CHECK-NEXT: %[[RES:.*]] = vector.broadcast %[[CONTRACT]] : vector<16x16xf32> to vector<1x16x16xf32>
+// CHECK-NEXT: return %[[RES]] : vector<1x16x16xf32>
+
+#contraction_accesses0 = [
+ affine_map<(l, i, j, k) -> (l, i, k)>,
+ affine_map<(l, i, j, k) -> (l, k, j)>,
+ affine_map<(l, i, j, k) -> (l, i, j)>
+]
+#contraction_trait0 = {
+ indexing_maps = #contraction_accesses0,
+ iterator_types = ["parallel", "parallel", "parallel", "reduction"]
+}
+
+func.func @cast_away_contraction_leading_one_dim_under_mask(
+ %arg0: vector<1x16x8xf32>,
+ %arg1: vector<1x8x16xf32>,
+ %arg2: vector<1x16x16xf32>,
+ %mask: vector<1x16x16x8xi1>) -> vector<1x16x16xf32> {
+ %0 = vector.mask %mask {
+ vector.contract #contraction_trait0 %arg0, %arg1, %arg2 : vector<1x16x8xf32>, vector<1x8x16xf32> into vector<1x16x16xf32>
+ } : vector<1x16x16x8xi1> -> vector<1x16x16xf32>
+ return %0: vector<1x16x16xf32>
+}
+
+// -----
+
// CHECK-DAG: #[[$map0:.*]] = affine_map<(d0, d1) -> (d1)>
// CHECK-DAG: #[[$map1:.*]] = affine_map<(d0, d1) -> (d1, d0)>
// CHECK-DAG: #[[$map2:.*]] = affine_map<(d0, d1) -> (d0)>
@@ -164,36 +238,6 @@ func.func @cast_away_contraction_leading_one_dims_nonleadingunitdim_rank4_acctra
return %0: vector<1x1x2x16xf32>
}
-// -----
-
-// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
-// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d3, d2)>
-// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-
-// CHECK-LABEL: not_insert_cast_for_contraction_under_mask
-// CHECK: %[[MASK:.+]] = vector.constant_mask
-// CHECK: %[[CASTED_MASK:.+]] = vector.broadcast %[[MASK]]
-// CHECK: %[[RET:.+]] = vector.mask %[[CASTED_MASK]] {
-// CHECK-SAME: vector.contract {{.*}} : vector<1x16x8xf32>, vector<1x8x16xf32> into vector<1x16x16xf32> }
-// CHECK: return %[[RET]] : vector<1x16x16xf32>
-
-#contraction_accesses0 = [
- affine_map<(l, i, j, k) -> (l, i, k)>,
- affine_map<(l, i, j, k) -> (l, k, j)>,
- affine_map<(l, i, j, k) -> (l, i, j)>
-]
-#contraction_trait0 = {
- indexing_maps = #contraction_accesses0,
- iterator_types = ["parallel", "parallel", "parallel", "reduction"]
-}
-
-func.func @not_insert_cast_for_contraction_under_mask(%arg0: vector<1x16x8xf32>, %arg1: vector<1x8x16xf32>, %arg2: vector<1x16x16xf32>) -> vector<1x16x16xf32> {
- %mask = vector.constant_mask [1, 15, 15, 8] : vector<1x16x16x8xi1>
- %0 = vector.mask %mask {
- vector.contract #contraction_trait0 %arg0, %arg1, %arg2 : vector<1x16x8xf32>, vector<1x8x16xf32> into vector<1x16x16xf32>
- } : vector<1x16x16x8xi1> -> vector<1x16x16xf32>
- return %0 : vector<1x16x16xf32>
-}
// -----
// CHECK-LABEL: func @cast_away_extract_strided_slice_leading_one_dims
@@ -428,6 +472,8 @@ func.func @cast_away_elementwise_leading_one_dims(
return %0, %1, %2, %3: vector<1x1x8xf32>, vector<1x4xi1>, vector<1x4xf32>, vector<1x4xf32>
}
+// -----
+
// CHECK-LABEL: func @cast_away_insert_leading_one_dims_scalar
// CHECK-SAME: (%[[S:.+]]: f32, %[[V:.+]]: vector<1x1x4xf32>)
// CHECK: %[[EXTRACT:.+]] = vector.extract %[[V]][0, 0] : vector<4xf32> from vector<1x1x4xf32>
@@ -439,6 +485,8 @@ func.func @cast_away_insert_leading_one_dims_scalar(%s: f32, %v: vector<1x1x4xf3
return %0: vector<1x1x4xf32>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_scalar_scalable(
// CHECK-SAME: %[[S:.*]]: f32,
// CHECK-SAME: %[[V:.*]]: vector<1x1x[4]xf32>) -> vector<1x1x[4]xf32> {
@@ -451,6 +499,8 @@ func.func @cast_away_insert_leading_one_dims_scalar_scalable(%s: f32, %v: vector
return %0: vector<1x1x[4]xf32>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_scalar_skip_scalable_dim(
// CHECK-SAME: %[[S:.*]]: f32,
// CHECK-SAME: %[[V:.*]]: vector<1x[1]x4xf32>) -> vector<1x[1]x4xf32> {
@@ -463,6 +513,8 @@ func.func @cast_away_insert_leading_one_dims_scalar_skip_scalable_dim(%s: f32, %
return %0: vector<1x[1]x4xf32>
}
+// -----
+
// CHECK-LABEL: func @cast_away_insert_leading_one_dims_rank1
// CHECK-SAME: (%[[S:.+]]: vector<4xf32>, %[[V:.+]]: vector<1x1x4xf32>)
// CHECK: %[[BCAST:.+]] = vector.broadcast %[[S]] : vector<4xf32> to vector<1x1x4xf32>
@@ -472,6 +524,8 @@ func.func @cast_away_insert_leading_one_dims_rank1(%s: vector<4xf32>, %v: vector
return %0: vector<1x1x4xf32>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_rank1_scalable(
// CHECK-SAME: %[[S:.*]]: vector<[4]xf32>,
// CHECK-SAME: %[[V:.*]]: vector<1x1x[4]xf32>) -> vector<1x1x[4]xf32> {
@@ -482,6 +536,8 @@ func.func @cast_away_insert_leading_one_dims_rank1_scalable(%s: vector<[4]xf32>,
return %0: vector<1x1x[4]xf32>
}
+// -----
+
// CHECK-LABEL: func @cast_away_insert_leading_one_dims_rank2
// CHECK-SAME: (%[[S:.+]]: vector<1x4xf32>, %[[V:.+]]: vector<1x1x4xf32>)
// CHECK: %[[EXTRACT:.+]] = vector.extract %[[S]][0] : vector<4xf32> from vector<1x4xf32>
@@ -492,6 +548,8 @@ func.func @cast_away_insert_leading_one_dims_rank2(%s: vector<1x4xf32>, %v: vect
return %0: vector<1x1x4xf32>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_rank2_scalable(
// CHECK-SAME: %[[S:.*]]: vector<1x[4]xf32>,
// CHECK-SAME: %[[V:.*]]: vector<1x1x[4]xf32>) -> vector<1x1x[4]xf32> {
@@ -503,6 +561,8 @@ func.func @cast_away_insert_leading_one_dims_rank2_scalable(%s: vector<1x[4]xf32
return %0: vector<1x1x[4]xf32>
}
+// -----
+
// CHECK-LABEL: func @cast_away_insert_leading_one_dims_rank2_one_dest
// CHECK-SAME: (%[[S:.+]]: vector<1x4xf32>, %[[V:.+]]: vector<1x2x1x4xf32>)
// CHECK: %[[EXTRACTS:.+]] = vector.extract %[[S]][0] : vector<4xf32> from vector<1x4xf32>
@@ -515,6 +575,8 @@ func.func @cast_away_insert_leading_one_dims_rank2_one_dest(%s: vector<1x4xf32>,
return %0: vector<1x2x1x4xf32>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_rank2_one_dest_scalable(
// CHECK-SAME: %[[S:.*]]: vector<1x[4]xf32>,
// CHECK-SAME: %[[V:.*]]: vector<1x2x1x[4]xf32>) -> vector<1x2x1x[4]xf32> {
@@ -528,6 +590,8 @@ func.func @cast_away_insert_leading_one_dims_rank2_one_dest_scalable(%s: vector<
return %0: vector<1x2x1x[4]xf32>
}
+// -----
+
// CHECK-LABEL: func @cast_away_insert_leading_one_dims_non_one_dest
// CHECK-SAME: (%[[S:.+]]: vector<1x4xf32>, %[[V:.+]]: vector<8x1x4xf32>)
// CHECK: %[[EXTRACT:.+]] = vector.extract %[[S]][0] : vector<4xf32> from vector<1x4xf32>
@@ -538,6 +602,8 @@ func.func @cast_away_insert_leading_one_dims_non_one_dest(%s: vector<1x4xf32>, %
return %0: vector<8x1x4xf32>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_non_one_dest_scalable(
// CHECK-SAME: %[[S:.*]]: vector<1x[4]xf32>,
// CHECK-SAME: %[[V:.*]]: vector<8x1x[4]xf32>) -> vector<8x1x[4]xf32> {
@@ -549,6 +615,8 @@ func.func @cast_away_insert_leading_one_dims_non_one_dest_scalable(%s: vector<1x
return %0: vector<8x1x[4]xf32>
}
+// -----
+
// CHECK-LABEL: func @cast_away_insert_leading_one_dims_one_two_dest
// CHECK-SAME: (%[[S:.+]]: vector<1x8xi1>, %[[V:.+]]: vector<1x1x8x1x8xi1>)
// CHECK: %[[EXTRACTS:.+]] = vector.extract %[[S]][0] : vector<8xi1> from vector<1x8xi1>
@@ -561,6 +629,8 @@ func.func @cast_away_insert_leading_one_dims_one_two_dest(%s: vector<1x8xi1>, %v
return %0: vector<1x1x8x1x8xi1>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_insert_leading_one_dims_one_two_dest_scalable(
// CHECK-SAME: %[[S:.*]]: vector<1x[8]xi1>,
// CHECK-SAME: %[[V:.*]]: vector<1x1x8x1x[8]xi1>) -> vector<1x1x8x1x[8]xi1> {
@@ -574,6 +644,8 @@ func.func @cast_away_insert_leading_one_dims_one_two_dest_scalable(%s: vector<1x
return %0: vector<1x1x8x1x[8]xi1>
}
+// -----
+
// CHECK-LABEL: func.func @cast_away_constant_mask() -> vector<1x1x8x2x1xi1> {
// CHECK: %[[MASK:.*]] = vector.constant_mask [6, 1, 1] : vector<8x2x1xi1>
// CHECK: %[[BCAST:.*]] = vector.broadcast %[[MASK]] : vector<8x2x1xi1> to vector<1x1x8x2x1xi1>
@@ -582,3 +654,13 @@ func.func @cast_away_constant_mask() -> vector<1x1x8x2x1xi1> {
%0 = vector.constant_mask [1, 1, 6, 1, 1] : vector<1x1x8x2x1xi1>
return %0: vector<1x1x8x2x1xi1>
}
+
+// -----
+
+// CHECK-LABEL: func.func @drop_unit_dims_scalar_cond_select(
+// CHECK: arith.select {{.*}} : vector<16xi1>
+func.func @drop_unit_dims_scalar_cond_select(%cond: i1, %arg0: vector<1x16xi1>, %arg1: vector<1x16xi1>) -> vector<1x16xi1> {
+ %sel = arith.select %cond, %arg0, %arg1 : vector<1x16xi1>
+ return %sel : vector<1x16xi1>
+}
+
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index 13e07f59a72a..31bd19c0be8e 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -41,6 +41,24 @@ func.func @permutation_with_mask_scalable(%2: memref<?x?xf32>, %dim_1: index, %d
return %1 : vector<8x[4]x2xf32>
}
+// CHECK: func.func @permutation_with_mask_transfer_write_scalable(
+// CHECK-SAME: %[[ARG_0:.*]]: vector<4x[8]xi16>,
+// CHECK-SAME: %[[ARG_1:.*]]: memref<1x4x?x1x1x1x1xi16>,
+// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>) {
+// CHECK: %[[C0:.*]] = arith.constant 0 : index
+// CHECK: %[[BCAST_1:.*]] = vector.broadcast %[[ARG_0]] : vector<4x[8]xi16> to vector<1x1x1x1x4x[8]xi16>
+// CHECK: %[[BCAST_2:.*]] = vector.broadcast %[[MASK]] : vector<4x[8]xi1> to vector<1x1x1x1x4x[8]xi1>
+// CHECK: %[[TRANSPOSE_1:.*]] = vector.transpose %[[BCAST_2]], [4, 5, 0, 1, 2, 3] : vector<1x1x1x1x4x[8]xi1> to vector<4x[8]x1x1x1x1xi1>
+// CHECK: %[[TRANSPOSE_2:.*]] = vector.transpose %[[BCAST_1]], [4, 5, 0, 1, 2, 3] : vector<1x1x1x1x4x[8]xi16> to vector<4x[8]x1x1x1x1xi16>
+// CHECK: vector.transfer_write %[[TRANSPOSE_2]], %[[ARG_1]]{{\[}}%[[C0]], %[[C0]], %[[C0]], %[[C0]], %[[C0]], %[[C0]], %[[C0]]], %[[TRANSPOSE_1]] {in_bounds = [true, true, true, true, true, true]} : vector<4x[8]x1x1x1x1xi16>, memref<1x4x?x1x1x1x1xi16>
+// CHECK: return
+func.func @permutation_with_mask_transfer_write_scalable(%arg0: vector<4x[8]xi16>, %arg1: memref<1x4x?x1x1x1x1xi16>, %mask: vector<4x[8]xi1>){
+ %c0 = arith.constant 0 : index
+ vector.transfer_write %arg0, %arg1[%c0, %c0, %c0, %c0, %c0, %c0, %c0], %mask {in_bounds = [true, true], permutation_map = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d1, d2)>
+} : vector<4x[8]xi16>, memref<1x4x?x1x1x1x1xi16>
+
+ return
+}
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
%f = transform.structured.match ops{["func.func"]} in %module_op
diff --git a/mlir/test/IR/greedy-pattern-rewriter-driver.mlir b/mlir/test/IR/greedy-pattern-rewrite-driver-bottom-up.mlir
index f3da9a147fcb..f3da9a147fcb 100644
--- a/mlir/test/IR/greedy-pattern-rewriter-driver.mlir
+++ b/mlir/test/IR/greedy-pattern-rewrite-driver-bottom-up.mlir
diff --git a/mlir/test/IR/greedy-pattern-rewrite-driver-top-down.mlir b/mlir/test/IR/greedy-pattern-rewrite-driver-top-down.mlir
new file mode 100644
index 000000000000..a362d6f99b94
--- /dev/null
+++ b/mlir/test/IR/greedy-pattern-rewrite-driver-top-down.mlir
@@ -0,0 +1,58 @@
+// RUN: mlir-opt %s -test-patterns="max-iterations=1 top-down=true" \
+// RUN: --split-input-file | FileCheck %s
+
+// Tests for https://github.com/llvm/llvm-project/issues/86765. Ensure
+// that operands of a dead op are added to the worklist even if the same value
+// appears multiple times as an operand.
+
+// 2 uses of the same operand
+
+// CHECK: func.func @f(%arg0: i1) {
+// CHECK-NEXT: return
+// CHECK-NEXT: }
+func.func @f(%arg0: i1) {
+ %0 = arith.constant 0 : i32
+ %if = scf.if %arg0 -> (i32) {
+ scf.yield %0 : i32
+ } else {
+ scf.yield %0 : i32
+ }
+ %dead_leaf = arith.addi %if, %if : i32
+ return
+}
+
+// -----
+
+// 3 uses of the same operand
+
+// CHECK: func.func @f() {
+// CHECK-NEXT: return
+// CHECK-NEXT: }
+func.func @f() {
+ %0 = arith.constant 0 : i1
+ %if = scf.if %0 -> (i1) {
+ scf.yield %0 : i1
+ } else {
+ scf.yield %0 : i1
+ }
+ %dead_leaf = arith.select %if, %if, %if : i1
+ return
+}
+
+// -----
+
+// 2 uses of the same operand, op has 3 operands
+
+// CHECK: func.func @f() {
+// CHECK-NEXT: return
+// CHECK-NEXT: }
+func.func @f() {
+ %0 = arith.constant 0 : i1
+ %if = scf.if %0 -> (i1) {
+ scf.yield %0 : i1
+ } else {
+ scf.yield %0 : i1
+ }
+ %dead_leaf = arith.select %0, %if, %if : i1
+ return
+}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir
new file mode 100644
index 000000000000..56dce1c65401
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir
@@ -0,0 +1,60 @@
+// DEFINE: %{compile} = mlir-opt %s \
+// DEFINE: -transform-interpreter -test-transform-dialect-erase-schedule \
+// DEFINE: -one-shot-bufferize="bufferize-function-boundaries" -lower-vector-mask -buffer-deallocation-pipeline -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
+// DEFINE: -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm -o %t
+// DEFINE: %{entry_point} = conv
+// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve"\
+// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils
+
+// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s
+
+func.func @conv() {
+ // Define input/output tensors
+ %input_init = tensor.empty() : tensor<1x8x6xi32>
+ %output_init = tensor.empty() : tensor<1x7x6xi32>
+
+ %five = arith.constant 5 : i32
+ %zero = arith.constant 0 : i32
+ %input = linalg.fill ins(%five : i32) outs(%input_init : tensor<1x8x6xi32>) -> tensor<1x8x6xi32>
+ %output = linalg.fill ins(%zero : i32) outs(%output_init : tensor<1x7x6xi32>) -> tensor<1x7x6xi32>
+
+ // Define the filter tensor
+ %filter = arith.constant dense<[
+ [ 1, 2, 3, 4, 5, 6],
+ [ 11, 12, 13, 14, 15, 16]
+ ]> : tensor<2x6xi32>
+
+ // static sizes -> dynamic sizes
+  %input_dyn = tensor.cast %input : tensor<1x8x6xi32> to tensor<1x8x?xi32>
+ %output_dyn = tensor.cast %output : tensor<1x7x6xi32> to tensor<1x7x?xi32>
+ %filter_dyn = tensor.cast %filter : tensor<2x6xi32> to tensor<2x?xi32>
+
+ // Run the convolution
+ %res = linalg.depthwise_conv_1d_nwc_wc
+ ins(%input_dyn, %filter_dyn : tensor<1x8x?xi32>, tensor<2x?xi32>)
+ outs(%output_dyn : tensor<1x7x?xi32>) -> tensor<1x7x?xi32>
+
+ // Print the results
+ // CHECK: SVE: START OF TEST OUTPUT
+ vector.print str "SVE: START OF TEST OUTPUT\n"
+
+ // CHECK-NEXT: Unranked Memref base@ = {{.*}} rank = 3 offset = 0 sizes = [1, 7, 6] strides = [42, 6, 1] data =
+ // CHECK-COUNT-7: [60, 70, 80, 90, 100, 110]
+ %xf = tensor.cast %res : tensor<1x7x?xi32> to tensor<*xi32>
+ call @printMemrefI32(%xf) : (tensor<*xi32>) -> ()
+
+ // CHECK-NEXT: SVE: END OF TEST OUTPUT
+ vector.print str "SVE: END OF TEST OUTPUT\n"
+
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.structured.vectorize %0 vector_sizes [1, 7, [8], 2] : !transform.any_op
+ transform.yield
+ }
+}
+
+func.func private @printMemrefI32(%ptr : tensor<*xi32>)
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
index a5b2ad1605cd..44b555c8c3ad 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -lower-vector-mask -one-shot-bufferize -test-lower-to-llvm | \
+// RUN: mlir-opt %s -transform-interpreter -test-transform-dialect-erase-schedule -lower-vector-mask -one-shot-bufferize -buffer-deallocation-pipeline -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd -e=entry -entry-point-result=void --march=aarch64 --mattr="+sve" -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils | \
// RUN: FileCheck %s
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
index 51a0c8f7c945..68e474fe5cef 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/matmul.mlir
@@ -1,6 +1,6 @@
// DEFINE: %{compile} = mlir-opt %s \
// DEFINE: -transform-interpreter -test-transform-dialect-erase-schedule \
-// DEFINE: -one-shot-bufferize -func-bufferize -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
+// DEFINE: -one-shot-bufferize="bufferize-function-boundaries" -buffer-deallocation-pipeline -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
// DEFINE: -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm -o %t
// DEFINE: %{entry_point} = matmul_f32
// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve"\
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/pack-unpack-mmt4d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/pack-unpack-mmt4d.mlir
new file mode 100644
index 000000000000..5680882dccb1
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/pack-unpack-mmt4d.mlir
@@ -0,0 +1,173 @@
+// DEFINE: %{compile} = mlir-opt %s \
+// DEFINE: -transform-interpreter -test-transform-dialect-erase-schedule \
+// DEFINE: -one-shot-bufferize="bufferize-function-boundaries" \
+// DEFINE: -buffer-deallocation-pipeline="private-function-dynamic-ownership" \
+// DEFINE: -cse -canonicalize -test-lower-to-llvm
+// DEFINE: %{entry_point} = main
+// DEFINE: %{run} = mlir-cpu-runner -e %{entry_point} -entry-point-result=void \
+// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils
+
+// RUN: %{compile} | %{run} | FileCheck %s
+
+/// End-to-end test for computing matrix-multiplication using linalg.mmt4d. In
+/// particular, demonstrates how the following MLIR sequence (implemented in @mmt4d):
+///
+/// A_pack = tensor.pack A
+/// B_pack = tensor.pack B
+/// C_pack = tensor.pack C
+/// out_pack = linalg.mmt4d(A_pack, B_pack, C_pack)
+///
+/// is equivalent to:
+///
+/// linalg.matmul(A, B, C)
+///
+/// (implemented in @matmul).
+
+func.func @main() {
+ // Allocate and initialise the inputs
+ %A_alloc = tensor.empty() : tensor<7x16xi32>
+ %B_alloc = tensor.empty() : tensor<16x13xi32>
+
+ %three = arith.constant 3 : i32
+ %four = arith.constant 4 : i32
+ %A = linalg.fill ins(%three : i32) outs(%A_alloc : tensor<7x16xi32>) -> tensor<7x16xi32>
+ %B = linalg.fill ins(%four : i32) outs(%B_alloc : tensor<16x13xi32>) -> tensor<16x13xi32>
+ %C = arith.constant dense<[
+ [ 1, 8, 15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85],
+ [ 2, 9, 16, 23, 30, 37, 44, 51, 58, 65, 72, 79, 86],
+ [ 3, 10, 17, 24, 31, 38, 45, 52, 59, 66, 73, 80, 87],
+ [ 4, 11, 18, 25, 32, 39, 46, 53, 60, 67, 74, 81, 88],
+ [ 5, 12, 19, 26, 33, 40, 47, 54, 61, 68, 75, 82, 89],
+ [ 6, 13, 20, 27, 34, 41, 48, 55, 62, 69, 76, 83, 90],
+ [ 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91]
+ ]> : tensor<7x13xi32>
+
+ // Matrix multiplication via linalg.mmt4d
+ // CHECK: Unranked Memref
+ // CHECK: [193, 200, 207, 214, 221, 228, 235, 242, 249, 256, 263, 270, 277]
+ // CHECK: [194, 201, 208, 215, 222, 229, 236, 243, 250, 257, 264, 271, 278]
+ // CHECK: [195, 202, 209, 216, 223, 230, 237, 244, 251, 258, 265, 272, 279]
+ // CHECK: [196, 203, 210, 217, 224, 231, 238, 245, 252, 259, 266, 273, 280]
+ // CHECK: [197, 204, 211, 218, 225, 232, 239, 246, 253, 260, 267, 274, 281]
+ // CHECK: [198, 205, 212, 219, 226, 233, 240, 247, 254, 261, 268, 275, 282]
+ // CHECK: [199, 206, 213, 220, 227, 234, 241, 248, 255, 262, 269, 276, 283]
+ %C_mmt4d = func.call @mmt4d(%A, %B, %C) : (tensor<7x16xi32>, tensor<16x13xi32>, tensor<7x13xi32>) -> tensor<7x13xi32>
+ %xf = tensor.cast %C_mmt4d : tensor<7x13xi32> to tensor<*xi32>
+ call @printMemrefI32(%xf) : (tensor<*xi32>) -> ()
+
+ // Matrix multiplication with linalg.matmul
+ // CHECK: Unranked Memref
+ // CHECK: [193, 200, 207, 214, 221, 228, 235, 242, 249, 256, 263, 270, 277]
+ // CHECK: [194, 201, 208, 215, 222, 229, 236, 243, 250, 257, 264, 271, 278]
+ // CHECK: [195, 202, 209, 216, 223, 230, 237, 244, 251, 258, 265, 272, 279]
+ // CHECK: [196, 203, 210, 217, 224, 231, 238, 245, 252, 259, 266, 273, 280]
+ // CHECK: [197, 204, 211, 218, 225, 232, 239, 246, 253, 260, 267, 274, 281]
+ // CHECK: [198, 205, 212, 219, 226, 233, 240, 247, 254, 261, 268, 275, 282]
+ // CHECK: [199, 206, 213, 220, 227, 234, 241, 248, 255, 262, 269, 276, 283]
+ %C_matmul = func.call @matmul(%A, %B, %C) : (tensor<7x16xi32>, tensor<16x13xi32>, tensor<7x13xi32>) -> tensor<7x13xi32>
+ %xf_2 = tensor.cast %C_matmul : tensor<7x13xi32> to tensor<*xi32>
+ call @printMemrefI32(%xf_2) : (tensor<*xi32>) -> ()
+
+ return
+}
+
+func.func private @matmul(%A: tensor<7x16xi32>, %B: tensor<16x13xi32>, %C: tensor<7x13xi32>) -> tensor<7x13xi32> {
+ %C_matmul = linalg.matmul ins(%A, %B: tensor<7x16xi32>, tensor<16x13xi32>)
+ outs(%C: tensor<7x13xi32>) -> tensor<7x13xi32>
+
+ return %C_matmul : tensor<7x13xi32>
+}
+
+func.func private @mmt4d(%A: tensor<7x16xi32>, %B: tensor<16x13xi32>, %C: tensor<7x13xi32>) -> tensor<7x13xi32> {
+ %zero = arith.constant 0 : i32
+
+ %A_pack_empty = tensor.empty() : tensor<2x16x8x1xi32>
+ %B_pack_empty = tensor.empty() : tensor<2x16x8x1xi32>
+ %C_pack_empty = tensor.empty() : tensor<2x2x8x8xi32>
+
+ // Pack matrices
+ %A_pack = tensor.pack %A padding_value(%zero : i32) inner_dims_pos = [0, 1] inner_tiles = [8, 1] into %A_pack_empty : tensor<7x16xi32> -> tensor<2x16x8x1xi32>
+ %B_pack = tensor.pack %B padding_value(%zero : i32) outer_dims_perm = [1, 0] inner_dims_pos = [1, 0] inner_tiles = [8, 1] into %B_pack_empty : tensor<16x13xi32> -> tensor<2x16x8x1xi32>
+ %C_pack = tensor.pack %C padding_value(%zero : i32) outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %C_pack_empty : tensor<7x13xi32> -> tensor<2x2x8x8xi32>
+
+ // MMT4D
+ %mmt4d = linalg.mmt4d ins(%A_pack, %B_pack : tensor<2x16x8x1xi32>, tensor<2x16x8x1xi32>) outs(%C_pack : tensor<2x2x8x8xi32>) -> tensor<2x2x8x8xi32>
+
+ // Unpack output
+ %C_out_empty = tensor.empty() : tensor<7x13xi32>
+ %C_out_unpack = tensor.unpack %mmt4d outer_dims_perm = [0, 1] inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %C_out_empty : tensor<2x2x8x8xi32> -> tensor<7x13xi32>
+
+ return %C_out_unpack : tensor<7x13xi32>
+}
+
+module @transforms attributes { transform.with_named_sequence } {
+ transform.named_sequence @__transform_main(%module: !transform.any_op {transform.readonly}) {
+ %mmt4d = transform.collect_matching @match_mmt4d in %module : (!transform.any_op) -> (!transform.any_op)
+ %func = transform.get_parent_op %mmt4d {isolated_from_above} : (!transform.any_op) -> !transform.op<"func.func">
+
+ // Step 1: Tile
+ // Tile parallel dims
+ %tiled_linalg_op_p, %loops:4 = transform.structured.tile_using_for %mmt4d[1, 1, 0, 8, 8, 0]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+ // Tile reduction dims
+ %tiled_linalg_op_r, %loops2:2 = transform.structured.tile_using_for %tiled_linalg_op_p[0, 0, 1, 0, 0, 1]
+ : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+
+ // Step 2: Vectorize
+ transform.structured.vectorize %tiled_linalg_op_r : !transform.any_op
+
+ // Step 3: Simplify
+ // vector.multi_reduction --> vector.contract
+ // Generates a 6-dim vector.contract with the dim matching the original MMT4D Op
+ // and with the following split into parallel and reduction dims:
+ // * parallel, parallel, reduction, parallel, parallel, reduction
+ transform.apply_patterns to %func {
+ transform.apply_patterns.vector.reduction_to_contract
+ // Reduce the rank of xfer ops. This transforms vector.contract to be
+ // more matmul-like and to enable the lowering to outer product Ops.
+ transform.apply_patterns.vector.transfer_permutation_patterns
+ } : !transform.op<"func.func">
+
+ // Hoisting and LICM - not strictly required
+ %func_h = transform.structured.hoist_redundant_vector_transfers %func
+ : (!transform.op<"func.func">) -> !transform.op<"func.func">
+ %all_loops = transform.structured.match interface{LoopLikeInterface} in %func_h
+ : (!transform.op<"func.func">) -> !transform.any_op
+ transform.apply_licm to %all_loops : !transform.any_op
+ transform.loop.hoist_loop_invariant_subsets %all_loops : !transform.any_op
+
+ // Simplify the 6-dim vector.contract into a 3-dim matmul-like
+ // vector.contract with the following split into parallel and reduction
+ // dims:
+ // * parallel, parallel, reduction
+ transform.apply_patterns to %func_h {
+ transform.apply_patterns.vector.reduction_to_contract
+ transform.apply_patterns.vector.cast_away_vector_leading_one_dim
+ transform.apply_patterns.canonicalization
+ } : !transform.op<"func.func">
+
+ // Step 4. Lower tensor.pack
+ %pack = transform.structured.match ops{["tensor.pack"]} in %func_h
+ : (!transform.op<"func.func">) -> !transform.op<"tensor.pack">
+ transform.structured.lower_pack %pack : (!transform.op<"tensor.pack">)
+ -> (!transform.op<"tensor.pad">, !transform.op<"tensor.expand_shape">, !transform.op<"linalg.transpose">)
+
+ // Step 5. Lower tensor.unpack
+ %unpack = transform.structured.match ops{["tensor.unpack"]} in %func_h
+ : (!transform.op<"func.func">) -> !transform.op<"tensor.unpack">
+ transform.structured.lower_unpack %unpack : (!transform.op<"tensor.unpack">)
+ -> (!transform.op<"tensor.empty">,
+ !transform.op<"linalg.transpose">,
+ !transform.op<"tensor.collapse_shape">,
+ !transform.op<"tensor.extract_slice">)
+ transform.yield
+ }
+
+ transform.named_sequence @match_mmt4d(
+ %entry: !transform.any_op {transform.readonly}) -> !transform.any_op {
+ transform.match.operation_name %entry ["linalg.mmt4d"] : !transform.any_op
+ transform.yield %entry : !transform.any_op
+ }
+}
+
+func.func private @printMemrefI32(%ptr : tensor<*xi32>)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index 34d450c2403f..7ecccad212cd 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -285,8 +285,7 @@ module {
%has_runtime = sparse_tensor.has_runtime_library
scf.if %has_runtime {
// sparse_tensor.assemble copies buffers when running with the runtime
- // library. Deallocations are needed not needed when running in codgen
- // mode.
+ // library. Deallocations are not needed when running in codegen mode.
bufferization.dealloc_tensor %s4 : tensor<10x10xf64, #SortedCOO>
bufferization.dealloc_tensor %s5 : tensor<10x10xf64, #SortedCOOI32>
bufferization.dealloc_tensor %csr : tensor<2x2xf64, #CSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
index fe8836266a47..20ae7e86285c 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
@@ -146,8 +146,7 @@ module {
%has_runtime = sparse_tensor.has_runtime_library
scf.if %has_runtime {
// sparse_tensor.assemble copies buffers when running with the runtime
- // library. Deallocations are needed not needed when running in codgen
- // mode.
+ // library. Deallocations are not needed when running in codegen mode.
bufferization.dealloc_tensor %s0 : tensor<4x3x2xf32, #CCC>
bufferization.dealloc_tensor %s1 : tensor<4x3x2xf32, #BatchedCSR>
bufferization.dealloc_tensor %s2 : tensor<4x3x2xf32, #CSRDense>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
index 98d76ba350cb..7758ca77dce9 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_print.mlir
@@ -120,6 +120,14 @@
)
}>
+#COOAoS = #sparse_tensor.encoding<{
+ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
+}>
+
+#COOSoA = #sparse_tensor.encoding<{
+ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton(soa))
+}>
+
module {
//
@@ -161,6 +169,8 @@ module {
%h = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSCC>
%i = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSR0>
%j = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #BSC0>
+ %AoS = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #COOAoS>
+ %SoA = sparse_tensor.convert %x : tensor<4x8xi32> to tensor<4x8xi32, #COOSoA>
// CHECK-NEXT: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 5
@@ -274,19 +284,42 @@ module {
// CHECK-NEXT: ----
sparse_tensor.print %j : tensor<4x8xi32, #BSC0>
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: dim = ( 4, 8 )
+ // CHECK-NEXT: lvl = ( 4, 8 )
+ // CHECK-NEXT: pos[0] : ( 0, 5,
+ // CHECK-NEXT: crd[0] : ( 0, 0, 0, 2, 3, 2, 3, 3, 3, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %AoS : tensor<4x8xi32, #COOAoS>
+
+ // CHECK-NEXT: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: dim = ( 4, 8 )
+ // CHECK-NEXT: lvl = ( 4, 8 )
+ // CHECK-NEXT: pos[0] : ( 0, 5,
+ // CHECK-NEXT: crd[0] : ( 0, 0, 3, 3, 3,
+ // CHECK-NEXT: crd[1] : ( 0, 2, 2, 3, 5,
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5,
+ // CHECK-NEXT: ----
+ sparse_tensor.print %SoA : tensor<4x8xi32, #COOSoA>
+
// Release the resources.
- bufferization.dealloc_tensor %XO : tensor<4x8xi32, #AllDense>
- bufferization.dealloc_tensor %XT : tensor<4x8xi32, #AllDenseT>
- bufferization.dealloc_tensor %a : tensor<4x8xi32, #CSR>
- bufferization.dealloc_tensor %b : tensor<4x8xi32, #DCSR>
- bufferization.dealloc_tensor %c : tensor<4x8xi32, #CSC>
- bufferization.dealloc_tensor %d : tensor<4x8xi32, #DCSC>
- bufferization.dealloc_tensor %e : tensor<4x8xi32, #BSR>
- bufferization.dealloc_tensor %f : tensor<4x8xi32, #BSRC>
- bufferization.dealloc_tensor %g : tensor<4x8xi32, #BSC>
- bufferization.dealloc_tensor %h : tensor<4x8xi32, #BSCC>
- bufferization.dealloc_tensor %i : tensor<4x8xi32, #BSR0>
- bufferization.dealloc_tensor %j : tensor<4x8xi32, #BSC0>
+ bufferization.dealloc_tensor %XO : tensor<4x8xi32, #AllDense>
+ bufferization.dealloc_tensor %XT : tensor<4x8xi32, #AllDenseT>
+ bufferization.dealloc_tensor %a : tensor<4x8xi32, #CSR>
+ bufferization.dealloc_tensor %b : tensor<4x8xi32, #DCSR>
+ bufferization.dealloc_tensor %c : tensor<4x8xi32, #CSC>
+ bufferization.dealloc_tensor %d : tensor<4x8xi32, #DCSC>
+ bufferization.dealloc_tensor %e : tensor<4x8xi32, #BSR>
+ bufferization.dealloc_tensor %f : tensor<4x8xi32, #BSRC>
+ bufferization.dealloc_tensor %g : tensor<4x8xi32, #BSC>
+ bufferization.dealloc_tensor %h : tensor<4x8xi32, #BSCC>
+ bufferization.dealloc_tensor %i : tensor<4x8xi32, #BSR0>
+ bufferization.dealloc_tensor %j : tensor<4x8xi32, #BSC0>
+ bufferization.dealloc_tensor %AoS : tensor<4x8xi32, #COOAoS>
+ bufferization.dealloc_tensor %SoA : tensor<4x8xi32, #COOSoA>
return
}
diff --git a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir
index 39fbb67512c6..a7013eacc984 100644
--- a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir
+++ b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir
@@ -2,6 +2,10 @@
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_c_runner_utils | \
// RUN: FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" | \
+// RUN: mlir-cpu-runner -e main -entry-point-result=void \
+// RUN: -shared-libs=%mlir_c_runner_utils | \
+// RUN: FileCheck %s --check-prefix=SCHECK
func.func @transfer_read_2d(%A : memref<40xi32>, %base1: index) {
%i42 = arith.constant -42: i32
@@ -101,3 +105,29 @@ func.func @entry() {
// CHECK:( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2 )
// CHECK:( 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4 )
// CHECK:( 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 )
+
+// -----
+
+func.func @non_inline_function() -> (i64, i64, i64, i64, i64, i64) {
+ %MIN_INT_MINUS_ONE = arith.constant -9223372036854775807 : i64
+ %NEG_ONE = arith.constant -1 : i64
+ %MIN_INT = arith.constant -9223372036854775808 : i64
+ %ONE = arith.constant 1 : i64
+ %MAX_INT = arith.constant 9223372036854775807 : i64
+ return %MIN_INT_MINUS_ONE, %NEG_ONE, %MIN_INT, %ONE, %MAX_INT, %NEG_ONE : i64, i64, i64, i64, i64, i64
+}
+
+func.func @main() {
+ %0:6 = call @non_inline_function() : () -> (i64, i64, i64, i64, i64, i64)
+ %1 = arith.floordivsi %0#0, %0#1 : i64
+ %2 = arith.floordivsi %0#2, %0#3 : i64
+ %3 = arith.floordivsi %0#4, %0#5 : i64
+ vector.print %1 : i64
+ vector.print %2 : i64
+ vector.print %3 : i64
+ return
+}
+
+// SCHECK: 9223372036854775807
+// SCHECK: -9223372036854775808
+// SCHECK: -9223372036854775807
diff --git a/mlir/test/Interfaces/DataLayoutInterfaces/module.mlir b/mlir/test/Interfaces/DataLayoutInterfaces/module.mlir
index 096e7ceb3cbc..97286ce75806 100644
--- a/mlir/test/Interfaces/DataLayoutInterfaces/module.mlir
+++ b/mlir/test/Interfaces/DataLayoutInterfaces/module.mlir
@@ -2,11 +2,13 @@
module attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 12]>,
- #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 32]>>} {
+ #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 32]>,
+ #dlti.dl_entry<!test.test_type_with_layout<30>, ["index", 7]>>} {
// CHECK-LABEL: @module_level_layout
func.func @module_level_layout() {
// CHECK: alignment = 32
// CHECK: bitsize = 12
+ // CHECK: index = 7
// CHECK: preferred = 1
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
diff --git a/mlir/test/Interfaces/DataLayoutInterfaces/query.mlir b/mlir/test/Interfaces/DataLayoutInterfaces/query.mlir
index 9f9240ac6f8c..d3bc91339d16 100644
--- a/mlir/test/Interfaces/DataLayoutInterfaces/query.mlir
+++ b/mlir/test/Interfaces/DataLayoutInterfaces/query.mlir
@@ -4,24 +4,34 @@
func.func @no_layout_builtin() {
// CHECK: alignment = 4
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 4
"test.data_layout_query"() : () -> i32
// CHECK: alignment = 8
// CHECK: bitsize = 64
+ // CHECK: index = 0
// CHECK: preferred = 8
// CHECK: size = 8
"test.data_layout_query"() : () -> f64
// CHECK: alignment = 4
// CHECK: bitsize = 64
+ // CHECK: index = 0
// CHECK: preferred = 4
// CHECK: size = 8
"test.data_layout_query"() : () -> complex<f32>
// CHECK: alignment = 1
// CHECK: bitsize = 14
+ // CHECK: index = 0
// CHECK: preferred = 1
// CHECK: size = 2
"test.data_layout_query"() : () -> complex<i6>
+ // CHECK: alignment = 4
+ // CHECK: bitsize = 64
+ // CHECK: index = 64
+ // CHECK: preferred = 8
+ // CHECK: size = 8
+ "test.data_layout_query"() : () -> index
return
}
@@ -30,6 +40,7 @@ func.func @no_layout_builtin() {
func.func @no_layout_custom() {
// CHECK: alignment = 1
// CHECK: bitsize = 1
+ // CHECK: index = 1
// CHECK: preferred = 1
// CHECK: size = 1
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
@@ -41,6 +52,7 @@ func.func @layout_op_no_layout() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 1
// CHECK: bitsize = 1
+ // CHECK: index = 1
// CHECK: preferred = 1
// CHECK: size = 1
"test.data_layout_query"() : () -> !test.test_type_with_layout<1000>
@@ -54,13 +66,15 @@ func.func @layout_op() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 20
// CHECK: bitsize = 10
+ // CHECK: index = 30
// CHECK: preferred = 1
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
"test.maybe_terminator"() : () -> ()
}) { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 10]>,
- #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>
+ #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>,
+ #dlti.dl_entry<!test.test_type_with_layout<30>, ["index", 30]>
>} : () -> ()
return
}
@@ -72,13 +86,15 @@ func.func @nested_inner_only() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 20
// CHECK: bitsize = 10
+ // CHECK: index = 30
// CHECK: preferred = 1
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
"test.maybe_terminator"() : () -> ()
}) { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 10]>,
- #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>
+ #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>,
+ #dlti.dl_entry<!test.test_type_with_layout<30>, ["index", 30]>
>} : () -> ()
"test.maybe_terminator"() : () -> ()
}) : () -> ()
@@ -92,6 +108,7 @@ func.func @nested_outer_only() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 20
// CHECK: bitsize = 10
+ // CHECK: index = 30
// CHECK: preferred = 1
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
@@ -100,7 +117,8 @@ func.func @nested_outer_only() {
"test.maybe_terminator"() : () -> ()
}) { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 10]>,
- #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>
+ #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>,
+ #dlti.dl_entry<!test.test_type_with_layout<30>, ["index", 30]>
>} : () -> ()
return
}
@@ -112,6 +130,7 @@ func.func @nested_middle_only() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 20
// CHECK: bitsize = 10
+ // CHECK: index = 30
// CHECK: preferred = 1
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
@@ -120,7 +139,8 @@ func.func @nested_middle_only() {
"test.maybe_terminator"() : () -> ()
}) { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 10]>,
- #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>
+ #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>,
+ #dlti.dl_entry<!test.test_type_with_layout<30>, ["index", 30]>
>} : () -> ()
"test.maybe_terminator"() : () -> ()
}) : () -> ()
@@ -134,6 +154,7 @@ func.func @nested_combine_with_missing() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 20
// CHECK: bitsize = 10
+ // CHECK: index = 21
// CHECK: preferred = 30
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
@@ -146,13 +167,15 @@ func.func @nested_combine_with_missing() {
>} : () -> ()
// CHECK: alignment = 1
// CHECK: bitsize = 42
+ // CHECK: index = 21
// CHECK: preferred = 30
// CHECK: size = 6
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
"test.maybe_terminator"() : () -> ()
}) { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 42]>,
- #dlti.dl_entry<!test.test_type_with_layout<30>, ["preferred", 30]>
+ #dlti.dl_entry<!test.test_type_with_layout<30>, ["preferred", 30]>,
+ #dlti.dl_entry<!test.test_type_with_layout<40>, ["index", 21]>
>}: () -> ()
return
}
@@ -164,6 +187,7 @@ func.func @nested_combine_all() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 20
// CHECK: bitsize = 3
+ // CHECK: index = 40
// CHECK: preferred = 30
// CHECK: size = 1
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
@@ -174,16 +198,19 @@ func.func @nested_combine_all() {
>} : () -> ()
// CHECK: alignment = 20
// CHECK: bitsize = 10
+ // CHECK: index = 40
// CHECK: preferred = 30
// CHECK: size = 2
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
"test.maybe_terminator"() : () -> ()
}) { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<!test.test_type_with_layout<10>, ["size", 10]>,
- #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>
+ #dlti.dl_entry<!test.test_type_with_layout<20>, ["alignment", 20]>,
+ #dlti.dl_entry<!test.test_type_with_layout<40>, ["index", 40]>
>} : () -> ()
// CHECK: alignment = 1
// CHECK: bitsize = 42
+ // CHECK: index = 1
// CHECK: preferred = 30
// CHECK: size = 6
"test.data_layout_query"() : () -> !test.test_type_with_layout<10>
@@ -200,18 +227,22 @@ func.func @integers() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 8
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 8
"test.data_layout_query"() : () -> i32
// CHECK: alignment = 16
// CHECK: bitsize = 56
+ // CHECK: index = 0
// CHECK: preferred = 16
"test.data_layout_query"() : () -> i56
// CHECK: alignment = 16
// CHECK: bitsize = 64
+ // CHECK: index = 0
// CHECK: preferred = 16
"test.data_layout_query"() : () -> i64
// CHECK: alignment = 16
// CHECK: bitsize = 128
+ // CHECK: index = 0
// CHECK: preferred = 16
"test.data_layout_query"() : () -> i128
"test.maybe_terminator"() : () -> ()
@@ -222,18 +253,22 @@ func.func @integers() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 8
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 16
"test.data_layout_query"() : () -> i32
// CHECK: alignment = 16
// CHECK: bitsize = 56
+ // CHECK: index = 0
// CHECK: preferred = 32
"test.data_layout_query"() : () -> i56
// CHECK: alignment = 16
// CHECK: bitsize = 64
+ // CHECK: index = 0
// CHECK: preferred = 32
"test.data_layout_query"() : () -> i64
// CHECK: alignment = 16
// CHECK: bitsize = 128
+ // CHECK: index = 0
// CHECK: preferred = 32
"test.data_layout_query"() : () -> i128
"test.maybe_terminator"() : () -> ()
@@ -248,10 +283,12 @@ func.func @floats() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 8
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 8
"test.data_layout_query"() : () -> f32
// CHECK: alignment = 16
// CHECK: bitsize = 80
+ // CHECK: index = 0
// CHECK: preferred = 16
"test.data_layout_query"() : () -> f80
"test.maybe_terminator"() : () -> ()
@@ -262,10 +299,12 @@ func.func @floats() {
"test.op_with_data_layout"() ({
// CHECK: alignment = 8
// CHECK: bitsize = 32
+ // CHECK: index = 0
// CHECK: preferred = 16
"test.data_layout_query"() : () -> f32
// CHECK: alignment = 16
// CHECK: bitsize = 80
+ // CHECK: index = 0
// CHECK: preferred = 32
"test.data_layout_query"() : () -> f80
"test.maybe_terminator"() : () -> ()
diff --git a/mlir/test/Interfaces/DataLayoutInterfaces/types.mlir b/mlir/test/Interfaces/DataLayoutInterfaces/types.mlir
index 55bb1d2eac91..82ae02cf92ad 100644
--- a/mlir/test/Interfaces/DataLayoutInterfaces/types.mlir
+++ b/mlir/test/Interfaces/DataLayoutInterfaces/types.mlir
@@ -40,6 +40,7 @@ module @index attributes { dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<index, 32>>} {
func.func @query() {
// CHECK: bitsize = 32
+ // CHECK: index = 32
"test.data_layout_query"() : () -> index
return
}
diff --git a/mlir/test/Target/LLVMIR/Import/basic.ll b/mlir/test/Target/LLVMIR/Import/basic.ll
index a059425d9780..448b0ebe2574 100644
--- a/mlir/test/Target/LLVMIR/Import/basic.ll
+++ b/mlir/test/Target/LLVMIR/Import/basic.ll
@@ -72,26 +72,26 @@ define i32 @useFreezeOp(i32 %x) {
; Varadic function definition
%struct.va_list = type { ptr }
-declare void @llvm.va_start(ptr)
-declare void @llvm.va_copy(ptr, ptr)
-declare void @llvm.va_end(ptr)
+declare void @llvm.va_start.p0(ptr)
+declare void @llvm.va_copy.p0(ptr, ptr)
+declare void @llvm.va_end.p0(ptr)
; CHECK-LABEL: llvm.func @variadic_function
define void @variadic_function(i32 %X, ...) {
; CHECK: %[[ALLOCA0:.+]] = llvm.alloca %{{.*}} x !llvm.struct<"struct.va_list", (ptr)> {{.*}} : (i32) -> !llvm.ptr
%ap = alloca %struct.va_list
; CHECK: llvm.intr.vastart %[[ALLOCA0]]
- call void @llvm.va_start(ptr %ap)
+ call void @llvm.va_start.p0(ptr %ap)
; CHECK: %[[ALLOCA1:.+]] = llvm.alloca %{{.*}} x !llvm.ptr {{.*}} : (i32) -> !llvm.ptr
%aq = alloca ptr
; CHECK: llvm.intr.vacopy %[[ALLOCA0]] to %[[ALLOCA1]]
- call void @llvm.va_copy(ptr %aq, ptr %ap)
+ call void @llvm.va_copy.p0(ptr %aq, ptr %ap)
; CHECK: llvm.intr.vaend %[[ALLOCA1]]
- call void @llvm.va_end(ptr %aq)
+ call void @llvm.va_end.p0(ptr %aq)
; CHECK: llvm.intr.vaend %[[ALLOCA0]]
- call void @llvm.va_end(ptr %ap)
+ call void @llvm.va_end.p0(ptr %ap)
; CHECK: llvm.return
ret void
}
diff --git a/mlir/test/Target/LLVMIR/Import/debug-info.ll b/mlir/test/Target/LLVMIR/Import/debug-info.ll
index a7947eb0d444..959a5a1cd971 100644
--- a/mlir/test/Target/LLVMIR/Import/debug-info.ll
+++ b/mlir/test/Target/LLVMIR/Import/debug-info.ll
@@ -601,8 +601,9 @@ declare !dbg !1 void @declaration()
; CHECK: #di_subprogram = #llvm.di_subprogram<
; CHECK-NOT: id = distinct
+; CHECK-NOT: subprogramFlags =
!llvm.module.flags = !{!0}
!0 = !{i32 2, !"Debug Info Version", i32 3}
-!1 = !DISubprogram(name: "declaration", scope: !2, file: !2, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+!1 = !DISubprogram(name: "declaration", scope: !2, file: !2, flags: DIFlagPrototyped, spFlags: 0)
!2 = !DIFile(filename: "debug-info.ll", directory: "/")
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 1ec9005458c5..0cefb4f8983a 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -599,11 +599,11 @@ define void @ushl_sat_test(i32 %0, i32 %1, <8 x i32> %2, <8 x i32> %3) {
; CHECK-LABEL: llvm.func @va_intrinsics_test
define void @va_intrinsics_test(ptr %0, ptr %1) {
; CHECK: llvm.intr.vastart %{{.*}}
- call void @llvm.va_start(ptr %0)
+ call void @llvm.va_start.p0(ptr %0)
; CHECK: llvm.intr.vacopy %{{.*}} to %{{.*}}
- call void @llvm.va_copy(ptr %1, ptr %0)
+ call void @llvm.va_copy.p0(ptr %1, ptr %0)
; CHECK: llvm.intr.vaend %{{.*}}
- call void @llvm.va_end(ptr %0)
+ call void @llvm.va_end.p0(ptr %0)
ret void
}
@@ -894,6 +894,23 @@ define float @ssa_copy(float %0) {
ret float %2
}
+; CHECK-LABEL: experimental_constrained_fptrunc
+define void @experimental_constrained_fptrunc(double %s, <4 x double> %v) {
+ ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} towardzero ignore : f64 to f32
+ %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %s, metadata !"round.towardzero", metadata !"fpexcept.ignore")
+ ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearest maytrap : f64 to f32
+ %2 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %s, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+ ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} upward strict : f64 to f32
+ %3 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %s, metadata !"round.upward", metadata !"fpexcept.strict")
+ ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} downward ignore : f64 to f32
+ %4 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %s, metadata !"round.downward", metadata !"fpexcept.ignore")
+ ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearestaway ignore : f64 to f32
+ %5 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %s, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
+ ; CHECK: llvm.intr.experimental.constrained.fptrunc %{{.*}} tonearestaway ignore : vector<4xf64> to vector<4xf16>
+ %6 = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double> %v, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
+ ret void
+}
+
declare float @llvm.fmuladd.f32(float, float, float)
declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)
declare float @llvm.fma.f32(float, float, float)
@@ -1059,9 +1076,9 @@ declare ptr @llvm.stacksave.p0()
declare ptr addrspace(1) @llvm.stacksave.p1()
declare void @llvm.stackrestore.p0(ptr)
declare void @llvm.stackrestore.p1(ptr addrspace(1))
-declare void @llvm.va_start(ptr)
-declare void @llvm.va_copy(ptr, ptr)
-declare void @llvm.va_end(ptr)
+declare void @llvm.va_start.p0(ptr)
+declare void @llvm.va_copy.p0(ptr, ptr)
+declare void @llvm.va_end.p0(ptr)
declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
@@ -1120,3 +1137,5 @@ declare void @llvm.assume(i1)
declare float @llvm.ssa.copy.f32(float returned)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float>, i64)
+declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
diff --git a/mlir/test/Target/LLVMIR/data-layout.mlir b/mlir/test/Target/LLVMIR/data-layout.mlir
index e61972a0dd97..881d6727e2a1 100644
--- a/mlir/test/Target/LLVMIR/data-layout.mlir
+++ b/mlir/test/Target/LLVMIR/data-layout.mlir
@@ -6,7 +6,7 @@
// CHECK: S128-
// CHECK: i64:64:128
// CHECK: f80:128:256
-// CHECK: p0:32:64:128
+// CHECK: p0:32:64:128:32
// CHECK: p1:32:32:32:16
module attributes {dlti.dl_spec = #dlti.dl_spec<
#dlti.dl_entry<"dlti.endianness", "big">,
diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
index c34f9187d4df..785a525caab8 100644
--- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
@@ -172,14 +172,14 @@ llvm.func @empty_types() {
#di_file = #llvm.di_file<"foo.mlir" in "/test/">
#di_subprogram = #llvm.di_subprogram<
- scope = #di_file, name = "func_decl_with_subprogram", file = #di_file, subprogramFlags = "Optimized"
+ scope = #di_file, name = "func_decl_with_subprogram", file = #di_file
>
// CHECK-LABEL: declare !dbg
// CHECK-SAME: ![[SUBPROGRAM:.*]] i32 @func_decl_with_subprogram(
llvm.func @func_decl_with_subprogram() -> (i32) loc(fused<#di_subprogram>["foo.mlir":2:1])
-// CHECK: ![[SUBPROGRAM]] = !DISubprogram(name: "func_decl_with_subprogram", scope: ![[FILE:.*]], file: ![[FILE]], spFlags: DISPFlagOptimized)
+// CHECK: ![[SUBPROGRAM]] = !DISubprogram(name: "func_decl_with_subprogram", scope: ![[FILE:.*]], file: ![[FILE]], spFlags: 0)
// CHECK: ![[FILE]] = !DIFile(filename: "foo.mlir", directory: "/test/")
// -----
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index fc2e0fd201a7..0013522582a7 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -964,6 +964,35 @@ llvm.func @ssa_copy(%arg: f32) -> f32 {
llvm.return %0 : f32
}
+// CHECK-LABEL: @experimental_constrained_fptrunc
+llvm.func @experimental_constrained_fptrunc(%s: f64, %v: vector<4xf32>) {
+ // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(
+ // CHECK: metadata !"round.towardzero"
+ // CHECK: metadata !"fpexcept.ignore"
+ %0 = llvm.intr.experimental.constrained.fptrunc %s towardzero ignore : f64 to f32
+ // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(
+ // CHECK: metadata !"round.tonearest"
+ // CHECK: metadata !"fpexcept.maytrap"
+ %1 = llvm.intr.experimental.constrained.fptrunc %s tonearest maytrap : f64 to f32
+ // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(
+ // CHECK: metadata !"round.upward"
+ // CHECK: metadata !"fpexcept.strict"
+ %2 = llvm.intr.experimental.constrained.fptrunc %s upward strict : f64 to f32
+ // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(
+ // CHECK: metadata !"round.downward"
+ // CHECK: metadata !"fpexcept.ignore"
+ %3 = llvm.intr.experimental.constrained.fptrunc %s downward ignore : f64 to f32
+ // CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(
+ // CHECK: metadata !"round.tonearestaway"
+ // CHECK: metadata !"fpexcept.ignore"
+ %4 = llvm.intr.experimental.constrained.fptrunc %s tonearestaway ignore : f64 to f32
+ // CHECK: call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(
+ // CHECK: metadata !"round.upward"
+ // CHECK: metadata !"fpexcept.strict"
+ %5 = llvm.intr.experimental.constrained.fptrunc %v upward strict : vector<4xf32> to vector<4xf16>
+ llvm.return
+}
+
// Check that intrinsics are declared with appropriate types.
// CHECK-DAG: declare float @llvm.fma.f32(float, float, float)
// CHECK-DAG: declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
@@ -1126,3 +1155,5 @@ llvm.func @ssa_copy(%arg: f32) -> f32 {
// CHECK-DAG: declare ptr addrspace(1) @llvm.stacksave.p1()
// CHECK-DAG: declare void @llvm.stackrestore.p0(ptr)
// CHECK-DAG: declare void @llvm.stackrestore.p1(ptr addrspace(1))
+// CHECK-DAG: declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
+// CHECK-DAG: declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata)
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index c38c7ea587d2..97f37939551d 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -2251,14 +2251,14 @@ llvm.func @vararg_function(%arg0: i32, ...) {
%1 = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCA0:.+]] = alloca %struct.va_list, align 8
%2 = llvm.alloca %1 x !llvm.struct<"struct.va_list", (ptr)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
- // CHECK: call void @llvm.va_start(ptr %[[ALLOCA0]])
+ // CHECK: call void @llvm.va_start.p0(ptr %[[ALLOCA0]])
llvm.intr.vastart %2 : !llvm.ptr
// CHECK: %[[ALLOCA1:.+]] = alloca ptr, align 8
%4 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i32) -> !llvm.ptr
- // CHECK: call void @llvm.va_copy(ptr %[[ALLOCA1]], ptr %[[ALLOCA0]])
+ // CHECK: call void @llvm.va_copy.p0(ptr %[[ALLOCA1]], ptr %[[ALLOCA0]])
llvm.intr.vacopy %2 to %4 : !llvm.ptr, !llvm.ptr
- // CHECK: call void @llvm.va_end(ptr %[[ALLOCA1]])
- // CHECK: call void @llvm.va_end(ptr %[[ALLOCA0]])
+ // CHECK: call void @llvm.va_end.p0(ptr %[[ALLOCA1]])
+ // CHECK: call void @llvm.va_end.p0(ptr %[[ALLOCA0]])
llvm.intr.vaend %4 : !llvm.ptr
llvm.intr.vaend %2 : !llvm.ptr
// CHECK: ret void
diff --git a/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir b/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir
index e8c388627a0a..7cb22dbb10b1 100644
--- a/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-fortran-allocatable-types-host.mlir
@@ -26,7 +26,7 @@ module attributes {omp.is_target_device = false} {
%14 = llvm.sub %11, %2 : i64
%15 = omp.map.bounds lower_bound(%7 : i64) upper_bound(%14 : i64) extent(%11 : i64) stride(%13 : i64) start_idx(%9 : i64) {stride_in_bytes = true}
%16 = llvm.getelementptr %3[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
- %17 = omp.map.info var_ptr(%16 : !llvm.ptr, f32) map_clauses(tofrom) capture(ByRef) bounds(%15) -> !llvm.ptr {name = "full_arr"}
+ %17 = omp.map.info var_ptr(%3 : !llvm.ptr, f32) var_ptr_ptr(%16 : !llvm.ptr) map_clauses(tofrom) capture(ByRef) bounds(%15) -> !llvm.ptr {name = "full_arr"}
%18 = omp.map.info var_ptr(%3 : !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>) map_clauses(tofrom) capture(ByRef) members(%17 : !llvm.ptr) -> !llvm.ptr {name = "full_arr"}
%19 = llvm.getelementptr %6[0, 7, %7, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
%20 = llvm.load %19 : !llvm.ptr -> i64
@@ -81,20 +81,19 @@ module attributes {omp.is_target_device = false} {
// CHECK: %[[ARR_SECT_SIZE2:.*]] = add i64 %[[ARR_SECT_SIZE3]], 1
// CHECK: %[[ARR_SECT_SIZE1:.*]] = mul i64 1, %[[ARR_SECT_SIZE2]]
// CHECK: %[[ARR_SECT_SIZE:.*]] = mul i64 %[[ARR_SECT_SIZE1]], 4
-// CHECK: %[[FULL_ARR_DESC_SIZE:.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr getelementptr inbounds ({ ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr @_QFEfull_arr, i32 1) to i64), i64 ptrtoint (ptr @_QFEfull_arr to i64)), ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
// CHECK: %[[LFULL_ARR:.*]] = load ptr, ptr @_QFEfull_arr, align 8
// CHECK: %[[FULL_ARR_PTR:.*]] = getelementptr inbounds float, ptr %[[LFULL_ARR]], i64 0
-// CHECK: %[[ARR_SECT_DESC_SIZE:.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr getelementptr inbounds ({ ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr @_QFEsect_arr, i32 1) to i64), i64 ptrtoint (ptr @_QFEsect_arr to i64)), ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
// CHECK: %[[ARR_SECT_OFFSET1:.*]] = mul i64 %[[ARR_SECT_OFFSET2]], 1
// CHECK: %[[LARR_SECT:.*]] = load ptr, ptr @_QFEsect_arr, align 8
// CHECK: %[[ARR_SECT_PTR:.*]] = getelementptr inbounds i32, ptr %[[LARR_SECT]], i64 %[[ARR_SECT_OFFSET1]]
+// CHECK: %[[SCALAR_PTR_LOAD:.*]] = load ptr, ptr %[[SCALAR_BASE]], align 8
+// CHECK: %[[FULL_ARR_DESC_SIZE:.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr getelementptr inbounds ({ ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr @_QFEfull_arr, i32 1) to i64), i64 ptrtoint (ptr @_QFEfull_arr to i64)), ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
+// CHECK: %[[ARR_SECT_DESC_SIZE:.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr getelementptr inbounds ({ ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr @_QFEsect_arr, i32 1) to i64), i64 ptrtoint (ptr @_QFEsect_arr to i64)), ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
// CHECK: %[[SCALAR_DESC_SZ4:.*]] = getelementptr { ptr, i64, i32, i8, i8, i8, i8 }, ptr %[[SCALAR_ALLOCA]], i32 1
// CHECK: %[[SCALAR_DESC_SZ3:.*]] = ptrtoint ptr %[[SCALAR_DESC_SZ4]] to i64
// CHECK: %[[SCALAR_DESC_SZ2:.*]] = ptrtoint ptr %[[SCALAR_ALLOCA]] to i64
// CHECK: %[[SCALAR_DESC_SZ1:.*]] = sub i64 %[[SCALAR_DESC_SZ3]], %[[SCALAR_DESC_SZ2]]
// CHECK: %[[SCALAR_DESC_SZ:.*]] = sdiv exact i64 %[[SCALAR_DESC_SZ1]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64)
-// CHECK: %[[SCALAR_PTR_LOAD:.*]] = load ptr, ptr %[[SCALAR_BASE]], align 8
-// CHECK: %[[SCALAR_PTR:.*]] = getelementptr inbounds float, ptr %[[SCALAR_PTR_LOAD]], i64 0
// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 0
// CHECK: store ptr @_QFEfull_arr, ptr %[[OFFLOADBASEPTRS]], align 8
@@ -145,4 +144,4 @@ module attributes {omp.is_target_device = false} {
// CHECK: %[[OFFLOADBASEPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_baseptrs, i32 0, i32 8
// CHECK: store ptr %[[SCALAR_BASE]], ptr %[[OFFLOADBASEPTRS]], align 8
// CHECK: %[[OFFLOADPTRS:.*]] = getelementptr inbounds [9 x ptr], ptr %.offload_ptrs, i32 0, i32 8
-// CHECK: store ptr %[[SCALAR_PTR]], ptr %[[OFFLOADPTRS]], align 8
+// CHECK: store ptr %[[SCALAR_PTR_LOAD]], ptr %[[OFFLOADPTRS]], align 8
diff --git a/mlir/test/Target/LLVMIR/omptarget-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
index 4b1d5d58f14e..2f629675442d 100644
--- a/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
@@ -66,16 +66,17 @@ llvm.func @_QPopenmp_target_data_region(%0 : !llvm.ptr) {
// CHECK: %[[VAL_2:.*]] = alloca [1 x ptr], align 8
// CHECK: br label %[[VAL_3:.*]]
// CHECK: entry: ; preds = %[[VAL_4:.*]]
+// CHECK: %[[ARR_OFFSET:.*]] = getelementptr inbounds [1024 x i32], ptr %[[ARR_DATA:.*]], i64 0, i64 0
// CHECK: %[[VAL_5:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_0]], i32 0, i32 0
-// CHECK: store ptr %[[VAL_6:.*]], ptr %[[VAL_5]], align 8
+// CHECK: store ptr %[[ARR_DATA]], ptr %[[VAL_5]], align 8
// CHECK: %[[VAL_7:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_1]], i32 0, i32 0
-// CHECK: store ptr %[[VAL_6]], ptr %[[VAL_7]], align 8
+// CHECK: store ptr %[[ARR_OFFSET]], ptr %[[VAL_7]], align 8
// CHECK: %[[VAL_8:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_2]], i64 0, i64 0
// CHECK: store ptr null, ptr %[[VAL_8]], align 8
// CHECK: %[[VAL_9:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_0]], i32 0, i32 0
// CHECK: %[[VAL_10:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_1]], i32 0, i32 0
// CHECK: call void @__tgt_target_data_begin_mapper(ptr @2, i64 -1, i32 1, ptr %[[VAL_9]], ptr %[[VAL_10]], ptr @.offload_sizes, ptr @.offload_maptypes, ptr @.offload_mapnames, ptr null)
-// CHECK: %[[VAL_11:.*]] = getelementptr [1024 x i32], ptr %[[VAL_6]], i32 0, i64 0
+// CHECK: %[[VAL_11:.*]] = getelementptr [1024 x i32], ptr %[[ARR_DATA]], i32 0, i64 0
// CHECK: store i32 99, ptr %[[VAL_11]], align 4
// CHECK: %[[VAL_12:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_0]], i32 0, i32 0
// CHECK: %[[VAL_13:.*]] = getelementptr inbounds [1 x ptr], ptr %[[VAL_1]], i32 0, i32 0
@@ -153,16 +154,18 @@ llvm.func @_QPomp_target_enter_exit(%1 : !llvm.ptr, %3 : !llvm.ptr) {
// CHECK: entry: ; preds = %[[VAL_12:.*]]
// CHECK: br i1 %[[VAL_9]], label %[[VAL_13:.*]], label %[[VAL_14:.*]]
// CHECK: omp_if.then: ; preds = %[[VAL_11]]
+// CHECK: %[[ARR_OFFSET1:.*]] = getelementptr inbounds [1024 x i32], ptr %[[VAL_16:.*]], i64 0, i64 0
+// CHECK: %[[ARR_OFFSET2:.*]] = getelementptr inbounds [512 x i32], ptr %[[VAL_20:.*]], i64 0, i64 0
// CHECK: %[[VAL_15:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_3]], i32 0, i32 0
// CHECK: store ptr %[[VAL_16:.*]], ptr %[[VAL_15]], align 8
// CHECK: %[[VAL_17:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_4]], i32 0, i32 0
-// CHECK: store ptr %[[VAL_16]], ptr %[[VAL_17]], align 8
+// CHECK: store ptr %[[ARR_OFFSET1]], ptr %[[VAL_17]], align 8
// CHECK: %[[VAL_18:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_5]], i64 0, i64 0
// CHECK: store ptr null, ptr %[[VAL_18]], align 8
// CHECK: %[[VAL_19:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_3]], i32 0, i32 1
// CHECK: store ptr %[[VAL_20:.*]], ptr %[[VAL_19]], align 8
// CHECK: %[[VAL_21:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_4]], i32 0, i32 1
-// CHECK: store ptr %[[VAL_20]], ptr %[[VAL_21]], align 8
+// CHECK: store ptr %[[ARR_OFFSET2]], ptr %[[VAL_21]], align 8
// CHECK: %[[VAL_22:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_5]], i64 0, i64 1
// CHECK: store ptr null, ptr %[[VAL_22]], align 8
// CHECK: %[[VAL_23:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_3]], i32 0, i32 0
@@ -176,26 +179,28 @@ llvm.func @_QPomp_target_enter_exit(%1 : !llvm.ptr, %3 : !llvm.ptr) {
// CHECK: %[[VAL_27:.*]] = icmp sgt i32 %[[VAL_26]], 10
// CHECK: %[[VAL_28:.*]] = load i32, ptr %[[VAL_6]], align 4
// CHECK: br i1 %[[VAL_27]], label %[[VAL_29:.*]], label %[[VAL_30:.*]]
-// CHECK: omp_if.then1: ; preds = %[[VAL_25]]
+// CHECK: omp_if.then2: ; preds = %[[VAL_25]]
+// CHECK: %[[ARR_OFFSET3:.*]] = getelementptr inbounds [1024 x i32], ptr %[[VAL_16]], i64 0, i64 0
+// CHECK: %[[ARR_OFFSET4:.*]] = getelementptr inbounds [512 x i32], ptr %[[VAL_20]], i64 0, i64 0
// CHECK: %[[VAL_31:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_0]], i32 0, i32 0
// CHECK: store ptr %[[VAL_16]], ptr %[[VAL_31]], align 8
// CHECK: %[[VAL_32:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_1]], i32 0, i32 0
-// CHECK: store ptr %[[VAL_16]], ptr %[[VAL_32]], align 8
+// CHECK: store ptr %[[ARR_OFFSET3]], ptr %[[VAL_32]], align 8
// CHECK: %[[VAL_33:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_2]], i64 0, i64 0
// CHECK: store ptr null, ptr %[[VAL_33]], align 8
// CHECK: %[[VAL_34:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_0]], i32 0, i32 1
// CHECK: store ptr %[[VAL_20]], ptr %[[VAL_34]], align 8
// CHECK: %[[VAL_35:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_1]], i32 0, i32 1
-// CHECK: store ptr %[[VAL_20]], ptr %[[VAL_35]], align 8
+// CHECK: store ptr %[[ARR_OFFSET4]], ptr %[[VAL_35]], align 8
// CHECK: %[[VAL_36:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_2]], i64 0, i64 1
// CHECK: store ptr null, ptr %[[VAL_36]], align 8
// CHECK: %[[VAL_37:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_0]], i32 0, i32 0
// CHECK: %[[VAL_38:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_1]], i32 0, i32 0
// CHECK: call void @__tgt_target_data_end_mapper(ptr @3, i64 -1, i32 2, ptr %[[VAL_37]], ptr %[[VAL_38]], ptr @.offload_sizes.1, ptr @.offload_maptypes.2, ptr @.offload_mapnames.3, ptr null)
// CHECK: br label %[[VAL_39:.*]]
-// CHECK: omp_if.else5: ; preds = %[[VAL_25]]
+// CHECK: omp_if.else8: ; preds = %[[VAL_25]]
// CHECK: br label %[[VAL_39]]
-// CHECK: omp_if.end6: ; preds = %[[VAL_30]], %[[VAL_29]]
+// CHECK: omp_if.end9: ; preds = %[[VAL_30]], %[[VAL_29]]
// CHECK: ret void
// -----
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 2cf86b50d432..d2c2c12d3238 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -989,6 +989,15 @@ func.func @tensor_arith.floordivsi_by_one(%arg0: tensor<4x5xi32>) -> tensor<4x5x
return %res : tensor<4x5xi32>
}
+// CHECK-LABEL: func @arith.floordivsi_by_one_overflow
+func.func @arith.floordivsi_by_one_overflow() -> i64 {
+ %neg_one = arith.constant -1 : i64
+ %min_int = arith.constant -9223372036854775808 : i64
+ // CHECK: arith.floordivsi
+  %poison = arith.floordivsi %min_int, %neg_one : i64
+  return %poison : i64
+}
+
// -----
// CHECK-LABEL: func @arith.ceildivsi_by_one
diff --git a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
index 39671a930f2e..5e160b720db6 100644
--- a/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
+++ b/mlir/test/lib/Dialect/Affine/TestReifyValueBounds.cpp
@@ -13,6 +13,7 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/Dialect/Vector/IR/ScalableValueBoundsConstraintSet.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Pass/Pass.h"
@@ -75,7 +76,8 @@ static LogicalResult testReifyValueBounds(func::FuncOp funcOp,
WalkResult result = funcOp.walk([&](Operation *op) {
// Look for test.reify_bound ops.
if (op->getName().getStringRef() == "test.reify_bound" ||
- op->getName().getStringRef() == "test.reify_constant_bound") {
+ op->getName().getStringRef() == "test.reify_constant_bound" ||
+ op->getName().getStringRef() == "test.reify_scalable_bound") {
if (op->getNumOperands() != 1 || op->getNumResults() != 1 ||
!op->getResultTypes()[0].isIndex()) {
op->emitOpError("invalid op");
@@ -110,6 +112,9 @@ static LogicalResult testReifyValueBounds(func::FuncOp funcOp,
bool constant =
op->getName().getStringRef() == "test.reify_constant_bound";
+ bool scalable = !constant && op->getName().getStringRef() ==
+ "test.reify_scalable_bound";
+
// Prepare stop condition. By default, reify in terms of the op's
// operands. No stop condition is used when a constant was requested.
std::function<bool(Value, std::optional<int64_t>)> stopCondition =
@@ -137,6 +142,37 @@ static LogicalResult testReifyValueBounds(func::FuncOp funcOp,
if (succeeded(reifiedConst))
reified =
FailureOr<OpFoldResult>(rewriter.getIndexAttr(*reifiedConst));
+ } else if (scalable) {
+ unsigned vscaleMin = 0;
+ unsigned vscaleMax = 0;
+ if (auto attr = "vscale_min"; op->hasAttrOfType<IntegerAttr>(attr)) {
+ vscaleMin = unsigned(op->getAttrOfType<IntegerAttr>(attr).getInt());
+ } else {
+ op->emitOpError("expected `vscale_min` to be provided");
+ return WalkResult::skip();
+ }
+ if (auto attr = "vscale_max"; op->hasAttrOfType<IntegerAttr>(attr)) {
+ vscaleMax = unsigned(op->getAttrOfType<IntegerAttr>(attr).getInt());
+ } else {
+ op->emitOpError("expected `vscale_max` to be provided");
+ return WalkResult::skip();
+ }
+
+ auto loc = op->getLoc();
+ auto reifiedScalable =
+ vector::ScalableValueBoundsConstraintSet::computeScalableBound(
+ value, dim, vscaleMin, vscaleMax, *boundType);
+ if (succeeded(reifiedScalable)) {
+ SmallVector<std::pair<Value, std::optional<int64_t>>, 1>
+ vscaleOperand;
+ if (reifiedScalable->map.getNumInputs() == 1) {
+ // The only possible input to the bound is vscale.
+ vscaleOperand.push_back(std::make_pair(
+ rewriter.create<vector::VectorScaleOp>(loc), std::nullopt));
+ }
+ reified = affine::materializeComputedBound(
+ rewriter, loc, reifiedScalable->map, vscaleOperand);
+ }
} else {
if (dim) {
if (useArithOps) {
diff --git a/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp b/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
index 740562e77830..3da48ffa403e 100644
--- a/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
+++ b/mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
@@ -36,19 +36,21 @@ struct TestDataLayoutQuery
return;
const DataLayout &layout = layouts.getAbove(op);
- unsigned size = layout.getTypeSize(op.getType());
- unsigned bitsize = layout.getTypeSizeInBits(op.getType());
- unsigned alignment = layout.getTypeABIAlignment(op.getType());
- unsigned preferred = layout.getTypePreferredAlignment(op.getType());
+ llvm::TypeSize size = layout.getTypeSize(op.getType());
+ llvm::TypeSize bitsize = layout.getTypeSizeInBits(op.getType());
+ uint64_t alignment = layout.getTypeABIAlignment(op.getType());
+ uint64_t preferred = layout.getTypePreferredAlignment(op.getType());
+ uint64_t index = layout.getTypeIndexBitwidth(op.getType()).value_or(0);
Attribute allocaMemorySpace = layout.getAllocaMemorySpace();
Attribute programMemorySpace = layout.getProgramMemorySpace();
Attribute globalMemorySpace = layout.getGlobalMemorySpace();
- unsigned stackAlignment = layout.getStackAlignment();
+ uint64_t stackAlignment = layout.getStackAlignment();
op->setAttrs(
{builder.getNamedAttr("size", builder.getIndexAttr(size)),
builder.getNamedAttr("bitsize", builder.getIndexAttr(bitsize)),
builder.getNamedAttr("alignment", builder.getIndexAttr(alignment)),
builder.getNamedAttr("preferred", builder.getIndexAttr(preferred)),
+ builder.getNamedAttr("index", builder.getIndexAttr(index)),
builder.getNamedAttr("alloca_memory_space",
allocaMemorySpace == Attribute()
? builder.getUI32IntegerAttr(0)
diff --git a/mlir/test/lib/Dialect/Test/TestTypeDefs.td b/mlir/test/lib/Dialect/Test/TestTypeDefs.td
index 1957845c842f..492642b711e0 100644
--- a/mlir/test/lib/Dialect/Test/TestTypeDefs.td
+++ b/mlir/test/lib/Dialect/Test/TestTypeDefs.td
@@ -148,7 +148,8 @@ def TestType : Test_Type<"Test", [
}
def TestTypeWithLayoutType : Test_Type<"TestTypeWithLayout", [
- DeclareTypeInterfaceMethods<DataLayoutTypeInterface, ["areCompatible"]>
+ DeclareTypeInterfaceMethods<DataLayoutTypeInterface, ["getIndexBitwidth",
+ "areCompatible"]>
]> {
let mnemonic = "test_type_with_layout";
let parameters = (ins "unsigned":$key);
diff --git a/mlir/test/lib/Dialect/Test/TestTypes.cpp b/mlir/test/lib/Dialect/Test/TestTypes.cpp
index 2f4c9b689069..7a195eb25a3b 100644
--- a/mlir/test/lib/Dialect/Test/TestTypes.cpp
+++ b/mlir/test/lib/Dialect/Test/TestTypes.cpp
@@ -276,6 +276,12 @@ uint64_t TestTypeWithLayoutType::getPreferredAlignment(
return extractKind(params, "preferred");
}
+std::optional<uint64_t>
+TestTypeWithLayoutType::getIndexBitwidth(const DataLayout &dataLayout,
+ DataLayoutEntryListRef params) const {
+ return extractKind(params, "index");
+}
+
bool TestTypeWithLayoutType::areCompatible(
DataLayoutEntryListRef oldLayout, DataLayoutEntryListRef newLayout) const {
unsigned old = extractKind(oldLayout, "alignment");
@@ -297,7 +303,7 @@ TestTypeWithLayoutType::verifyEntries(DataLayoutEntryListRef params,
(void)kind;
assert(kind &&
(kind.getValue() == "size" || kind.getValue() == "alignment" ||
- kind.getValue() == "preferred") &&
+ kind.getValue() == "preferred" || kind.getValue() == "index") &&
"unexpected kind");
assert(llvm::isa<IntegerAttr>(array.getValue().back()));
}
diff --git a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.h b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.h
index ddc38b993564..60dc959b0050 100644
--- a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.h
+++ b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.h
@@ -16,8 +16,8 @@
#include "mlir/Bytecode/BytecodeOpInterface.h"
#include "mlir/Dialect/PDL/IR/PDLTypes.h"
-#include "mlir/Dialect/Transform/IR/MatchInterfaces.h"
#include "mlir/Dialect/Transform/IR/TransformTypes.h"
+#include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.h"
#include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h"
#include "mlir/IR/OpImplementation.h"
diff --git a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td
index 75134b25882f..4f2cf34f7d33 100644
--- a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td
+++ b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.td
@@ -17,7 +17,7 @@
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/AttrTypeBase.td"
include "mlir/IR/OpBase.td"
-include "mlir/Dialect/Transform/IR/MatchInterfaces.td"
+include "mlir/Dialect/Transform/Interfaces/MatchInterfaces.td"
include "mlir/Dialect/Transform/IR/TransformDialect.td"
include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td"
include "mlir/Dialect/PDL/IR/PDLTypes.td"
diff --git a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
index f14fb18706d1..006225999105 100644
--- a/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
+++ b/mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
@@ -489,7 +489,9 @@ struct TestFlattenVectorTransferPatterns
Option<unsigned> targetVectorBitwidth{
*this, "target-vector-bitwidth",
llvm::cl::desc(
- "Minimum vector bitwidth to enable the flattening transformation"),
+ "Minimum vector bitwidth to enable the flattening transformation. "
+ "For scalable vectors this is the base size, i.e. the size "
+ "corresponding to vscale=1."),
llvm::cl::init(std::numeric_limits<unsigned>::max())};
void runOnOperation() override {
diff --git a/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir b/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir
index e2229a392bbf..340ef30bf59c 100644
--- a/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir
+++ b/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir
@@ -190,6 +190,12 @@ func.func @func_powff64(%a : f64, %b : f64) {
return
}
+func.func @func_powff32(%a : f32, %b : f32) {
+ %r = math.powf %a, %b : f32
+ vector.print %r : f32
+ return
+}
+
func.func @powf() {
// CHECK-NEXT: 16
%a = arith.constant 4.0 : f64
@@ -230,7 +236,17 @@ func.func @powf() {
%j = arith.constant 29385.0 : f64
%j_p = arith.constant 23598.0 : f64
call @func_powff64(%j, %j_p) : (f64, f64) -> ()
- return
+
+ // CHECK-NEXT: -nan
+ %k = arith.constant 1.0 : f64
+ %k_p = arith.constant 0xfff0000001000000 : f64
+ call @func_powff64(%k, %k_p) : (f64, f64) -> ()
+
+ // CHECK-NEXT: -nan
+ %l = arith.constant 1.0 : f32
+ %l_p = arith.constant 0xffffffff : f32
+ call @func_powff32(%l, %l_p) : (f32, f32) -> ()
+ return
}
// -------------------------------------------------------------------------- //
diff --git a/mlir/test/mlir-opt/split-markers.mlir b/mlir/test/mlir-opt/split-markers.mlir
index 665a37f31770..f372654bcc8d 100644
--- a/mlir/test/mlir-opt/split-markers.mlir
+++ b/mlir/test/mlir-opt/split-markers.mlir
@@ -1,12 +1,17 @@
// Check near-miss mechanics:
// RUN: mlir-opt --split-input-file --verify-diagnostics %s 2> %t \
-// RUN: && FileCheck --input-file %t %s
+// RUN: && FileCheck --input-file %t --check-prefix=CHECK-DEFAULT %s
// RUN: cat %t
// Check that (1) custom input splitter and (2) custom output splitters work.
-// RUN: mlir-opt %s -split-input-file="// CHECK: ""----" \
+// RUN: mlir-opt %s -split-input-file="// CHECK-DEFAULT: ""----" \
// RUN: -output-split-marker="// ---- next split ----" \
-// RUN: | FileCheck --check-prefix=CHECK-SPLITTERS %s
+// RUN: | FileCheck --check-prefix=CHECK-CUSTOM %s
+
+// Check that (3) the input is not split if `-split-input-file` is not given.
+// RUN: mlir-opt %s 2> %t \
+// RUN: || FileCheck --input-file %t --check-prefix=CHECK-NOSPLIT %s
+// RUN: cat %t
func.func @main() {return}
@@ -14,22 +19,25 @@ func.func @main() {return}
// expected-note @+1 {{see existing symbol definition here}}
func.func @foo() { return }
-// CHECK: warning: near miss with file split marker
-// CHECK: ----
+// CHECK-DEFAULT: warning: near miss with file split marker
+// CHECK-DEFAULT: ----
// ----
+// CHECK-NOSPLIT: error: redefinition of symbol named 'main'
+func.func @main() {return}
+
// expected-error @+1 {{redefinition of symbol named 'foo'}}
func.func @foo() { return }
-// CHECK: warning: near miss with file split marker
-// CHECK: ----
+// CHECK-DEFAULT: warning: near miss with file split marker
+// CHECK-DEFAULT: ----
// ----
func.func @bar2() {return }
// No error flagged at the end for a near miss.
// ----
-// CHECK-SPLITTERS: module
-// CHECK-SPLITTERS: ---- next split ----
-// CHECK-SPLITTERS: module
-// CHECK-SPLITTERS: ---- next split ----
-// CHECK-SPLITTERS: module
+// CHECK-CUSTOM: module
+// CHECK-CUSTOM: ---- next split ----
+// CHECK-CUSTOM: module
+// CHECK-CUSTOM: ---- next split ----
+// CHECK-CUSTOM: module
diff --git a/mlir/test/mlir-pdll/split-markers.pdll b/mlir/test/mlir-pdll/split-markers.pdll
index 45e409a83836..2b314538004f 100644
--- a/mlir/test/mlir-pdll/split-markers.pdll
+++ b/mlir/test/mlir-pdll/split-markers.pdll
@@ -9,6 +9,10 @@
// RUN: -split-input-file="// ""=====" -output-split-marker "// #####" \
// RUN: | FileCheck -check-prefix=CHECK-CUSTOM %s
+// Check that (5) the input is not split if `-split-input-file` is not given.
+// RUN: mlir-pdll %s \
+// RUN: | FileCheck -check-prefix=CHECK-NOSPLIT %s
+
// CHECK-DEFAULT: Module
// CHECK-DEFAULT-NEXT: PatternDecl
// CHECK-DEFAULT-NOT: PatternDecl
@@ -25,6 +29,14 @@
// CHECK-CUSTOM-NEXT: PatternDecl
// CHECK-CUSTOM-NOT: PatternDecl
+// CHECK-NOSPLIT: Module
+// CHECK-NOSPLIT-NEXT: PatternDecl
+// CHECK-NOSPLIT-NOT: Module
+// CHECK-NOSPLIT: PatternDecl
+// CHECK-NOSPLIT-NOT: Module
+// CHECK-NOSPLIT: PatternDecl
+// CHECK-NOSPLIT-NOT: Module
+
Pattern => erase op<test.op>;
// -----
diff --git a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
index 23bc9b00dc90..2c7acec3b1b8 100644
--- a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
@@ -272,6 +272,10 @@ static LogicalResult emitOneMLIRBuilder(const Record &record, raw_ostream &os,
bs << "moduleImport.matchLocalVariableAttr";
} else if (name == "_label_attr") {
bs << "moduleImport.matchLabelAttr";
+ } else if (name == "_fpExceptionBehavior_attr") {
+ bs << "moduleImport.matchFPExceptionBehaviorAttr";
+ } else if (name == "_roundingMode_attr") {
+ bs << "moduleImport.matchRoundingModeAttr";
} else if (name == "_resultType") {
bs << "moduleImport.convertType(inst->getType())";
} else if (name == "_location") {
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index 0d81912afb61..3a697520dfad 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -3057,7 +3057,7 @@ void OpEmitter::genCodeForAddingArgAndRegionForBuilder(
body << llvm::formatv(
"static_cast<int32_t>(std::accumulate({0}.begin(), {0}.end(), 0, "
"[](int32_t curSum, ::mlir::ValueRange range) {{ return curSum + "
- "range.size(); }))",
+ "static_cast<int32_t>(range.size()); }))",
operandName);
} else {
body << "static_cast<int32_t>(" << getArgumentName(op, i) << ".size())";
diff --git a/mlir/unittests/IR/InterfaceAttachmentTest.cpp b/mlir/unittests/IR/InterfaceAttachmentTest.cpp
index 16de34c45ec6..58049a9969e3 100644
--- a/mlir/unittests/IR/InterfaceAttachmentTest.cpp
+++ b/mlir/unittests/IR/InterfaceAttachmentTest.cpp
@@ -431,8 +431,8 @@ TEST(InterfaceAttachmentTest, PromisedInterfaces) {
attr.hasPromiseOrImplementsInterface<TestExternalAttrInterface>());
// Add a promise `TestExternalAttrInterface`.
- testDialect->declarePromisedInterface<test::SimpleAAttr,
- TestExternalAttrInterface>();
+ testDialect->declarePromisedInterface<TestExternalAttrInterface,
+ test::SimpleAAttr>();
EXPECT_TRUE(
attr.hasPromiseOrImplementsInterface<TestExternalAttrInterface>());
diff --git a/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp b/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
index 794e19710fad..d6b8d7392f32 100644
--- a/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
+++ b/mlir/unittests/Interfaces/DataLayoutInterfacesTest.cpp
@@ -345,6 +345,8 @@ TEST(DataLayout, NullSpec) {
EXPECT_EQ(layout.getTypeABIAlignment(Float16Type::get(&ctx)), 16u);
EXPECT_EQ(layout.getTypePreferredAlignment(IntegerType::get(&ctx, 42)), 128u);
EXPECT_EQ(layout.getTypePreferredAlignment(Float16Type::get(&ctx)), 32u);
+ EXPECT_EQ(layout.getTypeIndexBitwidth(Float16Type::get(&ctx)), std::nullopt);
+ EXPECT_EQ(layout.getTypeIndexBitwidth(IndexType::get(&ctx)), 64u);
EXPECT_EQ(layout.getAllocaMemorySpace(), Attribute());
EXPECT_EQ(layout.getProgramMemorySpace(), Attribute());
@@ -373,6 +375,8 @@ TEST(DataLayout, EmptySpec) {
EXPECT_EQ(layout.getTypeABIAlignment(Float16Type::get(&ctx)), 16u);
EXPECT_EQ(layout.getTypePreferredAlignment(IntegerType::get(&ctx, 42)), 128u);
EXPECT_EQ(layout.getTypePreferredAlignment(Float16Type::get(&ctx)), 32u);
+ EXPECT_EQ(layout.getTypeIndexBitwidth(Float16Type::get(&ctx)), std::nullopt);
+ EXPECT_EQ(layout.getTypeIndexBitwidth(IndexType::get(&ctx)), 64u);
EXPECT_EQ(layout.getAllocaMemorySpace(), Attribute());
EXPECT_EQ(layout.getProgramMemorySpace(), Attribute());
@@ -385,6 +389,7 @@ TEST(DataLayout, SpecWithEntries) {
"dltest.op_with_layout"() { dltest.layout = #dltest.spec<
#dlti.dl_entry<i42, 5>,
#dlti.dl_entry<i16, 6>,
+ #dlti.dl_entry<index, 42>,
#dlti.dl_entry<"dltest.alloca_memory_space", 5 : i32>,
#dlti.dl_entry<"dltest.program_memory_space", 3 : i32>,
#dlti.dl_entry<"dltest.global_memory_space", 2 : i32>,
@@ -408,6 +413,8 @@ TEST(DataLayout, SpecWithEntries) {
EXPECT_EQ(layout.getTypeABIAlignment(Float16Type::get(&ctx)), 8u);
EXPECT_EQ(layout.getTypePreferredAlignment(IntegerType::get(&ctx, 42)), 16u);
EXPECT_EQ(layout.getTypePreferredAlignment(Float16Type::get(&ctx)), 16u);
+ EXPECT_EQ(layout.getTypeIndexBitwidth(Float16Type::get(&ctx)), std::nullopt);
+ EXPECT_EQ(layout.getTypeIndexBitwidth(IndexType::get(&ctx)), 42u);
EXPECT_EQ(layout.getTypeSize(IntegerType::get(&ctx, 32)), 32u);
EXPECT_EQ(layout.getTypeSize(Float32Type::get(&ctx)), 32u);
diff --git a/openmp/libomptarget/CMakeLists.txt b/openmp/libomptarget/CMakeLists.txt
index b382137b70ee..531198fae016 100644
--- a/openmp/libomptarget/CMakeLists.txt
+++ b/openmp/libomptarget/CMakeLists.txt
@@ -145,8 +145,7 @@ add_subdirectory(DeviceRTL)
add_subdirectory(tools)
# Build target agnostic offloading library.
-set(LIBOMPTARGET_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src)
-add_subdirectory(${LIBOMPTARGET_SRC_DIR})
+add_subdirectory(src)
# Add tests.
add_subdirectory(test)
diff --git a/openmp/libomptarget/DeviceRTL/src/Debug.cpp b/openmp/libomptarget/DeviceRTL/src/Debug.cpp
index aecc33c0497a..31cd54e3de35 100644
--- a/openmp/libomptarget/DeviceRTL/src/Debug.cpp
+++ b/openmp/libomptarget/DeviceRTL/src/Debug.cpp
@@ -33,10 +33,10 @@ void __assert_fail(const char *expr, const char *file, unsigned line,
void __assert_fail_internal(const char *expr, const char *msg, const char *file,
unsigned line, const char *function) {
if (msg) {
- PRINTF("%s:%u: %s: Assertion %s (`%s') failed.\n", file, line, function,
+ PRINTF("%s:%u: %s: Assertion %s (`%s`) failed.\n", file, line, function,
msg, expr);
} else {
- PRINTF("%s:%u: %s: Assertion `%s' failed.\n", file, line, function, expr);
+ PRINTF("%s:%u: %s: Assertion `%s` failed.\n", file, line, function, expr);
}
__builtin_trap();
}
diff --git a/openmp/libomptarget/include/PluginManager.h b/openmp/libomptarget/include/PluginManager.h
index 77684285ddf1..eece7525e25e 100644
--- a/openmp/libomptarget/include/PluginManager.h
+++ b/openmp/libomptarget/include/PluginManager.h
@@ -45,24 +45,6 @@ struct PluginAdaptorTy {
static llvm::Expected<std::unique_ptr<PluginAdaptorTy>>
create(const std::string &Name);
- /// Initialize as many devices as possible for this plugin adaptor. Devices
- /// that fail to initialize are ignored.
- void initDevices(PluginManager &PM);
-
- bool isUsed() const { return DeviceOffset >= 0; }
-
- /// Return the number of devices visible to the underlying plugin.
- int32_t getNumberOfPluginDevices() const { return NumberOfPluginDevices; }
-
- /// Return the number of devices successfully initialized and visible to the
- /// user.
- int32_t getNumberOfUserDevices() const { return NumberOfUserDevices; }
-
- /// RTL index, index is the number of devices of other RTLs that were
- /// registered before, i.e. the OpenMP index of the first device to be
- /// registered with this RTL.
- int32_t DeviceOffset = -1;
-
/// Name of the shared object file representing the plugin.
std::string Name;
@@ -76,16 +58,6 @@ struct PluginAdaptorTy {
#include "Shared/PluginAPI.inc"
#undef PLUGIN_API_HANDLE
- llvm::DenseSet<const __tgt_device_image *> UsedImages;
-
-private:
- /// Number of devices the underling plugins sees.
- int32_t NumberOfPluginDevices = -1;
-
- /// Number of devices exposed to the user. This can be less than the number of
- /// devices for the plugin if some failed to initialize.
- int32_t NumberOfUserDevices = 0;
-
/// Create a plugin adaptor for filename \p Name with a dynamic library \p DL.
PluginAdaptorTy(const std::string &Name,
std::unique_ptr<llvm::sys::DynamicLibrary> DL);
@@ -120,6 +92,11 @@ struct PluginManager {
std::make_unique<DeviceImageTy>(TgtBinDesc, TgtDeviceImage));
}
+ /// Initialize as many devices as possible for this plugin adaptor. Devices
+ /// that fail to initialize are ignored. Returns the offset the devices were
+ /// registered at.
+ void initDevices(PluginAdaptorTy &RTL);
+
/// Return the device presented to the user as device \p DeviceNo if it is
/// initialized and ready. Otherwise return an error explaining the problem.
llvm::Expected<DeviceTy &> getDevice(uint32_t DeviceNo);
@@ -169,12 +146,7 @@ struct PluginManager {
return Devices.getExclusiveAccessor();
}
- int getNumUsedPlugins() const {
- int NCI = 0;
- for (auto &P : PluginAdaptors)
- NCI += P->isUsed();
- return NCI;
- }
+ int getNumUsedPlugins() const { return DeviceOffsets.size(); }
// Initialize all plugins.
void initAllPlugins();
@@ -195,6 +167,15 @@ private:
// List of all plugin adaptors, in use or not.
llvm::SmallVector<std::unique_ptr<PluginAdaptorTy>> PluginAdaptors;
+ // Mapping of plugin adaptors to offsets in the device table.
+ llvm::DenseMap<const PluginAdaptorTy *, int32_t> DeviceOffsets;
+
+ // Mapping of plugin adaptors to the number of used devices.
+ llvm::DenseMap<const PluginAdaptorTy *, int32_t> DeviceUsed;
+
+ // Set of all device images currently in use.
+ llvm::DenseSet<const __tgt_device_image *> UsedImages;
+
/// Executable images and information extracted from the input images passed
/// to the runtime.
llvm::SmallVector<std::unique_ptr<DeviceImageTy>> DeviceImages;
diff --git a/openmp/libomptarget/plugins-nextgen/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/CMakeLists.txt
index 75540f055844..dbd82ac94517 100644
--- a/openmp/libomptarget/plugins-nextgen/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/CMakeLists.txt
@@ -10,7 +10,65 @@
#
##===----------------------------------------------------------------------===##
+# Common interface to handle creating a plugin library.
+set(common_dir ${CMAKE_CURRENT_SOURCE_DIR}/common)
add_subdirectory(common)
+function(add_target_library target_name lib_name)
+ add_llvm_library(${target_name} SHARED
+ LINK_COMPONENTS
+ ${LLVM_TARGETS_TO_BUILD}
+ AggressiveInstCombine
+ Analysis
+ BinaryFormat
+ BitReader
+ BitWriter
+ CodeGen
+ Core
+ Extensions
+ InstCombine
+ Instrumentation
+ IPO
+ IRReader
+ Linker
+ MC
+ Object
+ Passes
+ Remarks
+ ScalarOpts
+ Support
+ Target
+ TargetParser
+ TransformUtils
+ Vectorize
+
+ NO_INSTALL_RPATH
+ BUILDTREE_ONLY
+ )
+
+ llvm_update_compile_flags(${target_name})
+ target_link_libraries(${target_name} PRIVATE
+ PluginCommon ${OPENMP_PTHREAD_LIB})
+
+ target_compile_definitions(${target_name} PRIVATE TARGET_NAME=${lib_name})
+ target_compile_definitions(${target_name} PRIVATE
+ DEBUG_PREFIX="TARGET ${lib_name} RTL")
+
+ if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
+ # On FreeBSD, the 'environ' symbol is undefined at link time, but resolved by
+ # the dynamic linker at runtime. Therefore, allow the symbol to be undefined
+ # when creating a shared library.
+ target_link_libraries(${target_name} PRIVATE "-Wl,--allow-shlib-undefined")
+ else()
+ target_link_libraries(${target_name} PRIVATE "-Wl,-z,defs")
+ endif()
+
+ if(LIBOMP_HAVE_VERSION_SCRIPT_FLAG)
+ target_link_libraries(${target_name} PRIVATE
+ "-Wl,--version-script=${common_dir}/../exports")
+ endif()
+ set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET protected)
+endfunction()
+
add_subdirectory(amdgpu)
add_subdirectory(cuda)
add_subdirectory(host)
diff --git a/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
index 8fbfe4d9b13f..40df77102c78 100644
--- a/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
@@ -27,76 +27,23 @@ if(NOT (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(ppc64le)|(aarch64)$" AND CMAKE
return()
endif()
-################################################################################
-# Define the suffix for the runtime messaging dumps.
-add_definitions(-DTARGET_NAME=AMDGPU)
-
-# Define debug prefix. TODO: This should be automatized in the Debug.h but it
-# requires changing the original plugins.
-add_definitions(-DDEBUG_PREFIX="TARGET AMDGPU RTL")
+# Create the library and add the default arguments.
+add_target_library(omptarget.rtl.amdgpu AMDGPU)
-set(LIBOMPTARGET_DLOPEN_LIBHSA OFF)
-option(LIBOMPTARGET_FORCE_DLOPEN_LIBHSA "Build with dlopened libhsa" ${LIBOMPTARGET_DLOPEN_LIBHSA})
-
-if (${hsa-runtime64_FOUND} AND NOT LIBOMPTARGET_FORCE_DLOPEN_LIBHSA)
- libomptarget_say("Building AMDGPU NextGen plugin linked against libhsa")
- set(LIBOMPTARGET_EXTRA_SOURCE)
- set(LIBOMPTARGET_DEP_LIBRARIES hsa-runtime64::hsa-runtime64)
-else()
- libomptarget_say("Building AMDGPU NextGen plugin for dlopened libhsa")
- include_directories(dynamic_hsa)
- set(LIBOMPTARGET_EXTRA_SOURCE dynamic_hsa/hsa.cpp)
- set(LIBOMPTARGET_DEP_LIBRARIES)
-endif()
+target_sources(omptarget.rtl.amdgpu PRIVATE src/rtl.cpp)
+target_include_directories(omptarget.rtl.amdgpu PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}/utils)
-if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
- # On FreeBSD, the 'environ' symbol is undefined at link time, but resolved by
- # the dynamic linker at runtime. Therefore, allow the symbol to be undefined
- # when creating a shared library.
- set(LDFLAGS_UNDEFINED "-Wl,--allow-shlib-undefined")
+option(LIBOMPTARGET_FORCE_DLOPEN_LIBHSA "Build with dlopened libhsa" OFF)
+if(hsa-runtime64_FOUND AND NOT LIBOMPTARGET_FORCE_DLOPEN_LIBHSA)
+ libomptarget_say("Building AMDGPU plugin linked against libhsa")
+ target_link_libraries(omptarget.rtl.amdgpu PRIVATE hsa-runtime64::hsa-runtime64)
else()
- set(LDFLAGS_UNDEFINED "-Wl,-z,defs")
+ libomptarget_say("Building AMDGPU plugin for dlopened libhsa")
+ target_include_directories(omptarget.rtl.amdgpu PRIVATE dynamic_hsa)
+ target_sources(omptarget.rtl.amdgpu PRIVATE dynamic_hsa/hsa.cpp)
endif()
-add_llvm_library(omptarget.rtl.amdgpu SHARED
- src/rtl.cpp
- ${LIBOMPTARGET_EXTRA_SOURCE}
-
- ADDITIONAL_HEADER_DIRS
- ${LIBOMPTARGET_INCLUDE_DIR}
- ${CMAKE_CURRENT_SOURCE_DIR}/utils
-
- LINK_COMPONENTS
- Support
- Object
-
- LINK_LIBS
- PRIVATE
- PluginCommon
- ${LIBOMPTARGET_DEP_LIBRARIES}
- ${OPENMP_PTHREAD_LIB}
- ${LDFLAGS_UNDEFINED}
-
- NO_INSTALL_RPATH
- BUILDTREE_ONLY
-)
-
-if ((OMPT_TARGET_DEFAULT) AND (LIBOMPTARGET_OMPT_SUPPORT))
- target_link_libraries(omptarget.rtl.amdgpu PRIVATE OMPT)
-endif()
-
-if (LIBOMP_HAVE_VERSION_SCRIPT_FLAG)
- target_link_libraries(omptarget.rtl.amdgpu PRIVATE
- "-Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/../exports")
-endif()
-
-target_include_directories(
- omptarget.rtl.amdgpu
- PRIVATE
- ${LIBOMPTARGET_INCLUDE_DIR}
- ${CMAKE_CURRENT_SOURCE_DIR}/utils
-)
-
# Configure testing for the AMDGPU plugin. We will build tests if we could a
# functional AMD GPU on the system, or if manually specifies by the user.
option(LIBOMPTARGET_FORCE_AMDGPU_TESTS "Build AMDGPU libomptarget tests" OFF)
@@ -114,5 +61,4 @@ endif()
# Install plugin under the lib destination folder.
install(TARGETS omptarget.rtl.amdgpu LIBRARY DESTINATION "${OPENMP_INSTALL_LIBDIR}")
set_target_properties(omptarget.rtl.amdgpu PROPERTIES
- INSTALL_RPATH "$ORIGIN" BUILD_RPATH "$ORIGIN:${CMAKE_CURRENT_BINARY_DIR}/.."
- CXX_VISIBILITY_PRESET protected)
+ INSTALL_RPATH "$ORIGIN" BUILD_RPATH "$ORIGIN:${CMAKE_CURRENT_BINARY_DIR}/..")
diff --git a/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp b/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp
index fce7454bf280..a0fdde951b74 100644
--- a/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -371,7 +371,8 @@ private:
struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy {
/// Create an empty memory manager.
- AMDGPUMemoryManagerTy() : MemoryPool(nullptr), MemoryManager(nullptr) {}
+ AMDGPUMemoryManagerTy(AMDGPUPluginTy &Plugin)
+ : Plugin(Plugin), MemoryPool(nullptr), MemoryManager(nullptr) {}
/// Initialize the memory manager from a memory pool.
Error init(AMDGPUMemoryPoolTy &MemoryPool) {
@@ -429,6 +430,9 @@ private:
return OFFLOAD_SUCCESS;
}
+ /// The underlying plugin that owns this memory manager.
+ AMDGPUPluginTy &Plugin;
+
/// The memory pool used to allocate memory.
AMDGPUMemoryPoolTy *MemoryPool;
@@ -1744,9 +1748,10 @@ protected:
/// HSA host agent. We aggregate all its resources into the same instance.
struct AMDHostDeviceTy : public AMDGenericDeviceTy {
/// Create a host device from an array of host agents.
- AMDHostDeviceTy(const llvm::SmallVector<hsa_agent_t> &HostAgents)
- : AMDGenericDeviceTy(), Agents(HostAgents), ArgsMemoryManager(),
- PinnedMemoryManager() {
+ AMDHostDeviceTy(AMDGPUPluginTy &Plugin,
+ const llvm::SmallVector<hsa_agent_t> &HostAgents)
+ : AMDGenericDeviceTy(), Agents(HostAgents), ArgsMemoryManager(Plugin),
+ PinnedMemoryManager(Plugin) {
assert(HostAgents.size() && "No host agent found");
}
@@ -1840,9 +1845,10 @@ private:
/// generic device class.
struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
// Create an AMDGPU device with a device id and default AMDGPU grid values.
- AMDGPUDeviceTy(int32_t DeviceId, int32_t NumDevices,
+ AMDGPUDeviceTy(GenericPluginTy &Plugin, int32_t DeviceId, int32_t NumDevices,
AMDHostDeviceTy &HostDevice, hsa_agent_t Agent)
- : GenericDeviceTy(DeviceId, NumDevices, {0}), AMDGenericDeviceTy(),
+ : GenericDeviceTy(Plugin, DeviceId, NumDevices, {0}),
+ AMDGenericDeviceTy(),
OMPX_NumQueues("LIBOMPTARGET_AMDGPU_NUM_HSA_QUEUES", 4),
OMPX_QueueSize("LIBOMPTARGET_AMDGPU_HSA_QUEUE_SIZE", 512),
OMPX_DefaultTeamsPerCU("LIBOMPTARGET_AMDGPU_TEAMS_PER_CU", 4),
@@ -2088,7 +2094,7 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
/// Allocate and construct an AMDGPU kernel.
Expected<GenericKernelTy &> constructKernel(const char *Name) override {
// Allocate and construct the AMDGPU kernel.
- AMDGPUKernelTy *AMDGPUKernel = Plugin::get().allocate<AMDGPUKernelTy>();
+ AMDGPUKernelTy *AMDGPUKernel = Plugin.allocate<AMDGPUKernelTy>();
if (!AMDGPUKernel)
return Plugin::error("Failed to allocate memory for AMDGPU kernel");
@@ -2138,8 +2144,7 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
Expected<DeviceImageTy *> loadBinaryImpl(const __tgt_device_image *TgtImage,
int32_t ImageId) override {
// Allocate and initialize the image object.
- AMDGPUDeviceImageTy *AMDImage =
- Plugin::get().allocate<AMDGPUDeviceImageTy>();
+ AMDGPUDeviceImageTy *AMDImage = Plugin.allocate<AMDGPUDeviceImageTy>();
new (AMDImage) AMDGPUDeviceImageTy(ImageId, *this, TgtImage);
// Load the HSA executable.
@@ -2397,6 +2402,27 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
AsyncInfoWrapperTy &AsyncInfoWrapper) override {
AMDGPUDeviceTy &DstDevice = static_cast<AMDGPUDeviceTy &>(DstGenericDevice);
+ // For large transfers use synchronous behavior.
+ if (Size >= OMPX_MaxAsyncCopyBytes) {
+ if (AsyncInfoWrapper.hasQueue())
+ if (auto Err = synchronize(AsyncInfoWrapper))
+ return Err;
+
+ AMDGPUSignalTy Signal;
+ if (auto Err = Signal.init())
+ return Err;
+
+ if (auto Err = utils::asyncMemCopy(
+ useMultipleSdmaEngines(), DstPtr, DstDevice.getAgent(), SrcPtr,
+ getAgent(), (uint64_t)Size, 0, nullptr, Signal.get()))
+ return Err;
+
+ if (auto Err = Signal.wait(getStreamBusyWaitMicroseconds()))
+ return Err;
+
+ return Signal.deinit();
+ }
+
AMDGPUStreamTy *Stream = nullptr;
if (auto Err = getStream(AsyncInfoWrapper, Stream))
return Err;
@@ -2697,7 +2723,7 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
}
Error setDeviceHeapSize(uint64_t Value) override {
for (DeviceImageTy *Image : LoadedImages)
- if (auto Err = setupDeviceMemoryPool(Plugin::get(), *Image, Value))
+ if (auto Err = setupDeviceMemoryPool(Plugin, *Image, Value))
return Err;
DeviceMemoryPoolSize = Value;
return Plugin::success();
@@ -2737,7 +2763,7 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy {
return utils::iterateAgentMemoryPools(
Agent, [&](hsa_amd_memory_pool_t HSAMemoryPool) {
AMDGPUMemoryPoolTy *MemoryPool =
- Plugin::get().allocate<AMDGPUMemoryPoolTy>();
+ Plugin.allocate<AMDGPUMemoryPoolTy>();
new (MemoryPool) AMDGPUMemoryPoolTy(HSAMemoryPool);
AllMemoryPools.push_back(MemoryPool);
return HSA_STATUS_SUCCESS;
@@ -3090,7 +3116,7 @@ struct AMDGPUPluginTy final : public GenericPluginTy {
// Initialize the host device using host agents.
HostDevice = allocate<AMDHostDeviceTy>();
- new (HostDevice) AMDHostDeviceTy(HostAgents);
+ new (HostDevice) AMDHostDeviceTy(*this, HostAgents);
// Setup the memory pools of available for the host.
if (auto Err = HostDevice->init())
@@ -3115,6 +3141,18 @@ struct AMDGPUPluginTy final : public GenericPluginTy {
return Plugin::check(Status, "Error in hsa_shut_down: %s");
}
+ /// Creates an AMDGPU device.
+ GenericDeviceTy *createDevice(GenericPluginTy &Plugin, int32_t DeviceId,
+ int32_t NumDevices) override {
+ return new AMDGPUDeviceTy(Plugin, DeviceId, NumDevices, getHostDevice(),
+ getKernelAgent(DeviceId));
+ }
+
+ /// Creates an AMDGPU global handler.
+ GenericGlobalHandlerTy *createGlobalHandler() override {
+ return new AMDGPUGlobalHandlerTy();
+ }
+
Triple::ArchType getTripleArch() const override { return Triple::amdgcn; }
/// Get the ELF code for recognizing the compatible image binary.
@@ -3237,7 +3275,9 @@ Error AMDGPUKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
// 56 bytes per allocation.
uint32_t AllArgsSize = KernelArgsSize + ImplicitArgsSize;
- AMDHostDeviceTy &HostDevice = Plugin::get<AMDGPUPluginTy>().getHostDevice();
+ AMDGPUPluginTy &AMDGPUPlugin =
+ static_cast<AMDGPUPluginTy &>(GenericDevice.Plugin);
+ AMDHostDeviceTy &HostDevice = AMDGPUPlugin.getHostDevice();
AMDGPUMemoryManagerTy &ArgsMemoryManager = HostDevice.getArgsMemoryManager();
void *AllArgs = nullptr;
@@ -3347,20 +3387,10 @@ Error AMDGPUKernelTy::printLaunchInfoDetails(GenericDeviceTy &GenericDevice,
return Plugin::success();
}
-GenericPluginTy *Plugin::createPlugin() { return new AMDGPUPluginTy(); }
-
-GenericDeviceTy *Plugin::createDevice(int32_t DeviceId, int32_t NumDevices) {
- AMDGPUPluginTy &Plugin = get<AMDGPUPluginTy &>();
- return new AMDGPUDeviceTy(DeviceId, NumDevices, Plugin.getHostDevice(),
- Plugin.getKernelAgent(DeviceId));
-}
-
-GenericGlobalHandlerTy *Plugin::createGlobalHandler() {
- return new AMDGPUGlobalHandlerTy();
-}
+GenericPluginTy *PluginTy::createPlugin() { return new AMDGPUPluginTy(); }
template <typename... ArgsTy>
-Error Plugin::check(int32_t Code, const char *ErrFmt, ArgsTy... Args) {
+static Error Plugin::check(int32_t Code, const char *ErrFmt, ArgsTy... Args) {
hsa_status_t ResultCode = static_cast<hsa_status_t>(Code);
if (ResultCode == HSA_STATUS_SUCCESS || ResultCode == HSA_STATUS_INFO_BREAK)
return Error::success();
@@ -3384,7 +3414,7 @@ void *AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr,
}
assert(Ptr && "Invalid pointer");
- auto &KernelAgents = Plugin::get<AMDGPUPluginTy>().getKernelAgents();
+ auto &KernelAgents = Plugin.getKernelAgents();
// Allow all kernel agents to access the allocation.
if (auto Err = MemoryPool->enableAccess(Ptr, Size, KernelAgents)) {
@@ -3427,7 +3457,8 @@ void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) {
}
if (Alloc) {
- auto &KernelAgents = Plugin::get<AMDGPUPluginTy>().getKernelAgents();
+ auto &KernelAgents =
+ static_cast<AMDGPUPluginTy &>(Plugin).getKernelAgents();
// Inherently necessary for host or shared allocations
// Also enabled for device memory to allow device to device memcpy
diff --git a/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt
index 085d44307165..a7350e662a7c 100644
--- a/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt
@@ -19,6 +19,7 @@ add_library(PluginCommon OBJECT
src/RPC.cpp
src/Utils/ELF.cpp
)
+add_dependencies(PluginCommon intrinsics_gen)
# Only enable JIT for those targets that LLVM can support.
string(TOUPPER "${LLVM_TARGETS_TO_BUILD}" TargetsSupported)
@@ -26,45 +27,6 @@ foreach(Target ${TargetsSupported})
target_compile_definitions(PluginCommon PRIVATE "LIBOMPTARGET_JIT_${Target}")
endforeach()
-# This is required when using LLVM libraries.
-llvm_update_compile_flags(PluginCommon)
-
-if (LLVM_LINK_LLVM_DYLIB)
- set(llvm_libs LLVM)
-else()
- llvm_map_components_to_libnames(llvm_libs
- ${LLVM_TARGETS_TO_BUILD}
- AggressiveInstCombine
- Analysis
- BinaryFormat
- BitReader
- BitWriter
- CodeGen
- Core
- Extensions
- InstCombine
- Instrumentation
- IPO
- IRReader
- Linker
- MC
- Object
- Passes
- Remarks
- ScalarOpts
- Support
- Target
- TargetParser
- TransformUtils
- Vectorize
- )
-endif()
-
-target_link_libraries(PluginCommon
- PUBLIC
- ${llvm_libs}
-)
-
# Include the RPC server from the `libc` project if availible.
if(TARGET llvmlibc_rpc_server AND ${LIBOMPTARGET_GPU_LIBC_SUPPORT})
target_link_libraries(PluginCommon PRIVATE llvmlibc_rpc_server)
@@ -82,8 +44,10 @@ elseif(${LIBOMPTARGET_GPU_LIBC_SUPPORT})
endif()
endif()
-if ((OMPT_TARGET_DEFAULT) AND (LIBOMPTARGET_OMPT_SUPPORT))
- target_link_libraries(PluginCommon PUBLIC OMPT)
+# If we have OMPT enabled include it in the list of sources.
+if (OMPT_TARGET_DEFAULT AND LIBOMPTARGET_OMPT_SUPPORT)
+ target_sources(PluginCommon PRIVATE OMPT/OmptCallback.cpp)
+ target_include_directories(PluginCommon PRIVATE OMPT)
endif()
# Define the TARGET_NAME and DEBUG_PREFIX.
@@ -95,16 +59,12 @@ target_compile_definitions(PluginCommon PRIVATE
target_compile_options(PluginCommon PUBLIC ${offload_compile_flags})
target_link_options(PluginCommon PUBLIC ${offload_link_flags})
-target_include_directories(PluginCommon
- PRIVATE
- ${LIBOMPTARGET_INCLUDE_DIR}
- PUBLIC
+target_include_directories(PluginCommon PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include
+ ${LIBOMPTARGET_LLVM_INCLUDE_DIRS}
+ ${LIBOMPTARGET_INCLUDE_DIR}
)
set_target_properties(PluginCommon PROPERTIES
POSITION_INDEPENDENT_CODE ON
CXX_VISIBILITY_PRESET protected)
-
-add_subdirectory(OMPT)
-
diff --git a/openmp/libomptarget/plugins-nextgen/common/OMPT/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/common/OMPT/CMakeLists.txt
deleted file mode 100644
index be4c743665b3..000000000000
--- a/openmp/libomptarget/plugins-nextgen/common/OMPT/CMakeLists.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-##===----------------------------------------------------------------------===##
-#
-# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-#
-##===----------------------------------------------------------------------===##
-#
-# Aggregation of parts which can be used by OpenMP tools
-#
-##===----------------------------------------------------------------------===##
-
-# NOTE: Don't try to build `OMPT` using `add_llvm_library` because we
-# don't want to export `OMPT` while `add_llvm_library` requires that.
-add_library(OMPT OBJECT
- OmptCallback.cpp)
-
-# This is required when using LLVM libraries.
-llvm_update_compile_flags(OMPT)
-
-if (LLVM_LINK_LLVM_DYLIB)
- set(llvm_libs LLVM)
-else()
- llvm_map_components_to_libnames(llvm_libs
- ${LLVM_TARGETS_TO_BUILD}
- AggressiveInstCombine
- Analysis
- BinaryFormat
- BitReader
- BitWriter
- CodeGen
- Core
- Extensions
- InstCombine
- Instrumentation
- IPO
- IRReader
- Linker
- MC
- Object
- Passes
- Remarks
- ScalarOpts
- Support
- Target
- TargetParser
- TransformUtils
- Vectorize
- )
-endif()
-
-target_link_libraries(OMPT
- PUBLIC
- ${llvm_libs}
-)
-
-# Define the TARGET_NAME and DEBUG_PREFIX.
-target_compile_definitions(OMPT PRIVATE
- TARGET_NAME="OMPT"
- DEBUG_PREFIX="OMPT"
-)
-
-target_include_directories(OMPT
- INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}
- PRIVATE ${LIBOMPTARGET_INCLUDE_DIR}
-)
-
-set_target_properties(OMPT PROPERTIES
- POSITION_INDEPENDENT_CODE ON
- CXX_VISIBILITY_PRESET protected)
diff --git a/openmp/libomptarget/plugins-nextgen/common/include/PluginInterface.h b/openmp/libomptarget/plugins-nextgen/common/include/PluginInterface.h
index b7be7b645ba3..79e8464bfda5 100644
--- a/openmp/libomptarget/plugins-nextgen/common/include/PluginInterface.h
+++ b/openmp/libomptarget/plugins-nextgen/common/include/PluginInterface.h
@@ -610,7 +610,7 @@ public:
struct GenericDeviceTy : public DeviceAllocatorTy {
/// Construct a device with its device id within the plugin, the number of
/// devices in the plugin and the grid values for that kind of device.
- GenericDeviceTy(int32_t DeviceId, int32_t NumDevices,
+ GenericDeviceTy(GenericPluginTy &Plugin, int32_t DeviceId, int32_t NumDevices,
const llvm::omp::GV &GridValues);
/// Get the device identifier within the corresponding plugin. Notice that
@@ -860,6 +860,9 @@ struct GenericDeviceTy : public DeviceAllocatorTy {
/// Allocate and construct a kernel object.
virtual Expected<GenericKernelTy &> constructKernel(const char *Name) = 0;
+ /// Reference to the underlying plugin that created this device.
+ GenericPluginTy &Plugin;
+
private:
/// Get and set the stack size and heap size for the device. If not used, the
/// plugin can implement the setters as no-op and setting the output
@@ -976,6 +979,14 @@ struct GenericPluginTy {
Error deinit();
virtual Error deinitImpl() = 0;
+ /// Create a new device for the underlying plugin.
+ virtual GenericDeviceTy *createDevice(GenericPluginTy &Plugin,
+ int32_t DeviceID,
+ int32_t NumDevices) = 0;
+
+ /// Create a new global handler for the underlying plugin.
+ virtual GenericGlobalHandlerTy *createGlobalHandler() = 0;
+
/// Get the reference to the device with a certain device id.
GenericDeviceTy &getDevice(int32_t DeviceId) {
assert(isValidDeviceId(DeviceId) && "Invalid device id");
@@ -1054,6 +1065,132 @@ protected:
return (DeviceId >= 0 && DeviceId < getNumDevices());
}
+public:
+ // TODO: This plugin interface needs to be cleaned up.
+
+ /// Returns non-zero if the provided \p Image can be executed by the runtime.
+ int32_t is_valid_binary(__tgt_device_image *Image);
+
+ /// Initialize the device inside of the plugin.
+ int32_t init_device(int32_t DeviceId);
+
+ /// Return the number of devices this plugin can support.
+ int32_t number_of_devices();
+
+ /// Initializes the OpenMP register requires information.
+ int64_t init_requires(int64_t RequiresFlags);
+
+ /// Returns non-zero if the data can be exchanged between the two devices.
+ int32_t is_data_exchangable(int32_t SrcDeviceId, int32_t DstDeviceId);
+
+ /// Initializes the record and replay mechanism inside the plugin.
+ int32_t initialize_record_replay(int32_t DeviceId, int64_t MemorySize,
+ void *VAddr, bool isRecord, bool SaveOutput,
+ uint64_t &ReqPtrArgOffset);
+
+ /// Loads the associated binary into the plugin and returns a handle to it.
+ int32_t load_binary(int32_t DeviceId, __tgt_device_image *TgtImage,
+ __tgt_device_binary *Binary);
+
+ /// Allocates memory that is accessible to the given device.
+ void *data_alloc(int32_t DeviceId, int64_t Size, void *HostPtr, int32_t Kind);
+
+ /// Deallocates memory on the given device.
+ int32_t data_delete(int32_t DeviceId, void *TgtPtr, int32_t Kind);
+
+ /// Locks / pins host memory using the plugin runtime.
+ int32_t data_lock(int32_t DeviceId, void *Ptr, int64_t Size,
+ void **LockedPtr);
+
+ /// Unlocks / unpins host memory using the plugin runtime.
+ int32_t data_unlock(int32_t DeviceId, void *Ptr);
+
+ /// Notify the runtime about a new mapping that has been created outside.
+ int32_t data_notify_mapped(int32_t DeviceId, void *HstPtr, int64_t Size);
+
+ /// Notify the runtime about a mapping that has been deleted.
+ int32_t data_notify_unmapped(int32_t DeviceId, void *HstPtr);
+
+ /// Copy data to the given device.
+ int32_t data_submit(int32_t DeviceId, void *TgtPtr, void *HstPtr,
+ int64_t Size);
+
+ /// Copy data to the given device asynchronously.
+ int32_t data_submit_async(int32_t DeviceId, void *TgtPtr, void *HstPtr,
+ int64_t Size, __tgt_async_info *AsyncInfoPtr);
+
+ /// Copy data from the given device.
+ int32_t data_retrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr,
+ int64_t Size);
+
+ /// Copy data from the given device asynchronously.
+ int32_t data_retrieve_async(int32_t DeviceId, void *HstPtr, void *TgtPtr,
+ int64_t Size, __tgt_async_info *AsyncInfoPtr);
+
+ /// Exchange memory addresses between two devices.
+ int32_t data_exchange(int32_t SrcDeviceId, void *SrcPtr, int32_t DstDeviceId,
+ void *DstPtr, int64_t Size);
+
+ /// Exchange memory addresses between two devices asynchronously.
+ int32_t data_exchange_async(int32_t SrcDeviceId, void *SrcPtr,
+ int DstDeviceId, void *DstPtr, int64_t Size,
+ __tgt_async_info *AsyncInfo);
+
+ /// Begin executing a kernel on the given device.
+ int32_t launch_kernel(int32_t DeviceId, void *TgtEntryPtr, void **TgtArgs,
+ ptrdiff_t *TgtOffsets, KernelArgsTy *KernelArgs,
+ __tgt_async_info *AsyncInfoPtr);
+
+ /// Synchronize an asynchronous queue with the plugin runtime.
+ int32_t synchronize(int32_t DeviceId, __tgt_async_info *AsyncInfoPtr);
+
+ /// Query the current state of an asynchronous queue.
+ int32_t query_async(int32_t DeviceId, __tgt_async_info *AsyncInfoPtr);
+
+ /// Prints information about the given devices supported by the plugin.
+ void print_device_info(int32_t DeviceId);
+
+ /// Creates an event in the given plugin if supported.
+ int32_t create_event(int32_t DeviceId, void **EventPtr);
+
+ /// Records an event that has occurred.
+ int32_t record_event(int32_t DeviceId, void *EventPtr,
+ __tgt_async_info *AsyncInfoPtr);
+
+ /// Wait until an event has occurred.
+ int32_t wait_event(int32_t DeviceId, void *EventPtr,
+ __tgt_async_info *AsyncInfoPtr);
+
+ /// Synchronize execution until an event is done.
+ int32_t sync_event(int32_t DeviceId, void *EventPtr);
+
+ /// Remove the event from the plugin.
+ int32_t destroy_event(int32_t DeviceId, void *EventPtr);
+
+ /// Sets the runtime's information reporting level.
+ void set_info_flag(uint32_t NewInfoLevel);
+
+ /// Creates an asynchronous queue for the given plugin.
+ int32_t init_async_info(int32_t DeviceId, __tgt_async_info **AsyncInfoPtr);
+
+ /// Creates device information to be used for diagnostics.
+ int32_t init_device_info(int32_t DeviceId, __tgt_device_info *DeviceInfo,
+ const char **ErrStr);
+
+ /// Sets the offset into the devices for use by OMPT.
+ int32_t set_device_offset(int32_t DeviceIdOffset);
+
+ /// Returns if the plugin can support automatic copy.
+ int32_t use_auto_zero_copy(int32_t DeviceId);
+
+ /// Look up a global symbol in the given binary.
+ int32_t get_global(__tgt_device_binary Binary, uint64_t Size,
+ const char *Name, void **DevicePtr);
+
+ /// Look up a kernel function in the given binary.
+ int32_t get_function(__tgt_device_binary Binary, const char *Name,
+ void **KernelPtr);
+
private:
/// Number of devices available for the plugin.
int32_t NumDevices = 0;
@@ -1085,29 +1222,53 @@ private:
RPCServerTy *RPCServer;
};
+namespace Plugin {
+/// Create a success error. This is the same as calling Error::success(), but
+/// it is recommended to use this one for consistency with Plugin::error() and
+/// Plugin::check().
+static Error success() { return Error::success(); }
+
+/// Create a string error.
+template <typename... ArgsTy>
+static Error error(const char *ErrFmt, ArgsTy... Args) {
+ return createStringError(inconvertibleErrorCode(), ErrFmt, Args...);
+}
+
+/// Check the plugin-specific error code and return an error or success
+/// accordingly. In case of an error, create a string error with the error
+/// description. The ErrFmt should follow the format:
+/// "Error in <function name>[<optional info>]: %s"
+/// The last format specifier "%s" is mandatory and will be used to place the
+/// error code's description. Notice this function should be only called from
+/// the plugin-specific code.
+/// TODO: Refactor this, must be defined individually by each plugin.
+template <typename... ArgsTy>
+static Error check(int32_t ErrorCode, const char *ErrFmt, ArgsTy... Args);
+} // namespace Plugin
+
/// Class for simplifying the getter operation of the plugin. Anywhere on the
/// code, the current plugin can be retrieved by Plugin::get(). The class also
/// declares functions to create plugin-specific object instances. The check(),
/// createPlugin(), createDevice() and createGlobalHandler() functions should be
/// defined by each plugin implementation.
-class Plugin {
+class PluginTy {
// Reference to the plugin instance.
static GenericPluginTy *SpecificPlugin;
- Plugin() {
+ PluginTy() {
if (auto Err = init())
REPORT("Failed to initialize plugin: %s\n",
toString(std::move(Err)).data());
}
- ~Plugin() {
+ ~PluginTy() {
if (auto Err = deinit())
REPORT("Failed to deinitialize plugin: %s\n",
toString(std::move(Err)).data());
}
- Plugin(const Plugin &) = delete;
- void operator=(const Plugin &) = delete;
+ PluginTy(const PluginTy &) = delete;
+ void operator=(const PluginTy &) = delete;
/// Create and intialize the plugin instance.
static Error init() {
@@ -1158,7 +1319,7 @@ public:
// This static variable will initialize the underlying plugin instance in
// case there was no previous explicit initialization. The initialization is
// thread safe.
- static Plugin Plugin;
+ static PluginTy Plugin;
assert(SpecificPlugin && "Plugin is not active");
return *SpecificPlugin;
@@ -1170,35 +1331,8 @@ public:
/// Indicate whether the plugin is active.
static bool isActive() { return SpecificPlugin != nullptr; }
- /// Create a success error. This is the same as calling Error::success(), but
- /// it is recommended to use this one for consistency with Plugin::error() and
- /// Plugin::check().
- static Error success() { return Error::success(); }
-
- /// Create a string error.
- template <typename... ArgsTy>
- static Error error(const char *ErrFmt, ArgsTy... Args) {
- return createStringError(inconvertibleErrorCode(), ErrFmt, Args...);
- }
-
- /// Check the plugin-specific error code and return an error or success
- /// accordingly. In case of an error, create a string error with the error
- /// description. The ErrFmt should follow the format:
- /// "Error in <function name>[<optional info>]: %s"
- /// The last format specifier "%s" is mandatory and will be used to place the
- /// error code's description. Notice this function should be only called from
- /// the plugin-specific code.
- template <typename... ArgsTy>
- static Error check(int32_t ErrorCode, const char *ErrFmt, ArgsTy... Args);
-
/// Create a plugin instance.
static GenericPluginTy *createPlugin();
-
- /// Create a plugin-specific device.
- static GenericDeviceTy *createDevice(int32_t DeviceId, int32_t NumDevices);
-
- /// Create a plugin-specific global handler.
- static GenericGlobalHandlerTy *createGlobalHandler();
};
/// Auxiliary interface class for GenericDeviceResourceManagerTy. This class
diff --git a/openmp/libomptarget/plugins-nextgen/common/include/RPC.h b/openmp/libomptarget/plugins-nextgen/common/include/RPC.h
index 2e39b3f299c8..b621cc0da458 100644
--- a/openmp/libomptarget/plugins-nextgen/common/include/RPC.h
+++ b/openmp/libomptarget/plugins-nextgen/common/include/RPC.h
@@ -16,6 +16,7 @@
#ifndef OPENMP_LIBOMPTARGET_PLUGINS_NEXTGEN_COMMON_RPC_H
#define OPENMP_LIBOMPTARGET_PLUGINS_NEXTGEN_COMMON_RPC_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Error.h"
#include <cstdint>
@@ -32,8 +33,6 @@ class DeviceImageTy;
/// these routines will perform no action.
struct RPCServerTy {
public:
- RPCServerTy(uint32_t NumDevices);
-
/// Check if this device image is using an RPC server. This checks for the
/// precense of an externally visible symbol in the device image that will
/// be present whenever RPC code is called.
@@ -56,7 +55,9 @@ public:
/// memory associated with the k
llvm::Error deinitDevice(plugin::GenericDeviceTy &Device);
- ~RPCServerTy();
+private:
+ /// Array from this device's identifier to its attached devices.
+ llvm::SmallVector<uintptr_t> Handles;
};
} // namespace llvm::omp::target
diff --git a/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp b/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp
index f39f913d85ee..55e2865d6aae 100644
--- a/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp
@@ -39,7 +39,7 @@ using namespace omp;
using namespace target;
using namespace plugin;
-GenericPluginTy *Plugin::SpecificPlugin = nullptr;
+GenericPluginTy *PluginTy::SpecificPlugin = nullptr;
// TODO: Fix any thread safety issues for multi-threaded kernel recording.
struct RecordReplayTy {
@@ -438,7 +438,7 @@ Error GenericKernelTy::init(GenericDeviceTy &GenericDevice,
// Retrieve kernel environment object for the kernel.
GlobalTy KernelEnv(std::string(Name) + "_kernel_environment",
sizeof(KernelEnvironment), &KernelEnvironment);
- GenericGlobalHandlerTy &GHandler = Plugin::get().getGlobalHandler();
+ GenericGlobalHandlerTy &GHandler = GenericDevice.Plugin.getGlobalHandler();
if (auto Err =
GHandler.readGlobalFromImage(GenericDevice, *ImagePtr, KernelEnv)) {
[[maybe_unused]] std::string ErrStr = toString(std::move(Err));
@@ -710,9 +710,10 @@ uint64_t GenericKernelTy::getNumBlocks(GenericDeviceTy &GenericDevice,
return std::min(PreferredNumBlocks, GenericDevice.getBlockLimit());
}
-GenericDeviceTy::GenericDeviceTy(int32_t DeviceId, int32_t NumDevices,
+GenericDeviceTy::GenericDeviceTy(GenericPluginTy &Plugin, int32_t DeviceId,
+ int32_t NumDevices,
const llvm::omp::GV &OMPGridValues)
- : MemoryManager(nullptr), OMP_TeamLimit("OMP_TEAM_LIMIT"),
+ : Plugin(Plugin), MemoryManager(nullptr), OMP_TeamLimit("OMP_TEAM_LIMIT"),
OMP_NumTeams("OMP_NUM_TEAMS"),
OMP_TeamsThreadLimit("OMP_TEAMS_THREAD_LIMIT"),
OMPX_DebugKind("LIBOMPTARGET_DEVICE_RTL_DEBUG"),
@@ -1488,10 +1489,10 @@ Error GenericPluginTy::init() {
assert(Devices.size() == 0 && "Plugin already initialized");
Devices.resize(NumDevices, nullptr);
- GlobalHandler = Plugin::createGlobalHandler();
+ GlobalHandler = createGlobalHandler();
assert(GlobalHandler && "Invalid global handler");
- RPCServer = new RPCServerTy(NumDevices);
+ RPCServer = new RPCServerTy();
assert(RPCServer && "Invalid RPC server");
return Plugin::success();
@@ -1522,7 +1523,7 @@ Error GenericPluginTy::initDevice(int32_t DeviceId) {
assert(!Devices[DeviceId] && "Device already initialized");
// Create the device and save the reference.
- GenericDeviceTy *Device = Plugin::createDevice(DeviceId, NumDevices);
+ GenericDeviceTy *Device = createDevice(*this, DeviceId, NumDevices);
assert(Device && "Invalid device");
// Save the device reference into the list.
@@ -1565,36 +1566,7 @@ Expected<bool> GenericPluginTy::checkELFImage(StringRef Image) const {
return isELFCompatible(Image);
}
-bool llvm::omp::target::plugin::libomptargetSupportsRPC() {
-#ifdef LIBOMPTARGET_RPC_SUPPORT
- return true;
-#else
- return false;
-#endif
-}
-
-/// Exposed library API function, basically wrappers around the GenericDeviceTy
-/// functionality with the same name. All non-async functions are redirected
-/// to the async versions right away with a NULL AsyncInfoPtr.
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-int32_t __tgt_rtl_init_plugin() {
- auto Err = Plugin::initIfNeeded();
- if (Err) {
- [[maybe_unused]] std::string ErrStr = toString(std::move(Err));
- DP("Failed to init plugin: %s", ErrStr.c_str());
- return OFFLOAD_FAIL;
- }
-
- return OFFLOAD_SUCCESS;
-}
-
-int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *Image) {
- if (!Plugin::isActive())
- return false;
-
+int32_t GenericPluginTy::is_valid_binary(__tgt_device_image *Image) {
StringRef Buffer(reinterpret_cast<const char *>(Image->ImageStart),
target::getPtrDiff(Image->ImageEnd, Image->ImageStart));
@@ -1609,13 +1581,13 @@ int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *Image) {
case file_magic::elf_executable:
case file_magic::elf_shared_object:
case file_magic::elf_core: {
- auto MatchOrErr = Plugin::get().checkELFImage(Buffer);
+ auto MatchOrErr = checkELFImage(Buffer);
if (Error Err = MatchOrErr.takeError())
return HandleError(std::move(Err));
return *MatchOrErr;
}
case file_magic::bitcode: {
- auto MatchOrErr = Plugin::get().getJIT().checkBitcodeImage(Buffer);
+ auto MatchOrErr = getJIT().checkBitcodeImage(Buffer);
if (Error Err = MatchOrErr.takeError())
return HandleError(std::move(Err));
return *MatchOrErr;
@@ -1625,8 +1597,8 @@ int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *Image) {
}
}
-int32_t __tgt_rtl_init_device(int32_t DeviceId) {
- auto Err = Plugin::get().initDevice(DeviceId);
+int32_t GenericPluginTy::init_device(int32_t DeviceId) {
+ auto Err = initDevice(DeviceId);
if (Err) {
REPORT("Failure to initialize device %d: %s\n", DeviceId,
toString(std::move(Err)).data());
@@ -1636,24 +1608,24 @@ int32_t __tgt_rtl_init_device(int32_t DeviceId) {
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_number_of_devices() { return Plugin::get().getNumDevices(); }
+int32_t GenericPluginTy::number_of_devices() { return getNumDevices(); }
-int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
- Plugin::get().setRequiresFlag(RequiresFlags);
+int64_t GenericPluginTy::init_requires(int64_t RequiresFlags) {
+ setRequiresFlag(RequiresFlags);
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_is_data_exchangable(int32_t SrcDeviceId,
- int32_t DstDeviceId) {
- return Plugin::get().isDataExchangable(SrcDeviceId, DstDeviceId);
+int32_t GenericPluginTy::is_data_exchangable(int32_t SrcDeviceId,
+ int32_t DstDeviceId) {
+ return isDataExchangable(SrcDeviceId, DstDeviceId);
}
-int32_t __tgt_rtl_initialize_record_replay(int32_t DeviceId, int64_t MemorySize,
- void *VAddr, bool isRecord,
- bool SaveOutput,
- uint64_t &ReqPtrArgOffset) {
- GenericPluginTy &Plugin = Plugin::get();
- GenericDeviceTy &Device = Plugin.getDevice(DeviceId);
+int32_t GenericPluginTy::initialize_record_replay(int32_t DeviceId,
+ int64_t MemorySize,
+ void *VAddr, bool isRecord,
+ bool SaveOutput,
+ uint64_t &ReqPtrArgOffset) {
+ GenericDeviceTy &Device = getDevice(DeviceId);
RecordReplayTy::RRStatusTy Status =
isRecord ? RecordReplayTy::RRStatusTy::RRRecording
: RecordReplayTy::RRStatusTy::RRReplaying;
@@ -1672,12 +1644,12 @@ int32_t __tgt_rtl_initialize_record_replay(int32_t DeviceId, int64_t MemorySize,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_load_binary(int32_t DeviceId, __tgt_device_image *TgtImage,
- __tgt_device_binary *Binary) {
- GenericPluginTy &Plugin = Plugin::get();
- GenericDeviceTy &Device = Plugin.getDevice(DeviceId);
+int32_t GenericPluginTy::load_binary(int32_t DeviceId,
+ __tgt_device_image *TgtImage,
+ __tgt_device_binary *Binary) {
+ GenericDeviceTy &Device = getDevice(DeviceId);
- auto ImageOrErr = Device.loadBinary(Plugin, TgtImage);
+ auto ImageOrErr = Device.loadBinary(*this, TgtImage);
if (!ImageOrErr) {
auto Err = ImageOrErr.takeError();
REPORT("Failure to load binary image %p on device %d: %s\n", TgtImage,
@@ -1693,10 +1665,10 @@ int32_t __tgt_rtl_load_binary(int32_t DeviceId, __tgt_device_image *TgtImage,
return OFFLOAD_SUCCESS;
}
-void *__tgt_rtl_data_alloc(int32_t DeviceId, int64_t Size, void *HostPtr,
- int32_t Kind) {
- auto AllocOrErr = Plugin::get().getDevice(DeviceId).dataAlloc(
- Size, HostPtr, (TargetAllocTy)Kind);
+void *GenericPluginTy::data_alloc(int32_t DeviceId, int64_t Size, void *HostPtr,
+ int32_t Kind) {
+ auto AllocOrErr =
+ getDevice(DeviceId).dataAlloc(Size, HostPtr, (TargetAllocTy)Kind);
if (!AllocOrErr) {
auto Err = AllocOrErr.takeError();
REPORT("Failure to allocate device memory: %s\n",
@@ -1708,9 +1680,10 @@ void *__tgt_rtl_data_alloc(int32_t DeviceId, int64_t Size, void *HostPtr,
return *AllocOrErr;
}
-int32_t __tgt_rtl_data_delete(int32_t DeviceId, void *TgtPtr, int32_t Kind) {
+int32_t GenericPluginTy::data_delete(int32_t DeviceId, void *TgtPtr,
+ int32_t Kind) {
auto Err =
- Plugin::get().getDevice(DeviceId).dataDelete(TgtPtr, (TargetAllocTy)Kind);
+ getDevice(DeviceId).dataDelete(TgtPtr, static_cast<TargetAllocTy>(Kind));
if (Err) {
REPORT("Failure to deallocate device pointer %p: %s\n", TgtPtr,
toString(std::move(Err)).data());
@@ -1720,9 +1693,9 @@ int32_t __tgt_rtl_data_delete(int32_t DeviceId, void *TgtPtr, int32_t Kind) {
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_lock(int32_t DeviceId, void *Ptr, int64_t Size,
- void **LockedPtr) {
- auto LockedPtrOrErr = Plugin::get().getDevice(DeviceId).dataLock(Ptr, Size);
+int32_t GenericPluginTy::data_lock(int32_t DeviceId, void *Ptr, int64_t Size,
+ void **LockedPtr) {
+ auto LockedPtrOrErr = getDevice(DeviceId).dataLock(Ptr, Size);
if (!LockedPtrOrErr) {
auto Err = LockedPtrOrErr.takeError();
REPORT("Failure to lock memory %p: %s\n", Ptr,
@@ -1739,8 +1712,8 @@ int32_t __tgt_rtl_data_lock(int32_t DeviceId, void *Ptr, int64_t Size,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_unlock(int32_t DeviceId, void *Ptr) {
- auto Err = Plugin::get().getDevice(DeviceId).dataUnlock(Ptr);
+int32_t GenericPluginTy::data_unlock(int32_t DeviceId, void *Ptr) {
+ auto Err = getDevice(DeviceId).dataUnlock(Ptr);
if (Err) {
REPORT("Failure to unlock memory %p: %s\n", Ptr,
toString(std::move(Err)).data());
@@ -1750,9 +1723,9 @@ int32_t __tgt_rtl_data_unlock(int32_t DeviceId, void *Ptr) {
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_notify_mapped(int32_t DeviceId, void *HstPtr,
- int64_t Size) {
- auto Err = Plugin::get().getDevice(DeviceId).notifyDataMapped(HstPtr, Size);
+int32_t GenericPluginTy::data_notify_mapped(int32_t DeviceId, void *HstPtr,
+ int64_t Size) {
+ auto Err = getDevice(DeviceId).notifyDataMapped(HstPtr, Size);
if (Err) {
REPORT("Failure to notify data mapped %p: %s\n", HstPtr,
toString(std::move(Err)).data());
@@ -1762,8 +1735,8 @@ int32_t __tgt_rtl_data_notify_mapped(int32_t DeviceId, void *HstPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_notify_unmapped(int32_t DeviceId, void *HstPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).notifyDataUnmapped(HstPtr);
+int32_t GenericPluginTy::data_notify_unmapped(int32_t DeviceId, void *HstPtr) {
+ auto Err = getDevice(DeviceId).notifyDataUnmapped(HstPtr);
if (Err) {
REPORT("Failure to notify data unmapped %p: %s\n", HstPtr,
toString(std::move(Err)).data());
@@ -1773,17 +1746,16 @@ int32_t __tgt_rtl_data_notify_unmapped(int32_t DeviceId, void *HstPtr) {
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_submit(int32_t DeviceId, void *TgtPtr, void *HstPtr,
- int64_t Size) {
- return __tgt_rtl_data_submit_async(DeviceId, TgtPtr, HstPtr, Size,
- /*AsyncInfoPtr=*/nullptr);
+int32_t GenericPluginTy::data_submit(int32_t DeviceId, void *TgtPtr,
+ void *HstPtr, int64_t Size) {
+ return data_submit_async(DeviceId, TgtPtr, HstPtr, Size,
+ /*AsyncInfoPtr=*/nullptr);
}
-int32_t __tgt_rtl_data_submit_async(int32_t DeviceId, void *TgtPtr,
- void *HstPtr, int64_t Size,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).dataSubmit(TgtPtr, HstPtr, Size,
- AsyncInfoPtr);
+int32_t GenericPluginTy::data_submit_async(int32_t DeviceId, void *TgtPtr,
+ void *HstPtr, int64_t Size,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err = getDevice(DeviceId).dataSubmit(TgtPtr, HstPtr, Size, AsyncInfoPtr);
if (Err) {
REPORT("Failure to copy data from host to device. Pointers: host "
"= " DPxMOD ", device = " DPxMOD ", size = %" PRId64 ": %s\n",
@@ -1795,17 +1767,17 @@ int32_t __tgt_rtl_data_submit_async(int32_t DeviceId, void *TgtPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_retrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr,
- int64_t Size) {
- return __tgt_rtl_data_retrieve_async(DeviceId, HstPtr, TgtPtr, Size,
- /*AsyncInfoPtr=*/nullptr);
+int32_t GenericPluginTy::data_retrieve(int32_t DeviceId, void *HstPtr,
+ void *TgtPtr, int64_t Size) {
+ return data_retrieve_async(DeviceId, HstPtr, TgtPtr, Size,
+ /*AsyncInfoPtr=*/nullptr);
}
-int32_t __tgt_rtl_data_retrieve_async(int32_t DeviceId, void *HstPtr,
- void *TgtPtr, int64_t Size,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).dataRetrieve(HstPtr, TgtPtr,
- Size, AsyncInfoPtr);
+int32_t GenericPluginTy::data_retrieve_async(int32_t DeviceId, void *HstPtr,
+ void *TgtPtr, int64_t Size,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err =
+ getDevice(DeviceId).dataRetrieve(HstPtr, TgtPtr, Size, AsyncInfoPtr);
if (Err) {
REPORT("Faliure to copy data from device to host. Pointers: host "
"= " DPxMOD ", device = " DPxMOD ", size = %" PRId64 ": %s\n",
@@ -1817,20 +1789,19 @@ int32_t __tgt_rtl_data_retrieve_async(int32_t DeviceId, void *HstPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_data_exchange(int32_t SrcDeviceId, void *SrcPtr,
- int32_t DstDeviceId, void *DstPtr,
- int64_t Size) {
- return __tgt_rtl_data_exchange_async(SrcDeviceId, SrcPtr, DstDeviceId, DstPtr,
- Size,
- /*AsyncInfoPtr=*/nullptr);
+int32_t GenericPluginTy::data_exchange(int32_t SrcDeviceId, void *SrcPtr,
+ int32_t DstDeviceId, void *DstPtr,
+ int64_t Size) {
+ return data_exchange_async(SrcDeviceId, SrcPtr, DstDeviceId, DstPtr, Size,
+ /*AsyncInfoPtr=*/nullptr);
}
-int32_t __tgt_rtl_data_exchange_async(int32_t SrcDeviceId, void *SrcPtr,
- int DstDeviceId, void *DstPtr,
- int64_t Size,
- __tgt_async_info *AsyncInfo) {
- GenericDeviceTy &SrcDevice = Plugin::get().getDevice(SrcDeviceId);
- GenericDeviceTy &DstDevice = Plugin::get().getDevice(DstDeviceId);
+int32_t GenericPluginTy::data_exchange_async(int32_t SrcDeviceId, void *SrcPtr,
+ int DstDeviceId, void *DstPtr,
+ int64_t Size,
+ __tgt_async_info *AsyncInfo) {
+ GenericDeviceTy &SrcDevice = getDevice(SrcDeviceId);
+ GenericDeviceTy &DstDevice = getDevice(DstDeviceId);
auto Err = SrcDevice.dataExchange(SrcPtr, DstDevice, DstPtr, Size, AsyncInfo);
if (Err) {
REPORT("Failure to copy data from device (%d) to device (%d). Pointers: "
@@ -1843,12 +1814,12 @@ int32_t __tgt_rtl_data_exchange_async(int32_t SrcDeviceId, void *SrcPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_launch_kernel(int32_t DeviceId, void *TgtEntryPtr,
- void **TgtArgs, ptrdiff_t *TgtOffsets,
- KernelArgsTy *KernelArgs,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).launchKernel(
- TgtEntryPtr, TgtArgs, TgtOffsets, *KernelArgs, AsyncInfoPtr);
+int32_t GenericPluginTy::launch_kernel(int32_t DeviceId, void *TgtEntryPtr,
+ void **TgtArgs, ptrdiff_t *TgtOffsets,
+ KernelArgsTy *KernelArgs,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err = getDevice(DeviceId).launchKernel(TgtEntryPtr, TgtArgs, TgtOffsets,
+ *KernelArgs, AsyncInfoPtr);
if (Err) {
REPORT("Failure to run target region " DPxMOD " in device %d: %s\n",
DPxPTR(TgtEntryPtr), DeviceId, toString(std::move(Err)).data());
@@ -1858,9 +1829,9 @@ int32_t __tgt_rtl_launch_kernel(int32_t DeviceId, void *TgtEntryPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_synchronize(int32_t DeviceId,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).synchronize(AsyncInfoPtr);
+int32_t GenericPluginTy::synchronize(int32_t DeviceId,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err = getDevice(DeviceId).synchronize(AsyncInfoPtr);
if (Err) {
REPORT("Failure to synchronize stream %p: %s\n", AsyncInfoPtr->Queue,
toString(std::move(Err)).data());
@@ -1870,9 +1841,9 @@ int32_t __tgt_rtl_synchronize(int32_t DeviceId,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_query_async(int32_t DeviceId,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).queryAsync(AsyncInfoPtr);
+int32_t GenericPluginTy::query_async(int32_t DeviceId,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err = getDevice(DeviceId).queryAsync(AsyncInfoPtr);
if (Err) {
REPORT("Failure to query stream %p: %s\n", AsyncInfoPtr->Queue,
toString(std::move(Err)).data());
@@ -1882,14 +1853,14 @@ int32_t __tgt_rtl_query_async(int32_t DeviceId,
return OFFLOAD_SUCCESS;
}
-void __tgt_rtl_print_device_info(int32_t DeviceId) {
- if (auto Err = Plugin::get().getDevice(DeviceId).printInfo())
+void GenericPluginTy::print_device_info(int32_t DeviceId) {
+ if (auto Err = getDevice(DeviceId).printInfo())
REPORT("Failure to print device %d info: %s\n", DeviceId,
toString(std::move(Err)).data());
}
-int32_t __tgt_rtl_create_event(int32_t DeviceId, void **EventPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).createEvent(EventPtr);
+int32_t GenericPluginTy::create_event(int32_t DeviceId, void **EventPtr) {
+ auto Err = getDevice(DeviceId).createEvent(EventPtr);
if (Err) {
REPORT("Failure to create event: %s\n", toString(std::move(Err)).data());
return OFFLOAD_FAIL;
@@ -1898,10 +1869,9 @@ int32_t __tgt_rtl_create_event(int32_t DeviceId, void **EventPtr) {
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_record_event(int32_t DeviceId, void *EventPtr,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err =
- Plugin::get().getDevice(DeviceId).recordEvent(EventPtr, AsyncInfoPtr);
+int32_t GenericPluginTy::record_event(int32_t DeviceId, void *EventPtr,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err = getDevice(DeviceId).recordEvent(EventPtr, AsyncInfoPtr);
if (Err) {
REPORT("Failure to record event %p: %s\n", EventPtr,
toString(std::move(Err)).data());
@@ -1911,10 +1881,9 @@ int32_t __tgt_rtl_record_event(int32_t DeviceId, void *EventPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_wait_event(int32_t DeviceId, void *EventPtr,
- __tgt_async_info *AsyncInfoPtr) {
- auto Err =
- Plugin::get().getDevice(DeviceId).waitEvent(EventPtr, AsyncInfoPtr);
+int32_t GenericPluginTy::wait_event(int32_t DeviceId, void *EventPtr,
+ __tgt_async_info *AsyncInfoPtr) {
+ auto Err = getDevice(DeviceId).waitEvent(EventPtr, AsyncInfoPtr);
if (Err) {
REPORT("Failure to wait event %p: %s\n", EventPtr,
toString(std::move(Err)).data());
@@ -1924,8 +1893,8 @@ int32_t __tgt_rtl_wait_event(int32_t DeviceId, void *EventPtr,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_sync_event(int32_t DeviceId, void *EventPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).syncEvent(EventPtr);
+int32_t GenericPluginTy::sync_event(int32_t DeviceId, void *EventPtr) {
+ auto Err = getDevice(DeviceId).syncEvent(EventPtr);
if (Err) {
REPORT("Failure to synchronize event %p: %s\n", EventPtr,
toString(std::move(Err)).data());
@@ -1935,8 +1904,8 @@ int32_t __tgt_rtl_sync_event(int32_t DeviceId, void *EventPtr) {
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_destroy_event(int32_t DeviceId, void *EventPtr) {
- auto Err = Plugin::get().getDevice(DeviceId).destroyEvent(EventPtr);
+int32_t GenericPluginTy::destroy_event(int32_t DeviceId, void *EventPtr) {
+ auto Err = getDevice(DeviceId).destroyEvent(EventPtr);
if (Err) {
REPORT("Failure to destroy event %p: %s\n", EventPtr,
toString(std::move(Err)).data());
@@ -1946,16 +1915,16 @@ int32_t __tgt_rtl_destroy_event(int32_t DeviceId, void *EventPtr) {
return OFFLOAD_SUCCESS;
}
-void __tgt_rtl_set_info_flag(uint32_t NewInfoLevel) {
+void GenericPluginTy::set_info_flag(uint32_t NewInfoLevel) {
std::atomic<uint32_t> &InfoLevel = getInfoLevelInternal();
InfoLevel.store(NewInfoLevel);
}
-int32_t __tgt_rtl_init_async_info(int32_t DeviceId,
- __tgt_async_info **AsyncInfoPtr) {
+int32_t GenericPluginTy::init_async_info(int32_t DeviceId,
+ __tgt_async_info **AsyncInfoPtr) {
assert(AsyncInfoPtr && "Invalid async info");
- auto Err = Plugin::get().getDevice(DeviceId).initAsyncInfo(AsyncInfoPtr);
+ auto Err = getDevice(DeviceId).initAsyncInfo(AsyncInfoPtr);
if (Err) {
REPORT("Failure to initialize async info at " DPxMOD " on device %d: %s\n",
DPxPTR(*AsyncInfoPtr), DeviceId, toString(std::move(Err)).data());
@@ -1965,12 +1934,12 @@ int32_t __tgt_rtl_init_async_info(int32_t DeviceId,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_init_device_info(int32_t DeviceId,
- __tgt_device_info *DeviceInfo,
- const char **ErrStr) {
+int32_t GenericPluginTy::init_device_info(int32_t DeviceId,
+ __tgt_device_info *DeviceInfo,
+ const char **ErrStr) {
*ErrStr = "";
- auto Err = Plugin::get().getDevice(DeviceId).initDeviceInfo(DeviceInfo);
+ auto Err = getDevice(DeviceId).initDeviceInfo(DeviceInfo);
if (Err) {
REPORT("Failure to initialize device info at " DPxMOD " on device %d: %s\n",
DPxPTR(DeviceInfo), DeviceId, toString(std::move(Err)).data());
@@ -1980,31 +1949,30 @@ int32_t __tgt_rtl_init_device_info(int32_t DeviceId,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_set_device_offset(int32_t DeviceIdOffset) {
- Plugin::get().setDeviceIdStartIndex(DeviceIdOffset);
+int32_t GenericPluginTy::set_device_offset(int32_t DeviceIdOffset) {
+ setDeviceIdStartIndex(DeviceIdOffset);
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_use_auto_zero_copy(int32_t DeviceId) {
+int32_t GenericPluginTy::use_auto_zero_copy(int32_t DeviceId) {
// Automatic zero-copy only applies to programs that did
// not request unified_shared_memory and are deployed on an
// APU with XNACK enabled.
- if (Plugin::get().getRequiresFlags() & OMP_REQ_UNIFIED_SHARED_MEMORY)
+ if (getRequiresFlags() & OMP_REQ_UNIFIED_SHARED_MEMORY)
return false;
- return Plugin::get().getDevice(DeviceId).useAutoZeroCopy();
+ return getDevice(DeviceId).useAutoZeroCopy();
}
-int32_t __tgt_rtl_get_global(__tgt_device_binary Binary, uint64_t Size,
- const char *Name, void **DevicePtr) {
+int32_t GenericPluginTy::get_global(__tgt_device_binary Binary, uint64_t Size,
+ const char *Name, void **DevicePtr) {
assert(Binary.handle && "Invalid device binary handle");
DeviceImageTy &Image = *reinterpret_cast<DeviceImageTy *>(Binary.handle);
- GenericPluginTy &Plugin = Plugin::get();
GenericDeviceTy &Device = Image.getDevice();
GlobalTy DeviceGlobal(Name, Size);
- GenericGlobalHandlerTy &GHandler = Plugin.getGlobalHandler();
+ GenericGlobalHandlerTy &GHandler = getGlobalHandler();
if (auto Err =
GHandler.getGlobalMetadataFromDevice(Device, Image, DeviceGlobal)) {
REPORT("Failure to look up global address: %s\n",
@@ -2022,8 +1990,8 @@ int32_t __tgt_rtl_get_global(__tgt_device_binary Binary, uint64_t Size,
return OFFLOAD_SUCCESS;
}
-int32_t __tgt_rtl_get_function(__tgt_device_binary Binary, const char *Name,
- void **KernelPtr) {
+int32_t GenericPluginTy::get_function(__tgt_device_binary Binary,
+ const char *Name, void **KernelPtr) {
assert(Binary.handle && "Invalid device binary handle");
DeviceImageTy &Image = *reinterpret_cast<DeviceImageTy *>(Binary.handle);
@@ -2046,6 +2014,212 @@ int32_t __tgt_rtl_get_function(__tgt_device_binary Binary, const char *Name,
return OFFLOAD_SUCCESS;
}
+bool llvm::omp::target::plugin::libomptargetSupportsRPC() {
+#ifdef LIBOMPTARGET_RPC_SUPPORT
+ return true;
+#else
+ return false;
+#endif
+}
+
+/// Exposed library API functions, basically wrappers around the
+/// GenericPluginTy functionality with the same name. All non-async functions
+/// are redirected to the async versions right away with a NULL AsyncInfoPtr.
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int32_t __tgt_rtl_init_plugin() {
+ auto Err = PluginTy::initIfNeeded();
+ if (Err) {
+ [[maybe_unused]] std::string ErrStr = toString(std::move(Err));
+ DP("Failed to init plugin: %s", ErrStr.c_str());
+ return OFFLOAD_FAIL;
+ }
+
+ return OFFLOAD_SUCCESS;
+}
+
+int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *Image) {
+ if (!PluginTy::isActive())
+ return false;
+
+ return PluginTy::get().is_valid_binary(Image);
+}
+
+int32_t __tgt_rtl_init_device(int32_t DeviceId) {
+ return PluginTy::get().init_device(DeviceId);
+}
+
+int32_t __tgt_rtl_number_of_devices() {
+ return PluginTy::get().number_of_devices();
+}
+
+int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
+ return PluginTy::get().init_requires(RequiresFlags);
+}
+
+int32_t __tgt_rtl_is_data_exchangable(int32_t SrcDeviceId,
+ int32_t DstDeviceId) {
+ return PluginTy::get().is_data_exchangable(SrcDeviceId, DstDeviceId);
+}
+
+int32_t __tgt_rtl_initialize_record_replay(int32_t DeviceId, int64_t MemorySize,
+ void *VAddr, bool isRecord,
+ bool SaveOutput,
+ uint64_t &ReqPtrArgOffset) {
+ return PluginTy::get().initialize_record_replay(
+ DeviceId, MemorySize, VAddr, isRecord, SaveOutput, ReqPtrArgOffset);
+}
+
+int32_t __tgt_rtl_load_binary(int32_t DeviceId, __tgt_device_image *TgtImage,
+ __tgt_device_binary *Binary) {
+ return PluginTy::get().load_binary(DeviceId, TgtImage, Binary);
+}
+
+void *__tgt_rtl_data_alloc(int32_t DeviceId, int64_t Size, void *HostPtr,
+ int32_t Kind) {
+ return PluginTy::get().data_alloc(DeviceId, Size, HostPtr, Kind);
+}
+
+int32_t __tgt_rtl_data_delete(int32_t DeviceId, void *TgtPtr, int32_t Kind) {
+ return PluginTy::get().data_delete(DeviceId, TgtPtr, Kind);
+}
+
+int32_t __tgt_rtl_data_lock(int32_t DeviceId, void *Ptr, int64_t Size,
+ void **LockedPtr) {
+ return PluginTy::get().data_lock(DeviceId, Ptr, Size, LockedPtr);
+}
+
+int32_t __tgt_rtl_data_unlock(int32_t DeviceId, void *Ptr) {
+ return PluginTy::get().data_unlock(DeviceId, Ptr);
+}
+
+int32_t __tgt_rtl_data_notify_mapped(int32_t DeviceId, void *HstPtr,
+ int64_t Size) {
+ return PluginTy::get().data_notify_mapped(DeviceId, HstPtr, Size);
+}
+
+int32_t __tgt_rtl_data_notify_unmapped(int32_t DeviceId, void *HstPtr) {
+ return PluginTy::get().data_notify_unmapped(DeviceId, HstPtr);
+}
+
+int32_t __tgt_rtl_data_submit(int32_t DeviceId, void *TgtPtr, void *HstPtr,
+ int64_t Size) {
+ return PluginTy::get().data_submit(DeviceId, TgtPtr, HstPtr, Size);
+}
+
+int32_t __tgt_rtl_data_submit_async(int32_t DeviceId, void *TgtPtr,
+ void *HstPtr, int64_t Size,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().data_submit_async(DeviceId, TgtPtr, HstPtr, Size,
+ AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_data_retrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr,
+ int64_t Size) {
+ return PluginTy::get().data_retrieve(DeviceId, HstPtr, TgtPtr, Size);
+}
+
+int32_t __tgt_rtl_data_retrieve_async(int32_t DeviceId, void *HstPtr,
+ void *TgtPtr, int64_t Size,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().data_retrieve_async(DeviceId, HstPtr, TgtPtr, Size,
+ AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_data_exchange(int32_t SrcDeviceId, void *SrcPtr,
+ int32_t DstDeviceId, void *DstPtr,
+ int64_t Size) {
+ return PluginTy::get().data_exchange(SrcDeviceId, SrcPtr, DstDeviceId, DstPtr,
+ Size);
+}
+
+int32_t __tgt_rtl_data_exchange_async(int32_t SrcDeviceId, void *SrcPtr,
+ int DstDeviceId, void *DstPtr,
+ int64_t Size,
+ __tgt_async_info *AsyncInfo) {
+ return PluginTy::get().data_exchange_async(SrcDeviceId, SrcPtr, DstDeviceId,
+ DstPtr, Size, AsyncInfo);
+}
+
+int32_t __tgt_rtl_launch_kernel(int32_t DeviceId, void *TgtEntryPtr,
+ void **TgtArgs, ptrdiff_t *TgtOffsets,
+ KernelArgsTy *KernelArgs,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().launch_kernel(DeviceId, TgtEntryPtr, TgtArgs,
+ TgtOffsets, KernelArgs, AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_synchronize(int32_t DeviceId,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().synchronize(DeviceId, AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_query_async(int32_t DeviceId,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().query_async(DeviceId, AsyncInfoPtr);
+}
+
+void __tgt_rtl_print_device_info(int32_t DeviceId) {
+ PluginTy::get().print_device_info(DeviceId);
+}
+
+int32_t __tgt_rtl_create_event(int32_t DeviceId, void **EventPtr) {
+ return PluginTy::get().create_event(DeviceId, EventPtr);
+}
+
+int32_t __tgt_rtl_record_event(int32_t DeviceId, void *EventPtr,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().record_event(DeviceId, EventPtr, AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_wait_event(int32_t DeviceId, void *EventPtr,
+ __tgt_async_info *AsyncInfoPtr) {
+ return PluginTy::get().wait_event(DeviceId, EventPtr, AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_sync_event(int32_t DeviceId, void *EventPtr) {
+ return PluginTy::get().sync_event(DeviceId, EventPtr);
+}
+
+int32_t __tgt_rtl_destroy_event(int32_t DeviceId, void *EventPtr) {
+ return PluginTy::get().destroy_event(DeviceId, EventPtr);
+}
+
+void __tgt_rtl_set_info_flag(uint32_t NewInfoLevel) {
+ return PluginTy::get().set_info_flag(NewInfoLevel);
+}
+
+int32_t __tgt_rtl_init_async_info(int32_t DeviceId,
+ __tgt_async_info **AsyncInfoPtr) {
+ return PluginTy::get().init_async_info(DeviceId, AsyncInfoPtr);
+}
+
+int32_t __tgt_rtl_init_device_info(int32_t DeviceId,
+ __tgt_device_info *DeviceInfo,
+ const char **ErrStr) {
+ return PluginTy::get().init_device_info(DeviceId, DeviceInfo, ErrStr);
+}
+
+int32_t __tgt_rtl_set_device_offset(int32_t DeviceIdOffset) {
+ return PluginTy::get().set_device_offset(DeviceIdOffset);
+}
+
+int32_t __tgt_rtl_use_auto_zero_copy(int32_t DeviceId) {
+ return PluginTy::get().use_auto_zero_copy(DeviceId);
+}
+
+int32_t __tgt_rtl_get_global(__tgt_device_binary Binary, uint64_t Size,
+ const char *Name, void **DevicePtr) {
+ return PluginTy::get().get_global(Binary, Size, Name, DevicePtr);
+}
+
+int32_t __tgt_rtl_get_function(__tgt_device_binary Binary, const char *Name,
+ void **KernelPtr) {
+ return PluginTy::get().get_function(Binary, Name, KernelPtr);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp b/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp
index f46b27701b5b..fab0f6838f4a 100644
--- a/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp
+++ b/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp
@@ -21,14 +21,6 @@ using namespace llvm;
using namespace omp;
using namespace target;
-RPCServerTy::RPCServerTy(uint32_t NumDevices) {
-#ifdef LIBOMPTARGET_RPC_SUPPORT
- // If this fails then something is catastrophically wrong, just exit.
- if (rpc_status_t Err = rpc_init(NumDevices))
- FATAL_MESSAGE(1, "Error initializing the RPC server: %d\n", Err);
-#endif
-}
-
llvm::Expected<bool>
RPCServerTy::isDeviceUsingRPC(plugin::GenericDeviceTy &Device,
plugin::GenericGlobalHandlerTy &Handler,
@@ -44,7 +36,6 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device,
plugin::GenericGlobalHandlerTy &Handler,
plugin::DeviceImageTy &Image) {
#ifdef LIBOMPTARGET_RPC_SUPPORT
- uint32_t DeviceId = Device.getDeviceId();
auto Alloc = [](uint64_t Size, void *Data) {
plugin::GenericDeviceTy &Device =
*reinterpret_cast<plugin::GenericDeviceTy *>(Data);
@@ -52,10 +43,12 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device,
};
uint64_t NumPorts =
std::min(Device.requestedRPCPortCount(), RPC_MAXIMUM_PORT_COUNT);
- if (rpc_status_t Err = rpc_server_init(DeviceId, NumPorts,
+ rpc_device_t RPCDevice;
+ if (rpc_status_t Err = rpc_server_init(&RPCDevice, NumPorts,
Device.getWarpSize(), Alloc, &Device))
return plugin::Plugin::error(
- "Failed to initialize RPC server for device %d: %d", DeviceId, Err);
+ "Failed to initialize RPC server for device %d: %d",
+ Device.getDeviceId(), Err);
// Register a custom opcode handler to perform plugin specific allocation.
auto MallocHandler = [](rpc_port_t Port, void *Data) {
@@ -70,10 +63,10 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device,
Data);
};
if (rpc_status_t Err =
- rpc_register_callback(DeviceId, RPC_MALLOC, MallocHandler, &Device))
+ rpc_register_callback(RPCDevice, RPC_MALLOC, MallocHandler, &Device))
return plugin::Plugin::error(
- "Failed to register RPC malloc handler for device %d: %d\n", DeviceId,
- Err);
+ "Failed to register RPC malloc handler for device %d: %d\n",
+ Device.getDeviceId(), Err);
// Register a custom opcode handler to perform plugin specific deallocation.
auto FreeHandler = [](rpc_port_t Port, void *Data) {
@@ -88,10 +81,10 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device,
Data);
};
if (rpc_status_t Err =
- rpc_register_callback(DeviceId, RPC_FREE, FreeHandler, &Device))
+ rpc_register_callback(RPCDevice, RPC_FREE, FreeHandler, &Device))
return plugin::Plugin::error(
- "Failed to register RPC free handler for device %d: %d\n", DeviceId,
- Err);
+ "Failed to register RPC free handler for device %d: %d\n",
+ Device.getDeviceId(), Err);
// Get the address of the RPC client from the device.
void *ClientPtr;
@@ -104,17 +97,20 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device,
sizeof(void *), nullptr))
return Err;
- const void *ClientBuffer = rpc_get_client_buffer(DeviceId);
+ const void *ClientBuffer = rpc_get_client_buffer(RPCDevice);
if (auto Err = Device.dataSubmit(ClientPtr, ClientBuffer,
rpc_get_client_size(), nullptr))
return Err;
+ Handles.resize(Device.getDeviceId() + 1);
+ Handles[Device.getDeviceId()] = RPCDevice.handle;
#endif
return Error::success();
}
Error RPCServerTy::runServer(plugin::GenericDeviceTy &Device) {
#ifdef LIBOMPTARGET_RPC_SUPPORT
- if (rpc_status_t Err = rpc_handle_server(Device.getDeviceId()))
+ rpc_device_t RPCDevice{Handles[Device.getDeviceId()]};
+ if (rpc_status_t Err = rpc_handle_server(RPCDevice))
return plugin::Plugin::error(
"Error while running RPC server on device %d: %d", Device.getDeviceId(),
Err);
@@ -124,22 +120,16 @@ Error RPCServerTy::runServer(plugin::GenericDeviceTy &Device) {
Error RPCServerTy::deinitDevice(plugin::GenericDeviceTy &Device) {
#ifdef LIBOMPTARGET_RPC_SUPPORT
+ rpc_device_t RPCDevice{Handles[Device.getDeviceId()]};
auto Dealloc = [](void *Ptr, void *Data) {
plugin::GenericDeviceTy &Device =
*reinterpret_cast<plugin::GenericDeviceTy *>(Data);
Device.free(Ptr, TARGET_ALLOC_HOST);
};
- if (rpc_status_t Err =
- rpc_server_shutdown(Device.getDeviceId(), Dealloc, &Device))
+ if (rpc_status_t Err = rpc_server_shutdown(RPCDevice, Dealloc, &Device))
return plugin::Plugin::error(
"Failed to shut down RPC server for device %d: %d",
Device.getDeviceId(), Err);
#endif
return Error::success();
}
-
-RPCServerTy::~RPCServerTy() {
-#ifdef LIBOMPTARGET_RPC_SUPPORT
- rpc_shutdown();
-#endif
-}
diff --git a/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt
index 2bfb47168a7f..b3530462aa19 100644
--- a/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/cuda/CMakeLists.txt
@@ -23,34 +23,12 @@ endif()
libomptarget_say("Building CUDA NextGen offloading plugin.")
-set(LIBOMPTARGET_DLOPEN_LIBCUDA OFF)
-option(LIBOMPTARGET_FORCE_DLOPEN_LIBCUDA "Build with dlopened libcuda" ${LIBOMPTARGET_DLOPEN_LIBCUDA})
-
-add_llvm_library(omptarget.rtl.cuda SHARED
- src/rtl.cpp
-
- LINK_COMPONENTS
- Support
- Object
-
- LINK_LIBS PRIVATE
- PluginCommon
- ${OPENMP_PTHREAD_LIB}
-
- NO_INSTALL_RPATH
- BUILDTREE_ONLY
-)
-
-if ((OMPT_TARGET_DEFAULT) AND (LIBOMPTARGET_OMPT_SUPPORT))
- target_link_libraries(omptarget.rtl.cuda PRIVATE OMPT)
-endif()
-
-if (LIBOMP_HAVE_VERSION_SCRIPT_FLAG)
- target_link_libraries(omptarget.rtl.cuda PRIVATE
- "-Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/../exports,-z,defs")
-endif()
+# Create the library and add the default arguments.
+add_target_library(omptarget.rtl.cuda CUDA)
+target_sources(omptarget.rtl.cuda PRIVATE src/rtl.cpp)
+option(LIBOMPTARGET_FORCE_DLOPEN_LIBCUDA "Build with dlopened libcuda" OFF)
if(LIBOMPTARGET_DEP_CUDA_FOUND AND NOT LIBOMPTARGET_FORCE_DLOPEN_LIBCUDA)
libomptarget_say("Building CUDA plugin linked against libcuda")
target_link_libraries(omptarget.rtl.cuda PRIVATE CUDA::cuda_driver)
@@ -60,13 +38,6 @@ else()
target_sources(omptarget.rtl.cuda PRIVATE dynamic_cuda/cuda.cpp)
endif()
-# Define debug prefix. TODO: This should be automatized in the Debug.h but it
-# requires changing the original plugins.
-target_compile_definitions(omptarget.rtl.cuda PRIVATE TARGET_NAME="CUDA")
-target_compile_definitions(omptarget.rtl.cuda PRIVATE DEBUG_PREFIX="TARGET CUDA RTL")
-
-target_include_directories(omptarget.rtl.cuda PRIVATE ${LIBOMPTARGET_INCLUDE_DIR})
-
# Configure testing for the CUDA plugin. We will build tests if we could a
# functional NVIDIA GPU on the system, or if manually specifies by the user.
option(LIBOMPTARGET_FORCE_NVIDIA_TESTS "Build NVIDIA libomptarget tests" OFF)
@@ -84,5 +55,4 @@ endif()
# Install plugin under the lib destination folder.
install(TARGETS omptarget.rtl.cuda LIBRARY DESTINATION "${OPENMP_INSTALL_LIBDIR}")
set_target_properties(omptarget.rtl.cuda PROPERTIES
- INSTALL_RPATH "$ORIGIN" BUILD_RPATH "$ORIGIN:${CMAKE_CURRENT_BINARY_DIR}/.."
- CXX_VISIBILITY_PRESET protected)
+ INSTALL_RPATH "$ORIGIN" BUILD_RPATH "$ORIGIN:${CMAKE_CURRENT_BINARY_DIR}/..")
diff --git a/openmp/libomptarget/plugins-nextgen/cuda/src/rtl.cpp b/openmp/libomptarget/plugins-nextgen/cuda/src/rtl.cpp
index b862bc749092..fc74c6aa23fd 100644
--- a/openmp/libomptarget/plugins-nextgen/cuda/src/rtl.cpp
+++ b/openmp/libomptarget/plugins-nextgen/cuda/src/rtl.cpp
@@ -255,8 +255,8 @@ private:
/// generic device class.
struct CUDADeviceTy : public GenericDeviceTy {
// Create a CUDA device with a device id and the default CUDA grid values.
- CUDADeviceTy(int32_t DeviceId, int32_t NumDevices)
- : GenericDeviceTy(DeviceId, NumDevices, NVPTXGridValues),
+ CUDADeviceTy(GenericPluginTy &Plugin, int32_t DeviceId, int32_t NumDevices)
+ : GenericDeviceTy(Plugin, DeviceId, NumDevices, NVPTXGridValues),
CUDAStreamManager(*this), CUDAEventManager(*this) {}
~CUDADeviceTy() {}
@@ -471,7 +471,7 @@ struct CUDADeviceTy : public GenericDeviceTy {
/// Allocate and construct a CUDA kernel.
Expected<GenericKernelTy &> constructKernel(const char *Name) override {
// Allocate and construct the CUDA kernel.
- CUDAKernelTy *CUDAKernel = Plugin::get().allocate<CUDAKernelTy>();
+ CUDAKernelTy *CUDAKernel = Plugin.allocate<CUDAKernelTy>();
if (!CUDAKernel)
return Plugin::error("Failed to allocate memory for CUDA kernel");
@@ -529,7 +529,7 @@ struct CUDADeviceTy : public GenericDeviceTy {
return std::move(Err);
// Allocate and initialize the image object.
- CUDADeviceImageTy *CUDAImage = Plugin::get().allocate<CUDADeviceImageTy>();
+ CUDADeviceImageTy *CUDAImage = Plugin.allocate<CUDADeviceImageTy>();
new (CUDAImage) CUDADeviceImageTy(ImageId, *this, TgtImage);
// Load the CUDA module.
@@ -1371,6 +1371,17 @@ struct CUDAPluginTy final : public GenericPluginTy {
/// Deinitialize the plugin.
Error deinitImpl() override { return Plugin::success(); }
+ /// Creates a CUDA device to use for offloading.
+ GenericDeviceTy *createDevice(GenericPluginTy &Plugin, int32_t DeviceId,
+ int32_t NumDevices) override {
+ return new CUDADeviceTy(Plugin, DeviceId, NumDevices);
+ }
+
+ /// Creates a CUDA global handler.
+ GenericGlobalHandlerTy *createGlobalHandler() override {
+ return new CUDAGlobalHandlerTy();
+ }
+
/// Get the ELF code for recognizing the compatible image binary.
uint16_t getMagicElfBits() const override { return ELF::EM_CUDA; }
@@ -1484,18 +1495,10 @@ Error CUDADeviceTy::dataExchangeImpl(const void *SrcPtr,
return Plugin::check(Res, "Error in cuMemcpyDtoDAsync: %s");
}
-GenericPluginTy *Plugin::createPlugin() { return new CUDAPluginTy(); }
-
-GenericDeviceTy *Plugin::createDevice(int32_t DeviceId, int32_t NumDevices) {
- return new CUDADeviceTy(DeviceId, NumDevices);
-}
-
-GenericGlobalHandlerTy *Plugin::createGlobalHandler() {
- return new CUDAGlobalHandlerTy();
-}
+GenericPluginTy *PluginTy::createPlugin() { return new CUDAPluginTy(); }
template <typename... ArgsTy>
-Error Plugin::check(int32_t Code, const char *ErrFmt, ArgsTy... Args) {
+static Error Plugin::check(int32_t Code, const char *ErrFmt, ArgsTy... Args) {
CUresult ResultCode = static_cast<CUresult>(Code);
if (ResultCode == CUDA_SUCCESS)
return Error::success();
diff --git a/openmp/libomptarget/plugins-nextgen/host/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/host/CMakeLists.txt
index 5ccb20e305e8..ccbf7d033fd6 100644
--- a/openmp/libomptarget/plugins-nextgen/host/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/host/CMakeLists.txt
@@ -2,11 +2,6 @@ if(NOT CMAKE_SYSTEM_NAME MATCHES "Linux")
return()
endif()
- # build_generic_elf64("s390x" "S390X" "s390x" "systemz" "s390x-ibm-linux-gnu" "22")
- # build_generic_elf64("aarch64" "aarch64" "aarch64" "aarch64" "aarch64-unknown-linux-gnu" "183")
- # build_generic_elf64("ppc64" "PPC64" "ppc64" "ppc64" "powerpc64-ibm-linux-gnu" "21")
- # build_generic_elf64("x86_64" "x86_64" "x86_64" "x86_64" "x86_64-pc-linux-gnu" "62")
- # build_generic_elf64("ppc64le" "PPC64le" "ppc64" "ppc64le" "powerpc64le-ibm-linux-gnu" "21")
set(supported_targets x86_64 aarch64 ppc64 ppc64le s390x)
if(NOT ${CMAKE_SYSTEM_PROCESSOR} IN_LIST supported_targets)
libomptarget_say("Not building ${machine} NextGen offloading plugin")
@@ -18,16 +13,10 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le$")
set(machine ppc64)
endif()
-add_llvm_library(omptarget.rtl.${machine} SHARED
- src/rtl.cpp
- ADDITIONAL_HEADER_DIRS
- ${LIBOMPTARGET_INCLUDE_DIR}
- LINK_LIBS PRIVATE
- PluginCommon
- ${OPENMP_PTHREAD_LIB}
- NO_INSTALL_RPATH
- BUILDTREE_ONLY
-)
+# Create the library and add the default arguments.
+add_target_library(omptarget.rtl.${machine} ${machine})
+
+target_sources(omptarget.rtl.${machine} PRIVATE src/rtl.cpp)
if(LIBOMPTARGET_DEP_LIBFFI_FOUND)
libomptarget_say("Building ${machine} plugin linked with libffi")
@@ -42,15 +31,6 @@ else()
target_include_directories(omptarget.rtl.${machine} PRIVATE dynamic_ffi)
endif()
-if(OMPT_TARGET_DEFAULT AND LIBOMPTARGET_OMPT_SUPPORT)
- target_link_libraries(omptarget.rtl.${machine} PRIVATE OMPT)
-endif()
-
-if(LIBOMP_HAVE_VERSION_SCRIPT_FLAG)
- target_link_libraries(omptarget.rtl.${machine} PRIVATE
- "-Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/../exports")
-endif()
-
# Install plugin under the lib destination folder.
install(TARGETS omptarget.rtl.${machine}
LIBRARY DESTINATION "${OPENMP_INSTALL_LIBDIR}")
@@ -70,12 +50,6 @@ else()
libomptarget_say("Not generating ${tmachine_name} tests. LibFFI not found.")
endif()
-# Define macro to be used as prefix of the runtime messages for this target.
-target_compile_definitions(omptarget.rtl.${machine} PRIVATE TARGET_NAME=${machine})
-# TODO: This should be automatized in Debug.h.
-target_compile_definitions(omptarget.rtl.${machine} PRIVATE
- DEBUG_PREFIX="TARGET ${machine} RTL")
-
# Define the target specific triples and ELF machine values.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le$" OR
CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64$")
@@ -100,7 +74,7 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64$")
"aarch64-unknown-linux-gnu" "aarch64-unknown-linux-gnu-LTO")
set(LIBOMPTARGET_SYSTEM_TARGETS "${LIBOMPTARGET_SYSTEM_TARGETS}" PARENT_SCOPE)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "s390x$")
- target_compile_definitions(omptarget.rtl.${machine} TARGET_ELF_ID=EM_S390)
+ target_compile_definitions(omptarget.rtl.${machine} PRIVATE TARGET_ELF_ID=EM_S390)
target_compile_definitions(omptarget.rtl.${machine} PRIVATE
LIBOMPTARGET_NEXTGEN_GENERIC_PLUGIN_TRIPLE="s390x-ibm-linux-gnu")
list(APPEND LIBOMPTARGET_SYSTEM_TARGETS
diff --git a/openmp/libomptarget/plugins-nextgen/host/dynamic_ffi/ffi.cpp b/openmp/libomptarget/plugins-nextgen/host/dynamic_ffi/ffi.cpp
index c79daa798581..c586ad1c1969 100644
--- a/openmp/libomptarget/plugins-nextgen/host/dynamic_ffi/ffi.cpp
+++ b/openmp/libomptarget/plugins-nextgen/host/dynamic_ffi/ffi.cpp
@@ -11,6 +11,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/DynamicLibrary.h"
+
+#include "Shared/Debug.h"
#include <memory>
#include "DLWrap.h"
@@ -37,15 +39,21 @@ uint32_t ffi_init() {
std::string ErrMsg;
auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
llvm::sys::DynamicLibrary::getPermanentLibrary(FFI_PATH, &ErrMsg));
- if (!DynlibHandle->isValid())
+
+ if (!DynlibHandle->isValid()) {
+ DP("Unable to load library '%s': %s!\n", FFI_PATH, ErrMsg.c_str());
return DYNAMIC_FFI_FAIL;
+ }
for (size_t I = 0; I < dlwrap::size(); I++) {
const char *Sym = dlwrap::symbol(I);
void *P = DynlibHandle->getAddressOfSymbol(Sym);
- if (P == nullptr)
+ if (P == nullptr) {
+ DP("Unable to find '%s' in '%s'!\n", Sym, FFI_PATH);
return DYNAMIC_FFI_FAIL;
+ }
+ DP("Implementing %s with dlsym(%s) -> %p\n", Sym, Sym, P);
*dlwrap::pointer(I) = P;
}
@@ -53,8 +61,10 @@ uint32_t ffi_init() {
#define DYNAMIC_INIT(SYMBOL) \
{ \
void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
- if (!SymbolPtr) \
+ if (!SymbolPtr) { \
+ DP("Unable to find '%s' in '%s'!\n", #SYMBOL, FFI_PATH); \
return DYNAMIC_FFI_FAIL; \
+ } \
SYMBOL = *reinterpret_cast<decltype(SYMBOL) *>(SymbolPtr); \
}
DYNAMIC_INIT(ffi_type_void);
diff --git a/openmp/libomptarget/plugins-nextgen/host/src/rtl.cpp b/openmp/libomptarget/plugins-nextgen/host/src/rtl.cpp
index 1ef18814a26a..f0ce24249301 100644
--- a/openmp/libomptarget/plugins-nextgen/host/src/rtl.cpp
+++ b/openmp/libomptarget/plugins-nextgen/host/src/rtl.cpp
@@ -66,7 +66,7 @@ struct GenELF64KernelTy : public GenericKernelTy {
GlobalTy Global(getName(), 0);
// Get the metadata (address) of the kernel function.
- GenericGlobalHandlerTy &GHandler = Plugin::get().getGlobalHandler();
+ GenericGlobalHandlerTy &GHandler = Device.Plugin.getGlobalHandler();
if (auto Err = GHandler.getGlobalMetadataFromDevice(Device, Image, Global))
return Err;
@@ -132,8 +132,9 @@ private:
/// Class implementing the device functionalities for GenELF64.
struct GenELF64DeviceTy : public GenericDeviceTy {
/// Create the device with a specific id.
- GenELF64DeviceTy(int32_t DeviceId, int32_t NumDevices)
- : GenericDeviceTy(DeviceId, NumDevices, GenELF64GridValues) {}
+ GenELF64DeviceTy(GenericPluginTy &Plugin, int32_t DeviceId,
+ int32_t NumDevices)
+ : GenericDeviceTy(Plugin, DeviceId, NumDevices, GenELF64GridValues) {}
~GenELF64DeviceTy() {}
@@ -149,8 +150,7 @@ struct GenELF64DeviceTy : public GenericDeviceTy {
/// Construct the kernel for a specific image on the device.
Expected<GenericKernelTy &> constructKernel(const char *Name) override {
// Allocate and construct the kernel.
- GenELF64KernelTy *GenELF64Kernel =
- Plugin::get().allocate<GenELF64KernelTy>();
+ GenELF64KernelTy *GenELF64Kernel = Plugin.allocate<GenELF64KernelTy>();
if (!GenELF64Kernel)
return Plugin::error("Failed to allocate memory for GenELF64 kernel");
@@ -166,8 +166,7 @@ struct GenELF64DeviceTy : public GenericDeviceTy {
Expected<DeviceImageTy *> loadBinaryImpl(const __tgt_device_image *TgtImage,
int32_t ImageId) override {
// Allocate and initialize the image object.
- GenELF64DeviceImageTy *Image =
- Plugin::get().allocate<GenELF64DeviceImageTy>();
+ GenELF64DeviceImageTy *Image = Plugin.allocate<GenELF64DeviceImageTy>();
new (Image) GenELF64DeviceImageTy(ImageId, *this, TgtImage);
// Create a temporary file.
@@ -399,6 +398,17 @@ struct GenELF64PluginTy final : public GenericPluginTy {
/// Deinitialize the plugin.
Error deinitImpl() override { return Plugin::success(); }
+ /// Creates a generic ELF device.
+ GenericDeviceTy *createDevice(GenericPluginTy &Plugin, int32_t DeviceId,
+ int32_t NumDevices) override {
+ return new GenELF64DeviceTy(Plugin, DeviceId, NumDevices);
+ }
+
+ /// Creates a generic global handler.
+ GenericGlobalHandlerTy *createGlobalHandler() override {
+ return new GenELF64GlobalHandlerTy();
+ }
+
/// Get the ELF code to recognize the compatible binary images.
uint16_t getMagicElfBits() const override { return ELF::TARGET_ELF_ID; }
@@ -415,18 +425,10 @@ struct GenELF64PluginTy final : public GenericPluginTy {
}
};
-GenericPluginTy *Plugin::createPlugin() { return new GenELF64PluginTy(); }
-
-GenericDeviceTy *Plugin::createDevice(int32_t DeviceId, int32_t NumDevices) {
- return new GenELF64DeviceTy(DeviceId, NumDevices);
-}
-
-GenericGlobalHandlerTy *Plugin::createGlobalHandler() {
- return new GenELF64GlobalHandlerTy();
-}
+GenericPluginTy *PluginTy::createPlugin() { return new GenELF64PluginTy(); }
template <typename... ArgsTy>
-Error Plugin::check(int32_t Code, const char *ErrMsg, ArgsTy... Args) {
+static Error Plugin::check(int32_t Code, const char *ErrMsg, ArgsTy... Args) {
if (Code == 0)
return Error::success();
diff --git a/openmp/libomptarget/src/PluginManager.cpp b/openmp/libomptarget/src/PluginManager.cpp
index 928913275332..792cae3e3dd5 100644
--- a/openmp/libomptarget/src/PluginManager.cpp
+++ b/openmp/libomptarget/src/PluginManager.cpp
@@ -78,7 +78,7 @@ Error PluginAdaptorTy::init() {
}
// No devices are supported by this RTL?
- NumberOfPluginDevices = number_of_devices();
+ int32_t NumberOfPluginDevices = number_of_devices();
if (!NumberOfPluginDevices) {
return createStringError(inconvertibleErrorCode(),
"No devices supported in this RTL\n");
@@ -110,32 +110,33 @@ void PluginManager::init() {
DP("RTLs loaded!\n");
}
-void PluginAdaptorTy::initDevices(PluginManager &PM) {
- if (isUsed())
+void PluginManager::initDevices(PluginAdaptorTy &RTL) {
+ // If this RTL has already been initialized.
+ if (PM->DeviceOffsets.contains(&RTL))
return;
TIMESCOPE();
// If this RTL is not already in use, initialize it.
- assert(getNumberOfPluginDevices() > 0 &&
+ assert(RTL.number_of_devices() > 0 &&
"Tried to initialize useless plugin adaptor");
// Initialize the device information for the RTL we are about to use.
- auto ExclusiveDevicesAccessor = PM.getExclusiveDevicesAccessor();
+ auto ExclusiveDevicesAccessor = getExclusiveDevicesAccessor();
// Initialize the index of this RTL and save it in the used RTLs.
- DeviceOffset = ExclusiveDevicesAccessor->size();
+ int32_t DeviceOffset = ExclusiveDevicesAccessor->size();
- // If possible, set the device identifier offset in the plugin.
- if (set_device_offset)
- set_device_offset(DeviceOffset);
+ // Set the device identifier offset in the plugin.
+ RTL.set_device_offset(DeviceOffset);
- int32_t NumPD = getNumberOfPluginDevices();
+ int32_t NumberOfUserDevices = 0;
+ int32_t NumPD = RTL.number_of_devices();
ExclusiveDevicesAccessor->reserve(DeviceOffset + NumPD);
// Auto zero-copy is a per-device property. We need to ensure
// that all devices are suggesting to use it.
bool UseAutoZeroCopy = !(NumPD == 0);
for (int32_t PDevI = 0, UserDevId = DeviceOffset; PDevI < NumPD; PDevI++) {
- auto Device = std::make_unique<DeviceTy>(this, UserDevId, PDevI);
+ auto Device = std::make_unique<DeviceTy>(&RTL, UserDevId, PDevI);
if (auto Err = Device->init()) {
DP("Skip plugin known device %d: %s\n", PDevI,
toString(std::move(Err)).c_str());
@@ -153,20 +154,23 @@ void PluginAdaptorTy::initDevices(PluginManager &PM) {
// If all devices suggest to use it, change requirment flags to trigger
// zero-copy behavior when mapping memory.
if (UseAutoZeroCopy)
- PM.addRequirements(OMPX_REQ_AUTO_ZERO_COPY);
+ addRequirements(OMPX_REQ_AUTO_ZERO_COPY);
+ DeviceOffsets[&RTL] = DeviceOffset;
+ DeviceUsed[&RTL] = NumberOfUserDevices;
DP("Plugin adaptor " DPxMOD " has index %d, exposes %d out of %d devices!\n",
- DPxPTR(LibraryHandler.get()), DeviceOffset, NumberOfUserDevices,
- NumberOfPluginDevices);
+ DPxPTR(RTL.LibraryHandler.get()), DeviceOffset, NumberOfUserDevices,
+ RTL.number_of_devices());
}
void PluginManager::initAllPlugins() {
for (auto &R : PluginAdaptors)
- R->initDevices(*this);
+ initDevices(*R);
}
static void registerImageIntoTranslationTable(TranslationTable &TT,
- PluginAdaptorTy &RTL,
+ int32_t DeviceOffset,
+ int32_t NumberOfUserDevices,
__tgt_device_image *Image) {
// same size, as when we increase one, we also increase the other.
@@ -175,8 +179,7 @@ static void registerImageIntoTranslationTable(TranslationTable &TT,
// Resize the Targets Table and Images to accommodate the new targets if
// required
- unsigned TargetsTableMinimumSize =
- RTL.DeviceOffset + RTL.getNumberOfUserDevices();
+ unsigned TargetsTableMinimumSize = DeviceOffset + NumberOfUserDevices;
if (TT.TargetsTable.size() < TargetsTableMinimumSize) {
TT.DeviceTables.resize(TargetsTableMinimumSize, {});
@@ -186,11 +189,11 @@ static void registerImageIntoTranslationTable(TranslationTable &TT,
}
// Register the image in all devices for this target type.
- for (int32_t I = 0; I < RTL.getNumberOfUserDevices(); ++I) {
+ for (int32_t I = 0; I < NumberOfUserDevices; ++I) {
// If we are changing the image we are also invalidating the target table.
- if (TT.TargetsImages[RTL.DeviceOffset + I] != Image) {
- TT.TargetsImages[RTL.DeviceOffset + I] = Image;
- TT.TargetsTable[RTL.DeviceOffset + I] =
+ if (TT.TargetsImages[DeviceOffset + I] != Image) {
+ TT.TargetsImages[DeviceOffset + I] = Image;
+ TT.TargetsTable[DeviceOffset + I] =
0; // lazy initialization of target table.
}
}
@@ -228,7 +231,7 @@ void PluginManager::registerLib(__tgt_bin_desc *Desc) {
DP("Image " DPxMOD " is compatible with RTL %s!\n",
DPxPTR(Img->ImageStart), R.Name.c_str());
- R.initDevices(*this);
+ PM->initDevices(R);
// Initialize (if necessary) translation table for this library.
PM->TrlTblMtx.lock();
@@ -246,8 +249,10 @@ void PluginManager::registerLib(__tgt_bin_desc *Desc) {
DP("Registering image " DPxMOD " with RTL %s!\n", DPxPTR(Img->ImageStart),
R.Name.c_str());
- registerImageIntoTranslationTable(TransTable, R, Img);
- R.UsedImages.insert(Img);
+
+ registerImageIntoTranslationTable(TransTable, PM->DeviceOffsets[&R],
+ PM->DeviceUsed[&R], Img);
+ PM->UsedImages.insert(Img);
PM->TrlTblMtx.unlock();
FoundRTL = &R;
@@ -283,11 +288,11 @@ void PluginManager::unregisterLib(__tgt_bin_desc *Desc) {
// Scan the RTLs that have associated images until we find one that supports
// the current image. We only need to scan RTLs that are already being used.
for (auto &R : PM->pluginAdaptors()) {
- if (!R.isUsed())
+ if (!DeviceOffsets.contains(&R))
continue;
// Ensure that we do not use any unused images associated with this RTL.
- if (!R.UsedImages.contains(Img))
+ if (!UsedImages.contains(Img))
continue;
FoundRTL = &R;
diff --git a/openmp/libomptarget/src/device.cpp b/openmp/libomptarget/src/device.cpp
index 3345277d91d3..44a2facc8d3d 100644
--- a/openmp/libomptarget/src/device.cpp
+++ b/openmp/libomptarget/src/device.cpp
@@ -79,8 +79,7 @@ DeviceTy::~DeviceTy() {
llvm::Error DeviceTy::init() {
// Make call to init_requires if it exists for this plugin.
int32_t Ret = 0;
- if (RTL->init_requires)
- Ret = RTL->init_requires(PM->getRequirements());
+ Ret = RTL->init_requires(PM->getRequirements());
if (Ret != OFFLOAD_SUCCESS)
return llvm::createStringError(
llvm::inconvertibleErrorCode(),
@@ -154,8 +153,6 @@ int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size,
omp_get_initial_device(), HstPtrBegin, DeviceID, TgtPtrBegin, Size,
/*CodePtr=*/OMPT_GET_RETURN_ADDRESS);)
- if (!AsyncInfo || !RTL->data_submit_async || !RTL->synchronize)
- return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
AsyncInfo);
}
@@ -176,8 +173,6 @@ int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
DeviceID, TgtPtrBegin, omp_get_initial_device(), HstPtrBegin, Size,
/*CodePtr=*/OMPT_GET_RETURN_ADDRESS);)
- if (!RTL->data_retrieve_async || !RTL->synchronize)
- return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size,
AsyncInfo);
}
@@ -196,7 +191,7 @@ int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
RegionInterface.getCallbacks<ompt_target_data_transfer_from_device>(),
RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr, Size,
/*CodePtr=*/OMPT_GET_RETURN_ADDRESS);)
- if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) {
+ if (!AsyncInfo) {
assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
Size);
@@ -206,9 +201,6 @@ int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
}
int32_t DeviceTy::notifyDataMapped(void *HstPtr, int64_t Size) {
- if (!RTL->data_notify_mapped)
- return OFFLOAD_SUCCESS;
-
DP("Notifying about new mapping: HstPtr=" DPxMOD ", Size=%" PRId64 "\n",
DPxPTR(HstPtr), Size);
@@ -220,9 +212,6 @@ int32_t DeviceTy::notifyDataMapped(void *HstPtr, int64_t Size) {
}
int32_t DeviceTy::notifyDataUnmapped(void *HstPtr) {
- if (!RTL->data_notify_unmapped)
- return OFFLOAD_SUCCESS;
-
DP("Notifying about an unmapping: HstPtr=" DPxMOD "\n", DPxPTR(HstPtr));
if (RTL->data_notify_unmapped(RTLDeviceID, HstPtr)) {
@@ -242,70 +231,46 @@ int32_t DeviceTy::launchKernel(void *TgtEntryPtr, void **TgtVarsPtr,
// Run region on device
bool DeviceTy::printDeviceInfo() {
- if (!RTL->print_device_info)
- return false;
RTL->print_device_info(RTLDeviceID);
return true;
}
// Whether data can be copied to DstDevice directly
bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
- if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
+ if (RTL != DstDevice.RTL)
return false;
if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
- return (RTL->data_exchange != nullptr) ||
- (RTL->data_exchange_async != nullptr);
-
+ return true;
return false;
}
int32_t DeviceTy::synchronize(AsyncInfoTy &AsyncInfo) {
- if (RTL->synchronize)
- return RTL->synchronize(RTLDeviceID, AsyncInfo);
- return OFFLOAD_SUCCESS;
+ return RTL->synchronize(RTLDeviceID, AsyncInfo);
}
int32_t DeviceTy::queryAsync(AsyncInfoTy &AsyncInfo) {
- if (RTL->query_async)
- return RTL->query_async(RTLDeviceID, AsyncInfo);
-
- return synchronize(AsyncInfo);
+ return RTL->query_async(RTLDeviceID, AsyncInfo);
}
int32_t DeviceTy::createEvent(void **Event) {
- if (RTL->create_event)
- return RTL->create_event(RTLDeviceID, Event);
-
- return OFFLOAD_SUCCESS;
+ return RTL->create_event(RTLDeviceID, Event);
}
int32_t DeviceTy::recordEvent(void *Event, AsyncInfoTy &AsyncInfo) {
- if (RTL->record_event)
- return RTL->record_event(RTLDeviceID, Event, AsyncInfo);
-
- return OFFLOAD_SUCCESS;
+ return RTL->record_event(RTLDeviceID, Event, AsyncInfo);
}
int32_t DeviceTy::waitEvent(void *Event, AsyncInfoTy &AsyncInfo) {
- if (RTL->wait_event)
- return RTL->wait_event(RTLDeviceID, Event, AsyncInfo);
-
- return OFFLOAD_SUCCESS;
+ return RTL->wait_event(RTLDeviceID, Event, AsyncInfo);
}
int32_t DeviceTy::syncEvent(void *Event) {
- if (RTL->sync_event)
- return RTL->sync_event(RTLDeviceID, Event);
-
- return OFFLOAD_SUCCESS;
+ return RTL->sync_event(RTLDeviceID, Event);
}
int32_t DeviceTy::destroyEvent(void *Event) {
- if (RTL->create_event)
- return RTL->destroy_event(RTLDeviceID, Event);
-
- return OFFLOAD_SUCCESS;
+ return RTL->destroy_event(RTLDeviceID, Event);
}
void DeviceTy::dumpOffloadEntries() {
@@ -321,7 +286,5 @@ void DeviceTy::dumpOffloadEntries() {
}
bool DeviceTy::useAutoZeroCopy() {
- if (RTL->use_auto_zero_copy)
- return RTL->use_auto_zero_copy(RTLDeviceID);
- return false;
+ return RTL->use_auto_zero_copy(RTLDeviceID);
}
diff --git a/openmp/libomptarget/src/interface.cpp b/openmp/libomptarget/src/interface.cpp
index b7f547f1ec3d..b562ba8818c3 100644
--- a/openmp/libomptarget/src/interface.cpp
+++ b/openmp/libomptarget/src/interface.cpp
@@ -456,10 +456,8 @@ EXTERN void __tgt_set_info_flag(uint32_t NewInfoLevel) {
assert(PM && "Runtime not initialized");
std::atomic<uint32_t> &InfoLevel = getInfoLevelInternal();
InfoLevel.store(NewInfoLevel);
- for (auto &R : PM->pluginAdaptors()) {
- if (R.set_info_flag)
- R.set_info_flag(NewInfoLevel);
- }
+ for (auto &R : PM->pluginAdaptors())
+ R.set_info_flag(NewInfoLevel);
}
EXTERN int __tgt_print_device_info(int64_t DeviceId) {
diff --git a/openmp/libomptarget/src/omptarget.cpp b/openmp/libomptarget/src/omptarget.cpp
index 5bbf3a455c72..803e941fe838 100644
--- a/openmp/libomptarget/src/omptarget.cpp
+++ b/openmp/libomptarget/src/omptarget.cpp
@@ -481,12 +481,10 @@ void *targetLockExplicit(void *HostPtr, size_t Size, int DeviceNum,
FATAL_MESSAGE(DeviceNum, "%s", toString(DeviceOrErr.takeError()).c_str());
int32_t Err = 0;
- if (!DeviceOrErr->RTL->data_lock) {
- Err = DeviceOrErr->RTL->data_lock(DeviceNum, HostPtr, Size, &RC);
- if (Err) {
- DP("Could not lock ptr %p\n", HostPtr);
- return nullptr;
- }
+ Err = DeviceOrErr->RTL->data_lock(DeviceNum, HostPtr, Size, &RC);
+ if (Err) {
+ DP("Could not lock ptr %p\n", HostPtr);
+ return nullptr;
}
DP("%s returns device ptr " DPxMOD "\n", Name, DPxPTR(RC));
return RC;
@@ -499,9 +497,7 @@ void targetUnlockExplicit(void *HostPtr, int DeviceNum, const char *Name) {
if (!DeviceOrErr)
FATAL_MESSAGE(DeviceNum, "%s", toString(DeviceOrErr.takeError()).c_str());
- if (!DeviceOrErr->RTL->data_unlock)
- DeviceOrErr->RTL->data_unlock(DeviceNum, HostPtr);
-
+ DeviceOrErr->RTL->data_unlock(DeviceNum, HostPtr);
DP("%s returns\n", Name);
}
diff --git a/openmp/libomptarget/test/offloading/d2d_memcpy_sync.c b/openmp/libomptarget/test/offloading/d2d_memcpy_sync.c
new file mode 100644
index 000000000000..6b9b765a74d8
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/d2d_memcpy_sync.c
@@ -0,0 +1,72 @@
+// RUN: %libomptarget-compile-generic && \
+// RUN: env LIBOMPTARGET_AMDGPU_MAX_ASYNC_COPY_BYTES=0 %libomptarget-run-generic | \
+// RUN: %fcheck-generic -allow-empty
+// REQUIRES: amdgcn-amd-amdhsa
+
+#include <assert.h>
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+const int magic_num = 7;
+
+int main(int argc, char *argv[]) {
+ const int N = 128;
+ const int num_devices = omp_get_num_devices();
+
+ // No target device, just return
+ if (num_devices == 0) {
+ printf("PASS\n");
+ return 0;
+ }
+
+ const int src_device = 0;
+ int dst_device = num_devices - 1;
+
+ int length = N * sizeof(int);
+ int *src_ptr = omp_target_alloc(length, src_device);
+ int *dst_ptr = omp_target_alloc(length, dst_device);
+
+ if (!src_ptr || !dst_ptr) {
+ printf("FAIL\n");
+ return 1;
+ }
+
+#pragma omp target teams distribute parallel for device(src_device) \
+ is_device_ptr(src_ptr)
+ for (int i = 0; i < N; ++i) {
+ src_ptr[i] = magic_num;
+ }
+
+ if (omp_target_memcpy(dst_ptr, src_ptr, length, 0, 0, dst_device,
+ src_device)) {
+ printf("FAIL\n");
+ return 1;
+ }
+
+ int *buffer = malloc(length);
+ if (!buffer) {
+ printf("FAIL\n");
+ return 1;
+ }
+
+#pragma omp target teams distribute parallel for device(dst_device) \
+ map(from : buffer[0 : N]) is_device_ptr(dst_ptr)
+ for (int i = 0; i < N; ++i) {
+ buffer[i] = dst_ptr[i] + magic_num;
+ }
+
+ for (int i = 0; i < N; ++i)
+ assert(buffer[i] == 2 * magic_num);
+
+ printf("PASS\n");
+
+ // Free host and device memory
+ free(buffer);
+ omp_target_free(src_ptr, src_device);
+ omp_target_free(dst_ptr, dst_device);
+
+ return 0;
+}
+
+// CHECK: PASS
diff --git a/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-2.f90 b/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-2.f90
new file mode 100644
index 000000000000..489c2532a762
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-2.f90
@@ -0,0 +1,39 @@
+! Offloading test checking interaction of an
+! enter and exit map of an array of scalars
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+ integer :: array(10)
+
+ do I = 1, 10
+ array(I) = I + I
+ end do
+
+ !$omp target enter data map(to: array)
+
+ ! Shouldn't overwrite data already locked in
+ ! on target via enter, this will then be
+ ! overwritten by our exit
+ do I = 1, 10
+ array(I) = 10
+ end do
+
+ !$omp target
+ do i=1,10
+ array(i) = array(i) + i
+ end do
+ !$omp end target
+
+ !$omp target exit data map(from: array)
+
+ print*, array
+end program
+
+!CHECK: 3 6 9 12 15 18 21 24 27 30
diff --git a/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-bounds.f90 b/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-bounds.f90
new file mode 100644
index 000000000000..3c8c3507ed72
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-array-bounds.f90
@@ -0,0 +1,44 @@
+! Offloading test checking interaction of an
+! enter and exit map of an array of scalars
+! with specified bounds
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+
+program main
+ integer :: array(10)
+
+ do I = 1, 10
+ array(I) = I + I
+ end do
+
+ !$omp target enter data map(to: array(3:6))
+
+ ! Shouldn't overwrite data already locked in
+ ! on target via enter, which will then be
+ ! overwritten by our exit
+ do I = 1, 10
+ array(I) = 10
+ end do
+
+ ! The compiler/runtime is less lenient about read/write out of
+ ! bounds when using enter and exit, we have to specifically loop
+ ! over the correctly mapped range
+ !$omp target
+ do i=3,6
+ array(i) = array(i) + i
+ end do
+ !$omp end target
+
+ !$omp target exit data map(from: array(3:6))
+
+ print *, array
+end program
+
+!CHECK: 10 10 9 12 15 18 10 10 10 10
diff --git a/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-scalar.f90 b/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-scalar.f90
new file mode 100644
index 000000000000..29a0b5ee3e62
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/fortran/target-map-enter-exit-scalar.f90
@@ -0,0 +1,33 @@
+! Offloading test checking interaction of an
+! enter and exit map of an scalar
+! REQUIRES: flang, amdgcn-amd-amdhsa
+! UNSUPPORTED: nvptx64-nvidia-cuda
+! UNSUPPORTED: nvptx64-nvidia-cuda-LTO
+! UNSUPPORTED: aarch64-unknown-linux-gnu
+! UNSUPPORTED: aarch64-unknown-linux-gnu-LTO
+! UNSUPPORTED: x86_64-pc-linux-gnu
+! UNSUPPORTED: x86_64-pc-linux-gnu-LTO
+
+! RUN: %libomptarget-compile-fortran-run-and-check-generic
+program main
+ integer :: scalar
+ scalar = 10
+
+ !$omp target enter data map(to: scalar)
+
+ !ignored, as we've already attached
+ scalar = 20
+
+ !$omp target
+ scalar = scalar + 50
+ !$omp end target
+
+ !$omp target exit data map(from: scalar)
+
+ ! not the answer one may expect, but it is the same
+ ! answer Clang gives so we are correctly on par with
+ ! Clang for the moment.
+ print *, scalar
+end program
+
+!CHECK: 10
diff --git a/openmp/runtime/src/kmp.h b/openmp/runtime/src/kmp.h
index 885d6636abe4..18ccf10fe17d 100644
--- a/openmp/runtime/src/kmp.h
+++ b/openmp/runtime/src/kmp.h
@@ -819,6 +819,7 @@ private:
typedef KMPAffinity::Mask kmp_affin_mask_t;
extern KMPAffinity *__kmp_affinity_dispatch;
+#ifndef KMP_OS_AIX
class kmp_affinity_raii_t {
kmp_affin_mask_t *mask;
bool restored;
@@ -843,6 +844,7 @@ public:
}
~kmp_affinity_raii_t() { restore(); }
};
+#endif // !KMP_OS_AIX
// Declare local char buffers with this size for printing debug and info
// messages, using __kmp_affinity_print_mask().
@@ -3910,7 +3912,8 @@ extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_WEIGHTED_ITERATIONS_SUPPORTED
extern int __kmp_get_first_osid_with_ecore(void);
#endif
-#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
+#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
+ KMP_OS_AIX
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
static inline void __kmp_assign_root_init_mask() {
diff --git a/openmp/runtime/src/kmp_affinity.cpp b/openmp/runtime/src/kmp_affinity.cpp
index 048bd174fc95..b574dbbaf54f 100644
--- a/openmp/runtime/src/kmp_affinity.cpp
+++ b/openmp/runtime/src/kmp_affinity.cpp
@@ -2910,12 +2910,17 @@ static inline const char *__kmp_cpuinfo_get_envvar() {
}
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
-// affinity map.
+// affinity map. On AIX, the map is obtained through system SRAD (Scheduler
+// Resource Allocation Domain).
static bool __kmp_affinity_create_cpuinfo_map(int *line,
kmp_i18n_id_t *const msg_id) {
+ *msg_id = kmp_i18n_null;
+
+#if KMP_OS_AIX
+ unsigned num_records = __kmp_xproc;
+#else
const char *filename = __kmp_cpuinfo_get_filename();
const char *envvar = __kmp_cpuinfo_get_envvar();
- *msg_id = kmp_i18n_null;
if (__kmp_affinity.flags.verbose) {
KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
@@ -2974,6 +2979,7 @@ static bool __kmp_affinity_create_cpuinfo_map(int *line,
*msg_id = kmp_i18n_str_CantRewindCpuinfo;
return false;
}
+#endif // KMP_OS_AIX
// Allocate the array of records to store the proc info in. The dummy
// element at the end makes the logic in filling them out easier to code.
@@ -3003,6 +3009,99 @@ static bool __kmp_affinity_create_cpuinfo_map(int *line,
INIT_PROC_INFO(threadInfo[i]);
}
+#if KMP_OS_AIX
+ int smt_threads;
+ lpar_info_format1_t cpuinfo;
+ unsigned num_avail = __kmp_xproc;
+
+ if (__kmp_affinity.flags.verbose)
+ KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");
+
+ // Get the number of SMT threads per core.
+ int retval =
+ lpar_get_info(LPAR_INFO_FORMAT1, &cpuinfo, sizeof(lpar_info_format1_t));
+ if (!retval)
+ smt_threads = cpuinfo.smt_threads;
+ else {
+ CLEANUP_THREAD_INFO;
+ *msg_id = kmp_i18n_str_UnknownTopology;
+ return false;
+ }
+
+  // Allocate a resource set containing available system resources.
+ rsethandle_t sys_rset = rs_alloc(RS_SYSTEM);
+ if (sys_rset == NULL) {
+ CLEANUP_THREAD_INFO;
+ *msg_id = kmp_i18n_str_UnknownTopology;
+ return false;
+ }
+ // Allocate a resource set for the SRAD info.
+ rsethandle_t srad = rs_alloc(RS_EMPTY);
+ if (srad == NULL) {
+ rs_free(sys_rset);
+ CLEANUP_THREAD_INFO;
+ *msg_id = kmp_i18n_str_UnknownTopology;
+ return false;
+ }
+
+ // Get the SRAD system detail level.
+ int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0);
+ if (sradsdl < 0) {
+ rs_free(sys_rset);
+ rs_free(srad);
+ CLEANUP_THREAD_INFO;
+ *msg_id = kmp_i18n_str_UnknownTopology;
+ return false;
+ }
+ // Get the number of RADs at that SRAD SDL.
+ int num_rads = rs_numrads(sys_rset, sradsdl, 0);
+ if (num_rads < 0) {
+ rs_free(sys_rset);
+ rs_free(srad);
+ CLEANUP_THREAD_INFO;
+ *msg_id = kmp_i18n_str_UnknownTopology;
+ return false;
+ }
+
+ // Get the maximum number of procs that may be contained in a resource set.
+ int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0);
+ if (max_procs < 0) {
+ rs_free(sys_rset);
+ rs_free(srad);
+ CLEANUP_THREAD_INFO;
+ *msg_id = kmp_i18n_str_UnknownTopology;
+ return false;
+ }
+
+ int cur_rad = 0;
+ int num_set = 0;
+ for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS;
+ ++srad_idx) {
+ // Check if the SRAD is available in the RSET.
+ if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0)
+ continue;
+
+ for (int cpu = 0; cpu < max_procs; cpu++) {
+ // Set the info for the cpu if it is in the SRAD.
+ if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) {
+ threadInfo[cpu][osIdIndex] = cpu;
+ threadInfo[cpu][pkgIdIndex] = cur_rad;
+ threadInfo[cpu][coreIdIndex] = cpu / smt_threads;
+ ++num_set;
+ if (num_set >= num_avail) {
+ // Done if all available CPUs have been set.
+ break;
+ }
+ }
+ }
+ ++cur_rad;
+ }
+ rs_free(sys_rset);
+ rs_free(srad);
+
+ // The topology is already sorted.
+
+#else // !KMP_OS_AIX
unsigned num_avail = 0;
*line = 0;
#if KMP_ARCH_S390X
@@ -3250,6 +3349,8 @@ static bool __kmp_affinity_create_cpuinfo_map(int *line,
qsort(threadInfo, num_avail, sizeof(*threadInfo),
__kmp_affinity_cmp_ProcCpuInfo_phys_id);
+#endif // KMP_OS_AIX
+
// The table is now sorted by pkgId / coreId / threadId, but we really don't
// know the radix of any of the fields. pkgId's may be sparsely assigned among
// the chips on a system. Although coreId's are usually assigned
@@ -4445,7 +4546,7 @@ static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) {
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
-#if KMP_OS_LINUX
+#if KMP_OS_LINUX || KMP_OS_AIX
if (!success) {
int line = 0;
success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
@@ -4841,7 +4942,12 @@ void __kmp_affinity_uninitialize(void) {
}
if (__kmp_affin_origMask != NULL) {
if (KMP_AFFINITY_CAPABLE()) {
+#if KMP_OS_AIX
+ // Uninitialize by unbinding the thread.
+ bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
+#else
__kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
+#endif
}
KMP_CPU_FREE(__kmp_affin_origMask);
__kmp_affin_origMask = NULL;
@@ -5015,7 +5121,10 @@ void __kmp_affinity_bind_init_mask(int gtid) {
__kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
} else
#endif
+#ifndef KMP_OS_AIX
+ // Do not set the full mask as the init mask on AIX.
__kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
+#endif
}
void __kmp_affinity_bind_place(int gtid) {
@@ -5128,7 +5237,7 @@ int __kmp_aux_set_affinity(void **mask) {
int __kmp_aux_get_affinity(void **mask) {
int gtid;
int retval;
-#if KMP_OS_WINDOWS || KMP_DEBUG
+#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
kmp_info_t *th;
#endif
if (!KMP_AFFINITY_CAPABLE()) {
@@ -5136,7 +5245,7 @@ int __kmp_aux_get_affinity(void **mask) {
}
gtid = __kmp_entry_gtid();
-#if KMP_OS_WINDOWS || KMP_DEBUG
+#if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG
th = __kmp_threads[gtid];
#else
(void)gtid; // unused variable
@@ -5159,7 +5268,7 @@ int __kmp_aux_get_affinity(void **mask) {
}
}
-#if !KMP_OS_WINDOWS
+#if !KMP_OS_WINDOWS && !KMP_OS_AIX
retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
KA_TRACE(
@@ -5179,7 +5288,7 @@ int __kmp_aux_get_affinity(void **mask) {
KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
return 0;
-#endif /* KMP_OS_WINDOWS */
+#endif /* !KMP_OS_WINDOWS && !KMP_OS_AIX */
}
int __kmp_aux_get_affinity_max_proc() {
@@ -5561,7 +5670,8 @@ void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
}
}
-#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
+#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
+ KMP_OS_AIX
// We don't need this entry for Windows because
// there is GetProcessAffinityMask() api
//
@@ -5596,7 +5706,11 @@ extern "C"
"set full mask for thread %d\n",
gtid));
KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
+#if KMP_OS_AIX
+ return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY);
+#else
return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
+#endif
}
#endif
diff --git a/openmp/runtime/src/kmp_affinity.h b/openmp/runtime/src/kmp_affinity.h
index 1c7db2f59943..7efc090f8863 100644
--- a/openmp/runtime/src/kmp_affinity.h
+++ b/openmp/runtime/src/kmp_affinity.h
@@ -191,7 +191,8 @@ public:
};
#endif /* KMP_USE_HWLOC */
-#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
+#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
+ KMP_OS_AIX
#if KMP_OS_LINUX
/* On some of the older OS's that we build on, these constants aren't present
in <asm/unistd.h> #included from <sys.syscall.h>. They must be the same on
@@ -317,6 +318,10 @@ public:
#elif KMP_OS_NETBSD
#include <pthread.h>
#include <sched.h>
+#elif KMP_OS_AIX
+#include <sys/dr.h>
+#include <sys/rset.h>
+#define VMI_MAXRADS 64 // Maximum number of RADs allowed by AIX.
#endif
class KMPNativeAffinity : public KMPAffinity {
class Mask : public KMPAffinity::Mask {
@@ -404,6 +409,70 @@ class KMPNativeAffinity : public KMPAffinity {
++retval;
return retval;
}
+#if KMP_OS_AIX
+ // On AIX, we don't have a way to get CPU(s) a thread is bound to.
+ // This routine is only used to get the full mask.
+ int get_system_affinity(bool abort_on_error) override {
+ KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
+ "Illegal get affinity operation when not capable");
+
+ (void)abort_on_error;
+
+ // Set the mask with all CPUs that are available.
+ for (int i = 0; i < __kmp_xproc; ++i)
+ KMP_CPU_SET(i, this);
+ return 0;
+ }
+ int set_system_affinity(bool abort_on_error) const override {
+ KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
+
+ "Illegal set affinity operation when not capable");
+
+ int location;
+ int gtid = __kmp_entry_gtid();
+ int tid = thread_self();
+
+ // Unbind the thread if it was bound to any processors before so that
+ // we can bind the thread to CPUs specified by the mask not others.
+ int retval = bindprocessor(BINDTHREAD, tid, PROCESSOR_CLASS_ANY);
+
+ // On AIX, we can only bind to one instead of a set of CPUs with the
+ // bindprocessor() system call.
+ KMP_CPU_SET_ITERATE(location, this) {
+ if (KMP_CPU_ISSET(location, this)) {
+ retval = bindprocessor(BINDTHREAD, tid, location);
+ if (retval == -1 && errno == 1) {
+ rsid_t rsid;
+ rsethandle_t rsh;
+ // Put something in rsh to prevent compiler warning
+ // about uninitialized use
+ rsh = rs_alloc(RS_EMPTY);
+ rsid.at_pid = getpid();
+ if (RS_DEFAULT_RSET != ra_getrset(R_PROCESS, rsid, 0, rsh)) {
+ retval = ra_detachrset(R_PROCESS, rsid, 0);
+ retval = bindprocessor(BINDTHREAD, tid, location);
+ }
+ }
+ if (retval == 0) {
+ KA_TRACE(10, ("__kmp_set_system_affinity: Done binding "
+ "T#%d to cpu=%d.\n",
+ gtid, location));
+ continue;
+ }
+ int error = errno;
+ if (abort_on_error) {
+ __kmp_fatal(KMP_MSG(FunctionError, "bindprocessor()"),
+ KMP_ERR(error), __kmp_msg_null);
+ KA_TRACE(10, ("__kmp_set_system_affinity: Error binding "
+ "T#%d to cpu=%d, errno=%d.\n",
+ gtid, location, error));
+ return error;
+ }
+ }
+ }
+ return 0;
+ }
+#else // !KMP_OS_AIX
int get_system_affinity(bool abort_on_error) override {
KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
"Illegal get affinity operation when not capable");
@@ -446,6 +515,7 @@ class KMPNativeAffinity : public KMPAffinity {
}
return error;
}
+#endif // KMP_OS_AIX
};
void determine_capable(const char *env_var) override {
__kmp_affinity_determine_capable(env_var);
@@ -475,7 +545,7 @@ class KMPNativeAffinity : public KMPAffinity {
api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY \
- */
+ || KMP_OS_AIX */
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
diff --git a/openmp/runtime/src/kmp_collapse.cpp b/openmp/runtime/src/kmp_collapse.cpp
index 569d2c150831..f1bf04901dc7 100644
--- a/openmp/runtime/src/kmp_collapse.cpp
+++ b/openmp/runtime/src/kmp_collapse.cpp
@@ -1482,8 +1482,8 @@ void kmp_handle_upper_triangle_matrix(
original_bounds_nest[0].ub0_u64);
kmp_uint64 outer_lb0 = kmp_fix_iv(original_bounds_nest[0].loop_iv_type,
original_bounds_nest[0].lb0_u64);
- kmp_uint64 inner_ub0 = kmp_fix_iv(original_bounds_nest[1].loop_iv_type,
- original_bounds_nest[1].ub0_u64);
+ [[maybe_unused]] kmp_uint64 inner_ub0 = kmp_fix_iv(
+ original_bounds_nest[1].loop_iv_type, original_bounds_nest[1].ub0_u64);
// calculate the chunk's lower and upper bounds
// the total number of iterations in the loop is the sum of the arithmetic
// progression from the outer lower to outer upper bound (inclusive since the
@@ -1517,16 +1517,11 @@ void kmp_handle_upper_triangle_matrix(
kmp_uint64 iter_with_current = iter_before_current + iter_current;
// calculate the outer loop lower bound (lbo) which is the max outer iv value
// that gives the number of iterations that is equal or just below the total
- // number of iterations executed by the previous threads, for less_than
- // (1-based) inner loops (inner_ub0 == -1) it will be i.e.
- // lbo*(lbo-1)/2<=iter_before_current => lbo^2-lbo-2*iter_before_current<=0
- // for less_than_equal (0-based) inner loops (inner_ub == 0) it will be:
- // i.e. lbo*(lbo+1)/2<=iter_before_current =>
- // lbo^2+lbo-2*iter_before_current<=0 both cases can be handled similarily
- // using a parameter to control the equatio sign
+ // number of iterations executed by the previous threads:
+ // lbo*(lbo+1)/2<=iter_before_current =>
+ // lbo^2+lbo-2*iter_before_current<=0
kmp_uint64 lower_bound_outer =
(kmp_uint64)(sqrt_newton_approx(1 + 8 * iter_before_current) + 1) / 2 - 1;
- ;
// calculate the inner loop lower bound which is the remaining number of
// iterations required to hit the total number of iterations executed by the
// previous threads giving the starting point of this thread
diff --git a/openmp/runtime/src/kmp_csupport.cpp b/openmp/runtime/src/kmp_csupport.cpp
index 878e78b5c7ad..0268f692ff7f 100644
--- a/openmp/runtime/src/kmp_csupport.cpp
+++ b/openmp/runtime/src/kmp_csupport.cpp
@@ -18,6 +18,7 @@
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
+#include "kmp_utils.h"
#include "ompt-specific.h"
#define MAX_MESSAGE 512
@@ -4233,7 +4234,7 @@ void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
up = pr_buf->th_doacross_info[3];
st = pr_buf->th_doacross_info[4];
#if OMPT_SUPPORT && OMPT_OPTIONAL
- ompt_dependence_t deps[num_dims];
+ SimpleVLA<ompt_dependence_t> deps(num_dims);
#endif
if (st == 1) { // most common case
if (vec[0] < lo || vec[0] > up) {
@@ -4345,7 +4346,7 @@ void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
lo = pr_buf->th_doacross_info[2];
st = pr_buf->th_doacross_info[4];
#if OMPT_SUPPORT && OMPT_OPTIONAL
- ompt_dependence_t deps[num_dims];
+ SimpleVLA<ompt_dependence_t> deps(num_dims);
#endif
if (st == 1) { // most common case
iter_number = vec[0] - lo;
diff --git a/openmp/runtime/src/kmp_os.h b/openmp/runtime/src/kmp_os.h
index 63da9e5fa15d..a628070c882a 100644
--- a/openmp/runtime/src/kmp_os.h
+++ b/openmp/runtime/src/kmp_os.h
@@ -76,7 +76,7 @@
#endif
#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
- KMP_OS_DRAGONFLY) && \
+ KMP_OS_DRAGONFLY || KMP_OS_AIX) && \
!KMP_OS_WASI
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
diff --git a/openmp/runtime/src/kmp_runtime.cpp b/openmp/runtime/src/kmp_runtime.cpp
index a60bdb968371..c715a57d23e6 100644
--- a/openmp/runtime/src/kmp_runtime.cpp
+++ b/openmp/runtime/src/kmp_runtime.cpp
@@ -4431,8 +4431,10 @@ kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
#endif
KMP_MB();
- /* first, try to get one from the thread pool */
- if (__kmp_thread_pool) {
+ /* first, try to get one from the thread pool unless allocating thread is
+ * the main hidden helper thread. The hidden helper team should always
+ * allocate new OS threads. */
+ if (__kmp_thread_pool && !KMP_HIDDEN_HELPER_TEAM(team)) {
new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
__kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
if (new_thr == __kmp_thread_pool_insert_pt) {
@@ -4497,7 +4499,7 @@ kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
}
/* no, well fork a new one */
- KMP_ASSERT(__kmp_nth == __kmp_all_nth);
+ KMP_ASSERT(KMP_HIDDEN_HELPER_TEAM(team) || __kmp_nth == __kmp_all_nth);
KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);
#if KMP_USE_MONITOR
@@ -6752,11 +6754,11 @@ void __kmp_register_library_startup(void) {
int fd1 = -1;
shm_name = __kmp_str_format("/%s", name);
int shm_preexist = 0;
- fd1 = shm_open(shm_name, O_CREAT | O_EXCL | O_RDWR, 0666);
+ fd1 = shm_open(shm_name, O_CREAT | O_EXCL | O_RDWR, 0600);
if ((fd1 == -1) && (errno == EEXIST)) {
// file didn't open because it already exists.
// try opening existing file
- fd1 = shm_open(shm_name, O_RDWR, 0666);
+ fd1 = shm_open(shm_name, O_RDWR, 0600);
if (fd1 == -1) { // file didn't open
KMP_WARNING(FunctionError, "Can't open SHM");
__kmp_shm_available = false;
@@ -6800,11 +6802,11 @@ void __kmp_register_library_startup(void) {
int fd1 = -1;
temp_reg_status_file_name = __kmp_str_format("/tmp/%s", name);
int tmp_preexist = 0;
- fd1 = open(temp_reg_status_file_name, O_CREAT | O_EXCL | O_RDWR, 0666);
+ fd1 = open(temp_reg_status_file_name, O_CREAT | O_EXCL | O_RDWR, 0600);
if ((fd1 == -1) && (errno == EEXIST)) {
// file didn't open because it already exists.
// try opening existing file
- fd1 = open(temp_reg_status_file_name, O_RDWR, 0666);
+ fd1 = open(temp_reg_status_file_name, O_RDWR, 0600);
+ if (fd1 == -1) { // file didn't open
KMP_WARNING(FunctionError, "Can't open TEMP");
__kmp_tmp_available = false;
@@ -6944,7 +6946,7 @@ void __kmp_unregister_library(void) {
int fd1;
if (__kmp_shm_available) {
shm_name = __kmp_str_format("/%s", name);
- fd1 = shm_open(shm_name, O_RDONLY, 0666);
+ fd1 = shm_open(shm_name, O_RDONLY, 0600);
if (fd1 != -1) { // File opened successfully
char *data1 = (char *)mmap(0, SHM_SIZE, PROT_READ, MAP_SHARED, fd1, 0);
if (data1 != MAP_FAILED) {
diff --git a/openmp/runtime/src/kmp_taskdeps.cpp b/openmp/runtime/src/kmp_taskdeps.cpp
index f7529481393f..e575ad8b08a5 100644
--- a/openmp/runtime/src/kmp_taskdeps.cpp
+++ b/openmp/runtime/src/kmp_taskdeps.cpp
@@ -1030,6 +1030,12 @@ void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid,
__kmp_task_stealing_constraint);
}
+ // Wait until the last __kmp_release_deps is finished before we free the
+ // current stack frame holding the "node" variable; once its nrefs count
+ // reaches 1, we're sure nobody else can try to reference it again.
+ while (node.dn.nrefs > 1)
+ KMP_YIELD(TRUE);
+
#if OMPT_SUPPORT
__ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
diff --git a/openmp/runtime/src/z_Linux_util.cpp b/openmp/runtime/src/z_Linux_util.cpp
index d751a417331c..29db9d008a49 100644
--- a/openmp/runtime/src/z_Linux_util.cpp
+++ b/openmp/runtime/src/z_Linux_util.cpp
@@ -125,7 +125,8 @@ static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
}
#endif
-#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY) && \
+#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
+ KMP_OS_AIX) && \
KMP_AFFINITY_SUPPORTED)
/* Affinity support */
@@ -142,6 +143,29 @@ void __kmp_affinity_bind_thread(int which) {
KMP_CPU_FREE_FROM_STACK(mask);
}
+#if KMP_OS_AIX
+void __kmp_affinity_determine_capable(const char *env_var) {
+ // All versions of AIX support bindprocessor().
+
+ size_t mask_size = __kmp_xproc / CHAR_BIT;
+ // Round up to byte boundary.
+ if (__kmp_xproc % CHAR_BIT)
+ ++mask_size;
+
+ // Round up to the mask_size_type boundary.
+ if (mask_size % sizeof(__kmp_affin_mask_size))
+ mask_size += sizeof(__kmp_affin_mask_size) -
+ mask_size % sizeof(__kmp_affin_mask_size);
+ KMP_AFFINITY_ENABLE(mask_size);
+ KA_TRACE(10,
+ ("__kmp_affinity_determine_capable: "
+ "AIX OS affinity interface bindprocessor functional (mask size = "
+ "%" KMP_SIZE_T_SPEC ").\n",
+ __kmp_affin_mask_size));
+}
+
+#else // !KMP_OS_AIX
+
/* Determine if we can access affinity functionality on this version of
* Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
* __kmp_affin_mask_size to the appropriate value (0 means not capable). */
@@ -271,8 +295,9 @@ void __kmp_affinity_determine_capable(const char *env_var) {
KMP_WARNING(AffCantGetMaskSize, env_var);
}
}
-
-#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
+#endif // KMP_OS_AIX
+#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
+ KMP_OS_DRAGONFLY || KMP_OS_AIX) && KMP_AFFINITY_SUPPORTED
#if KMP_USE_FUTEX
@@ -501,7 +526,7 @@ static void *__kmp_launch_worker(void *thr) {
#endif /* KMP_BLOCK_SIGNALS */
void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
- KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
+ KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
void *volatile padding = 0;
#endif
int gtid;
@@ -550,7 +575,7 @@ static void *__kmp_launch_worker(void *thr) {
#endif /* KMP_BLOCK_SIGNALS */
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
- KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
+ KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
if (__kmp_stkoffset > 0 && gtid > 0) {
padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
(void)padding;
@@ -1268,7 +1293,8 @@ static void __kmp_atfork_child(void) {
++__kmp_fork_count;
#if KMP_AFFINITY_SUPPORTED
-#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY
+#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \
+ KMP_OS_AIX
// reset the affinity in the child to the initial thread
// affinity in the parent
kmp_set_thread_affinity_mask_initial();
@@ -2325,6 +2351,7 @@ int __kmp_is_address_mapped(void *addr) {
found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
#elif KMP_OS_AIX
+ (void)rc;
// FIXME(AIX): Implement this
found = 1;
diff --git a/openmp/runtime/test/lit.cfg b/openmp/runtime/test/lit.cfg
index a3456063c10f..e27e52bb4289 100644
--- a/openmp/runtime/test/lit.cfg
+++ b/openmp/runtime/test/lit.cfg
@@ -129,7 +129,7 @@ if config.operating_system == 'NetBSD':
if config.operating_system == 'Darwin':
config.available_features.add("darwin")
-if config.operating_system in ['Windows', 'Linux', 'FreeBSD', 'NetBSD', 'DragonFly']:
+if config.operating_system in ['Windows', 'Linux', 'FreeBSD', 'NetBSD', 'DragonFly', 'AIX']:
config.available_features.add('affinity')
if config.operating_system in ['Linux']:
diff --git a/openmp/runtime/test/tasking/hidden_helper_task/issue-87117.c b/openmp/runtime/test/tasking/hidden_helper_task/issue-87117.c
new file mode 100644
index 000000000000..23080982f49e
--- /dev/null
+++ b/openmp/runtime/test/tasking/hidden_helper_task/issue-87117.c
@@ -0,0 +1,36 @@
+// RUN: %libomp-compile
+// RUN: env KMP_HOT_TEAMS_MODE=0 KMP_HOT_TEAMS_MAX_LEVEL=1 %libomp-run
+//
+// Force the defaults of:
+// KMP_HOT_TEAMS_MODE=0 means free extra threads after parallel
+// involving non-hot team
+// KMP_HOT_TEAMS_MAX_LEVEL=1 means only the initial outer team
+// is a hot team.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <omp.h>
+
+int main() {
+ int a;
+ omp_set_max_active_levels(2);
+// This nested parallel creates extra threads on the thread pool
+#pragma omp parallel num_threads(2)
+ {
+#pragma omp parallel num_threads(2)
+ {
+#pragma omp atomic
+ a++;
+ }
+ }
+
+// Causes assert if hidden helper thread tries to allocate from thread pool
+// instead of creating new OS threads
+#pragma omp parallel num_threads(1)
+ {
+#pragma omp target nowait
+ { a++; }
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/openmp/runtime/test/worksharing/for/collapse_test.inc b/openmp/runtime/test/worksharing/for/collapse_test.inc
new file mode 100644
index 000000000000..de0e7e4e57f3
--- /dev/null
+++ b/openmp/runtime/test/worksharing/for/collapse_test.inc
@@ -0,0 +1,201 @@
+#include <omp.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <memory.h>
+
+#define LOOP_IV_TYPE0 LOOP_TYPES
+#define LOOP_TYPE0 LOOP_TYPES
+#define LOOP_STYPE0 LOOP_TYPES
+
+#define LOOP_IV_TYPE1 LOOP_TYPES
+#define LOOP_TYPE1 LOOP_TYPES
+#define LOOP_STYPE1 LOOP_TYPES
+
+#define LOOP_IV_TYPE2 LOOP_TYPES
+#define LOOP_TYPE2 LOOP_TYPES
+#define LOOP_STYPE2 LOOP_TYPES
+
+#define MAX_THREADS 256
+
+#if defined VERBOSE
+#define PRINTF printf
+#else
+#define PRINTF
+#endif
+
+LOOP_TYPE0 iLB, iUB;
+LOOP_TYPE1 jA0, jB0;
+LOOP_TYPE2 kA0, kB0;
+
+LOOP_STYPE0 iStep;
+LOOP_STYPE1 jA1, jB1, jStep;
+LOOP_STYPE2 kA1, kB1, kStep;
+
+// We can check <=, <, >=, > (!= has different pattern)
+// Additional definition of LOOP_LEi, LOOP_LTi, etc. is helpful to build calls
+// of the test from main
+
+#if defined LOOP_LE0
+#define COMPARE0 <=
+#elif defined LOOP_LT0
+#define COMPARE0 <
+#elif defined LOOP_GE0
+#define COMPARE0 >=
+#elif defined LOOP_GT0
+#define COMPARE0 >
+#endif
+
+#if defined LOOP_LE1
+#define COMPARE1 <=
+#elif defined LOOP_LT1
+#define COMPARE1 <
+#elif defined LOOP_GE1
+#define COMPARE1 >=
+#elif defined LOOP_GT1
+#define COMPARE1 >
+#endif
+
+#if defined LOOP_LE2
+#define COMPARE2 <=
+#elif defined LOOP_LT2
+#define COMPARE2 <
+#elif defined LOOP_GE2
+#define COMPARE2 >=
+#elif defined LOOP_GT2
+#define COMPARE2 >
+#endif
+
+typedef struct {
+ LOOP_IV_TYPE0 i;
+ LOOP_IV_TYPE1 j;
+ LOOP_IV_TYPE2 k;
+} spaceType;
+
+spaceType *AllocSpace(unsigned size) {
+
+ spaceType *p = (spaceType *)malloc(size * sizeof(spaceType));
+ memset(p, 0, size * sizeof(spaceType));
+ return p;
+}
+
+void FreeSpace(spaceType *space) { free(space); }
+
+// record an iteration
+void Set(spaceType *space, unsigned count, unsigned trueCount, LOOP_IV_TYPE0 i,
+ LOOP_IV_TYPE1 j, LOOP_IV_TYPE0 k) {
+ if (count > trueCount) {
+ // number of iterations exceeded
+ // will be reported with checks
+ return;
+ }
+ space[count - 1].i = i;
+ space[count - 1].j = j;
+ space[count - 1].k = k;
+}
+int test() {
+ int pass = 1;
+ LOOP_IV_TYPE0 i;
+ LOOP_IV_TYPE1 j;
+ LOOP_IV_TYPE2 k;
+
+ spaceType *openmpSpace;
+ spaceType *scalarSpace;
+
+ unsigned trueCount = 0;
+ unsigned openmpCount = 0;
+ unsigned scalarCount = 0;
+ unsigned uselessThreadsOpenMP = 0;
+ unsigned usefulThreadsOpenMP = 0;
+ unsigned chunkSizesOpenmp[MAX_THREADS] = {0};
+
+ unsigned num_threads = omp_get_max_threads();
+ if (num_threads > MAX_THREADS)
+ num_threads = MAX_THREADS;
+ omp_set_num_threads(num_threads);
+
+ // count iterations and allocate space
+ LOOP { ++trueCount; }
+
+ openmpSpace = AllocSpace(trueCount);
+ scalarSpace = AllocSpace(trueCount);
+
+ // fill the scalar (compare) space
+ LOOP {
+ ++scalarCount;
+ Set(scalarSpace, scalarCount, trueCount, i, j, k);
+ }
+
+ // test run body:
+ // perform and record OpenMP iterations and thread use
+#pragma omp parallel num_threads(num_threads)
+ {
+#pragma omp for collapse(3) private(i, j, k)
+ LOOP {
+ unsigned count;
+ unsigned gtid = omp_get_thread_num();
+#pragma omp atomic update
+ ++chunkSizesOpenmp[gtid];
+#pragma omp atomic capture
+ count = ++openmpCount;
+ Set(openmpSpace, count, trueCount, i, j, k);
+ }
+ }
+
+ // check for the right number of iterations processed
+ // (only need to check for less, greater is checked when recording)
+ if (openmpCount < trueCount) {
+ PRINTF("OpenMP FAILURE: Openmp processed fewer iterations: %d vs %d\n",
+ openmpCount, trueCount);
+ pass = 0;
+ } else if (openmpCount > trueCount) {
+ PRINTF("OpenMP FAILURE: Openmp processed more iterations: %d vs %d\n",
+ openmpCount, trueCount);
+ pass = 0;
+ }
+
+ // check OpenMP for iteration correctness against scalar
+ for (unsigned i = 0; i < trueCount; i++) {
+ unsigned j;
+ for (j = 0; j < openmpCount; j++) {
+ if ((scalarSpace[i].i == openmpSpace[j].i) &&
+ (scalarSpace[i].j == openmpSpace[j].j) &&
+ (scalarSpace[i].k == openmpSpace[j].k)) {
+ break;
+ }
+ }
+ if (j == openmpCount) {
+ PRINTF("OpenMP FAILURE: (%d %d %d) not processed\n", scalarSpace[i].i,
+ scalarSpace[i].j, scalarSpace[i].k);
+ pass = 0;
+ }
+ }
+
+ // check for efficient thread use
+ for (unsigned i = 0; i < num_threads; ++i) {
+ if (chunkSizesOpenmp[i] == 0) {
+ ++uselessThreadsOpenMP;
+ }
+ }
+
+ // a check to see if at least more than one thread was used (weakish)
+ if ((uselessThreadsOpenMP == num_threads - 1) && (trueCount > 1)) {
+ PRINTF("OpenMP FAILURE: threads are not used\n");
+ pass = 0;
+ }
+
+#if 0
+ // a check to see if the load was spread more or less evenly so that
+ // when there was more work than threads each one got at least something
+ // (stronger, but may currently fail for a general collapse case)
+ if ((trueCount >= num_threads) && (uselessThreadsOpenMP > 0)) {
+ PRINTF("OpenMP FAILURE: %d threads not used with %d iterations\n",
+ uselessThreadsOpenMP, openmpCount);
+ pass = 0;
+ }
+#endif
+
+ // clean up space
+ FreeSpace(openmpSpace);
+ FreeSpace(scalarSpace);
+ return pass;
+}
diff --git a/openmp/runtime/test/worksharing/for/omp_collapse_many_GELTGT_int.c b/openmp/runtime/test/worksharing/for/omp_collapse_many_GELTGT_int.c
new file mode 100644
index 000000000000..77b2d6918d87
--- /dev/null
+++ b/openmp/runtime/test/worksharing/for/omp_collapse_many_GELTGT_int.c
@@ -0,0 +1,65 @@
+// RUN: %libomp-compile-and-run
+
+// Non-rectangular loop collapsing.
+//
+// Nested loops conform to OpenMP 5.2 standard,
+// inner loops bounds may depend on outer loops induction variables.
+
+#define LOOP_TYPES int
+#define COMPARE0 >=
+#define COMPARE1 <
+#define COMPARE2 >
+#define LOOP \
+ for (i = iLB; i COMPARE0 iUB; i += iStep) \
+ for (j = jA0; j COMPARE1 jB0; j += jStep) \
+ for (k = kA0; k COMPARE2 kB0; k += kStep)
+#include "collapse_test.inc"
+
+int main() {
+ int fail;
+
+ iLB = 3;
+ iUB = -2;
+ jA0 = -3;
+ jA1 = 0;
+ jB0 = -6;
+ jB1 = 0;
+ kA0 = -2;
+ kA1 = 0;
+ kB0 = -4;
+ kB1 = 0;
+ iStep = -1;
+ jStep = -1;
+ kStep = -4;
+ PRINTF("\nOne off iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; jB1=%d; kA0=%d; "
+ "kA1=%d; kB0=%d; kB1=%d; iStep=%d; jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1, iStep, jStep, kStep);
+ fail = (test() == 0);
+
+ if (!fail) {
+ for (iStep = -3; iStep >= -6; iStep -= 2) {
+ for (jA0 = -6; jA0 <= 6; jA0 += 3) {
+ for (jB0 = -3; jB0 <= 10; jB0 += 3) {
+ for (jStep = 1; jStep <= 10; jStep += 2) {
+ for (kA0 = -2; kA0 <= 4; ++kA0) {
+ for (kB0 = -4; kB0 <= 2; ++kB0) {
+ for (kStep = -2; kStep >= -10; kStep -= 4) {
+ {
+ PRINTF("\nTrying iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; "
+ "jB1=%d; kA0=%d; kA1=%d; kB0=%d; kB1=%d; iStep=%d; "
+ "jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1,
+ iStep, jStep, kStep);
+ fail = fail || (test() == 0);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return fail;
+}
diff --git a/openmp/runtime/test/worksharing/for/omp_collapse_many_GTGEGT_int.c b/openmp/runtime/test/worksharing/for/omp_collapse_many_GTGEGT_int.c
new file mode 100644
index 000000000000..985211172e62
--- /dev/null
+++ b/openmp/runtime/test/worksharing/for/omp_collapse_many_GTGEGT_int.c
@@ -0,0 +1,71 @@
+// RUN: %libomp-compile-and-run
+
+// Non-rectangular loop collapsing.
+//
+// Nested loops conform to OpenMP 5.2 standard,
+// inner loops bounds may depend on outer loops induction variables.
+
+#define LOOP_TYPES int
+#define COMPARE0 >
+#define COMPARE1 >=
+#define COMPARE2 >
+
+#define DLOOP_GT0
+#define DLOOP_GE1
+#define DLOOP_GT2
+
+#define LOOP \
+ for (i = iLB; i COMPARE0 iUB; i += iStep) \
+ for (j = jA0; j COMPARE1 jB0; j += jStep) \
+ for (k = kA0; k COMPARE2 kB0; k += kStep)
+#include "collapse_test.inc"
+
+int main() {
+ int fail;
+
+ iLB = 3;
+ iUB = -2;
+ jA0 = -3;
+ jA1 = 0;
+ jB0 = -6;
+ jB1 = 0;
+ kA0 = -2;
+ kA1 = 0;
+ kB0 = -4;
+ kB1 = 0;
+ iStep = -1;
+ jStep = -1;
+ kStep = -4;
+ PRINTF("\nOne off iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; jB1=%d; kA0=%d; "
+ "kA1=%d; kB0=%d; kB1=%d; iStep=%d; jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1, iStep, jStep, kStep);
+ fail = (test() == 0);
+
+ if (!fail) {
+
+ for (iStep = -3; iStep >= -6; iStep -= 2) {
+ for (jA0 = -3; jA0 <= 10; jA0 += 3) {
+ for (jB0 = -6; jB0 <= 6; jB0 += 3) {
+ for (jStep = -1; jStep >= -10; jStep -= 2) {
+ for (kA0 = -2; kA0 <= 4; ++kA0) {
+ for (kB0 = -4; kB0 <= 2; ++kB0) {
+ for (kStep = -2; kStep >= -10; kStep -= 4) {
+ {
+ PRINTF("\nTrying iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; "
+ "jB1=%d; kA0=%d; kA1=%d; kB0=%d; kB1=%d; iStep=%d; "
+ "jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1,
+ iStep, jStep, kStep);
+ fail = fail || (test() == 0);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return fail;
+}
diff --git a/openmp/runtime/test/worksharing/for/omp_collapse_many_LTLEGE_int.c b/openmp/runtime/test/worksharing/for/omp_collapse_many_LTLEGE_int.c
new file mode 100644
index 000000000000..47e3b42226c8
--- /dev/null
+++ b/openmp/runtime/test/worksharing/for/omp_collapse_many_LTLEGE_int.c
@@ -0,0 +1,66 @@
+// RUN: %libomp-compile-and-run
+
+// Non-rectangular loop collapsing.
+//
+// Nested loops conform to OpenMP 5.2 standard,
+// inner loops bounds may depend on outer loops induction variables.
+
+#define LOOP_TYPES int
+#define COMPARE0 <
+#define COMPARE1 <=
+#define COMPARE2 >=
+#define LOOP \
+ for (i = iLB; i COMPARE0 iUB; i += iStep) \
+ for (j = jA0; j COMPARE1 jB0; j += jStep) \
+ for (k = kA0; k COMPARE2 kB0; k += kStep)
+#include "collapse_test.inc"
+
+int main() {
+ int fail;
+
+ iLB = -2;
+ iUB = 3;
+ jA0 = -3;
+ jA1 = 0;
+ jB0 = -6;
+ jB1 = 0;
+ kA0 = -2;
+ kA1 = 0;
+ kB0 = -4;
+ kB1 = 0;
+ iStep = -1;
+ jStep = -1;
+ kStep = -4;
+ PRINTF("\nOne off iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; jB1=%d; kA0=%d; "
+ "kA1=%d; kB0=%d; kB1=%d; iStep=%d; jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1, iStep, jStep, kStep);
+ fail = (test() == 0);
+
+ if (!fail) {
+
+ for (iStep = 2; iStep <= 6; iStep += 2) {
+ for (jA0 = -6; jA0 <= 6; jA0 += 3) {
+ for (jB0 = -3; jB0 <= 10; jB0 += 3) {
+ for (jStep = 1; jStep <= 10; jStep += 2) {
+ for (kA0 = -2; kA0 <= 4; ++kA0) {
+ for (kB0 = -4; kB0 <= 2; ++kB0) {
+ for (kStep = -2; kStep >= -10; kStep -= 4) {
+ {
+ PRINTF("\nTrying iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; "
+ "jB1=%d; kA0=%d; kA1=%d; kB0=%d; kB1=%d; iStep=%d; "
+ "jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1,
+ iStep, jStep, kStep);
+ fail = fail || (test() == 0);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return fail;
+}
diff --git a/openmp/runtime/test/worksharing/for/omp_collapse_many_int.c b/openmp/runtime/test/worksharing/for/omp_collapse_many_int.c
new file mode 100644
index 000000000000..4455602df8a2
--- /dev/null
+++ b/openmp/runtime/test/worksharing/for/omp_collapse_many_int.c
@@ -0,0 +1,73 @@
+// RUN: %libomp-compile-and-run
+// XFAIL: true
+
+// Non-rectangular loop collapsing.
+//
+// Nested loops conform to OpenMP 5.2 standard,
+// inner loops bounds may depend on outer loops induction variables.
+
+#define LOOP_TYPES int
+#define LOOP \
+ for (i = iLB; i <= iUB; i += iStep) \
+ for (j = i * jA1 + jA0; j <= i * jB1 + jB0; j += jStep) \
+ for (k = j * kA1 + kA0; k <= j * kB1 + kB0; k += kStep)
+#include "collapse_test.inc"
+
+int main() {
+ int fail = 0;
+
+ iLB = -2;
+ iUB = 3;
+ jA0 = -7;
+ jA1 = -1;
+ jB0 = 13;
+ jB1 = 3;
+ kA0 = -20;
+ kA1 = -2;
+ kB0 = 111;
+ kB1 = -1;
+ iStep = 5;
+ jStep = 9;
+ kStep = 10;
+ PRINTF("\nOne off iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; jB1=%d; kA0=%d; "
+ "kA1=%d; kB0=%d; kB1=%d; iStep=%d; jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1, iStep, jStep, kStep);
+ fail = fail || (test() == 0);
+
+ if (!fail) {
+
+ // NOTE: if a loop on some level won't execute for all iterations of an
+ // outer loop, it still should work. Runtime doesn't require lower bounds to
+ // be <= upper bounds for all possible i, j, k.
+
+ iLB = -2;
+ iUB = 3;
+ jA0 = -7;
+ jB0 = 5;
+ kA0 = -13;
+ kB0 = 37;
+
+ for (kA1 = -2; kA1 <= 2; ++kA1) { // <=
+ for (kB1 = -2; kB1 <= 2; ++kB1) {
+ for (jA1 = -3; jA1 <= 3; ++jA1) {
+ for (jB1 = -3; jB1 <= 3; ++jB1) {
+ for (iStep = 1; iStep <= 3; ++iStep) {
+ for (jStep = 2; jStep <= 6; jStep += 2) {
+ for (kStep = 2; kStep <= 8; kStep += 3) {
+ PRINTF("\nTrying iLB=%d; iUB=%d; jA0=%d; jA1=%d; jB0=%d; "
+ "jB1=%d; kA0=%d; kA1=%d; kB0=%d; kB1=%d; iStep=%d; "
+ "jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jA1, jB0, jB1, kA0, kA1, kB0, kB1,
+ iStep, jStep, kStep);
+ fail = fail || (test() == 0);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return fail;
+}
diff --git a/openmp/runtime/test/worksharing/for/omp_collapse_one_int.c b/openmp/runtime/test/worksharing/for/omp_collapse_one_int.c
new file mode 100644
index 000000000000..437d4bff31eb
--- /dev/null
+++ b/openmp/runtime/test/worksharing/for/omp_collapse_one_int.c
@@ -0,0 +1,32 @@
+// RUN: %libomp-compile-and-run
+
+// Non-rectangular loop collapsing.
+//
+// Nested loops conform to OpenMP 5.2 standard,
+// inner loops bounds may depend on outer loops induction variables.
+
+#define LOOP_TYPES int
+#define LOOP \
+ for (i = iLB; i <= iUB; i += iStep) \
+ for (j = i + jA0; j <= i + jB0; j += jStep) \
+ for (k = j + kA0; k <= j + kB0; k += kStep)
+
+#include "collapse_test.inc"
+
+int main() {
+ int fail;
+ iLB = -2;
+ iUB = 3;
+ jA0 = -7;
+ jB0 = 13;
+ kA0 = -20;
+ kB0 = 111;
+ iStep = 5;
+ jStep = 9;
+ kStep = 10;
+ PRINTF("\nOne off iLB=%d; iUB=%d; jA0=%d; jB0=%d; kA0=%d; kB0=%d; iStep=%d; "
+ "jStep=%d; kStep=%d;\n",
+ iLB, iUB, jA0, jB0, kA0, kB0, iStep, jStep, kStep);
+ fail = (test() == 0);
+ return fail;
+}
diff --git a/polly/include/polly/Support/PollyDebug.h b/polly/include/polly/Support/PollyDebug.h
new file mode 100644
index 000000000000..5cea68efc243
--- /dev/null
+++ b/polly/include/polly/Support/PollyDebug.h
@@ -0,0 +1,38 @@
+//===-PollyDebug.h -Provide support for debugging Polly passes-*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Functions to aid printing Debug Info of all polly passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POLLY_DEBUG_H
+#define POLLY_DEBUG_H
+
+#include "llvm/Support/Debug.h"
+
+namespace polly {
+using namespace llvm;
+bool getPollyDebugFlag();
+
+#ifndef NDEBUG
+#define POLLY_DEBUG(X) \
+ do { \
+ if (polly::getPollyDebugFlag()) { \
+ X; \
+ } else { \
+ DEBUG_WITH_TYPE(DEBUG_TYPE, X); \
+ } \
+ } while (0)
+#else
+#define POLLY_DEBUG(X) \
+ do { \
+ } while (false)
+#endif
+
+} // namespace polly
+#endif
diff --git a/polly/lib/Analysis/DependenceInfo.cpp b/polly/lib/Analysis/DependenceInfo.cpp
index 69257c603877..9ee004fbac9e 100644
--- a/polly/lib/Analysis/DependenceInfo.cpp
+++ b/polly/lib/Analysis/DependenceInfo.cpp
@@ -39,6 +39,7 @@
using namespace polly;
using namespace llvm;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-dependence"
static cl::opt<int> OptComputeOut(
@@ -300,10 +301,10 @@ static __isl_give isl_union_flow *buildFlow(__isl_keep isl_union_map *Snk,
AI = isl_union_access_info_set_kill(AI, isl_union_map_copy(Kill));
AI = isl_union_access_info_set_schedule(AI, isl_schedule_copy(Schedule));
auto Flow = isl_union_access_info_compute_flow(AI);
- LLVM_DEBUG(if (!Flow) dbgs()
- << "last error: "
- << isl_ctx_last_error(isl_schedule_get_ctx(Schedule))
- << '\n';);
+ POLLY_DEBUG(if (!Flow) dbgs()
+ << "last error: "
+ << isl_ctx_last_error(isl_schedule_get_ctx(Schedule))
+ << '\n';);
return Flow;
}
@@ -312,18 +313,18 @@ void Dependences::calculateDependences(Scop &S) {
isl_schedule *Schedule;
isl_union_set *TaggedStmtDomain;
- LLVM_DEBUG(dbgs() << "Scop: \n" << S << "\n");
+ POLLY_DEBUG(dbgs() << "Scop: \n" << S << "\n");
collectInfo(S, Read, MustWrite, MayWrite, ReductionTagMap, TaggedStmtDomain,
Level);
bool HasReductions = !isl_union_map_is_empty(ReductionTagMap);
- LLVM_DEBUG(dbgs() << "Read: " << Read << '\n';
- dbgs() << "MustWrite: " << MustWrite << '\n';
- dbgs() << "MayWrite: " << MayWrite << '\n';
- dbgs() << "ReductionTagMap: " << ReductionTagMap << '\n';
- dbgs() << "TaggedStmtDomain: " << TaggedStmtDomain << '\n';);
+ POLLY_DEBUG(dbgs() << "Read: " << Read << '\n';
+ dbgs() << "MustWrite: " << MustWrite << '\n';
+ dbgs() << "MayWrite: " << MayWrite << '\n';
+ dbgs() << "ReductionTagMap: " << ReductionTagMap << '\n';
+ dbgs() << "TaggedStmtDomain: " << TaggedStmtDomain << '\n';);
Schedule = S.getScheduleTree().release();
@@ -360,10 +361,10 @@ void Dependences::calculateDependences(Scop &S) {
Schedule = isl_schedule_pullback_union_pw_multi_aff(Schedule, Tags);
}
- LLVM_DEBUG(dbgs() << "Read: " << Read << "\n";
- dbgs() << "MustWrite: " << MustWrite << "\n";
- dbgs() << "MayWrite: " << MayWrite << "\n";
- dbgs() << "Schedule: " << Schedule << "\n");
+ POLLY_DEBUG(dbgs() << "Read: " << Read << "\n";
+ dbgs() << "MustWrite: " << MustWrite << "\n";
+ dbgs() << "MayWrite: " << MayWrite << "\n";
+ dbgs() << "Schedule: " << Schedule << "\n");
isl_union_map *StrictWAW = nullptr;
{
@@ -504,7 +505,7 @@ void Dependences::calculateDependences(Scop &S) {
isl_union_map_copy(WAW), isl_union_set_copy(TaggedStmtDomain));
STMT_WAR =
isl_union_map_intersect_domain(isl_union_map_copy(WAR), TaggedStmtDomain);
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Wrapped Dependences:\n";
dump();
dbgs() << "\n";
@@ -553,7 +554,7 @@ void Dependences::calculateDependences(Scop &S) {
} else
TC_RED = isl_union_map_empty(isl_union_map_get_space(RED));
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Final Wrapped Dependences:\n";
dump();
dbgs() << "\n";
@@ -603,7 +604,7 @@ void Dependences::calculateDependences(Scop &S) {
RED = isl_union_map_zip(RED);
TC_RED = isl_union_map_zip(TC_RED);
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Zipped Dependences:\n";
dump();
dbgs() << "\n";
@@ -615,7 +616,7 @@ void Dependences::calculateDependences(Scop &S) {
RED = isl_union_set_unwrap(isl_union_map_domain(RED));
TC_RED = isl_union_set_unwrap(isl_union_map_domain(TC_RED));
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Unwrapped Dependences:\n";
dump();
dbgs() << "\n";
@@ -631,7 +632,7 @@ void Dependences::calculateDependences(Scop &S) {
RED = isl_union_map_coalesce(RED);
TC_RED = isl_union_map_coalesce(TC_RED);
- LLVM_DEBUG(dump());
+ POLLY_DEBUG(dump());
}
bool Dependences::isValidSchedule(Scop &S, isl::schedule NewSched) const {
diff --git a/polly/lib/Analysis/PolyhedralInfo.cpp b/polly/lib/Analysis/PolyhedralInfo.cpp
index 5c77be0a9a1f..8d8e81a9049d 100644
--- a/polly/lib/Analysis/PolyhedralInfo.cpp
+++ b/polly/lib/Analysis/PolyhedralInfo.cpp
@@ -32,6 +32,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polyhedral-info"
static cl::opt<bool> CheckParallel("polly-check-parallel",
@@ -77,19 +78,19 @@ bool PolyhedralInfo::checkParallel(Loop *L, isl_pw_aff **MinDepDistPtr) const {
DI->getDependences(const_cast<Scop *>(S), Dependences::AL_Access);
if (!D.hasValidDependences())
return false;
- LLVM_DEBUG(dbgs() << "Loop :\t" << L->getHeader()->getName() << ":\n");
+ POLLY_DEBUG(dbgs() << "Loop :\t" << L->getHeader()->getName() << ":\n");
isl_union_map *Deps =
D.getDependences(Dependences::TYPE_RAW | Dependences::TYPE_WAW |
Dependences::TYPE_WAR | Dependences::TYPE_RED)
.release();
- LLVM_DEBUG(dbgs() << "Dependences :\t" << stringFromIslObj(Deps, "null")
- << "\n");
+ POLLY_DEBUG(dbgs() << "Dependences :\t" << stringFromIslObj(Deps, "null")
+ << "\n");
isl_union_map *Schedule = getScheduleForLoop(S, L);
- LLVM_DEBUG(dbgs() << "Schedule: \t" << stringFromIslObj(Schedule, "null")
- << "\n");
+ POLLY_DEBUG(dbgs() << "Schedule: \t" << stringFromIslObj(Schedule, "null")
+ << "\n");
IsParallel = D.isParallel(Schedule, Deps, MinDepDistPtr);
isl_union_map_free(Schedule);
@@ -125,14 +126,14 @@ __isl_give isl_union_map *PolyhedralInfo::getScheduleForLoop(const Scop *S,
Loop *L) const {
isl_union_map *Schedule = isl_union_map_empty(S->getParamSpace().release());
int CurrDim = S->getRelativeLoopDepth(L);
- LLVM_DEBUG(dbgs() << "Relative loop depth:\t" << CurrDim << "\n");
+ POLLY_DEBUG(dbgs() << "Relative loop depth:\t" << CurrDim << "\n");
assert(CurrDim >= 0 && "Loop in region should have at least depth one");
for (auto &SS : *S) {
if (L->contains(SS.getSurroundingLoop())) {
unsigned int MaxDim = SS.getNumIterators();
- LLVM_DEBUG(dbgs() << "Maximum depth of Stmt:\t" << MaxDim << "\n");
+ POLLY_DEBUG(dbgs() << "Maximum depth of Stmt:\t" << MaxDim << "\n");
isl_map *ScheduleMap = SS.getSchedule().release();
assert(
ScheduleMap &&
diff --git a/polly/lib/Analysis/PruneUnprofitable.cpp b/polly/lib/Analysis/PruneUnprofitable.cpp
index db4a3d73dc33..f8469c03fe55 100644
--- a/polly/lib/Analysis/PruneUnprofitable.cpp
+++ b/polly/lib/Analysis/PruneUnprofitable.cpp
@@ -22,6 +22,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-prune-unprofitable"
namespace {
@@ -57,7 +58,7 @@ static void updateStatistics(Scop &S, bool Pruned) {
static bool runPruneUnprofitable(Scop &S) {
if (PollyProcessUnprofitable) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "NOTE: -polly-process-unprofitable active, won't prune "
"anything\n");
return false;
@@ -66,7 +67,7 @@ static bool runPruneUnprofitable(Scop &S) {
ScopsProcessed++;
if (!S.isProfitable(true)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "SCoP pruned because it probably cannot be optimized in "
"a significant way\n");
S.invalidate(PROFITABLE, DebugLoc());
diff --git a/polly/lib/Analysis/ScopBuilder.cpp b/polly/lib/Analysis/ScopBuilder.cpp
index 0edc41d10641..214e4d360d6b 100644
--- a/polly/lib/Analysis/ScopBuilder.cpp
+++ b/polly/lib/Analysis/ScopBuilder.cpp
@@ -60,6 +60,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-scops"
STATISTIC(ScopFound, "Number of valid Scops");
@@ -2544,9 +2545,9 @@ bool checkCandidatePairAccesses(MemoryAccess *LoadMA, MemoryAccess *StoreMA,
isl::map LoadAccs = LoadMA->getAccessRelation();
isl::map StoreAccs = StoreMA->getAccessRelation();
bool Valid = LoadAccs.has_equal_space(StoreAccs);
- LLVM_DEBUG(dbgs() << " == The accessed space below is "
- << (Valid ? "" : "not ") << "equal!\n");
- LLVM_DEBUG(LoadMA->dump(); StoreMA->dump());
+ POLLY_DEBUG(dbgs() << " == The accessed space below is "
+ << (Valid ? "" : "not ") << "equal!\n");
+ POLLY_DEBUG(LoadMA->dump(); StoreMA->dump());
if (Valid) {
// Then check if they actually access the same memory.
@@ -2560,8 +2561,8 @@ bool checkCandidatePairAccesses(MemoryAccess *LoadMA, MemoryAccess *StoreMA,
isl::set InterAccs =
isl::manage(RS.copy()).intersect(isl::manage(WS.copy()));
Valid = !InterAccs.is_empty();
- LLVM_DEBUG(dbgs() << " == The accessed memory is " << (Valid ? "" : "not ")
- << "overlapping!\n");
+ POLLY_DEBUG(dbgs() << " == The accessed memory is " << (Valid ? "" : "not ")
+ << "overlapping!\n");
}
if (Valid) {
@@ -2571,8 +2572,8 @@ bool checkCandidatePairAccesses(MemoryAccess *LoadMA, MemoryAccess *StoreMA,
isl::set AllAccs = AllAccsRel.range();
Valid = !hasIntersectingAccesses(AllAccs, LoadMA, StoreMA, Domain, MemAccs);
- LLVM_DEBUG(dbgs() << " == The accessed memory is " << (Valid ? "not " : "")
- << "accessed by other instructions!\n");
+ POLLY_DEBUG(dbgs() << " == The accessed memory is " << (Valid ? "not " : "")
+ << "accessed by other instructions!\n");
}
return Valid;
}
@@ -3240,8 +3241,8 @@ bool ScopBuilder::buildAliasChecks() {
// we make the assumed context infeasible.
scop->invalidate(ALIASING, DebugLoc());
- LLVM_DEBUG(dbgs() << "\n\nNOTE: Run time checks for " << scop->getNameStr()
- << " could not be created. This SCoP has been dismissed.");
+ POLLY_DEBUG(dbgs() << "\n\nNOTE: Run time checks for " << scop->getNameStr()
+ << " could not be created. This SCoP has been dismissed.");
return false;
}
@@ -3562,7 +3563,7 @@ void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
DenseMap<BasicBlock *, isl::set> InvalidDomainMap;
if (!buildDomains(&R, InvalidDomainMap)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Bailing-out because buildDomains encountered problems\n");
return;
}
@@ -3582,7 +3583,7 @@ void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
scop->removeStmtNotInDomainMap();
scop->simplifySCoP(false);
if (scop->isEmpty()) {
- LLVM_DEBUG(dbgs() << "Bailing-out because SCoP is empty\n");
+ POLLY_DEBUG(dbgs() << "Bailing-out because SCoP is empty\n");
return;
}
@@ -3599,7 +3600,8 @@ void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
// Check early for a feasible runtime context.
if (!scop->hasFeasibleRuntimeContext()) {
- LLVM_DEBUG(dbgs() << "Bailing-out because of unfeasible context (early)\n");
+ POLLY_DEBUG(
+ dbgs() << "Bailing-out because of unfeasible context (early)\n");
return;
}
@@ -3607,7 +3609,7 @@ void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
// only the runtime context could become infeasible.
if (!scop->isProfitable(UnprofitableScalarAccs)) {
scop->invalidate(PROFITABLE, DebugLoc());
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Bailing-out because SCoP is not considered profitable\n");
return;
}
@@ -3626,7 +3628,7 @@ void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
scop->simplifyContexts();
if (!buildAliasChecks()) {
- LLVM_DEBUG(dbgs() << "Bailing-out because could not build alias checks\n");
+ POLLY_DEBUG(dbgs() << "Bailing-out because could not build alias checks\n");
return;
}
@@ -3638,7 +3640,7 @@ void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
// Check late for a feasible runtime context because profitability did not
// change.
if (!scop->hasFeasibleRuntimeContext()) {
- LLVM_DEBUG(dbgs() << "Bailing-out because of unfeasible context (late)\n");
+ POLLY_DEBUG(dbgs() << "Bailing-out because of unfeasible context (late)\n");
return;
}
@@ -3662,12 +3664,12 @@ ScopBuilder::ScopBuilder(Region *R, AssumptionCache &AC, AAResults &AA,
buildScop(*R, AC);
- LLVM_DEBUG(dbgs() << *scop);
+ POLLY_DEBUG(dbgs() << *scop);
if (!scop->hasFeasibleRuntimeContext()) {
InfeasibleScops++;
Msg = "SCoP ends here but was dismissed.";
- LLVM_DEBUG(dbgs() << "SCoP detected but dismissed\n");
+ POLLY_DEBUG(dbgs() << "SCoP detected but dismissed\n");
RecordedAssumptions.clear();
scop.reset();
} else {
diff --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp
index 938d3f149677..eab7bd83e6a4 100644
--- a/polly/lib/Analysis/ScopDetection.cpp
+++ b/polly/lib/Analysis/ScopDetection.cpp
@@ -91,6 +91,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-detect"
// This option is set to a very high value, as analyzing such loops increases
@@ -406,8 +407,8 @@ inline bool ScopDetection::invalid(DetectionContext &Context, bool Assert,
// canUseISLTripCount().
Log.report(RejectReason);
- LLVM_DEBUG(dbgs() << RejectReason->getMessage());
- LLVM_DEBUG(dbgs() << "\n");
+ POLLY_DEBUG(dbgs() << RejectReason->getMessage());
+ POLLY_DEBUG(dbgs() << "\n");
} else {
assert(!Assert && "Verification of detected scop failed");
}
@@ -704,8 +705,8 @@ bool ScopDetection::isValidCallInst(CallInst &CI,
return false;
if (isDebugCall(&CI)) {
- LLVM_DEBUG(dbgs() << "Allow call to debug function: "
- << CalledFunction->getName() << '\n');
+ POLLY_DEBUG(dbgs() << "Allow call to debug function: "
+ << CalledFunction->getName() << '\n');
return true;
}
@@ -1486,7 +1487,7 @@ Region *ScopDetection::expandRegion(Region &R) {
std::unique_ptr<Region> LastValidRegion;
auto ExpandedRegion = std::unique_ptr<Region>(R.getExpandedRegion());
- LLVM_DEBUG(dbgs() << "\tExpanding " << R.getNameStr() << "\n");
+ POLLY_DEBUG(dbgs() << "\tExpanding " << R.getNameStr() << "\n");
while (ExpandedRegion) {
BBPair P = getBBPairForRegion(ExpandedRegion.get());
@@ -1495,7 +1496,8 @@ Region *ScopDetection::expandRegion(Region &R) {
/*Verifying=*/false);
DetectionContext &Context = *Entry.get();
- LLVM_DEBUG(dbgs() << "\t\tTrying " << ExpandedRegion->getNameStr() << "\n");
+ POLLY_DEBUG(dbgs() << "\t\tTrying " << ExpandedRegion->getNameStr()
+ << "\n");
// Only expand when we did not collect errors.
if (!Context.Log.hasErrors()) {
@@ -1529,7 +1531,7 @@ Region *ScopDetection::expandRegion(Region &R) {
}
}
- LLVM_DEBUG({
+ POLLY_DEBUG({
if (LastValidRegion)
dbgs() << "\tto " << LastValidRegion->getNameStr() << "\n";
else
@@ -1750,10 +1752,11 @@ bool ScopDetection::isProfitableRegion(DetectionContext &Context) const {
bool ScopDetection::isValidRegion(DetectionContext &Context) {
Region &CurRegion = Context.CurRegion;
- LLVM_DEBUG(dbgs() << "Checking region: " << CurRegion.getNameStr() << "\n\t");
+ POLLY_DEBUG(dbgs() << "Checking region: " << CurRegion.getNameStr()
+ << "\n\t");
if (!PollyAllowFullFunction && CurRegion.isTopLevelRegion()) {
- LLVM_DEBUG(dbgs() << "Top level region is invalid\n");
+ POLLY_DEBUG(dbgs() << "Top level region is invalid\n");
Context.IsInvalid = true;
return false;
}
@@ -1761,14 +1764,14 @@ bool ScopDetection::isValidRegion(DetectionContext &Context) {
DebugLoc DbgLoc;
if (CurRegion.getExit() &&
isa<UnreachableInst>(CurRegion.getExit()->getTerminator())) {
- LLVM_DEBUG(dbgs() << "Unreachable in exit\n");
+ POLLY_DEBUG(dbgs() << "Unreachable in exit\n");
return invalid<ReportUnreachableInExit>(Context, /*Assert=*/true,
CurRegion.getExit(), DbgLoc);
}
if (!OnlyRegion.empty() &&
!CurRegion.getEntry()->getName().count(OnlyRegion)) {
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Region entry does not match -polly-only-region";
dbgs() << "\n";
});
@@ -1802,7 +1805,7 @@ bool ScopDetection::isValidRegion(DetectionContext &Context) {
return invalid<ReportIrreducibleRegion>(Context, /*Assert=*/true,
&CurRegion, DbgLoc);
- LLVM_DEBUG(dbgs() << "OK\n");
+ POLLY_DEBUG(dbgs() << "OK\n");
return true;
}
diff --git a/polly/lib/Analysis/ScopInfo.cpp b/polly/lib/Analysis/ScopInfo.cpp
index 3e78cc8937fb..fa35fae84ace 100644
--- a/polly/lib/Analysis/ScopInfo.cpp
+++ b/polly/lib/Analysis/ScopInfo.cpp
@@ -73,6 +73,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-scops"
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
@@ -2042,7 +2043,7 @@ void Scop::intersectDefinedBehavior(isl::set Set, AssumptionSign Sign) {
}
void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
- LLVM_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
+ POLLY_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
addAssumption(Kind, isl::set::empty(getParamSpace()), Loc, AS_ASSUMPTION, BB);
}
diff --git a/polly/lib/CMakeLists.txt b/polly/lib/CMakeLists.txt
index 9780f24d5d36..4557878e515e 100644
--- a/polly/lib/CMakeLists.txt
+++ b/polly/lib/CMakeLists.txt
@@ -62,6 +62,7 @@ add_llvm_pass_plugin(Polly
CodeGen/PerfMonitor.cpp
Exchange/JSONExporter.cpp
Support/GICHelper.cpp
+ Support/PollyDebug.cpp
Support/SCEVAffinator.cpp
Support/SCEVValidator.cpp
Support/RegisterPasses.cpp
diff --git a/polly/lib/CodeGen/CodeGeneration.cpp b/polly/lib/CodeGen/CodeGeneration.cpp
index 3792d995fd99..c4ca3f2e0f0f 100644
--- a/polly/lib/CodeGen/CodeGeneration.cpp
+++ b/polly/lib/CodeGen/CodeGeneration.cpp
@@ -47,6 +47,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-codegen"
static cl::opt<bool> Verify("polly-codegen-verify",
@@ -86,7 +87,7 @@ static void verifyGeneratedFunction(Scop &S, Function &F, IslAstInfo &AI) {
if (!Verify || !verifyFunction(F, &errs()))
return;
- LLVM_DEBUG({
+ POLLY_DEBUG({
errs() << "== ISL Codegen created an invalid function ==\n\n== The "
"SCoP ==\n";
errs() << S;
@@ -183,7 +184,7 @@ static bool generateCode(Scop &S, IslAstInfo &AI, LoopInfo &LI,
// DependenceInfo or IslAstInfo around.
IslAst &Ast = AI.getIslAst();
if (Ast.getSharedIslCtx() != S.getSharedIslCtx()) {
- LLVM_DEBUG(dbgs() << "Got an IstAst for a different Scop/isl_ctx\n");
+ POLLY_DEBUG(dbgs() << "Got an IstAst for a different Scop/isl_ctx\n");
return false;
}
diff --git a/polly/lib/CodeGen/IslAst.cpp b/polly/lib/CodeGen/IslAst.cpp
index fe2b5d9104d7..142a19d5e069 100644
--- a/polly/lib/CodeGen/IslAst.cpp
+++ b/polly/lib/CodeGen/IslAst.cpp
@@ -52,6 +52,7 @@
#include <cassert>
#include <cstdlib>
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-ast"
using namespace llvm;
@@ -643,14 +644,14 @@ static std::unique_ptr<IslAstInfo> runIslAst(
const Dependences &D = GetDeps(Dependences::AL_Statement);
if (D.getSharedIslCtx() != Scop.getSharedIslCtx()) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Got dependence analysis for different SCoP/isl_ctx\n");
return {};
}
std::unique_ptr<IslAstInfo> Ast = std::make_unique<IslAstInfo>(Scop, D);
- LLVM_DEBUG({
+ POLLY_DEBUG({
if (Ast)
Ast->print(dbgs());
});
@@ -751,7 +752,7 @@ void IslAstInfo::print(raw_ostream &OS) {
P = isl_ast_node_print(RootNode.get(), P, Options);
AstStr = isl_printer_get_str(P);
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << S.getContextStr() << "\n";
dbgs() << stringFromIslObj(S.getScheduleTree(), "null");
});
diff --git a/polly/lib/Support/PollyDebug.cpp b/polly/lib/Support/PollyDebug.cpp
new file mode 100644
index 000000000000..9dcd8ed03694
--- /dev/null
+++ b/polly/lib/Support/PollyDebug.cpp
@@ -0,0 +1,27 @@
+//===-PollyDebug.cpp -Provide support for debugging Polly passes-*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Functions to aid printing Debug Info of all polly passes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "polly/Support/PollyDebug.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace polly;
+using namespace llvm;
+
+bool PollyDebugFlag;
+bool polly::getPollyDebugFlag() { return PollyDebugFlag; }
+
+// -debug - Command line option to enable the DEBUG statements in the passes.
+// This flag may only be enabled in debug builds.
+static cl::opt<bool, true>
+ PollyDebug("polly-debug",
+ cl::desc("Enable debug output for only polly passes."),
+ cl::Hidden, cl::location(PollyDebugFlag), cl::ZeroOrMore);
diff --git a/polly/lib/Support/SCEVValidator.cpp b/polly/lib/Support/SCEVValidator.cpp
index e3d9818597ea..5bb82624ed78 100644
--- a/polly/lib/Support/SCEVValidator.cpp
+++ b/polly/lib/Support/SCEVValidator.cpp
@@ -9,6 +9,7 @@
using namespace llvm;
using namespace polly;
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-scev-validator"
namespace SCEVType {
@@ -136,7 +137,7 @@ public:
ValidatorResult visitVScale(const SCEVVScale *VScale) {
// We do not support VScale constants.
- LLVM_DEBUG(dbgs() << "INVALID: VScale is not supported");
+ POLLY_DEBUG(dbgs() << "INVALID: VScale is not supported");
return ValidatorResult(SCEVType::INVALID);
}
@@ -203,7 +204,7 @@ public:
}
if ((Op.isIV() || Op.isPARAM()) && !Return.isINT()) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "INVALID: More than one non-int operand in MulExpr\n"
<< "\tExpr: " << *Expr << "\n"
<< "\tPrevious expression type: " << Return << "\n"
@@ -224,7 +225,7 @@ public:
ValidatorResult visitAddRecExpr(const SCEVAddRecExpr *Expr) {
if (!Expr->isAffine()) {
- LLVM_DEBUG(dbgs() << "INVALID: AddRec is not affine");
+ POLLY_DEBUG(dbgs() << "INVALID: AddRec is not affine");
return ValidatorResult(SCEVType::INVALID);
}
@@ -239,7 +240,7 @@ public:
auto *L = Expr->getLoop();
if (R->contains(L) && (!Scope || !L->contains(Scope))) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "INVALID: Loop of AddRec expression boxed in an a "
"non-affine subregion or has a non-synthesizable exit "
"value.");
@@ -253,8 +254,8 @@ public:
return Result;
}
- LLVM_DEBUG(dbgs() << "INVALID: AddRec within scop has non-int"
- "recurrence part");
+ POLLY_DEBUG(dbgs() << "INVALID: AddRec within scop has non-int"
+ "recurrence part");
return ValidatorResult(SCEVType::INVALID);
}
@@ -314,7 +315,7 @@ public:
ValidatorResult Op = visit(Expr->getOperand(i));
if (!Op.isConstant()) {
- LLVM_DEBUG(dbgs() << "INVALID: UMaxExpr has a non-constant operand");
+ POLLY_DEBUG(dbgs() << "INVALID: UMaxExpr has a non-constant operand");
return ValidatorResult(SCEVType::INVALID);
}
}
@@ -329,7 +330,7 @@ public:
ValidatorResult Op = visit(Expr->getOperand(i));
if (!Op.isConstant()) {
- LLVM_DEBUG(dbgs() << "INVALID: UMinExpr has a non-constant operand");
+ POLLY_DEBUG(dbgs() << "INVALID: UMinExpr has a non-constant operand");
return ValidatorResult(SCEVType::INVALID);
}
}
@@ -344,7 +345,7 @@ public:
ValidatorResult Op = visit(Expr->getOperand(i));
if (!Op.isConstant()) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs()
<< "INVALID: SCEVSequentialUMinExpr has a non-constant operand");
return ValidatorResult(SCEVType::INVALID);
@@ -356,8 +357,8 @@ public:
ValidatorResult visitGenericInst(Instruction *I, const SCEV *S) {
if (R->contains(I)) {
- LLVM_DEBUG(dbgs() << "INVALID: UnknownExpr references an instruction "
- "within the region\n");
+ POLLY_DEBUG(dbgs() << "INVALID: UnknownExpr references an instruction "
+ "within the region\n");
return ValidatorResult(SCEVType::INVALID);
}
@@ -393,7 +394,7 @@ public:
if (LHS.isConstant() && RHS.isConstant())
return ValidatorResult(SCEVType::PARAM, DivExpr);
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "INVALID: unsigned division of non-constant expressions");
return ValidatorResult(SCEVType::INVALID);
}
@@ -434,12 +435,13 @@ public:
Value *V = Expr->getValue();
if (!Expr->getType()->isIntegerTy() && !Expr->getType()->isPointerTy()) {
- LLVM_DEBUG(dbgs() << "INVALID: UnknownExpr is not an integer or pointer");
+ POLLY_DEBUG(
+ dbgs() << "INVALID: UnknownExpr is not an integer or pointer");
return ValidatorResult(SCEVType::INVALID);
}
if (isa<UndefValue>(V)) {
- LLVM_DEBUG(dbgs() << "INVALID: UnknownExpr references an undef value");
+ POLLY_DEBUG(dbgs() << "INVALID: UnknownExpr references an undef value");
return ValidatorResult(SCEVType::INVALID);
}
@@ -601,7 +603,7 @@ bool polly::isAffineExpr(const Region *R, llvm::Loop *Scope, const SCEV *Expr,
return false;
SCEVValidator Validator(R, Scope, SE, ILS);
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "\n";
dbgs() << "Expr: " << *Expr << "\n";
dbgs() << "Region: " << R->getNameStr() << "\n";
@@ -610,7 +612,7 @@ bool polly::isAffineExpr(const Region *R, llvm::Loop *Scope, const SCEV *Expr,
ValidatorResult Result = Validator.visit(Expr);
- LLVM_DEBUG({
+ POLLY_DEBUG({
if (Result.isValid())
dbgs() << "VALID\n";
dbgs() << "\n";
diff --git a/polly/lib/Transform/DeLICM.cpp b/polly/lib/Transform/DeLICM.cpp
index dae5e79639f7..8afa63724ade 100644
--- a/polly/lib/Transform/DeLICM.cpp
+++ b/polly/lib/Transform/DeLICM.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/InitializePasses.h"
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-delicm"
using namespace polly;
@@ -547,7 +548,7 @@ private:
/// @see Knowledge::isConflicting
bool isConflicting(const Knowledge &Proposed) {
raw_ostream *OS = nullptr;
- LLVM_DEBUG(OS = &llvm::dbgs());
+ POLLY_DEBUG(OS = &llvm::dbgs());
return Knowledge::isConflicting(Zone, Proposed, OS, 4);
}
@@ -559,7 +560,7 @@ private:
if (SAI->isValueKind()) {
auto *MA = S->getValueDef(SAI);
if (!MA) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs()
<< " Reject because value is read-only within the scop\n");
return false;
@@ -576,7 +577,7 @@ private:
auto UserInst = cast<Instruction>(User);
if (!S->contains(UserInst)) {
- LLVM_DEBUG(dbgs() << " Reject because value is escaping\n");
+ POLLY_DEBUG(dbgs() << " Reject because value is escaping\n");
return false;
}
}
@@ -593,9 +594,9 @@ private:
auto PHI = cast<PHINode>(MA->getAccessInstruction());
for (auto Incoming : PHI->blocks()) {
if (!S->contains(Incoming)) {
- LLVM_DEBUG(dbgs()
- << " Reject because at least one incoming block is "
- "not in the scop region\n");
+ POLLY_DEBUG(dbgs()
+ << " Reject because at least one incoming block is "
+ "not in the scop region\n");
return false;
}
}
@@ -603,7 +604,7 @@ private:
return true;
}
- LLVM_DEBUG(dbgs() << " Reject ExitPHI or other non-value\n");
+ POLLY_DEBUG(dbgs() << " Reject ExitPHI or other non-value\n");
return false;
}
@@ -686,12 +687,12 @@ private:
// { DomainDef[] -> Element[] }
auto DefTarget = TargetElt.apply_domain(DefSched.reverse());
simplify(DefTarget);
- LLVM_DEBUG(dbgs() << " Def Mapping: " << DefTarget << '\n');
+ POLLY_DEBUG(dbgs() << " Def Mapping: " << DefTarget << '\n');
auto OrigDomain = getDomainFor(DefMA);
auto MappedDomain = DefTarget.domain();
if (!OrigDomain.is_subset(MappedDomain)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs()
<< " Reject because mapping does not encompass all instances\n");
return false;
@@ -704,7 +705,7 @@ private:
isl::union_map DefUses;
std::tie(DefUses, Lifetime) = computeValueUses(SAI);
- LLVM_DEBUG(dbgs() << " Lifetime: " << Lifetime << '\n');
+ POLLY_DEBUG(dbgs() << " Lifetime: " << Lifetime << '\n');
/// { [Element[] -> Zone[]] }
auto EltZone = Lifetime.apply_domain(DefTarget).wrap();
@@ -858,12 +859,12 @@ private:
// { DomainRead[] -> Element[] }
auto PHITarget = PHISched.apply_range(TargetElt);
simplify(PHITarget);
- LLVM_DEBUG(dbgs() << " Mapping: " << PHITarget << '\n');
+ POLLY_DEBUG(dbgs() << " Mapping: " << PHITarget << '\n');
auto OrigDomain = getDomainFor(PHIRead);
auto MappedDomain = PHITarget.domain();
if (!OrigDomain.is_subset(MappedDomain)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs()
<< " Reject because mapping does not encompass all instances\n");
return false;
@@ -872,7 +873,7 @@ private:
// { DomainRead[] -> DomainWrite[] }
auto PerPHIWrites = computePerPHI(SAI);
if (PerPHIWrites.is_null()) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << " Reject because cannot determine incoming values\n");
return false;
}
@@ -894,17 +895,17 @@ private:
auto ExpandedWritesDom = WritesTarget.domain();
if (!DelicmPartialWrites &&
!UniverseWritesDom.is_subset(ExpandedWritesDom)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << " Reject because did not find PHI write mapping for "
"all instances\n");
if (DelicmOverapproximateWrites)
- LLVM_DEBUG(dbgs() << " Relevant Mapping: "
- << RelevantWritesTarget << '\n');
- LLVM_DEBUG(dbgs() << " Deduced Mapping: " << WritesTarget
- << '\n');
- LLVM_DEBUG(dbgs() << " Missing instances: "
- << UniverseWritesDom.subtract(ExpandedWritesDom)
- << '\n');
+ POLLY_DEBUG(dbgs() << " Relevant Mapping: "
+ << RelevantWritesTarget << '\n');
+ POLLY_DEBUG(dbgs() << " Deduced Mapping: " << WritesTarget
+ << '\n');
+ POLLY_DEBUG(dbgs() << " Missing instances: "
+ << UniverseWritesDom.subtract(ExpandedWritesDom)
+ << '\n');
return false;
}
@@ -916,7 +917,7 @@ private:
// { DomainRead[] -> Zone[] }
auto Lifetime = betweenScatter(PerPHIWriteScatter, PHISched, false, true);
simplify(Lifetime);
- LLVM_DEBUG(dbgs() << " Lifetime: " << Lifetime << "\n");
+ POLLY_DEBUG(dbgs() << " Lifetime: " << Lifetime << "\n");
// { DomainWrite[] -> Zone[] }
auto WriteLifetime = isl::union_map(Lifetime).apply_domain(PerPHIWrites);
@@ -1029,7 +1030,7 @@ private:
// Use the target store's write location as a suggestion to map scalars to.
auto EltTarget = Target.apply_range(TargetAccRel);
simplify(EltTarget);
- LLVM_DEBUG(dbgs() << " Target mapping is " << EltTarget << '\n');
+ POLLY_DEBUG(dbgs() << " Target mapping is " << EltTarget << '\n');
// Stack of elements not yet processed.
SmallVector<MemoryAccess *, 16> Worklist;
@@ -1067,8 +1068,8 @@ private:
if (Closed.count(SAI))
continue;
Closed.insert(SAI);
- LLVM_DEBUG(dbgs() << "\n Trying to map " << MA << " (SAI: " << SAI
- << ")\n");
+ POLLY_DEBUG(dbgs() << "\n Trying to map " << MA << " (SAI: " << SAI
+ << ")\n");
// Skip non-mappable scalars.
if (!isMappable(SAI))
@@ -1076,7 +1077,7 @@ private:
auto MASize = DL.getTypeAllocSize(MA->getAccessValue()->getType());
if (MASize > StoreSize) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << " Reject because storage size is insufficient\n");
continue;
}
@@ -1212,7 +1213,7 @@ public:
"The only reason that these things have not been computed should "
"be if the max-operations limit hit");
DeLICMOutOfQuota++;
- LLVM_DEBUG(dbgs() << "DeLICM analysis exceeded max_operations\n");
+ POLLY_DEBUG(dbgs() << "DeLICM analysis exceeded max_operations\n");
DebugLoc Begin, End;
getDebugLocations(getBBPairForRegion(&S->getRegion()), Begin, End);
OptimizationRemarkAnalysis R(DEBUG_TYPE, "OutOfQuota", Begin,
@@ -1223,7 +1224,7 @@ public:
}
Zone = OriginalZone = Knowledge({}, EltUnused, EltKnown, EltWritten);
- LLVM_DEBUG(dbgs() << "Computed Zone:\n"; OriginalZone.print(dbgs(), 4));
+ POLLY_DEBUG(dbgs() << "Computed Zone:\n"; OriginalZone.print(dbgs(), 4));
assert(Zone.isUsable() && OriginalZone.isUsable());
return true;
@@ -1245,8 +1246,8 @@ public:
continue;
if (MA->isMayWrite()) {
- LLVM_DEBUG(dbgs() << "Access " << MA
- << " pruned because it is a MAY_WRITE\n");
+ POLLY_DEBUG(dbgs() << "Access " << MA
+ << " pruned because it is a MAY_WRITE\n");
OptimizationRemarkMissed R(DEBUG_TYPE, "TargetMayWrite",
MA->getAccessInstruction());
R << "Skipped possible mapping target because it is not an "
@@ -1256,8 +1257,8 @@ public:
}
if (Stmt.getNumIterators() == 0) {
- LLVM_DEBUG(dbgs() << "Access " << MA
- << " pruned because it is not in a loop\n");
+ POLLY_DEBUG(dbgs() << "Access " << MA
+ << " pruned because it is not in a loop\n");
OptimizationRemarkMissed R(DEBUG_TYPE, "WriteNotInLoop",
MA->getAccessInstruction());
R << "skipped possible mapping target because it is not in a loop";
@@ -1266,9 +1267,9 @@ public:
}
if (isScalarAccess(MA)) {
- LLVM_DEBUG(dbgs()
- << "Access " << MA
- << " pruned because it writes only a single element\n");
+ POLLY_DEBUG(dbgs()
+ << "Access " << MA
+ << " pruned because it writes only a single element\n");
OptimizationRemarkMissed R(DEBUG_TYPE, "ScalarWrite",
MA->getAccessInstruction());
R << "skipped possible mapping target because the memory location "
@@ -1278,8 +1279,8 @@ public:
}
if (!isa<StoreInst>(MA->getAccessInstruction())) {
- LLVM_DEBUG(dbgs() << "Access " << MA
- << " pruned because it is not a StoreInst\n");
+ POLLY_DEBUG(dbgs() << "Access " << MA
+ << " pruned because it is not a StoreInst\n");
OptimizationRemarkMissed R(DEBUG_TYPE, "NotAStore",
MA->getAccessInstruction());
R << "skipped possible mapping target because non-store instructions "
@@ -1301,9 +1302,9 @@ public:
// arguments.
isl::union_map AccRel = MA->getLatestAccessRelation();
if (!AccRel.is_single_valued().is_true()) {
- LLVM_DEBUG(dbgs() << "Access " << MA
- << " is incompatible because it writes multiple "
- "elements per instance\n");
+ POLLY_DEBUG(dbgs() << "Access " << MA
+ << " is incompatible because it writes multiple "
+ "elements per instance\n");
OptimizationRemarkMissed R(DEBUG_TYPE, "NonFunctionalAccRel",
MA->getAccessInstruction());
R << "skipped possible mapping target because it writes more than "
@@ -1314,7 +1315,7 @@ public:
isl::union_set TouchedElts = AccRel.range();
if (!TouchedElts.is_subset(CompatibleElts)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs()
<< "Access " << MA
<< " is incompatible because it touches incompatible elements\n");
@@ -1328,7 +1329,7 @@ public:
assert(isCompatibleAccess(MA));
NumberOfCompatibleTargets++;
- LLVM_DEBUG(dbgs() << "Analyzing target access " << MA << "\n");
+ POLLY_DEBUG(dbgs() << "Analyzing target access " << MA << "\n");
if (collapseScalarsToStore(MA))
Modified = true;
}
@@ -1361,15 +1362,15 @@ static std::unique_ptr<DeLICMImpl> collapseToUnused(Scop &S, LoopInfo &LI) {
std::unique_ptr<DeLICMImpl> Impl = std::make_unique<DeLICMImpl>(&S, &LI);
if (!Impl->computeZone()) {
- LLVM_DEBUG(dbgs() << "Abort because cannot reliably compute lifetimes\n");
+ POLLY_DEBUG(dbgs() << "Abort because cannot reliably compute lifetimes\n");
return Impl;
}
- LLVM_DEBUG(dbgs() << "Collapsing scalars to unused array elements...\n");
+ POLLY_DEBUG(dbgs() << "Collapsing scalars to unused array elements...\n");
Impl->greedyCollapse();
- LLVM_DEBUG(dbgs() << "\nFinal Scop:\n");
- LLVM_DEBUG(dbgs() << S);
+ POLLY_DEBUG(dbgs() << "\nFinal Scop:\n");
+ POLLY_DEBUG(dbgs() << S);
return Impl;
}
diff --git a/polly/lib/Transform/FlattenAlgo.cpp b/polly/lib/Transform/FlattenAlgo.cpp
index f8ed332348ab..27a699e5ea59 100644
--- a/polly/lib/Transform/FlattenAlgo.cpp
+++ b/polly/lib/Transform/FlattenAlgo.cpp
@@ -14,6 +14,7 @@
#include "polly/FlattenAlgo.h"
#include "polly/Support/ISLOStream.h"
#include "polly/Support/ISLTools.h"
+#include "polly/Support/PollyDebug.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "polly-flatten-algo"
@@ -171,7 +172,7 @@ isl::union_map tryFlattenSequence(isl::union_map Schedule) {
// Would cause an infinite loop.
if (!isDimBoundedByConstant(ScatterSet, 0)) {
- LLVM_DEBUG(dbgs() << "Abort; dimension is not of fixed size\n");
+ POLLY_DEBUG(dbgs() << "Abort; dimension is not of fixed size\n");
return {};
}
@@ -182,8 +183,8 @@ isl::union_map tryFlattenSequence(isl::union_map Schedule) {
auto Counter = isl::pw_aff(isl::local_space(ParamSpace.set_from_params()));
while (!ScatterSet.is_empty()) {
- LLVM_DEBUG(dbgs() << "Next counter:\n " << Counter << "\n");
- LLVM_DEBUG(dbgs() << "Remaining scatter set:\n " << ScatterSet << "\n");
+ POLLY_DEBUG(dbgs() << "Next counter:\n " << Counter << "\n");
+ POLLY_DEBUG(dbgs() << "Remaining scatter set:\n " << ScatterSet << "\n");
auto ThisSet = ScatterSet.project_out(isl::dim::set, 1, Dims - 1);
auto ThisFirst = ThisSet.lexmin();
auto ScatterFirst = ThisFirst.add_dims(isl::dim::set, Dims - 1);
@@ -199,11 +200,11 @@ isl::union_map tryFlattenSequence(isl::union_map Schedule) {
auto RemainingSubSchedule = scheduleProjectOut(SubSchedule, 0, 1);
auto FirstSubScatter = isl::set(FirstSubSchedule.range());
- LLVM_DEBUG(dbgs() << "Next step in sequence is:\n " << FirstSubScatter
- << "\n");
+ POLLY_DEBUG(dbgs() << "Next step in sequence is:\n " << FirstSubScatter
+ << "\n");
if (!isDimBoundedByParameter(FirstSubScatter, 0)) {
- LLVM_DEBUG(dbgs() << "Abort; sequence step is not bounded\n");
+ POLLY_DEBUG(dbgs() << "Abort; sequence step is not bounded\n");
return {};
}
@@ -236,8 +237,8 @@ isl::union_map tryFlattenSequence(isl::union_map Schedule) {
Counter = Counter.add(PartLen);
}
- LLVM_DEBUG(dbgs() << "Sequence-flatten result is:\n " << NewSchedule
- << "\n");
+ POLLY_DEBUG(dbgs() << "Sequence-flatten result is:\n " << NewSchedule
+ << "\n");
return NewSchedule;
}
@@ -266,20 +267,20 @@ isl::union_map tryFlattenLoop(isl::union_map Schedule) {
SubExtent = SubExtent.project_out(isl::dim::set, 1, SubDims - 1);
if (!isDimBoundedByConstant(SubExtent, 0)) {
- LLVM_DEBUG(dbgs() << "Abort; dimension not bounded by constant\n");
+ POLLY_DEBUG(dbgs() << "Abort; dimension not bounded by constant\n");
return {};
}
auto Min = SubExtent.dim_min(0);
- LLVM_DEBUG(dbgs() << "Min bound:\n " << Min << "\n");
+ POLLY_DEBUG(dbgs() << "Min bound:\n " << Min << "\n");
auto MinVal = getConstant(Min, false, true);
auto Max = SubExtent.dim_max(0);
- LLVM_DEBUG(dbgs() << "Max bound:\n " << Max << "\n");
+ POLLY_DEBUG(dbgs() << "Max bound:\n " << Max << "\n");
auto MaxVal = getConstant(Max, true, false);
if (MinVal.is_null() || MaxVal.is_null() || MinVal.is_nan() ||
MaxVal.is_nan()) {
- LLVM_DEBUG(dbgs() << "Abort; dimension bounds could not be determined\n");
+ POLLY_DEBUG(dbgs() << "Abort; dimension bounds could not be determined\n");
return {};
}
@@ -297,15 +298,15 @@ isl::union_map tryFlattenLoop(isl::union_map Schedule) {
auto IndexMap = isl::union_map::from(Index);
auto Result = IndexMap.flat_range_product(RemainingSubSchedule);
- LLVM_DEBUG(dbgs() << "Loop-flatten result is:\n " << Result << "\n");
+ POLLY_DEBUG(dbgs() << "Loop-flatten result is:\n " << Result << "\n");
return Result;
}
} // anonymous namespace
isl::union_map polly::flattenSchedule(isl::union_map Schedule) {
unsigned Dims = getNumScatterDims(Schedule);
- LLVM_DEBUG(dbgs() << "Recursive schedule to process:\n " << Schedule
- << "\n");
+ POLLY_DEBUG(dbgs() << "Recursive schedule to process:\n " << Schedule
+ << "\n");
// Base case; no dimensions left
if (Dims == 0) {
@@ -319,20 +320,20 @@ isl::union_map polly::flattenSchedule(isl::union_map Schedule) {
// Fixed dimension; no need to preserve variabledness.
if (!isVariableDim(Schedule)) {
- LLVM_DEBUG(dbgs() << "Fixed dimension; try sequence flattening\n");
+ POLLY_DEBUG(dbgs() << "Fixed dimension; try sequence flattening\n");
auto NewScheduleSequence = tryFlattenSequence(Schedule);
if (!NewScheduleSequence.is_null())
return NewScheduleSequence;
}
// Constant stride
- LLVM_DEBUG(dbgs() << "Try loop flattening\n");
+ POLLY_DEBUG(dbgs() << "Try loop flattening\n");
auto NewScheduleLoop = tryFlattenLoop(Schedule);
if (!NewScheduleLoop.is_null())
return NewScheduleLoop;
// Try again without loop condition (may blow up the number of pieces!!)
- LLVM_DEBUG(dbgs() << "Try sequence flattening again\n");
+ POLLY_DEBUG(dbgs() << "Try sequence flattening again\n");
auto NewScheduleSequence = tryFlattenSequence(Schedule);
if (!NewScheduleSequence.is_null())
return NewScheduleSequence;
diff --git a/polly/lib/Transform/FlattenSchedule.cpp b/polly/lib/Transform/FlattenSchedule.cpp
index 87bf642ba0d9..f514ef359ba0 100644
--- a/polly/lib/Transform/FlattenSchedule.cpp
+++ b/polly/lib/Transform/FlattenSchedule.cpp
@@ -18,6 +18,7 @@
#include "polly/ScopPass.h"
#include "polly/Support/ISLOStream.h"
#include "polly/Support/ISLTools.h"
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-flatten-schedule"
using namespace polly;
@@ -57,23 +58,23 @@ public:
// OldSchedule.
IslCtx = S.getSharedIslCtx();
- LLVM_DEBUG(dbgs() << "Going to flatten old schedule:\n");
+ POLLY_DEBUG(dbgs() << "Going to flatten old schedule:\n");
OldSchedule = S.getSchedule();
- LLVM_DEBUG(printSchedule(dbgs(), OldSchedule, 2));
+ POLLY_DEBUG(printSchedule(dbgs(), OldSchedule, 2));
auto Domains = S.getDomains();
auto RestrictedOldSchedule = OldSchedule.intersect_domain(Domains);
- LLVM_DEBUG(dbgs() << "Old schedule with domains:\n");
- LLVM_DEBUG(printSchedule(dbgs(), RestrictedOldSchedule, 2));
+ POLLY_DEBUG(dbgs() << "Old schedule with domains:\n");
+ POLLY_DEBUG(printSchedule(dbgs(), RestrictedOldSchedule, 2));
auto NewSchedule = flattenSchedule(RestrictedOldSchedule);
- LLVM_DEBUG(dbgs() << "Flattened new schedule:\n");
- LLVM_DEBUG(printSchedule(dbgs(), NewSchedule, 2));
+ POLLY_DEBUG(dbgs() << "Flattened new schedule:\n");
+ POLLY_DEBUG(printSchedule(dbgs(), NewSchedule, 2));
NewSchedule = NewSchedule.gist_domain(Domains);
- LLVM_DEBUG(dbgs() << "Gisted, flattened new schedule:\n");
- LLVM_DEBUG(printSchedule(dbgs(), NewSchedule, 2));
+ POLLY_DEBUG(dbgs() << "Gisted, flattened new schedule:\n");
+ POLLY_DEBUG(printSchedule(dbgs(), NewSchedule, 2));
S.setSchedule(NewSchedule);
return false;
diff --git a/polly/lib/Transform/ForwardOpTree.cpp b/polly/lib/Transform/ForwardOpTree.cpp
index 5e6de2e182a5..e9be6c9cdcc2 100644
--- a/polly/lib/Transform/ForwardOpTree.cpp
+++ b/polly/lib/Transform/ForwardOpTree.cpp
@@ -40,6 +40,7 @@
#include <cassert>
#include <memory>
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-optree"
using namespace llvm;
@@ -171,7 +172,7 @@ struct ForwardingAction {
Result.Decision =
IsProfitable ? FD_CanForwardProfitably : FD_CanForwardLeaf;
Result.Execute = [=]() {
- LLVM_DEBUG(dbgs() << " trivially forwarded: " << *Val << "\n");
+ POLLY_DEBUG(dbgs() << " trivially forwarded: " << *Val << "\n");
return true;
};
return Result;
@@ -368,12 +369,12 @@ public:
Known = {};
Translator = {};
NormalizeMap = {};
- LLVM_DEBUG(dbgs() << "Known analysis exceeded max_operations\n");
+ POLLY_DEBUG(dbgs() << "Known analysis exceeded max_operations\n");
return false;
}
KnownAnalyzed++;
- LLVM_DEBUG(dbgs() << "All known: " << Known << "\n");
+ POLLY_DEBUG(dbgs() << "All known: " << Known << "\n");
return true;
}
@@ -490,7 +491,7 @@ public:
// do not add another MemoryAccess.
auto ExecAction = [this, TargetStmt, LI, Access]() -> bool {
TargetStmt->prependInstruction(LI);
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << " forwarded known load with preexisting MemoryAccess"
<< Access << "\n");
(void)Access;
@@ -528,10 +529,10 @@ public:
if (SameVal.is_null())
return ForwardingAction::notApplicable();
- LLVM_DEBUG(dbgs() << " expected values where " << TargetExpectedVal
- << "\n");
- LLVM_DEBUG(dbgs() << " candidate elements where " << Candidates
- << "\n");
+ POLLY_DEBUG(dbgs() << " expected values where " << TargetExpectedVal
+ << "\n");
+ POLLY_DEBUG(dbgs() << " candidate elements where " << Candidates
+ << "\n");
// { ValInst[] }
isl::space ValInstSpace = ExpectedVal.get_space().range();
@@ -568,8 +569,8 @@ public:
// { [TargetDomain[] -> Value[]] -> [DefDomain[] -> Value] }
LocalTranslator = DefToTarget.reverse().product(ValToVal);
- LLVM_DEBUG(dbgs() << " local translator is " << LocalTranslator
- << "\n");
+ POLLY_DEBUG(dbgs() << " local translator is " << LocalTranslator
+ << "\n");
if (LocalTranslator.is_null())
return ForwardingAction::notApplicable();
@@ -579,8 +580,8 @@ public:
LocalTranslator]() -> bool {
TargetStmt->prependInstruction(LI);
MemoryAccess *Access = makeReadArrayAccess(TargetStmt, LI, SameVal);
- LLVM_DEBUG(dbgs() << " forwarded known load with new MemoryAccess"
- << Access << "\n");
+ POLLY_DEBUG(dbgs() << " forwarded known load with new MemoryAccess"
+ << Access << "\n");
(void)Access;
if (!LocalTranslator.is_null())
@@ -643,8 +644,8 @@ public:
Access = TargetStmt->ensureValueRead(Inst);
Access->setNewAccessRelation(SameVal);
- LLVM_DEBUG(dbgs() << " forwarded known content of " << *Inst
- << " which is " << SameVal << "\n");
+ POLLY_DEBUG(dbgs() << " forwarded known content of " << *Inst
+ << " which is " << SameVal << "\n");
TotalReloads++;
NumReloads++;
return false;
@@ -712,8 +713,8 @@ public:
// instruction using them.
TargetStmt->prependInstruction(UseInst);
- LLVM_DEBUG(dbgs() << " forwarded speculable instruction: " << *UseInst
- << "\n");
+ POLLY_DEBUG(dbgs() << " forwarded speculable instruction: " << *UseInst
+ << "\n");
NumInstructionsCopied++;
TotalInstructionsCopied++;
return true;
@@ -765,7 +766,7 @@ public:
if (TargetUse.getKind() == VirtualUse::Synthesizable)
return ForwardingAction::triviallyForwardable(false, UseVal);
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << " Synthesizable would not be synthesizable anymore: "
<< *UseVal << "\n");
return ForwardingAction::cannotForward();
@@ -779,8 +780,8 @@ public:
auto ExecAction = [this, TargetStmt, UseVal]() {
TargetStmt->ensureValueRead(UseVal);
- LLVM_DEBUG(dbgs() << " forwarded read-only value " << *UseVal
- << "\n");
+ POLLY_DEBUG(dbgs() << " forwarded read-only value " << *UseVal
+ << "\n");
NumReadOnlyCopied++;
TotalReadOnlyCopied++;
@@ -830,7 +831,8 @@ public:
// When no method is found to forward the operand tree, we effectively
// cannot handle it.
- LLVM_DEBUG(dbgs() << " Cannot forward instruction: " << *Inst << "\n");
+ POLLY_DEBUG(dbgs() << " Cannot forward instruction: " << *Inst
+ << "\n");
return ForwardingAction::cannotForward();
}
@@ -945,7 +947,7 @@ public:
/// Try to forward an operand tree rooted in @p RA.
bool tryForwardTree(MemoryAccess *RA) {
assert(RA->isLatestScalarKind());
- LLVM_DEBUG(dbgs() << "Trying to forward operand tree " << RA << "...\n");
+ POLLY_DEBUG(dbgs() << "Trying to forward operand tree " << RA << "...\n");
ScopStmt *Stmt = RA->getStatement();
Loop *InLoop = Stmt->getSurroundingLoop();
@@ -1036,22 +1038,22 @@ static std::unique_ptr<ForwardOpTreeImpl> runForwardOpTree(Scop &S,
Impl = std::make_unique<ForwardOpTreeImpl>(&S, &LI, MaxOpGuard);
if (AnalyzeKnown) {
- LLVM_DEBUG(dbgs() << "Prepare forwarders...\n");
+ POLLY_DEBUG(dbgs() << "Prepare forwarders...\n");
Impl->computeKnownValues();
}
- LLVM_DEBUG(dbgs() << "Forwarding operand trees...\n");
+ POLLY_DEBUG(dbgs() << "Forwarding operand trees...\n");
Impl->forwardOperandTrees();
if (MaxOpGuard.hasQuotaExceeded()) {
- LLVM_DEBUG(dbgs() << "Not all operations completed because of "
- "max_operations exceeded\n");
+ POLLY_DEBUG(dbgs() << "Not all operations completed because of "
+ "max_operations exceeded\n");
KnownOutOfQuota++;
}
}
- LLVM_DEBUG(dbgs() << "\nFinal Scop:\n");
- LLVM_DEBUG(dbgs() << S);
+ POLLY_DEBUG(dbgs() << "\nFinal Scop:\n");
+ POLLY_DEBUG(dbgs() << S);
// Update statistics
Scop::ScopStatistics ScopStats = S.getStatistics();
diff --git a/polly/lib/Transform/ManualOptimizer.cpp b/polly/lib/Transform/ManualOptimizer.cpp
index 264491b7577b..0e330f207fbc 100644
--- a/polly/lib/Transform/ManualOptimizer.cpp
+++ b/polly/lib/Transform/ManualOptimizer.cpp
@@ -22,6 +22,7 @@
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <optional>
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-opt-manual"
using namespace polly;
@@ -159,13 +160,13 @@ private:
return Result;
LLVMContext &Ctx = LoopMD->getContext();
- LLVM_DEBUG(dbgs() << "Dependency violation detected\n");
+ POLLY_DEBUG(dbgs() << "Dependency violation detected\n");
DebugLoc TransformLoc = findTransformationDebugLoc(LoopMD, DebugLocAttr);
if (IgnoreDepcheck) {
- LLVM_DEBUG(dbgs() << "Still accepting transformation due to "
- "-polly-pragma-ignore-depcheck\n");
+ POLLY_DEBUG(dbgs() << "Still accepting transformation due to "
+ "-polly-pragma-ignore-depcheck\n");
if (ORE) {
ORE->emit(
OptimizationRemark(DEBUG_TYPE, RemarkName, TransformLoc, CodeRegion)
@@ -177,7 +178,7 @@ private:
return Result;
}
- LLVM_DEBUG(dbgs() << "Rolling back transformation\n");
+ POLLY_DEBUG(dbgs() << "Rolling back transformation\n");
if (ORE) {
ORE->emit(DiagnosticInfoOptimizationFailure(DEBUG_TYPE, RemarkName,
diff --git a/polly/lib/Transform/MatmulOptimizer.cpp b/polly/lib/Transform/MatmulOptimizer.cpp
index 05578bd9ed11..51ae5a778e4f 100644
--- a/polly/lib/Transform/MatmulOptimizer.cpp
+++ b/polly/lib/Transform/MatmulOptimizer.cpp
@@ -42,6 +42,7 @@
#include <string>
#include <vector>
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-opt-isl"
using namespace llvm;
@@ -1825,10 +1826,10 @@ polly::tryOptimizeMatMulPattern(isl::schedule_node Node,
const Dependences *D) {
TCInfoTy TCI;
if (PMBasedTCOpts && isTCPattern(Node, D, TCI))
- LLVM_DEBUG(dbgs() << "The tensor contraction pattern was detected\n");
+ POLLY_DEBUG(dbgs() << "The tensor contraction pattern was detected\n");
MatMulInfoTy MMI;
if (PMBasedMMMOpts && isMatrMultPattern(Node, D, MMI)) {
- LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
+ POLLY_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
return optimizeMatMulPattern(Node, TTI, MMI);
}
return {};
diff --git a/polly/lib/Transform/ScheduleOptimizer.cpp b/polly/lib/Transform/ScheduleOptimizer.cpp
index 5a0ea3b40675..55d51982d90e 100644
--- a/polly/lib/Transform/ScheduleOptimizer.cpp
+++ b/polly/lib/Transform/ScheduleOptimizer.cpp
@@ -69,6 +69,7 @@ class Loop;
class Module;
} // namespace llvm
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-opt-isl"
static cl::opt<std::string>
@@ -730,14 +731,14 @@ static void runIslScheduleOptimizer(
// Schedule without optimizations.
isl::schedule Schedule = S.getScheduleTree();
walkScheduleTreeForStatistics(S.getScheduleTree(), 0);
- LLVM_DEBUG(printSchedule(dbgs(), Schedule, "Original schedule tree"));
+ POLLY_DEBUG(printSchedule(dbgs(), Schedule, "Original schedule tree"));
bool HasUserTransformation = false;
if (PragmaBasedOpts) {
isl::schedule ManuallyTransformed = applyManualTransformations(
&S, Schedule, GetDeps(Dependences::AL_Statement), ORE);
if (ManuallyTransformed.is_null()) {
- LLVM_DEBUG(dbgs() << "Error during manual optimization\n");
+ POLLY_DEBUG(dbgs() << "Error during manual optimization\n");
return;
}
@@ -745,7 +746,7 @@ static void runIslScheduleOptimizer(
// User transformations have precedence over other transformations.
HasUserTransformation = true;
Schedule = std::move(ManuallyTransformed);
- LLVM_DEBUG(
+ POLLY_DEBUG(
printSchedule(dbgs(), Schedule, "After manual transformations"));
}
}
@@ -755,18 +756,18 @@ static void runIslScheduleOptimizer(
// TODO: Detect disabled heuristics and no user-directed transformation
// metadata earlier in ScopDetection.
if (!HasUserTransformation && S.hasDisableHeuristicsHint()) {
- LLVM_DEBUG(dbgs() << "Heuristic optimizations disabled by metadata\n");
+ POLLY_DEBUG(dbgs() << "Heuristic optimizations disabled by metadata\n");
return;
}
// Get dependency analysis.
const Dependences &D = GetDeps(Dependences::AL_Statement);
if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
- LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
+ POLLY_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
return;
}
if (!D.hasValidDependences()) {
- LLVM_DEBUG(dbgs() << "Dependency information not available\n");
+ POLLY_DEBUG(dbgs() << "Dependency information not available\n");
return;
}
@@ -776,9 +777,9 @@ static void runIslScheduleOptimizer(
// are added by the rescheduling analyzer. Therefore, disabling the
// rescheduler implicitly also disables these optimizations.
if (!EnableReschedule) {
- LLVM_DEBUG(dbgs() << "Skipping rescheduling due to command line option\n");
+ POLLY_DEBUG(dbgs() << "Skipping rescheduling due to command line option\n");
} else if (HasUserTransformation) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Skipping rescheduling due to manual transformation\n");
} else {
// Build input data.
@@ -824,10 +825,10 @@ static void runIslScheduleOptimizer(
"or 'no'. Falling back to default: 'yes'\n";
}
- LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: ");
- LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
- LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
- LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n");
+ POLLY_DEBUG(dbgs() << "\n\nCompute schedule from: ");
+ POLLY_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
+ POLLY_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
+ POLLY_DEBUG(dbgs() << "Validity := " << Validity << ";\n");
int IslMaximizeBands;
if (MaximizeBandDepth == "yes") {
@@ -873,14 +874,14 @@ static void runIslScheduleOptimizer(
Schedule = SC.compute_schedule();
if (MaxOpGuard.hasQuotaExceeded())
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Schedule optimizer calculation exceeds ISL quota\n");
}
isl_options_set_on_error(Ctx, OnErrorStatus);
ScopsRescheduled++;
- LLVM_DEBUG(printSchedule(dbgs(), Schedule, "After rescheduling"));
+ POLLY_DEBUG(printSchedule(dbgs(), Schedule, "After rescheduling"));
}
walkScheduleTreeForStatistics(Schedule, 1);
@@ -908,7 +909,7 @@ static void runIslScheduleOptimizer(
if (OAI.PatternOpts || OAI.Postopts || OAI.Prevect) {
Schedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
Schedule = hoistExtensionNodes(Schedule);
- LLVM_DEBUG(printSchedule(dbgs(), Schedule, "After post-optimizations"));
+ POLLY_DEBUG(printSchedule(dbgs(), Schedule, "After post-optimizations"));
walkScheduleTreeForStatistics(Schedule, 2);
}
diff --git a/polly/lib/Transform/ScheduleTreeTransform.cpp b/polly/lib/Transform/ScheduleTreeTransform.cpp
index e42b3d1c2460..f0684de825d2 100644
--- a/polly/lib/Transform/ScheduleTreeTransform.cpp
+++ b/polly/lib/Transform/ScheduleTreeTransform.cpp
@@ -21,6 +21,7 @@
#include "llvm/IR/Metadata.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-opt-isl"
using namespace polly;
@@ -599,7 +600,7 @@ public:
if (Nest.size() <= 1)
return getBase().visitBand(Band);
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Found loops to collapse between\n";
dumpIslObj(RootBand, dbgs());
dbgs() << "and\n";
@@ -644,7 +645,7 @@ public:
};
static isl::schedule collapseBands(isl::schedule Sched) {
- LLVM_DEBUG(dbgs() << "Collapse bands in schedule\n");
+ POLLY_DEBUG(dbgs() << "Collapse bands in schedule\n");
BandCollapseRewriter Rewriter;
return Rewriter.visit(Sched);
}
@@ -773,7 +774,7 @@ static isl::schedule tryGreedyFuse(isl::schedule_node_band LHS,
if (!canFuseOutermost(LHS, RHS, Deps))
return {};
- LLVM_DEBUG({
+ POLLY_DEBUG({
dbgs() << "Found loops for greedy fusion:\n";
dumpIslObj(LHS, dbgs());
dbgs() << "and\n";
@@ -1228,12 +1229,12 @@ isl::schedule polly::applyMaxFission(isl::schedule_node BandToFission) {
isl::schedule polly::applyGreedyFusion(isl::schedule Sched,
const isl::union_map &Deps) {
- LLVM_DEBUG(dbgs() << "Greedy loop fusion\n");
+ POLLY_DEBUG(dbgs() << "Greedy loop fusion\n");
GreedyFusionRewriter Rewriter;
isl::schedule Result = Rewriter.visit(Sched, Deps);
if (!Rewriter.AnyChange) {
- LLVM_DEBUG(dbgs() << "Found nothing to fuse\n");
+ POLLY_DEBUG(dbgs() << "Found nothing to fuse\n");
return Sched;
}
diff --git a/polly/lib/Transform/ScopInliner.cpp b/polly/lib/Transform/ScopInliner.cpp
index ca61407d2587..b78206c1e40b 100644
--- a/polly/lib/Transform/ScopInliner.cpp
+++ b/polly/lib/Transform/ScopInliner.cpp
@@ -21,6 +21,7 @@
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-scop-inliner"
using namespace llvm;
@@ -60,8 +61,8 @@ public:
if (!F)
return false;
if (F->isDeclaration()) {
- LLVM_DEBUG(dbgs() << "Skipping " << F->getName()
- << "because it is a declaration.\n");
+ POLLY_DEBUG(dbgs() << "Skipping " << F->getName()
+ << "because it is a declaration.\n");
return false;
}
@@ -86,8 +87,8 @@ public:
bool Changed = false;
if (HasScopAsTopLevelRegion) {
- LLVM_DEBUG(dbgs() << "Skipping " << F->getName()
- << " has scop as top level region");
+ POLLY_DEBUG(dbgs() << "Skipping " << F->getName()
+ << " has scop as top level region");
F->addFnAttr(llvm::Attribute::AlwaysInline);
ModulePassManager MPM;
@@ -98,8 +99,8 @@ public:
if (!PA.areAllPreserved())
Changed = true;
} else {
- LLVM_DEBUG(dbgs() << F->getName()
- << " does NOT have scop as top level region\n");
+ POLLY_DEBUG(dbgs() << F->getName()
+ << " does NOT have scop as top level region\n");
}
return Changed;
diff --git a/polly/lib/Transform/Simplify.cpp b/polly/lib/Transform/Simplify.cpp
index 41a155a41de8..75e91cd1c031 100644
--- a/polly/lib/Transform/Simplify.cpp
+++ b/polly/lib/Transform/Simplify.cpp
@@ -22,6 +22,7 @@
#include "llvm/Support/Debug.h"
#include <optional>
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-simplify"
using namespace llvm;
@@ -237,8 +238,8 @@ void SimplifyImpl::removeEmptyDomainStmts() {
assert(NumStmtsBefore >= S->getSize());
EmptyDomainsRemoved = NumStmtsBefore - S->getSize();
- LLVM_DEBUG(dbgs() << "Removed " << EmptyDomainsRemoved << " (of "
- << NumStmtsBefore << ") statements with empty domains \n");
+ POLLY_DEBUG(dbgs() << "Removed " << EmptyDomainsRemoved << " (of "
+ << NumStmtsBefore << ") statements with empty domains \n");
TotalEmptyDomainsRemoved[CallNo] += EmptyDomainsRemoved;
}
@@ -280,8 +281,8 @@ void SimplifyImpl::removeOverwrites() {
// If all of a write's elements are overwritten, remove it.
isl::union_map AccRelUnion = AccRel;
if (AccRelUnion.is_subset(WillBeOverwritten)) {
- LLVM_DEBUG(dbgs() << "Removing " << MA
- << " which will be overwritten anyway\n");
+ POLLY_DEBUG(dbgs() << "Removing " << MA
+ << " which will be overwritten anyway\n");
Stmt.removeSingleMemoryAccess(MA);
OverwritesRemoved++;
@@ -532,9 +533,9 @@ void SimplifyImpl::removeRedundantWrites() {
isl::map AccRelStoredVal = isl::map::from_domain_and_range(
AccRelWrapped, makeValueSet(StoredVal));
if (isl::union_map(AccRelStoredVal).is_subset(Known)) {
- LLVM_DEBUG(dbgs() << "Cleanup of " << MA << ":\n");
- LLVM_DEBUG(dbgs() << " Scalar: " << *StoredVal << "\n");
- LLVM_DEBUG(dbgs() << " AccRel: " << AccRel << "\n");
+ POLLY_DEBUG(dbgs() << "Cleanup of " << MA << ":\n");
+ POLLY_DEBUG(dbgs() << " Scalar: " << *StoredVal << "\n");
+ POLLY_DEBUG(dbgs() << " AccRel: " << AccRel << "\n");
Stmt.removeSingleMemoryAccess(MA);
@@ -576,8 +577,8 @@ void SimplifyImpl::removeUnnecessaryStmts() {
S->simplifySCoP(true);
assert(NumStmtsBefore >= S->getSize());
StmtsRemoved = NumStmtsBefore - S->getSize();
- LLVM_DEBUG(dbgs() << "Removed " << StmtsRemoved << " (of " << NumStmtsBefore
- << ") statements\n");
+ POLLY_DEBUG(dbgs() << "Removed " << StmtsRemoved << " (of " << NumStmtsBefore
+ << ") statements\n");
TotalStmtsRemoved[CallNo] += StmtsRemoved;
}
@@ -595,7 +596,7 @@ void SimplifyImpl::removeEmptyPartialAccesses() {
if (!AccRel.is_empty().is_true())
continue;
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Removing " << MA
<< " because it's a partial access that never occurs\n");
DeferredRemove.push_back(MA);
@@ -628,8 +629,8 @@ void SimplifyImpl::markAndSweep(LoopInfo *LI) {
for (MemoryAccess *MA : AllMAs) {
if (UsedMA.count(MA))
continue;
- LLVM_DEBUG(dbgs() << "Removing " << MA
- << " because its value is not used\n");
+ POLLY_DEBUG(dbgs() << "Removing " << MA
+ << " because its value is not used\n");
ScopStmt *Stmt = MA->getStatement();
Stmt->removeSingleMemoryAccess(MA);
@@ -650,8 +651,8 @@ void SimplifyImpl::markAndSweep(LoopInfo *LI) {
for (Instruction *Inst : AllInsts) {
auto It = UsedInsts.find({&Stmt, Inst});
if (It == UsedInsts.end()) {
- LLVM_DEBUG(dbgs() << "Removing "; Inst->print(dbgs());
- dbgs() << " because it is not used\n");
+ POLLY_DEBUG(dbgs() << "Removing "; Inst->print(dbgs());
+ dbgs() << " because it is not used\n");
DeadInstructionsRemoved++;
TotalDeadInstructionsRemoved[CallNo]++;
continue;
@@ -708,31 +709,31 @@ void SimplifyImpl::run(Scop &S, LoopInfo *LI) {
this->S = &S;
ScopsProcessed[CallNo]++;
- LLVM_DEBUG(dbgs() << "Removing statements that are never executed...\n");
+ POLLY_DEBUG(dbgs() << "Removing statements that are never executed...\n");
removeEmptyDomainStmts();
- LLVM_DEBUG(dbgs() << "Removing partial writes that never happen...\n");
+ POLLY_DEBUG(dbgs() << "Removing partial writes that never happen...\n");
removeEmptyPartialAccesses();
- LLVM_DEBUG(dbgs() << "Removing overwrites...\n");
+ POLLY_DEBUG(dbgs() << "Removing overwrites...\n");
removeOverwrites();
- LLVM_DEBUG(dbgs() << "Coalesce partial writes...\n");
+ POLLY_DEBUG(dbgs() << "Coalesce partial writes...\n");
coalesceWrites();
- LLVM_DEBUG(dbgs() << "Removing redundant writes...\n");
+ POLLY_DEBUG(dbgs() << "Removing redundant writes...\n");
removeRedundantWrites();
- LLVM_DEBUG(dbgs() << "Cleanup unused accesses...\n");
+ POLLY_DEBUG(dbgs() << "Cleanup unused accesses...\n");
markAndSweep(LI);
- LLVM_DEBUG(dbgs() << "Removing statements without side effects...\n");
+ POLLY_DEBUG(dbgs() << "Removing statements without side effects...\n");
removeUnnecessaryStmts();
if (isModified())
ScopsModified[CallNo]++;
- LLVM_DEBUG(dbgs() << "\nFinal Scop:\n");
- LLVM_DEBUG(dbgs() << S);
+ POLLY_DEBUG(dbgs() << "\nFinal Scop:\n");
+ POLLY_DEBUG(dbgs() << S);
auto ScopStats = S.getStatistics();
NumValueWrites[CallNo] += ScopStats.NumValueWrites;
diff --git a/polly/lib/Transform/ZoneAlgo.cpp b/polly/lib/Transform/ZoneAlgo.cpp
index 4c86891d2cf7..a114a241d87a 100644
--- a/polly/lib/Transform/ZoneAlgo.cpp
+++ b/polly/lib/Transform/ZoneAlgo.cpp
@@ -156,6 +156,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
+#include "polly/Support/PollyDebug.h"
#define DEBUG_TYPE "polly-zone"
STATISTIC(NumIncompatibleArrays, "Number of not zone-analyzable arrays");
@@ -342,7 +343,7 @@ void ZoneAlgorithm::collectIncompatibleElts(ScopStmt *Stmt,
if (MA->isRead()) {
// Reject load after store to same location.
if (!Stores.is_disjoint(AccRel)) {
- LLVM_DEBUG(
+ POLLY_DEBUG(
dbgs() << "Load after store of same element in same statement\n");
OptimizationRemarkMissed R(PassName, "LoadAfterStore",
MA->getAccessInstruction());
@@ -362,7 +363,7 @@ void ZoneAlgorithm::collectIncompatibleElts(ScopStmt *Stmt,
// In region statements the order is less clear, eg. the load and store
// might be in a boxed loop.
if (Stmt->isRegionStmt() && !Loads.is_disjoint(AccRel)) {
- LLVM_DEBUG(dbgs() << "WRITE in non-affine subregion not supported\n");
+ POLLY_DEBUG(dbgs() << "WRITE in non-affine subregion not supported\n");
OptimizationRemarkMissed R(PassName, "StoreInSubregion",
MA->getAccessInstruction());
R << "store is in a non-affine subregion";
@@ -373,7 +374,7 @@ void ZoneAlgorithm::collectIncompatibleElts(ScopStmt *Stmt,
// Do not allow more than one store to the same location.
if (!Stores.is_disjoint(AccRel) && !onlySameValueWrites(Stmt)) {
- LLVM_DEBUG(dbgs() << "WRITE after WRITE to same element\n");
+ POLLY_DEBUG(dbgs() << "WRITE after WRITE to same element\n");
OptimizationRemarkMissed R(PassName, "StoreAfterStore",
MA->getAccessInstruction());
R << "store after store of same element in same statement";
diff --git a/polly/test/Support/pollyDebug.ll b/polly/test/Support/pollyDebug.ll
new file mode 100644
index 000000000000..ada079023b6c
--- /dev/null
+++ b/polly/test/Support/pollyDebug.ll
@@ -0,0 +1,85 @@
+; Test if "polly-debug" flag enables debug prints from different parts of polly
+; RUN: opt %loadNPMPolly -O3 -polly -polly-debug --disable-output < %s 2>&1 | FileCheck %s
+;
+; REQUIRES: asserts
+
+; void callee(int n, double A[], int i) {
+; for (int j = 0; j < n; j += 1)
+; A[i+j] = 42.0;
+; }
+;
+; void caller(int n, double A[]) {
+; for (int i = 0; i < n; i += 1)
+; callee(n, A, i);
+; }
+
+
+%unrelated_type = type { i32 }
+
+define internal void @callee(i32 %n, ptr noalias nonnull %A, i32 %i) #0 {
+entry:
+ br label %for
+
+for:
+ %j = phi i32 [0, %entry], [%j.inc, %inc]
+ %j.cmp = icmp slt i32 %j, %n
+ br i1 %j.cmp, label %body, label %exit
+
+ body:
+ %idx = add i32 %i, %j
+ %arrayidx = getelementptr inbounds double, ptr %A, i32 %idx
+ store double 42.0, ptr %arrayidx
+ br label %inc
+
+inc:
+ %j.inc = add nuw nsw i32 %j, 1
+ br label %for
+
+exit:
+ br label %return
+
+return:
+ ret void
+}
+
+
+define void @caller(i32 %n, ptr noalias nonnull %A) #0 {
+entry:
+ br label %for
+
+for:
+ %i = phi i32 [0, %entry], [%j.inc, %inc]
+ %i.cmp = icmp slt i32 %i, %n
+ br i1 %i.cmp, label %body, label %exit
+
+ body:
+ call void @callee(i32 %n, ptr %A, i32 %i)
+ br label %inc
+
+inc:
+ %j.inc = add nuw nsw i32 %i, 1
+ br label %for
+
+exit:
+ br label %return
+
+return:
+ ret void
+}
+
+
+declare void @unrelated_decl()
+
+
+attributes #0 = { noinline }
+
+!llvm.ident = !{!8}
+!8 = !{!"xyxxy"}
+
+; CHECK: Checking region: entry => <Function Return>
+; CHECK: Removing statements that are never executed...
+; CHECK: Final Scop:
+; CHECK: Forwarding operand trees...
+; CHECK: Final Scop:
+; CHECK: Collapsing scalars to unused array elements...
+; CHECK: Final Scop:
diff --git a/utils/bazel/.bazelrc b/utils/bazel/.bazelrc
index c06e9b341626..1d7cf4a4df1b 100644
--- a/utils/bazel/.bazelrc
+++ b/utils/bazel/.bazelrc
@@ -30,6 +30,10 @@ build --features=layering_check
# See: https://bazel.build/reference/be/functions#exports_files
build --incompatible_no_implicit_file_export
+# Enable so downstream users can flip this flag globally, this should
+# eventually become the default
+common --incompatible_disallow_empty_glob
+
###############################################################################
# Options to select different strategies for linking potential dependent
# libraries. The default leaves it disabled.
diff --git a/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel b/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
index 043a3b61a75f..1c12c8167ba4 100644
--- a/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/bolt/BUILD.bazel
@@ -221,8 +221,6 @@ cc_library(
srcs = glob([
"lib/Target/AArch64/*.cpp",
]),
- hdrs = glob([
- ]),
includes = ["include"],
deps = [
":Core",
diff --git a/utils/bazel/llvm-project-overlay/clang-tools-extra/clang-tidy/defs.bzl b/utils/bazel/llvm-project-overlay/clang-tools-extra/clang-tidy/defs.bzl
index 41c03aad871c..387d104a773a 100644
--- a/utils/bazel/llvm-project-overlay/clang-tools-extra/clang-tidy/defs.bzl
+++ b/utils/bazel/llvm-project-overlay/clang-tools-extra/clang-tidy/defs.bzl
@@ -16,8 +16,8 @@ _common_library_deps = [
]
def clang_tidy_library(name, **kwargs):
- kwargs["srcs"] = kwargs.get("srcs", native.glob([paths.join(name, "*.cpp")]))
- kwargs["hdrs"] = kwargs.get("hdrs", native.glob([paths.join(name, "*.h")]))
+ kwargs["srcs"] = kwargs.get("srcs", native.glob([paths.join(name, "*.cpp")], allow_empty = True))
+ kwargs["hdrs"] = kwargs.get("hdrs", native.glob([paths.join(name, "*.h")], allow_empty = True))
kwargs["deps"] = kwargs.get("deps", []) + _common_library_deps
cc_library(
name = name,
diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
index 865cafbf50c6..1bf6bee10952 100644
--- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
@@ -614,7 +614,6 @@ cc_library(
"include/clang/Basic/Version.inc",
] + glob([
"lib/Basic/*.cpp",
- "lib/Basic/*.c",
"lib/Basic/*.h",
"lib/Basic/Targets/*.cpp",
"lib/Basic/Targets/*.h",
@@ -1042,7 +1041,6 @@ cc_library(
"lib/Analysis/FlowSensitive/Models/*.cpp",
"lib/Analysis/FlowSensitive/*.cpp",
"lib/Analysis/*.cpp",
- "lib/Analysis/*.h",
]) + [
":analysis_htmllogger_gen",
],
@@ -1180,10 +1178,8 @@ gentbl(
cc_library(
name = "parse",
- srcs = [
- ] + glob([
+ srcs = glob([
"lib/Parse/*.cpp",
- "lib/Parse/*.h",
]),
hdrs = [
"include/clang/Parse/AttrParserStringSwitches.inc",
@@ -1207,7 +1203,6 @@ cc_library(
name = "ast_matchers",
srcs = glob([
"lib/ASTMatchers/*.cpp",
- "lib/ASTMatchers/*.h",
]),
hdrs = glob(["include/clang/ASTMatchers/*.h"]),
includes = ["include"],
@@ -1241,7 +1236,6 @@ cc_library(
name = "rewrite",
srcs = glob([
"lib/Rewrite/*.cpp",
- "lib/Rewrite/*.h",
]),
hdrs = glob(["include/clang/Rewrite/Core/*.h"]),
includes = ["include"],
@@ -1275,7 +1269,6 @@ cc_library(
name = "tooling_core",
srcs = glob([
"lib/Tooling/Core/*.cpp",
- "lib/Tooling/Core/*.h",
]),
hdrs = glob(["include/clang/Tooling/Core/*.h"]),
includes = ["include"],
@@ -1340,11 +1333,9 @@ cc_library(
name = "tooling_refactoring",
srcs = glob([
"lib/Tooling/Refactoring/**/*.cpp",
- "lib/Tooling/Refactoring/**/*.h",
]),
hdrs = glob([
"include/clang/Tooling/Refactoring/**/*.h",
- "include/clang/Tooling/Refactoring/**/*.def",
]),
deps = [
":ast",
@@ -1593,9 +1584,6 @@ cc_library(
srcs = glob(
[
"lib/Driver/*.cpp",
- "lib/Driver/*.h",
- "lib/Driver/Arch/*.cpp",
- "lib/Driver/Arch/*.h",
"lib/Driver/ToolChains/*.cpp",
"lib/Driver/ToolChains/*.h",
"lib/Driver/ToolChains/Arch/*.cpp",
@@ -1833,9 +1821,6 @@ cc_library(
copts = ["$(STACK_FRAME_UNLIMITED)"],
data = [":builtin_headers_gen"],
includes = ["include"],
- textual_hdrs = glob([
- "include/clang/Frontend/*.def",
- ]),
deps = [
":apinotes",
":ast",
@@ -1872,7 +1857,6 @@ cc_library(
name = "frontend_rewrite",
srcs = glob([
"lib/Frontend/Rewrite/*.cpp",
- "lib/Frontend/Rewrite/*.h",
]),
hdrs = glob(["include/clang/Rewrite/Frontend/*.h"]),
includes = ["include"],
@@ -2072,6 +2056,7 @@ cc_library(
"//llvm:Demangle",
"//llvm:Support",
"//llvm:TextAPI",
+ "//llvm:TextAPIBinaryReader",
],
)
@@ -2116,7 +2101,6 @@ cc_library(
name = "frontend_tool",
srcs = glob([
"lib/FrontendTool/*.cpp",
- "lib/FrontendTool/*.h",
]),
hdrs = glob(["include/clang/FrontendTool/*.h"]),
includes = ["include"],
@@ -2320,7 +2304,6 @@ cc_binary(
testonly = 1,
srcs = glob([
"tools/clang-import-test/*.cpp",
- "tools/clang-import-test/*.h",
]),
stamp = 0,
deps = [
@@ -2350,7 +2333,6 @@ cc_library(
name = "clang-driver",
srcs = glob([
"tools/driver/*.cpp",
- "tools/driver/*.h",
]) + ["clang-driver.cpp"],
copts = [
# Disable stack frame size checks in the driver because
@@ -2668,7 +2650,6 @@ cc_library(
name = "extract_api",
srcs = glob([
"lib/ExtractAPI/**/*.cpp",
- "lib/ExtractAPI/**/*.h",
]),
hdrs = glob(["include/clang/ExtractAPI/**/*.h"]),
includes = ["include"],
diff --git a/utils/bazel/llvm-project-overlay/compiler-rt/BUILD.bazel b/utils/bazel/llvm-project-overlay/compiler-rt/BUILD.bazel
index 9bdd454e1e36..577e6c033b4e 100644
--- a/utils/bazel/llvm-project-overlay/compiler-rt/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/compiler-rt/BUILD.bazel
@@ -19,6 +19,10 @@ cc_library(
],
# Will raise error unless supported platforms.
}),
+ target_compatible_with = select({
+ "@platforms//os:linux": [],
+ "//conditions:default": ["@platforms//:incompatible"],
+ }),
)
WIN32_ONLY_FILES = [
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 40cfb1f470db..9169f330baac 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -68,6 +68,7 @@ libc_support_library(
name = "llvm_libc_macros_math_macros",
hdrs = ["include/llvm-libc-macros/math-macros.h"],
deps = [":llvm_libc_macros_limits_macros"],
+ defines = ["__FP_LOGBNAN_MIN"],
)
libc_support_library(
@@ -97,6 +98,11 @@ libc_support_library(
deps = [":llvm_libc_macros_float_macros"],
)
+libc_support_library(
+ name = "llvm_libc_macros_fcntl_macros",
+ hdrs = ["include/llvm-libc-macros/linux/fcntl-macros.h"],
+)
+
############################### Support libraries ##############################
libc_support_library(
@@ -211,7 +217,6 @@ libc_support_library(
":__support_cpp_limits",
":__support_cpp_type_traits",
":__support_macros_attributes",
- ":__support_macros_config",
":__support_macros_sanitizer",
],
)
@@ -339,6 +344,7 @@ libc_support_library(
"src/__support/CPP/type_traits/is_base_of.h",
"src/__support/CPP/type_traits/is_class.h",
"src/__support/CPP/type_traits/is_const.h",
+ "src/__support/CPP/type_traits/is_constant_evaluated.h",
"src/__support/CPP/type_traits/is_convertible.h",
"src/__support/CPP/type_traits/is_destructible.h",
"src/__support/CPP/type_traits/is_enum.h",
@@ -377,7 +383,6 @@ libc_support_library(
],
deps = [
":__support_macros_attributes",
- ":__support_macros_config",
":__support_macros_properties_types",
":llvm_libc_macros_stdfix_macros",
],
@@ -439,6 +444,7 @@ libc_support_library(
hdrs = ["src/__support/fixedvector.h"],
deps = [
":__support_cpp_array",
+ ":__support_cpp_iterator",
],
)
@@ -618,7 +624,10 @@ libc_support_library(
deps = [
":__support_common",
":__support_cpp_type_traits",
+ ":__support_fputil_fenv_impl",
":__support_fputil_fp_bits",
+ ":__support_macros_optimization",
+ ":__support_uint128",
],
)
@@ -654,7 +663,6 @@ libc_support_library(
":__support_cpp_limits",
":__support_cpp_type_traits",
":__support_macros_attributes",
- ":__support_macros_config",
],
)
@@ -743,12 +751,12 @@ libc_support_library(
deps = [
":__support_common",
":__support_cpp_bit",
- ":__support_sign",
":__support_cpp_type_traits",
":__support_libc_assert",
":__support_macros_attributes",
":__support_macros_properties_types",
":__support_math_extras",
+ ":__support_sign",
":__support_uint128",
],
)
@@ -993,10 +1001,7 @@ libc_support_library(
libc_support_library(
name = "__support_osutil_quick_exit",
hdrs = ["src/__support/OSUtil/quick_exit.h"],
- textual_hdrs = [
- "src/__support/OSUtil/linux/quick_exit.h",
- #TODO: add support for GPU quick_exit (isn't just in a header.)
- ],
+ srcs = ["src/__support/OSUtil/linux/quick_exit.cpp"],
deps = [
":__support_osutil_syscall",
],
@@ -2312,7 +2317,6 @@ libc_support_library(
":__support_cpp_cstddef",
":__support_cpp_type_traits",
":__support_macros_attributes",
- ":__support_macros_config",
":__support_macros_optimization",
":__support_macros_properties_architectures",
":__support_macros_properties_cpu_features",
@@ -3262,6 +3266,18 @@ libc_function(
],
)
+libc_function(
+ name = "rename",
+ srcs = ["src/stdio/linux/rename.cpp"],
+ hdrs = ["src/stdio/rename.h"],
+ deps = [
+ ":__support_common",
+ ":__support_osutil_syscall",
+ ":errno",
+ ":llvm_libc_macros_fcntl_macros",
+ ],
+)
+
############################### sys/stat targets ###############################
libc_function(
diff --git a/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl b/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
index 7d815bc4a229..7dc12bade260 100644
--- a/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
+++ b/utils/bazel/llvm-project-overlay/libc/libc_build_rules.bzl
@@ -78,7 +78,6 @@ def libc_function(
its deps.
**kwargs: Other attributes relevant for a cc_library. For example, deps.
"""
-
# We use the explicit equals pattern here because append and += mutate the
# original list, where this creates a new list and stores it in deps.
copts = copts or []
@@ -87,7 +86,15 @@ def libc_function(
"-fno-builtin",
"-fno-lax-vector-conversions",
"-ftrivial-auto-var-init=pattern",
+ "-fno-omit-frame-pointer",
+ "-fstack-protector-strong",
]
+ # x86 targets have -mno-omit-leaf-frame-pointer.
+ platform_copts = selects.with_or({
+ PLATFORM_CPU_X86_64: ["-mno-omit-leaf-frame-pointer"],
+ "//conditions:default": []
+ })
+ copts = copts + platform_copts
# We compile the code twice, the first target is suffixed with ".__internal__" and contains the
# C++ functions in the "LIBC_NAMESPACE" namespace. This allows us to test the function in the
diff --git a/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel
index 2a0c071f2286..d2087a3d528f 100644
--- a/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/test/UnitTest/BUILD.bazel
@@ -76,6 +76,7 @@ libc_support_library(
deps = [
":LibcUnitTest",
":string_utils",
+ "//libc:__support_cpp_array",
"//libc:__support_cpp_bit",
"//libc:__support_cpp_bitset",
"//libc:__support_cpp_span",
diff --git a/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
index 803010e8b3ad..5f59d70ecc16 100644
--- a/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/utils/MPFRWrapper/BUILD.bazel
@@ -46,6 +46,7 @@ libc_support_library(
"//libc:__support_cpp_type_traits",
"//libc:__support_fputil_fp_bits",
"//libc:__support_fputil_fpbits_str",
+ "//libc:llvm_libc_macros_math_macros",
"//libc/test/UnitTest:LibcUnitTest",
"//libc/test/UnitTest:fp_test_helpers",
"//libc/utils/MPFRWrapper:mpfr_impl",
diff --git a/utils/bazel/llvm-project-overlay/lld/BUILD.bazel b/utils/bazel/llvm-project-overlay/lld/BUILD.bazel
index 8fb71fc1f971..5a494a13acea 100644
--- a/utils/bazel/llvm-project-overlay/lld/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/lld/BUILD.bazel
@@ -187,7 +187,6 @@ cc_library(
name = "MinGW",
srcs = glob([
"MinGW/*.cpp",
- "MinGW/*.h",
]),
includes = ["MinGW"],
deps = [
@@ -296,7 +295,6 @@ cc_binary(
name = "lld",
srcs = glob([
"tools/lld/*.cpp",
- "tools/lld/*.h",
]) + ["lld-driver.cpp"],
deps = [
":COFF",
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index 4802daa66286..9cfcb7d3838e 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -4,8 +4,10 @@
load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
load("@bazel_skylib//rules:expand_template.bzl", "expand_template")
+load("//mlir:tblgen.bzl", "td_library")
load(":binary_alias.bzl", "binary_alias")
load(":config.bzl", "llvm_config_defines")
+load(":driver.bzl", "generate_driver_selects", "generate_driver_tools_def", "llvm_driver_cc_binary", "select_driver_tools")
load(":enum_targets_gen.bzl", "enum_targets_gen")
load(":targets.bzl", "llvm_targets")
load(":tblgen.bzl", "gentbl")
@@ -66,7 +68,10 @@ enum_targets_gen(
llvm_target_asm_parsers = [
t
for t in llvm_targets
- if glob(["lib/Target/{}/AsmParser/CMakeLists.txt".format(t)])
+ if glob(
+ ["lib/Target/{}/AsmParser/CMakeLists.txt".format(t)],
+ allow_empty = True,
+ )
]
enum_targets_gen(
@@ -81,7 +86,10 @@ enum_targets_gen(
llvm_target_disassemblers = [
t
for t in llvm_targets
- if glob(["lib/Target/{}/Disassembler/CMakeLists.txt".format(t)])
+ if glob(
+ ["lib/Target/{}/Disassembler/CMakeLists.txt".format(t)],
+ allow_empty = True,
+ )
]
enum_targets_gen(
@@ -96,7 +104,10 @@ enum_targets_gen(
llvm_target_mcas = [
t
for t in llvm_targets
- if glob(["lib/Target/{}/MCA/CMakeLists.txt".format(t)])
+ if glob(
+ ["lib/Target/{}/MCA/CMakeLists.txt".format(t)],
+ allow_empty = True,
+ )
]
enum_targets_gen(
@@ -111,7 +122,10 @@ enum_targets_gen(
llvm_target_exegesis = [
t
for t in llvm_targets
- if glob(["tools/llvm-exegesis/lib/{}/CMakeLists.txt".format(t)])
+ if glob(
+ ["tools/llvm-exegesis/lib/{}/CMakeLists.txt".format(t)],
+ allow_empty = True,
+ )
]
enum_targets_gen(
@@ -143,6 +157,12 @@ exports_files([
"include/llvm/Config/abi-breaking.h.cmake",
])
+td_library(
+ name = "OptParserTdFiles",
+ srcs = ["include/llvm/Option/OptParser.td"],
+ includes = ["include"],
+)
+
cc_library(
name = "config",
hdrs = [
@@ -168,7 +188,6 @@ cc_library(
name = "Demangle",
srcs = glob([
"lib/Demangle/*.cpp",
- "lib/Demangle/*.h",
]),
hdrs = glob([
"include/llvm/Demangle/*.h",
@@ -203,7 +222,6 @@ cc_library(
"include/llvm/Option/*.h",
]) + select({
"@platforms//os:windows": glob([
- "lib/Support/Windows/*.h",
"lib/Support/Windows/*.inc",
]),
"//conditions:default": glob([
@@ -275,6 +293,10 @@ cc_library(
"-ldl",
"-lm",
],
+ "@platforms//os:macos": [
+ "-pthread",
+ "-ldl",
+ ],
"//conditions:default": [
"-pthread",
"-ldl",
@@ -315,7 +337,6 @@ cc_library(
name = "LineEditor",
srcs = glob([
"lib/LineEditor/*.cpp",
- "lib/LineEditor/*.h",
]),
hdrs = glob(["include/llvm/LineEditor/*.h"]),
copts = llvm_copts,
@@ -329,7 +350,6 @@ cc_library(
name = "Option",
srcs = glob([
"lib/Option/*.cpp",
- "lib/Option/*.h",
]),
hdrs = glob(["include/llvm/Option/*.h"]),
copts = llvm_copts,
@@ -376,8 +396,6 @@ cc_library(
name = "BinaryFormat",
srcs = glob([
"lib/BinaryFormat/*.cpp",
- "lib/BinaryFormat/*.def",
- "lib/BinaryFormat/*.h",
]),
hdrs = glob([
"include/llvm/BinaryFormat/*.h",
@@ -409,7 +427,6 @@ cc_library(
name = "DebugInfoMSF",
srcs = glob([
"lib/DebugInfo/MSF/*.cpp",
- "lib/DebugInfo/MSF/*.h",
]),
hdrs = glob(["include/llvm/DebugInfo/MSF/*.h"]),
copts = llvm_copts,
@@ -420,7 +437,6 @@ cc_library(
name = "DebugInfoBTF",
srcs = glob([
"lib/DebugInfo/BTF/*.cpp",
- "lib/DebugInfo/BTF/*.h",
]),
hdrs = glob(["include/llvm/DebugInfo/BTF/*.h"]) + [
"include/llvm/DebugInfo/BTF/BTF.def",
@@ -437,7 +453,6 @@ cc_library(
name = "DebugInfoCodeView",
srcs = glob([
"lib/DebugInfo/CodeView/*.cpp",
- "lib/DebugInfo/CodeView/*.h",
]),
hdrs = glob([
"include/llvm/DebugInfo/CodeView/*.h",
@@ -480,9 +495,7 @@ cc_library(
name = "DebugInfoPDB",
srcs = glob([
"lib/DebugInfo/PDB/*.cpp",
- "lib/DebugInfo/PDB/*.h",
"lib/DebugInfo/PDB/Native/*.cpp",
- "lib/DebugInfo/PDB/Native/*.h",
]),
hdrs = glob([
"include/llvm/DebugInfo/PDB/*.h",
@@ -523,12 +536,9 @@ cc_library(
name = "MC",
srcs = glob([
"lib/MC/*.cpp",
- "lib/MC/*.h",
]),
hdrs = glob([
"include/llvm/MC/*.h",
- "include/llvm/MC/*.def",
- "include/llvm/MC/*.inc",
]),
copts = llvm_copts,
deps = [
@@ -545,7 +555,6 @@ cc_library(
name = "DebugInfoDWARF",
srcs = glob([
"lib/DebugInfo/DWARF/*.cpp",
- "lib/DebugInfo/DWARF/*.h",
]),
hdrs = glob(["include/llvm/DebugInfo/DWARF/*.h"]),
copts = llvm_copts,
@@ -563,7 +572,6 @@ cc_library(
name = "DebugInfoGSYM",
srcs = glob([
"lib/DebugInfo/GSYM/*.cpp",
- "lib/DebugInfo/GSYM/*.h",
]),
hdrs = glob(["include/llvm/DebugInfo/GSYM/*.h"]),
copts = llvm_copts,
@@ -580,7 +588,6 @@ cc_library(
name = "Symbolize",
srcs = glob([
"lib/DebugInfo/Symbolize/*.cpp",
- "lib/DebugInfo/Symbolize/*.h",
]),
hdrs = glob([
"include/llvm/DebugInfo/Symbolize/*.h",
@@ -599,18 +606,44 @@ cc_library(
],
)
+# Command line flag to control which tools get included in the llvm driver binary.
+# The macro also generates config_setting targets used by select_driver_tools().
+generate_driver_selects(name = "driver-tools")
+
+generate_driver_tools_def(
+ name = "gen_llvm_driver_tools_def",
+ out = "LLVMDriverTools.def",
+ driver_tools = select_driver_tools(":driver-tools"),
+)
+
+# Workaround inability to put `.def` files into `srcs` with a library
+cc_library(
+ name = "llvm_driver_tools_def_lib",
+ includes = ["."],
+ textual_hdrs = ["LLVMDriverTools.def"],
+)
+
+cc_binary(
+ name = "llvm",
+ srcs = glob(["tools/llvm-driver/*.cpp"]),
+ deps = [
+ ":Support",
+ ":llvm_driver_tools_def_lib",
+ ] + select_driver_tools(":driver-tools"),
+)
+
cc_binary(
name = "llvm-min-tblgen",
srcs = [
"utils/TableGen/Attributes.cpp",
- "utils/TableGen/CodeGenIntrinsics.cpp",
- "utils/TableGen/CodeGenIntrinsics.h",
+ "utils/TableGen/Basic/CodeGenIntrinsics.cpp",
+ "utils/TableGen/Basic/CodeGenIntrinsics.h",
+ "utils/TableGen/Basic/SDNodeProperties.cpp",
+ "utils/TableGen/Basic/SDNodeProperties.h",
+ "utils/TableGen/Basic/SequenceToOffsetTable.h",
"utils/TableGen/DirectiveEmitter.cpp",
"utils/TableGen/IntrinsicEmitter.cpp",
"utils/TableGen/RISCVTargetDefEmitter.cpp",
- "utils/TableGen/SDNodeProperties.cpp",
- "utils/TableGen/SDNodeProperties.h",
- "utils/TableGen/SequenceToOffsetTable.h",
"utils/TableGen/TableGen.cpp",
"utils/TableGen/VTEmitter.cpp",
],
@@ -626,17 +659,17 @@ cc_binary(
cc_library(
name = "TableGenGlobalISel",
srcs = [
- "utils/TableGen/GlobalISel/CodeExpander.cpp",
+ "utils/TableGen/Common/GlobalISel/CodeExpander.cpp",
],
- hdrs = glob([
+ hdrs = [
# We have to include these headers here as well as in the `hdrs` below
# to allow the `.cpp` files to use file-relative-inclusion to find
# them, even though consumers of this library use inclusion relative to
# `utils/TableGen` with the `strip_includes_prefix` of this library.
# This mixture appears to be incompatible with header modules.
- "utils/TableGen/GlobalISel/CodeExpander.h",
- "utils/TableGen/GlobalISel/CodeExpansions.h",
- ]),
+ "utils/TableGen/Common/GlobalISel/CodeExpander.h",
+ "utils/TableGen/Common/GlobalISel/CodeExpansions.h",
+ ],
copts = llvm_copts,
features = ["-header_modules"],
strip_include_prefix = "utils/TableGen",
@@ -658,20 +691,25 @@ cc_binary(
srcs = glob(
[
"utils/TableGen/*.cpp",
- "utils/TableGen/*.inc",
"utils/TableGen/*.h",
- "utils/TableGen/GlobalISel/*.cpp",
- "utils/TableGen/GlobalISel/*.h",
+ "utils/TableGen/Basic/*.cpp",
+ "utils/TableGen/Basic/*.h",
+ "utils/TableGen/Common/*.cpp",
+ "utils/TableGen/Common/*.h",
+ "utils/TableGen/Common/GlobalISel/*.cpp",
+ "utils/TableGen/Common/GlobalISel/*.h",
# Some tablegen sources include headers from MC, so these have to be
# listed here. MC uses headers produced by tablegen, so it cannot be a
# regular dependency.
"include/llvm/MC/*.h",
- "include/llvm/TargetParser/SubtargetFeature.h",
],
- exclude = ["utils/TableGen/GlobalISel/CodeExpander.cpp"],
- ),
+ exclude = ["utils/TableGen/Common/GlobalISel/CodeExpander.cpp"],
+ ) + [
+ "include/llvm/TargetParser/SubtargetFeature.h",
+ ],
copts = llvm_copts,
+ includes = ["utils/TableGen"],
stamp = 0,
deps = [
":CodeGenTypes",
@@ -821,7 +859,6 @@ cc_library(
name = "BitstreamReader",
srcs = glob([
"lib/Bitstream/Reader/*.cpp",
- "lib/Bitstream/Reader/*.h",
]),
hdrs = [
"include/llvm/Bitstream/BitCodeEnums.h",
@@ -836,9 +873,6 @@ cc_library(
cc_library(
name = "BitstreamWriter",
- srcs = glob([
- "lib/Bitstream/Writer/*.h",
- ]),
hdrs = [
"include/llvm/Bitstream/BitCodeEnums.h",
"include/llvm/Bitstream/BitCodes.h",
@@ -956,7 +990,6 @@ cc_library(
name = "MCParser",
srcs = glob([
"lib/MC/MCParser/*.cpp",
- "lib/MC/MCParser/*.h",
]),
hdrs = glob(["include/llvm/MC/MCParser/*.h"]),
copts = llvm_copts,
@@ -1002,11 +1035,10 @@ cc_library(
srcs = glob([
"lib/TextAPI/BinaryReader/**/*.cpp",
]),
- hdrs = ["include/llvm/TextAPI/DylibReader.h"] + glob(
- ["lib/TextAPI/BinaryReader/**/*.h"],
- ),
+ hdrs = ["include/llvm/TextAPI/DylibReader.h"],
copts = llvm_copts,
deps = [
+ ":DebugInfoDWARF",
":Object",
":Support",
":TargetParser",
@@ -1067,7 +1099,6 @@ cc_library(
name = "ObjectYAML",
srcs = glob([
"lib/ObjectYAML/*.cpp",
- "lib/ObjectYAML/*.h",
]),
hdrs = glob(["include/llvm/ObjectYAML/*.h"]),
copts = llvm_copts,
@@ -1085,7 +1116,6 @@ cc_library(
name = "ProfileData",
srcs = glob([
"lib/ProfileData/*.cpp",
- "lib/ProfileData/*.h",
]),
hdrs = glob([
"include/llvm/ProfileData/*.h",
@@ -1109,7 +1139,6 @@ cc_library(
name = "Coverage",
srcs = glob([
"lib/ProfileData/Coverage/*.cpp",
- "lib/ProfileData/Coverage/*.h",
]),
hdrs = glob(["include/llvm/ProfileData/Coverage/*.h"]),
copts = llvm_copts,
@@ -1126,8 +1155,6 @@ cc_library(
srcs = glob(
[
"lib/Analysis/*.cpp",
- "lib/Analysis/*.h",
- "lib/Analysis/*.def",
],
),
hdrs = glob(
@@ -1185,7 +1212,6 @@ cc_library(
name = "Target",
srcs = glob([
"lib/Target/*.cpp",
- "lib/Target/*.h",
]),
hdrs = glob([
"include/llvm/Target/*.h",
@@ -1221,14 +1247,11 @@ cc_library(
name = "TargetParser",
srcs = glob([
"lib/TargetParser/*.cpp",
- "lib/TargetParser/*.h",
]) + select({
"@platforms//os:windows": glob([
- "lib/TargetParser/Windows/*.h",
"lib/TargetParser/Windows/*.inc",
]),
"//conditions:default": glob([
- "lib/TargetParser/Unix/*.h",
"lib/TargetParser/Unix/*.inc",
]),
}),
@@ -1252,7 +1275,6 @@ cc_library(
name = "DWP",
srcs = glob([
"lib/DWP/*.cpp",
- "lib/DWP/*.h",
]),
hdrs = glob(["include/llvm/DWP/*.h"]),
copts = llvm_copts,
@@ -1269,7 +1291,6 @@ cc_library(
name = "TransformUtils",
srcs = glob([
"lib/Transforms/Utils/*.cpp",
- "lib/Transforms/Utils/*.h",
]),
hdrs = glob(["include/llvm/Transforms/Utils/*.h"]) + [
"include/llvm/Transforms/Utils.h",
@@ -1390,7 +1411,6 @@ cc_library(
name = "Scalar",
srcs = glob([
"lib/Transforms/Scalar/*.cpp",
- "lib/Transforms/Scalar/*.h",
]),
hdrs = glob(["include/llvm/Transforms/Scalar/*.h"]) + [
"include/llvm/Transforms/Scalar.h",
@@ -1432,9 +1452,6 @@ cc_library(
cc_library(
name = "FrontendDebug",
- srcs = glob([
- "lib/Frontend/Debug/*.cpp",
- ]),
hdrs = glob([
"include/llvm/Frontend/Debug/*.h",
]),
@@ -1530,8 +1547,6 @@ cc_library(
]),
hdrs = glob([
"include/llvm/Frontend/OpenMP/*.h",
- "include/llvm/Frontend/OpenMP/OMP/*.h",
- "include/llvm/Frontend/*.h",
]) + [
"include/llvm/Frontend/OpenMP/OMP.h.inc",
"include/llvm/Frontend/OpenMP/OMP.inc",
@@ -1591,9 +1606,7 @@ cc_library(
]) + [
"include/llvm/Frontend/OpenACC/ACC.inc",
],
- hdrs = glob([
- "include/llvm/Frontend/OpenACC/*.h",
- ]) + ["include/llvm/Frontend/OpenACC/ACC.h.inc"],
+ hdrs = ["include/llvm/Frontend/OpenACC/ACC.h.inc"],
copts = llvm_copts,
deps = [
":Analysis",
@@ -1607,7 +1620,6 @@ cc_library(
name = "AsmParser",
srcs = glob([
"lib/AsmParser/*.cpp",
- "lib/AsmParser/*.h",
]),
hdrs = glob(["include/llvm/AsmParser/*.h"]),
copts = llvm_copts,
@@ -1623,7 +1635,6 @@ cc_library(
name = "IRPrinter",
srcs = glob([
"lib/IRPrinter/*.cpp",
- "lib/IRPrinter/*.h",
]),
hdrs = glob([
"include/llvm/IRPrinter/*.h",
@@ -1640,7 +1651,6 @@ cc_library(
name = "IRReader",
srcs = glob([
"lib/IRReader/*.cpp",
- "lib/IRReader/*.h",
]),
hdrs = glob([
"include/llvm/IRReader/*.h",
@@ -1683,7 +1693,6 @@ cc_library(
name = "IPO",
srcs = glob([
"lib/Transforms/IPO/*.cpp",
- "lib/Transforms/IPO/*.h",
]),
hdrs = glob([
"include/llvm/Transforms/IPO/*.h",
@@ -1721,7 +1730,6 @@ cc_library(
name = "CFGuard",
srcs = glob([
"lib/Transforms/CFGuard/*.cpp",
- "lib/Transforms/CFGuard/*.h",
]),
hdrs = ["include/llvm/Transforms/CFGuard.h"],
copts = llvm_copts,
@@ -1736,7 +1744,6 @@ cc_library(
name = "HipStdPar",
srcs = glob([
"lib/Transforms/HipStdPar/*.cpp",
- "lib/Transforms/HipStdPar/*.h",
]),
hdrs = ["include/llvm/Transforms/HipStdPar/HipStdPar.h"],
copts = llvm_copts,
@@ -1826,7 +1833,6 @@ cc_library(
copts = llvm_copts,
textual_hdrs = glob([
"include/llvm/CodeGen/**/*.def",
- "include/llvm/CodeGen/**/*.inc",
]),
deps = [
":AggressiveInstCombine",
@@ -2305,10 +2311,13 @@ gentbl(
td_file = "lib/Target/" + target["name"] + "/" + target["short_name"] + ".td",
td_srcs = [
":common_target_td_sources",
- ] + glob([
- "lib/Target/" + target["name"] + "/*.td",
- "lib/Target/" + target["name"] + "/GISel/*.td",
- ]),
+ ] + glob(
+ [
+ "lib/Target/" + target["name"] + "/*.td",
+ "lib/Target/" + target["name"] + "/GISel/*.td",
+ ],
+ allow_empty = True,
+ ),
deps = target.get("tbl_deps", []),
)],
[cc_library(
@@ -2332,43 +2341,49 @@ gentbl(
# a number of targets due to crisscrossing inclusion of headers.
[cc_library(
name = target["name"] + "UtilsAndDesc",
- srcs = glob([
- "lib/Target/" + target["name"] + "/MCTargetDesc/*.cpp",
- "lib/Target/" + target["name"] + "/Utils/*.cpp",
-
- # We have to include these headers here as well as in the `hdrs`
- # below to allow the `.cpp` files to use file-relative-inclusion to
- # find them, even though consumers of this library use inclusion
- # relative to the target with the `strip_includes_prefix` of this
- # library. This mixture is likely incompatible with header modules.
- "lib/Target/" + target["name"] + "/MCTargetDesc/*.h",
- "lib/Target/" + target["name"] + "/Utils/*.h",
- ]),
- hdrs = glob([
- "lib/Target/" + target["name"] + "/MCTargetDesc/*.h",
- "lib/Target/" + target["name"] + "/Utils/*.h",
-
- # This a bit of a hack to allow us to expose common, internal
- # target header files to other libraries within the target via
- # target-relative includes. This usage of headers is inherently
- # non-modular as there is a mixture of target-relative inclusion
- # using this rule and file-relative inclusion using the repeated
- # listing of these headers in the `srcs` of subsequent rules.
- "lib/Target/" + target["name"] + "/*.h",
-
- # FIXME: The entries below should be `textual_hdrs` instead of
- # `hdrs`, but unfortunately that doesn't work with
- # `strip_include_prefix`:
- # https://github.com/bazelbuild/bazel/issues/12424
- #
- # Once that issue is fixed and released, we can switch this to
- # `textual_hdrs` and remove the feature disabling the various Bazel
- # features (both current and under-development) that motivated the
- # distinction between these two.
- "lib/Target/" + target["name"] + "/*.def",
- "lib/Target/" + target["name"] + "/*.inc",
- "lib/Target/" + target["name"] + "/MCTargetDesc/*.def",
- ]),
+ srcs = glob(
+ [
+ "lib/Target/" + target["name"] + "/MCTargetDesc/*.cpp",
+ "lib/Target/" + target["name"] + "/Utils/*.cpp",
+
+ # We have to include these headers here as well as in the `hdrs`
+ # below to allow the `.cpp` files to use file-relative-inclusion to
+ # find them, even though consumers of this library use inclusion
+ # relative to the target with the `strip_includes_prefix` of this
+ # library. This mixture is likely incompatible with header modules.
+ "lib/Target/" + target["name"] + "/MCTargetDesc/*.h",
+ "lib/Target/" + target["name"] + "/Utils/*.h",
+ ],
+ allow_empty = True,
+ ),
+ hdrs = glob(
+ [
+ "lib/Target/" + target["name"] + "/MCTargetDesc/*.h",
+ "lib/Target/" + target["name"] + "/Utils/*.h",
+
+ # This a bit of a hack to allow us to expose common, internal
+ # target header files to other libraries within the target via
+ # target-relative includes. This usage of headers is inherently
+ # non-modular as there is a mixture of target-relative inclusion
+ # using this rule and file-relative inclusion using the repeated
+ # listing of these headers in the `srcs` of subsequent rules.
+ "lib/Target/" + target["name"] + "/*.h",
+
+ # FIXME: The entries below should be `textual_hdrs` instead of
+ # `hdrs`, but unfortunately that doesn't work with
+ # `strip_include_prefix`:
+ # https://github.com/bazelbuild/bazel/issues/12424
+ #
+ # Once that issue is fixed and released, we can switch this to
+ # `textual_hdrs` and remove the feature disabling the various Bazel
+ # features (both current and under-development) that motivated the
+ # distinction between these two.
+ "lib/Target/" + target["name"] + "/*.def",
+ "lib/Target/" + target["name"] + "/*.inc",
+ "lib/Target/" + target["name"] + "/MCTargetDesc/*.def",
+ ],
+ allow_empty = True,
+ ),
copts = llvm_copts,
features = [
"-parse_headers",
@@ -2392,20 +2407,26 @@ gentbl(
)],
[cc_library(
name = target["name"] + "CodeGen",
- srcs = glob([
- "lib/Target/" + target["name"] + "/GISel/*.cpp",
- "lib/Target/" + target["name"] + "/GISel/*.h",
- "lib/Target/" + target["name"] + "/*.cpp",
- "lib/Target/" + target["name"] + "/*.h",
- ]),
+ srcs = glob(
+ [
+ "lib/Target/" + target["name"] + "/GISel/*.cpp",
+ "lib/Target/" + target["name"] + "/GISel/*.h",
+ "lib/Target/" + target["name"] + "/*.cpp",
+ "lib/Target/" + target["name"] + "/*.h",
+ ],
+ allow_empty = True,
+ ),
hdrs = ["lib/Target/" + target["name"] + "/" + target["short_name"] + ".h"],
copts = llvm_copts,
features = ["-layering_check"],
strip_include_prefix = "lib/Target/" + target["name"],
- textual_hdrs = glob([
- "lib/Target/" + target["name"] + "/*.def",
- "lib/Target/" + target["name"] + "/*.inc",
- ]),
+ textual_hdrs = glob(
+ [
+ "lib/Target/" + target["name"] + "/*.def",
+ "lib/Target/" + target["name"] + "/*.inc",
+ ],
+ allow_empty = True,
+ ),
deps = [
":Analysis",
":BinaryFormat",
@@ -2430,10 +2451,13 @@ gentbl(
)],
[cc_library(
name = target["name"] + "AsmParser",
- srcs = glob([
- "lib/Target/" + target["name"] + "/AsmParser/*.cpp",
- "lib/Target/" + target["name"] + "/AsmParser/*.h",
- ]),
+ srcs = glob(
+ [
+ "lib/Target/" + target["name"] + "/AsmParser/*.cpp",
+ "lib/Target/" + target["name"] + "/AsmParser/*.h",
+ ],
+ allow_empty = True,
+ ),
copts = llvm_copts,
deps = [
":BinaryFormat",
@@ -2464,9 +2488,12 @@ gentbl(
# `textual_hdrs` and remove the feature disabling the various Bazel
# features (both current and under-development) that motivated the
# distinction between these two.
- hdrs = glob([
- "lib/Target/" + target["name"] + "/Disassembler/*.h",
- ]),
+ hdrs = glob(
+ [
+ "lib/Target/" + target["name"] + "/Disassembler/*.h",
+ ],
+ allow_empty = True,
+ ),
features = [
"-parse_headers",
"-header_modules",
@@ -2475,11 +2502,14 @@ gentbl(
)],
[cc_library(
name = target["name"] + "Disassembler",
- srcs = glob([
- "lib/Target/" + target["name"] + "/Disassembler/*.cpp",
- "lib/Target/" + target["name"] + "/Disassembler/*.c",
- "lib/Target/" + target["name"] + "/Disassembler/*.h",
- ]),
+ srcs = glob(
+ [
+ "lib/Target/" + target["name"] + "/Disassembler/*.cpp",
+ "lib/Target/" + target["name"] + "/Disassembler/*.c",
+ "lib/Target/" + target["name"] + "/Disassembler/*.h",
+ ],
+ allow_empty = True,
+ ),
copts = llvm_copts,
features = ["-layering_check"],
deps = [
@@ -2497,11 +2527,14 @@ gentbl(
)],
[cc_library(
name = target["name"] + "TargetMCA",
- srcs = glob([
- "lib/Target/" + target["name"] + "/MCA/*.cpp",
- "lib/Target/" + target["name"] + "/MCA/*.c",
- "lib/Target/" + target["name"] + "/MCA/*.h",
- ]),
+ srcs = glob(
+ [
+ "lib/Target/" + target["name"] + "/MCA/*.cpp",
+ "lib/Target/" + target["name"] + "/MCA/*.c",
+ "lib/Target/" + target["name"] + "/MCA/*.h",
+ ],
+ allow_empty = True,
+ ),
copts = llvm_copts,
features = ["-layering_check"],
deps = [
@@ -2560,31 +2593,14 @@ cc_library(
)
cc_library(
- name = "MLPolicies",
- srcs = glob([
- "lib/Analysis/ML/*.cpp",
- "lib/Analysis/ML/*.h",
- ]),
- hdrs = glob([
- "include/llvm/Analysis/ML/*.h",
- ]),
- copts = llvm_copts,
- deps = [
- ":Analysis",
- ":Core",
- ":Support",
- ],
-)
-
-cc_library(
name = "Passes",
srcs = glob([
"lib/Passes/*.cpp",
- "lib/Passes/*.h",
]),
hdrs = glob([
"include/llvm/Passes/*.h",
"include/llvm/Passes/*.def",
+ "include/llvm/Passes/*.inc",
]) + ["include/llvm-c/Transforms/PassBuilder.h"],
copts = llvm_copts,
deps = [
@@ -2600,7 +2616,6 @@ cc_library(
":InstCombine",
":Instrumentation",
":MC",
- ":MLPolicies",
":ObjCARC",
":Scalar",
":Support",
@@ -2617,7 +2632,6 @@ cc_library(
name = "LTO",
srcs = glob([
"lib/LTO/*.cpp",
- "lib/LTO/*.h",
]),
hdrs = glob([
"include/llvm/LTO/*.h",
@@ -2657,7 +2671,6 @@ cc_library(
name = "ExecutionEngine",
srcs = glob([
"lib/ExecutionEngine/*.cpp",
- "lib/ExecutionEngine/*.h",
"lib/ExecutionEngine/RuntimeDyld/*.cpp",
"lib/ExecutionEngine/RuntimeDyld/*.h",
"lib/ExecutionEngine/RuntimeDyld/Targets/*.cpp",
@@ -2771,11 +2784,9 @@ cc_library(
name = "OrcJIT",
srcs = glob([
"lib/ExecutionEngine/Orc/*.cpp",
- "lib/ExecutionEngine/Orc/*.h",
]),
hdrs = glob([
"include/llvm/ExecutionEngine/Orc/*.h",
- "include/llvm/ExecutionEngine/Orc/RPC/*.h",
]) + [
"include/llvm-c/LLJIT.h",
"include/llvm-c/Orc.h",
@@ -2899,7 +2910,6 @@ cc_library(
name = "DWARFLinker",
srcs = glob([
"lib/DWARFLinker/Classic/*.cpp",
- "lib/DWARFLinker/Classic/*.h",
]),
hdrs = glob(["include/llvm/DWARFLinker/Classic/*.h"]),
copts = llvm_copts,
@@ -2920,7 +2930,6 @@ cc_library(
name = "DWARFLinkerBase",
srcs = glob([
"lib/DWARFLinker/*.cpp",
- "lib/DWARFLinker/*.h",
]),
hdrs = glob(["include/llvm/DWARFLinker/*.h"]),
copts = llvm_copts,
@@ -3011,7 +3020,6 @@ cc_library(
name = "InterfaceStub",
srcs = glob([
"lib/InterfaceStub/*.cpp",
- "lib/InterfaceStub/*.h",
]),
hdrs = glob([
"include/llvm/InterfaceStub/*.h",
@@ -3062,7 +3070,6 @@ cc_library(
name = "MCA",
srcs = glob([
"lib/MCA/**/*.cpp",
- "lib/MCA/**/*.h",
]),
hdrs = glob([
"include/llvm/MCA/**/*.h",
@@ -3089,7 +3096,6 @@ cc_library(
name = "XRay",
srcs = glob([
"lib/XRay/*.cpp",
- "lib/XRay/*.h",
]),
hdrs = glob(["include/llvm/XRay/*.h"]),
copts = llvm_copts,
@@ -3146,21 +3152,24 @@ cc_library(
cc_library(
name = "Exegesis",
- srcs = glob([
- "tools/llvm-exegesis/lib/*.cpp",
- # We have to include these headers here as well as in the `hdrs` below
- # to allow the `.cpp` files to use file-relative-inclusion to find
- # them, even though consumers of this library use inclusion relative to
- # `tools/llvm-exegesis/lib` with the `strip_includes_prefix` of this
- # library. This mixture appears to be incompatible with header modules.
- "tools/llvm-exegesis/lib/*.h",
- ] + [
- "tools/llvm-exegesis/lib/{}/*.cpp".format(t)
- for t in llvm_target_exegesis
- ] + [
- "tools/llvm-exegesis/lib/{}/*.h".format(t)
- for t in llvm_target_exegesis
- ]),
+ srcs = glob(
+ [
+ "tools/llvm-exegesis/lib/*.cpp",
+ # We have to include these headers here as well as in the `hdrs` below
+ # to allow the `.cpp` files to use file-relative-inclusion to find
+ # them, even though consumers of this library use inclusion relative to
+ # `tools/llvm-exegesis/lib` with the `strip_includes_prefix` of this
+ # library. This mixture appears to be incompatible with header modules.
+ "tools/llvm-exegesis/lib/*.h",
+ ] + [
+ "tools/llvm-exegesis/lib/{}/*.cpp".format(t)
+ for t in llvm_target_exegesis
+ ] + [
+ "tools/llvm-exegesis/lib/{}/*.h".format(t)
+ for t in llvm_target_exegesis
+ ],
+ allow_empty = True,
+ ),
hdrs = glob(["tools/llvm-exegesis/lib/*.h"]),
copts = llvm_copts,
features = [
@@ -3184,8 +3193,10 @@ cc_library(
":Support",
":Target",
":config",
- ":maybe_pfm",
- ],
+ ] + select({
+ "@platforms//os:linux": [":maybe_pfm"],
+ "//conditions:default": [],
+ }),
)
################################################################################
@@ -3321,23 +3332,10 @@ cc_binary(
],
)
-expand_template(
- name = "ar_main",
- out = "llvm-ar-driver.cpp",
- substitutions = {
- "@TOOL_NAME@": "llvm_ar",
- },
- template = "cmake/modules/llvm-driver-template.cpp.in",
-)
-
-cc_binary(
- name = "llvm-ar",
- srcs = glob([
- "tools/llvm-ar/*.cpp",
- "tools/llvm-ar/*.h",
- ]) + ["llvm-ar-driver.cpp"],
+cc_library(
+ name = "llvm-ar-lib",
+ srcs = glob(["tools/llvm-ar/*.cpp"]),
copts = llvm_copts,
- stamp = 0,
deps = [
":AllTargetsAsmParsers",
":AllTargetsCodeGens",
@@ -3351,6 +3349,12 @@ cc_binary(
],
)
+llvm_driver_cc_binary(
+ name = "llvm-ar",
+ stamp = 0,
+ deps = [":llvm-ar-lib"],
+)
+
# We need to run llvm-ar with different basenames to make it run with
# different behavior.
binary_alias(
@@ -3372,7 +3376,6 @@ cc_binary(
name = "llvm-as",
srcs = glob([
"tools/llvm-as/*.cpp",
- "tools/llvm-as/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3389,7 +3392,6 @@ cc_binary(
name = "llvm-bcanalyzer",
srcs = glob([
"tools/llvm-bcanalyzer/*.cpp",
- "tools/llvm-bcanalyzer/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3476,7 +3478,6 @@ cc_binary(
name = "llvm-cvtres",
srcs = glob([
"tools/llvm-cvtres/*.cpp",
- "tools/llvm-cvtres/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3510,7 +3511,6 @@ cc_binary(
name = "llvm-cxxmap",
srcs = glob([
"tools/llvm-cxxmap/*.cpp",
- "tools/llvm-cxxmap/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3545,7 +3545,6 @@ cc_binary(
name = "llvm-cxxfilt",
srcs = glob([
"tools/llvm-cxxfilt/*.cpp",
- "tools/llvm-cxxfilt/*.h",
]) + ["llvm-cxxfilt-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -3578,7 +3577,6 @@ cc_binary(
name = "llvm-debuginfod-find",
srcs = glob([
"tools/llvm-debuginfod-find/*.cpp",
- "tools/llvm-debuginfod-find/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3595,7 +3593,6 @@ cc_binary(
name = "llvm-dis",
srcs = glob([
"tools/llvm-dis/*.cpp",
- "tools/llvm-dis/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3690,7 +3687,6 @@ cc_binary(
name = "llvm-dwp",
srcs = glob([
"tools/llvm-dwp/*.cpp",
- "tools/llvm-dwp/*.h",
]) + ["llvm-dwp-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -3730,7 +3726,6 @@ cc_binary(
name = "llvm-extract",
srcs = glob([
"tools/llvm-extract/*.cpp",
- "tools/llvm-extract/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3772,7 +3767,6 @@ cc_binary(
name = "llvm-gsymutil",
srcs = glob([
"tools/llvm-gsymutil/*.cpp",
- "tools/llvm-gsymutil/*.h",
]) + ["llvm-gsymutil-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -3924,7 +3918,6 @@ cc_binary(
name = "llvm-link",
srcs = glob([
"tools/llvm-link/*.cpp",
- "tools/llvm-link/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -3989,7 +3982,6 @@ cc_binary(
name = "llvm-lto",
srcs = glob([
"tools/llvm-lto/*.cpp",
- "tools/llvm-lto/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4012,7 +4004,6 @@ cc_binary(
name = "llvm-lto2",
srcs = glob([
"tools/llvm-lto2/*.cpp",
- "tools/llvm-lto2/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4158,7 +4149,6 @@ cc_binary(
name = "llvm-mt",
srcs = glob([
"tools/llvm-mt/*.cpp",
- "tools/llvm-mt/*.h",
]) + ["llvm-mt-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -4182,23 +4172,10 @@ gentbl(
td_srcs = ["include/llvm/Option/OptParser.td"],
)
-expand_template(
- name = "nm_main",
- out = "llvm-nm-driver.cpp",
- substitutions = {
- "@TOOL_NAME@": "llvm_nm",
- },
- template = "cmake/modules/llvm-driver-template.cpp.in",
-)
-
-cc_binary(
- name = "llvm-nm",
- srcs = glob([
- "tools/llvm-nm/*.cpp",
- "tools/llvm-nm/*.h",
- ]) + ["llvm-nm-driver.cpp"],
+cc_library(
+ name = "llvm-nm-lib",
+ srcs = glob(["tools/llvm-nm/*.cpp"]),
copts = llvm_copts,
- stamp = 0,
deps = [
":AllTargetsAsmParsers",
":AllTargetsCodeGens",
@@ -4215,6 +4192,12 @@ cc_binary(
],
)
+llvm_driver_cc_binary(
+ name = "llvm-nm",
+ stamp = 0,
+ deps = [":llvm-nm-lib"],
+)
+
gentbl(
name = "llvm-objcopy-opts",
strip_include_prefix = "tools/llvm-objcopy",
@@ -4279,7 +4262,6 @@ cc_binary(
name = "llvm-stress",
srcs = glob([
"tools/llvm-stress/*.cpp",
- "tools/llvm-stress/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4456,7 +4438,6 @@ cc_binary(
name = "llvm-profdata",
srcs = glob([
"tools/llvm-profdata/*.cpp",
- "tools/llvm-profdata/*.h",
]) + ["llvm-profdata-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -4640,7 +4621,6 @@ cc_binary(
name = "llvm-rtdyld",
srcs = glob([
"tools/llvm-rtdyld/*.cpp",
- "tools/llvm-rtdyld/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4669,23 +4649,10 @@ gentbl(
td_srcs = ["include/llvm/Option/OptParser.td"],
)
-expand_template(
- name = "size_main",
- out = "llvm-size-driver.cpp",
- substitutions = {
- "@TOOL_NAME@": "llvm_size",
- },
- template = "cmake/modules/llvm-driver-template.cpp.in",
-)
-
-cc_binary(
- name = "llvm-size",
- srcs = glob([
- "tools/llvm-size/*.cpp",
- "tools/llvm-size/*.h",
- ]) + ["llvm-size-driver.cpp"],
+cc_library(
+ name = "llvm-size-lib",
+ srcs = glob(["tools/llvm-size/*.cpp"]),
copts = llvm_copts,
- stamp = 0,
deps = [
":Object",
":Option",
@@ -4694,11 +4661,16 @@ cc_binary(
],
)
+llvm_driver_cc_binary(
+ name = "llvm-size",
+ stamp = 0,
+ deps = [":llvm-size-lib"],
+)
+
cc_binary(
name = "llvm-split",
srcs = glob([
"tools/llvm-split/*.cpp",
- "tools/llvm-split/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4728,7 +4700,6 @@ cc_binary(
name = "llvm-strings",
srcs = glob([
"tools/llvm-strings/*.cpp",
- "tools/llvm-strings/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4765,7 +4736,6 @@ cc_binary(
name = "llvm-symbolizer",
srcs = glob([
"tools/llvm-symbolizer/*.cpp",
- "tools/llvm-symbolizer/*.h",
]) + ["llvm-symbolizer-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -4791,7 +4761,6 @@ cc_binary(
name = "llvm-undname",
srcs = glob([
"tools/llvm-undname/*.cpp",
- "tools/llvm-undname/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4805,7 +4774,6 @@ cc_binary(
name = "llvm-xray",
srcs = glob([
"tools/llvm-xray/*.cpp",
- "tools/llvm-xray/*.cc",
"tools/llvm-xray/*.h",
]),
copts = llvm_copts,
@@ -4888,7 +4856,6 @@ cc_binary(
name = "sancov",
srcs = glob([
"tools/sancov/*.cpp",
- "tools/sancov/*.h",
]) + ["sancov-driver.cpp"],
copts = llvm_copts,
stamp = 0,
@@ -4911,7 +4878,6 @@ cc_binary(
name = "sanstats",
srcs = glob([
"tools/sanstats/*.cpp",
- "tools/sanstats/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4926,7 +4892,6 @@ cc_binary(
name = "split-file",
srcs = glob([
"utils/split-file/*.cpp",
- "utils/split-file/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -4972,6 +4937,7 @@ py_binary(
name = "lit",
testonly = True,
srcs = ["utils/lit/lit.py"] + glob(["utils/lit/lit/**/*.py"]),
+ imports = ["utils/lit"],
)
py_binary(
@@ -5022,7 +4988,6 @@ cc_library(
testonly = True,
srcs = glob([
"lib/Testing/Support/*.cpp",
- "lib/Testing/Support/*.h",
]),
hdrs = glob(["include/llvm/Testing/Support/*.h"]),
copts = llvm_copts,
@@ -5051,7 +5016,6 @@ cc_binary(
testonly = True,
srcs = glob([
"utils/FileCheck/*.cpp",
- "utils/FileCheck/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -5097,7 +5061,6 @@ cc_binary(
testonly = True,
srcs = glob([
"utils/count/*.c",
- "utils/count/*.h",
]),
stamp = 0,
deps = [":Support"],
@@ -5108,7 +5071,6 @@ cc_binary(
testonly = True,
srcs = glob([
"tools/lli/ChildTarget/*.cpp",
- "tools/lli/ChildTarget/*.h",
]),
copts = llvm_copts,
# The tests load code into this binary that expect to see symbols
@@ -5175,7 +5137,6 @@ cc_binary(
testonly = True,
srcs = glob([
"tools/llvm-diff/*.cpp",
- "tools/llvm-diff/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -5193,7 +5154,6 @@ cc_binary(
testonly = True,
srcs = glob([
"tools/llvm-isel-fuzzer/*.cpp",
- "tools/llvm-isel-fuzzer/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -5239,7 +5199,6 @@ cc_binary(
testonly = True,
srcs = glob([
"utils/not/*.cpp",
- "utils/not/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -5319,7 +5278,6 @@ cc_binary(
testonly = True,
srcs = glob([
"tools/llvm-tli-checker/*.cpp",
- "tools/llvm-tli-checker/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -5366,7 +5324,6 @@ cc_binary(
name = "verify-uselistorder",
srcs = glob([
"tools/verify-uselistorder/*.cpp",
- "tools/verify-uselistorder/*.h",
]),
copts = llvm_copts,
stamp = 0,
@@ -5386,7 +5343,6 @@ cc_binary(
testonly = True,
srcs = glob([
"tools/yaml2obj/*.cpp",
- "tools/yaml2obj/*.h",
]),
copts = llvm_copts,
stamp = 0,
diff --git a/utils/bazel/llvm-project-overlay/llvm/driver.bzl b/utils/bazel/llvm-project-overlay/llvm/driver.bzl
new file mode 100644
index 000000000000..bd0d26d64f48
--- /dev/null
+++ b/utils/bazel/llvm-project-overlay/llvm/driver.bzl
@@ -0,0 +1,182 @@
+# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+"""Configuration for the llvm-driver tool."""
+
+load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo")
+load("@bazel_skylib//rules:expand_template.bzl", "expand_template")
+
+# Mapping from every tool to the cc_library that implements the tool's entrypoint.
+# TODO: uncomment the remaining targets after splitting them
+# into separate library/binary targets.
+_TOOLS = {
+ # "clang-scan-deps": "//clang:clang-scan-deps-lib",
+ # "clang": "//clang:clang-driver",
+ # "dsymutil": "//llvm:dsymutil-lib",
+ # "lld": "//lld:lld-lib",
+ "llvm-ar": "//llvm:llvm-ar-lib",
+ # "llvm-cxxfilt": "//llvm:llvm-cxxfilt-lib",
+ # "llvm-dwp": "//llvm:llvm-dwp-lib",
+ # "llvm-gsymutil": "//llvm:llvm-gsymutil-lib",
+ # "llvm-ifs": "//llvm:llvm-ifs-lib",
+ # "llvm-libtool-darwin": "//llvm:llvm-libtool-darwin-lib",
+ # "llvm-lipo": "//llvm:llvm-lipo-lib",
+ # "llvm-ml": "//llvm:llvm-ml-lib",
+ # "llvm-mt": "//llvm:llvm-mt-lib",
+ "llvm-nm": "//llvm:llvm-nm-lib",
+ # "llvm-objcopy": "//llvm:llvm-objcopy-lib",
+ # "llvm-objdump": "//llvm:llvm-objdump-lib",
+ # "llvm-profdata": "//llvm:llvm-profdata-lib",
+ # "llvm-rc": "//llvm:llvm-rc-lib",
+ # "llvm-readobj": "//llvm:llvm-readobj-lib",
+ "llvm-size": "//llvm:llvm-size-lib",
+ # "llvm-symbolizer": "//llvm:llvm-symbolizer-lib",
+ # "sancov": "//llvm:sancov-lib",
+}
+
+# Tools automatically get their own name as an alias, but there may be additional
+# aliases for a given tool.
+_EXTRA_ALIASES = {
+ "clang": ["clang++", "clang-cl", "clang-cpp"],
+ "lld": ["lld-link", "ld.lld", "ld64.lld", "wasm-ld"],
+ "llvm-ar": ["ranlib", "lib", "dlltool"],
+ "llvm-objcopy": ["bitcode-strip", "install-name-tool", "strip"],
+ "llvm-objdump": ["otool"],
+ "llvm-rc": ["windres"],
+ "llvm-readobj": ["readelf"],
+ "llvm-symbolizer": ["addr2line"],
+}
+
+def _validated_string_list_flag_impl(ctx):
+ invalid_values = [v for v in ctx.build_setting_value if v not in ctx.attr.values]
+ if invalid_values:
+ fail("Tool(s) [{}] are not in the known list of tools: {}".format(
+ ", ".join(invalid_values),
+ ", ".join(ctx.attr.values),
+ ))
+ return BuildSettingInfo(value = ctx.build_setting_value)
+
+# Like string_list_flag, but with the validation that string_flag provides.
+_validated_string_list_flag = rule(
+ implementation = _validated_string_list_flag_impl,
+ build_setting = config.string_list(flag = True),
+ attrs = {
+ "values": attr.string_list(
+ doc = "The list of allowed values for this setting. An error is raised if any other value is given.",
+ ),
+ },
+ doc = "A string list-typed build setting that can be set on the command line",
+)
+
+def generate_driver_selects(name):
+ """Generates flags and config settings to configure the tool list.
+
+ By default, all supported tools are included in the "llvm" driver binary.
+ To build only a subset, specify just the subset you want as the flag.
+ For example, to produce a binary with just llvm-nm and llvm-size, run:
+
+ $ bazel build \
+ --@llvm-project//llvm:driver-tools=llvm-nm,llvm-size \
+ @llvm-project//llvm:llvm
+
+ Note: this assumes the flag name is "driver-tools" by being invoked as:
+ generate_driver_selects(name = "driver-tools")
+
+ Args:
+ name: the name of the flag that configures which tools are included.
+ """
+
+ _validated_string_list_flag(
+ name = name,
+ build_setting_default = _TOOLS.keys(),
+ values = _TOOLS.keys(),
+ )
+ for tool in _TOOLS.keys():
+ native.config_setting(
+ name = "{}-include-{}".format(name, tool),
+ flag_values = {name: tool},
+ )
+
+def select_driver_tools(flag):
+ """Produce a list of tool deps based on generate_driver_selects().
+
+ Args:
+ flag: name that was used for generate_driver_selects().
+ Returns:
+ List of tool deps based on generate_driver_selects().
+ """
+ tools = []
+ for tool, target in _TOOLS.items():
+ tools += select({
+ "{}-include-{}".format(flag, tool): [target],
+ "//conditions:default": [],
+ })
+ return tools
+
+def _generate_driver_tools_def_impl(ctx):
+ # Depending on how the LLVM build files are included,
+ # it may or may not have the @llvm-project repo prefix.
+ # Compare just on the name. We could also include the package,
+ # but the name itself is unique in practice.
+ label_to_name = {Label(v).name: k for k, v in _TOOLS.items()}
+
+ # Reverse sort by the *main* tool name, but keep aliases together.
+ # This is consistent with how tools/llvm-driver/CMakeLists.txt does it,
+ # and this makes sure that more specific tools are checked first.
+ # For example, "clang-scan-deps" should not match "clang".
+ tools = [label_to_name[tool.label.name] for tool in ctx.attr.driver_tools]
+ tool_alias_pairs = []
+ for tool_name in reversed(tools):
+ tool_alias_pairs.append((tool_name, tool_name))
+ for extra_alias in _EXTRA_ALIASES.get(tool_name, []):
+ tool_alias_pairs.append((tool_name, extra_alias))
+
+ lines = [
+ 'LLVM_DRIVER_TOOL("{alias}", {tool})'.format(
+ tool = tool_name.replace("-", "_"),
+ alias = alias.removeprefix("llvm-"),
+ )
+ for (tool_name, alias) in tool_alias_pairs
+ ]
+ lines.append("#undef LLVM_DRIVER_TOOL")
+
+ ctx.actions.write(
+ output = ctx.outputs.out,
+ content = "\n".join(lines),
+ )
+
+generate_driver_tools_def = rule(
+ implementation = _generate_driver_tools_def_impl,
+ doc = """Generate a list of LLVM_DRIVER_TOOL macros.
+See tools/llvm-driver/CMakeLists.txt for the reference implementation.""",
+ attrs = {
+ "driver_tools": attr.label_list(
+ doc = "List of tools to include in the generated header. Use select_driver_tools() to provide this.",
+ providers = [CcInfo],
+ ),
+ "out": attr.output(
+ doc = "Name of the generated .def output file.",
+ mandatory = True,
+ ),
+ },
+)
+
+def llvm_driver_cc_binary(
+ name,
+ deps = None,
+ **kwargs):
+ """cc_binary wrapper for binaries using the llvm-driver template."""
+ expand_template(
+ name = "_gen_" + name,
+ out = name + "-driver.cpp",
+ substitutions = {"@TOOL_NAME@": name.replace("-", "_")},
+ template = "//llvm:cmake/modules/llvm-driver-template.cpp.in",
+ )
+ deps = deps or []
+ native.cc_binary(
+ name = name,
+ srcs = [name + "-driver.cpp"],
+ deps = deps + ["//llvm:Support"],
+ **kwargs
+ )
diff --git a/utils/bazel/llvm-project-overlay/llvm/lit_test.bzl b/utils/bazel/llvm-project-overlay/llvm/lit_test.bzl
index ce2a0a00c553..f754a9fc7d5e 100644
--- a/utils/bazel/llvm-project-overlay/llvm/lit_test.bzl
+++ b/utils/bazel/llvm-project-overlay/llvm/lit_test.bzl
@@ -35,6 +35,7 @@ def lit_test(
args = args + ["-v"] + ["$(execpath %s)" % src for src in srcs],
data = data + srcs,
legacy_create_init = False,
+ deps = [Label("//llvm:lit")],
**kwargs
)
diff --git a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel
index c8863af43c40..dd42f84d16dc 100644
--- a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel
@@ -357,10 +357,11 @@ cc_test(
[
"IR/*.cpp",
"IR/*.h",
- "Support/KnownBitsTest.h",
],
allow_empty = False,
- ),
+ ) + [
+ "Support/KnownBitsTest.h",
+ ],
shard_count = 20,
deps = [
"//llvm:Analysis",
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 3951a31bae3e..ddd3e69e6ce3 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -325,21 +325,18 @@ cc_library(
"lib/IR/*.cpp",
"lib/IR/*.h",
"lib/IR/PDL/*.cpp",
- "lib/Bytecode/Reader/*.h",
"lib/Bytecode/Writer/*.h",
- "lib/Bytecode/*.h",
+ "include/mlir/Bytecode/*.h",
]) + [
"include/mlir/IR/PDLPatternMatch.h.inc",
- "lib/Bytecode/BytecodeOpInterface.cpp",
+ "include/mlir/Interfaces/CallInterfaces.h",
+ "include/mlir/Interfaces/DataLayoutInterfaces.h",
+ "include/mlir/Interfaces/SideEffectInterfaces.h",
],
hdrs = glob([
"include/mlir/IR/*.h",
- "include/mlir/Bytecode/*.h",
]) + [
- "include/mlir/Interfaces/CallInterfaces.h",
- "include/mlir/Interfaces/DataLayoutInterfaces.h",
"include/mlir/Interfaces/FoldInterfaces.h",
- "include/mlir/Interfaces/SideEffectInterfaces.h",
],
includes = ["include"],
deps = [
@@ -354,7 +351,6 @@ cc_library(
":BytecodeOpInterfaceIncGen",
":CallOpInterfacesIncGen",
":DataLayoutInterfacesIncGen",
- ":InferTypeOpInterfaceIncGen",
":OpAsmInterfaceIncGen",
":RegionKindInterfaceIncGen",
":SideEffectInterfacesIncGen",
@@ -1008,6 +1004,7 @@ cc_library(
":CAPIDebugHeaders",
":CAPIIRHeaders",
":MLIRBindingsPythonHeaders",
+ ":Support",
"//llvm:Support",
"@local_config_python//:python_headers",
"@pybind11",
@@ -1460,14 +1457,12 @@ cc_library(
":AffineAnalysis",
":AffineDialect",
":AffineTransformOpsIncGen",
- ":AffineTransforms",
":AffineUtils",
- ":FuncDialect",
+ ":BytecodeOpInterface",
":IR",
":TransformDialect",
":TransformDialectInterfaces",
- ":Transforms",
- ":VectorDialect",
+ ":TransformUtils",
],
)
@@ -1549,10 +1544,10 @@ cc_library(
deps = [
":AMDGPUIncGen",
":ArithDialect",
+ ":BytecodeOpInterface",
":GPUDialect",
":IR",
":SideEffectInterfaces",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -1630,7 +1625,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/AMDGPU/Transforms/*.cpp",
- "lib/Dialect/AMDGPU/Transforms/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/AMDGPU/Transforms/*.h"]),
@@ -1646,11 +1640,10 @@ cc_library(
":IR",
":MemRefDialect",
":Pass",
+ ":SideEffectInterfaces",
":Support",
":TransformUtils",
- ":Transforms",
":VectorDialect",
- "//llvm:Support",
],
)
@@ -1660,7 +1653,6 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/AMDGPU/Utils/*.h"]),
includes = ["include"],
deps = [
- ":AMDGPUDialect",
":Support",
"//llvm:Support",
],
@@ -1768,7 +1760,6 @@ cc_library(
name = "TargetCpp",
srcs = glob([
"lib/Target/Cpp/*.cpp",
- "lib/Target/Cpp/*.h",
]),
hdrs = glob(["include/mlir/Target/Cpp/*.h"]),
deps = [
@@ -1924,11 +1915,10 @@ cc_library(
includes = ["include"],
deps = [
":ArmNeonIncGen",
+ ":BytecodeOpInterface",
":IR",
":SideEffectInterfaces",
":VectorDialect",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -1945,7 +1935,7 @@ cc_library(
":IR",
":LLVMDialect",
":Support",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
],
)
@@ -1967,7 +1957,6 @@ cc_library(
name = "ArmNeon2dToIntr",
srcs = glob([
"lib/Conversion/ArmNeon2dToIntr/*.cpp",
- "lib/Conversion/ArmNeon2dToIntr/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ArmNeon2dToIntr/*.h",
@@ -1976,14 +1965,9 @@ cc_library(
deps = [
":ArmNeonDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
- ":MemRefDialect",
- ":OpenACCDialect",
":Pass",
- ":SCFDialect",
- ":Support",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
],
)
@@ -2189,13 +2173,13 @@ cc_library(
":ArmSMEIntrinsicOpsIncGen",
":ArmSMEOpInterfacesIncGen",
":ArmSMEOpsIncGen",
+ ":BytecodeOpInterface",
":IR",
":LLVMDialect",
":MemRefDialect",
":SCFDialect",
":SideEffectInterfaces",
":VectorDialect",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -2206,7 +2190,6 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/ArmSME/Transforms/*.h"]),
includes = ["include"],
deps = [
- ":ArithDialect",
":ArithUtils",
":ArmSMEDialect",
":ArmSMETransformsPassIncGen",
@@ -2219,10 +2202,8 @@ cc_library(
":LLVMDialect",
":MemRefDialect",
":Pass",
- ":SCFDialect",
":SCFTransforms",
- ":Transforms",
- ":VectorDialect",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -2238,7 +2219,7 @@ cc_library(
":ConversionPassIncGen",
":Pass",
":SCFDialect",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -2257,7 +2238,7 @@ cc_library(
":LLVMDialect",
":MemRefDialect",
":Pass",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
],
)
@@ -2327,11 +2308,11 @@ cc_library(
includes = ["include"],
deps = [
":ArmSVEIncGen",
+ ":BytecodeOpInterface",
":IR",
":LLVMDialect",
":SideEffectInterfaces",
":VectorDialect",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -2441,11 +2422,10 @@ cc_library(
includes = ["include"],
deps = [
":AMXIncGen",
+ ":BytecodeOpInterface",
":IR",
":LLVMDialect",
":SideEffectInterfaces",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -2456,12 +2436,9 @@ cc_library(
includes = ["include"],
deps = [
":AMXDialect",
- ":FuncDialect",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -2534,13 +2511,12 @@ cc_library(
hdrs = ["include/mlir/Dialect/X86Vector/X86VectorDialect.h"],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":IR",
":InferTypeOpInterface",
":LLVMDialect",
":SideEffectInterfaces",
":X86VectorIncGen",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -2551,14 +2527,12 @@ cc_library(
includes = ["include"],
deps = [
":ArithDialect",
- ":FuncDialect",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
":VectorDialect",
":VectorUtils",
":X86VectorDialect",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -2732,6 +2706,7 @@ cc_library(
],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":Dialect",
":IR",
":IRDLAttributesIncGen",
@@ -2741,6 +2716,7 @@ cc_library(
":IRDLOpsIncGen",
":IRDLTypesIncGen",
":InferTypeOpInterface",
+ ":SideEffectInterfaces",
":Support",
"//llvm:Core",
"//llvm:Support",
@@ -2839,7 +2815,6 @@ cc_library(
name = "SCFTransforms",
srcs = glob([
"lib/Dialect/SCF/Transforms/*.cpp",
- "lib/Dialect/SCF/Transforms/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/SCF/Transforms/*.h",
@@ -2869,7 +2844,7 @@ cc_library(
":TensorDialect",
":TensorTransforms",
":TilingInterface",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -2915,6 +2890,7 @@ cc_library(
":AffineUtils",
":ArithDialect",
":ArithUtils",
+ ":BytecodeOpInterface",
":DialectUtils",
":FuncDialect",
":IR",
@@ -2923,11 +2899,9 @@ cc_library(
":SCFTransformOpsIncGen",
":SCFTransforms",
":SCFUtils",
- ":SideEffectInterfaces",
":TransformDialect",
":TransformDialectInterfaces",
":VectorDialect",
- "//llvm:Support",
],
)
@@ -3128,9 +3102,11 @@ cc_library(
deps = [
":ArithDialect",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
":DialectUtils",
":IR",
":InferTypeOpInterface",
+ ":SideEffectInterfaces",
":SparseTensorAttrDefsIncGen",
":SparseTensorEnums",
":SparseTensorInterfacesIncGen",
@@ -3151,14 +3127,11 @@ cc_library(
includes = ["include"],
deps = [
":IR",
- ":LinalgDialect",
":LinalgTransformOps",
":SparseTensorDialect",
":SparseTensorTransformOpsIncGen",
- ":Support",
":TransformDialect",
":TransformDialectInterfaces",
- "//llvm:Support",
],
)
@@ -3187,7 +3160,6 @@ cc_library(
name = "SparseTensorTransforms",
srcs = glob([
"lib/Dialect/SparseTensor/Transforms/*.cpp",
- "lib/Dialect/SparseTensor/Transforms/*.h",
"lib/Dialect/SparseTensor/Transforms/Utils/*.cpp",
"lib/Dialect/SparseTensor/Transforms/Utils/*.h",
]),
@@ -3225,6 +3197,7 @@ cc_library(
":SparseTensorUtils",
":Support",
":TensorDialect",
+ ":TransformUtils",
":Transforms",
":VectorDialect",
"//llvm:Support",
@@ -3246,7 +3219,6 @@ cc_library(
":BufferizationTransforms",
":ConversionPasses",
":FuncDialect",
- ":FuncTransforms",
":GPUDialect",
":GPUToNVVMTransforms",
":GPUTransforms",
@@ -3256,10 +3228,8 @@ cc_library(
":Pass",
":SparseTensorDialect",
":SparseTensorTransforms",
- ":TensorTransforms",
":Transforms",
":VectorToLLVM",
- ":VectorTransforms",
],
)
@@ -3394,10 +3364,12 @@ cc_library(
includes = ["include"],
deps = [
":ArithDialect",
+ ":BytecodeOpInterface",
":DialectUtils",
":IR",
":InferTypeOpInterface",
":MeshIncGen",
+ ":SideEffectInterfaces",
":Support",
":ViewLikeInterface",
"//llvm:Support",
@@ -3445,7 +3417,7 @@ cc_library(
":Pass",
":Support",
":TensorDialect",
- ":TransformUtils",
+ ":Transforms",
"//llvm:Support",
],
)
@@ -3544,12 +3516,12 @@ cc_library(
hdrs = ["include/mlir/Dialect/NVGPU/IR/NVGPUDialect.h"],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":GPUDialect",
":IR",
":LLVMDialect",
":NVGPUIncGen",
":SideEffectInterfaces",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -3569,9 +3541,8 @@ cc_library(
":ArithDialect",
":ArithUtils",
":DialectUtils",
- ":GPUCommonTransforms",
- ":GPUCompilationAttrInterfacesIncGen",
":GPUDialect",
+ ":GPUToGPURuntimeTransforms",
":IR",
":LLVMCommonConversion",
":LinalgDialect",
@@ -3583,7 +3554,6 @@ cc_library(
":NVVMDialect",
":SCFDialect",
":SCFTransforms",
- ":Support",
":TransformDialect",
":TransformDialectInterfaces",
":VectorDialect",
@@ -3646,9 +3616,7 @@ cc_library(
]),
includes = ["include"],
deps = [
- ":AffineDialect",
":ArithDialect",
- ":FuncDialect",
":GPUDialect",
":IR",
":MemRefDialect",
@@ -3657,9 +3625,7 @@ cc_library(
":Pass",
":SideEffectInterfaces",
":Support",
- ":Transforms",
":VectorDialect",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -3760,12 +3726,13 @@ cc_library(
hdrs = ["include/mlir/Dialect/XeGPU/IR/XeGPU.h"],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":DialectUtils",
":IR",
":ShapedOpInterfaces",
+ ":SideEffectInterfaces",
":ViewLikeInterface",
":XeGPUIncGen",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -3826,11 +3793,11 @@ cc_library(
name = "Dialect",
srcs = glob([
"lib/Dialect/*.cpp",
- "lib/Dialect/*.h",
- ]),
- hdrs = glob([
- "include/mlir/Dialect/*.h",
]),
+ hdrs = glob(
+ include = ["include/mlir/Dialect/*.h"],
+ exclude = ["include/mlir/Dialect/CommonFolders.h"],
+ ),
includes = ["include"],
deps = [
":IR",
@@ -3868,14 +3835,12 @@ cc_library(
name = "DialectUtils",
srcs = glob([
"lib/Dialect/Utils/*.cpp",
- "lib/Dialect/Utils/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/Utils/*.h",
]),
includes = ["include"],
deps = [
- ":ArithUtils",
":DialectUtilsIncGen",
":IR",
":Support",
@@ -3887,7 +3852,6 @@ cc_library(
name = "AffineDialect",
srcs = glob([
"lib/Dialect/Affine/IR/*.cpp",
- "lib/Dialect/Affine/IR/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/Affine/IR/*.h",
@@ -3897,15 +3861,12 @@ cc_library(
":AffineMemoryOpInterfacesIncGen",
":AffineOpsIncGen",
":ArithDialect",
- ":BufferizationInterfaces",
":ControlFlowInterfaces",
- ":DialectUtils",
":IR",
":InliningUtils",
":LoopLikeInterface",
":MemRefDialect",
":ShapedOpInterfaces",
- ":SideEffectInterfaces",
":Support",
":UBDialect",
":ValueBoundsOpInterface",
@@ -3923,7 +3884,7 @@ cc_library(
]),
includes = ["include"],
deps = [
- ":CallOpInterfaces",
+ ":BytecodeOpInterface",
":CastInterfaces",
":ControlFlowInterfaces",
":EmitCAttributesIncGen",
@@ -3965,6 +3926,8 @@ cc_library(
includes = ["include"],
deps = [
":AsyncOpsIncGen",
+ ":BytecodeOpInterface",
+ ":CallOpInterfaces",
":ControlFlowInterfaces",
":FunctionInterfaces",
":IR",
@@ -3998,9 +3961,6 @@ cc_library(
":SCFToControlFlow",
":Support",
":TransformUtils",
- ":Transforms",
- ":TransformsPassIncGen",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -4009,7 +3969,6 @@ cc_library(
name = "AffineAnalysis",
srcs = glob([
"lib/Dialect/Affine/Analysis/*.cpp",
- "lib/Dialect/Affine/Analysis/*.h",
]),
hdrs = glob(["include/mlir/Dialect/Affine/Analysis/*.h"]),
includes = ["include"],
@@ -4017,8 +3976,8 @@ cc_library(
":AffineDialect",
":Analysis",
":ArithDialect",
+ ":CallOpInterfaces",
":DialectUtils",
- ":FuncDialect",
":IR",
":SideEffectInterfaces",
":Support",
@@ -4032,7 +3991,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Affine/Utils/*.cpp",
- "lib/Dialect/Affine/Utils/*.h",
],
),
hdrs = [
@@ -4079,7 +4037,6 @@ cc_library(
name = "AffineTransforms",
srcs = glob([
"lib/Dialect/Affine/Transforms/*.cpp",
- "lib/Dialect/Affine/Transforms/*.h",
]),
hdrs = [
"include/mlir/Dialect/Affine/Passes.h",
@@ -4100,8 +4057,10 @@ cc_library(
":Pass",
":SCFDialect",
":SCFUtils",
+ ":SideEffectInterfaces",
":Support",
":TensorDialect",
+ ":TransformUtils",
":Transforms",
":ValueBoundsOpInterface",
":VectorDialect",
@@ -4181,11 +4140,11 @@ cc_library(
":MathToLLVM",
":MathToLibm",
":MathToSPIRV",
+ ":MemRefToEmitC",
":MemRefToLLVM",
":MemRefToSPIRV",
":NVGPUToNVVM",
":NVVMToLLVM",
- ":OpenACCToLLVM",
":OpenACCToSCF",
":OpenMPToLLVM",
":PDLToPDLInterp",
@@ -4218,7 +4177,6 @@ cc_library(
name = "AsyncToLLVM",
srcs = glob([
"lib/Conversion/AsyncToLLVM/*.cpp",
- "lib/Conversion/AsyncToLLVM/*.h",
]),
hdrs = glob(["include/mlir/Conversion/AsyncToLLVM/*.h"]),
includes = ["include"],
@@ -4234,8 +4192,7 @@ cc_library(
":LLVMCommonConversion",
":LLVMDialect",
":Pass",
- ":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -4244,7 +4201,6 @@ cc_library(
name = "AffineToStandard",
srcs = glob([
"lib/Conversion/AffineToStandard/*.cpp",
- "lib/Conversion/AffineToStandard/*.h",
]),
hdrs = glob(["include/mlir/Conversion/AffineToStandard/*.h"]),
includes = ["include"],
@@ -4252,45 +4208,22 @@ cc_library(
":AffineDialect",
":AffineTransforms",
":AffineUtils",
- ":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":MemRefDialect",
- ":Pass",
":SCFDialect",
":Support",
+ ":TransformUtils",
":Transforms",
":VectorDialect",
],
)
-# SDBM dialect only contains attribute components that can be constructed given
-# a dialect object, so whenever it is used it must also be registered. Therefore
-# we don't split out the registration library for it.
-cc_library(
- name = "SDBM",
- srcs = glob([
- "lib/Dialect/SDBM/*.cpp",
- "lib/Dialect/SDBM/*.h",
- ]),
- hdrs = glob([
- "include/mlir/Dialect/SDBM/*.h",
- ]),
- includes = ["include"],
- deps = [
- ":IR",
- ":Support",
- "//llvm:Support",
- ],
-)
-
cc_library(
name = "SCFDialect",
srcs = glob(
[
"lib/Dialect/SCF/IR/*.cpp",
- "lib/Dialect/SCF/IR/*.h",
],
),
hdrs = glob(
@@ -4306,7 +4239,6 @@ cc_library(
":ControlFlowDialect",
":ControlFlowInterfaces",
":DestinationStyleOpInterface",
- ":FuncDialect",
":FunctionInterfaces",
":IR",
":InferTypeOpInterface",
@@ -4314,10 +4246,9 @@ cc_library(
":LoopLikeInterface",
":MemRefDialect",
":ParallelCombiningOpInterface",
- ":Pass",
":SCFDeviceMappingInterfacesIncGen",
":SCFIncGen",
- ":SCFPassIncGen",
+ ":SideEffectInterfaces",
":Support",
":TensorDialect",
":ValueBoundsOpInterface",
@@ -4350,7 +4281,7 @@ cc_library(
":SCFDialect",
":SideEffectInterfaces",
":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -4363,7 +4294,6 @@ cc_library(
hdrs = ["include/mlir/Interfaces/Utils/InferIntRangeCommon.h"],
includes = ["include"],
deps = [
- ":IR",
":InferIntRangeInterface",
"//llvm:Support",
],
@@ -4414,7 +4344,6 @@ cc_library(
deps = [
":IR",
":MemorySlotInterfacesIncGen",
- "//llvm:Support",
],
)
@@ -4426,7 +4355,6 @@ cc_library(
deps = [
":IR",
":ShapedOpInterfacesIncGen",
- "//llvm:Support",
],
)
@@ -4438,7 +4366,6 @@ cc_library(
deps = [
":IR",
":ParallelCombiningOpInterfaceIncGen",
- "//llvm:Support",
],
)
@@ -4450,7 +4377,6 @@ cc_library(
deps = [
":IR",
":RuntimeVerifiableOpInterfaceIncGen",
- "//llvm:Support",
],
)
@@ -4564,10 +4490,12 @@ cc_library(
deps = [
":ArithDialect",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
+ ":CallOpInterfaces",
":CastInterfaces",
+ ":CommonFolders",
":ControlFlowInterfaces",
":Dialect",
- ":FuncDialect",
":FunctionInterfaces",
":IR",
":InferTypeOpInterface",
@@ -4599,7 +4527,6 @@ cc_library(
name = "ShapeToStandard",
srcs = glob([
"lib/Conversion/ShapeToStandard/*.cpp",
- "lib/Conversion/ShapeToStandard/*.h",
]),
hdrs = ["include/mlir/Conversion/ShapeToStandard/ShapeToStandard.h"],
includes = ["include"],
@@ -4609,14 +4536,12 @@ cc_library(
":ConversionPassIncGen",
":FuncDialect",
":IR",
- ":MemRefDialect",
":Pass",
":SCFDialect",
":ShapeDialect",
":ShapeToStandardGen",
- ":Support",
":TensorDialect",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -4639,7 +4564,6 @@ cc_library(
name = "ShapeTransforms",
srcs = glob([
"lib/Dialect/Shape/Transforms/*.cpp",
- "lib/Dialect/Shape/Transforms/*.h",
]),
hdrs = [
"include/mlir/Dialect/Shape/Analysis/ShapeMappingAnalysis.h",
@@ -4659,7 +4583,7 @@ cc_library(
":ShapeDialect",
":ShapeTransformsPassIncGen",
":TensorDialect",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -4722,7 +4646,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/ControlFlow/IR/*.cpp",
- "lib/Dialect/ControlFlow/IR/*.h",
],
),
hdrs = glob([
@@ -4732,7 +4655,7 @@ cc_library(
deps = [
":ArithDialect",
":BufferizationInterfaces",
- ":CommonFolders",
+ ":BytecodeOpInterface",
":ControlFlowInterfaces",
":ControlFlowOpsIncGen",
":ConvertToLLVMInterface",
@@ -4768,22 +4691,16 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Func/IR/*.cpp",
- "lib/Dialect/Func/IR/*.h",
- "lib/Dialect/Func/Utils/*.cpp",
],
),
hdrs = glob([
"include/mlir/Dialect/Func/IR/*.h",
- "include/mlir/Dialect/Func/Utils/*.h",
]),
includes = ["include"],
deps = [
- ":ArithDialect",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
":CallOpInterfaces",
- ":CastInterfaces",
- ":CommonFolders",
- ":ControlFlowDialect",
":ControlFlowInterfaces",
":ConvertToLLVMInterface",
":FuncIncGen",
@@ -4806,7 +4723,6 @@ cc_library(
":ControlFlowDialect",
":FuncDialect",
":IR",
- ":InferTypeOpInterface",
":InliningUtils",
":MeshShardingInterface",
],
@@ -4853,6 +4769,7 @@ cc_library(
],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":FuncDialect",
":FuncToLLVM",
":FuncTransformOpsIncGen",
@@ -4925,7 +4842,6 @@ cc_library(
name = "FuncTransforms",
srcs = glob([
"lib/Dialect/Func/Transforms/*.cpp",
- "lib/Dialect/Func/Transforms/*.h",
]),
hdrs = glob(["include/mlir/Dialect/Func/Transforms/*.h"]),
includes = ["include"],
@@ -4937,9 +4853,8 @@ cc_library(
":IR",
":MemRefDialect",
":Pass",
- ":SCFDialect",
":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -4957,11 +4872,12 @@ cc_library(
includes = ["include"],
deps = [
":AffineDialect",
+ ":Analysis",
":ArithDialect",
":ArithUtils",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
":ControlFlowInterfaces",
- ":DataLayoutInterfaces",
":DestinationStyleOpInterface",
":DialectUtils",
":IR",
@@ -4994,24 +4910,18 @@ cc_library(
],
includes = ["include"],
deps = [
- ":AffineDialect",
- ":ArithDialect",
- ":AsmParser",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
- ":SideEffectInterfaces",
":TransformDialect",
":TransformDialectInterfaces",
":TransformUtils",
":VectorDialect",
- ":VectorEnumsIncGen",
":VectorToLLVM",
":VectorToSCF",
":VectorTransformOpsIncGen",
":VectorTransforms",
":X86VectorTransforms",
- "//llvm:Support",
],
)
@@ -5062,7 +4972,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Vector/Transforms/*.cpp",
- "lib/Dialect/Vector/Transforms/*.h",
],
),
hdrs = glob([
@@ -5089,7 +4998,7 @@ cc_library(
":SubsetOpInterface",
":Support",
":TensorDialect",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
":VectorEnumsIncGen",
":VectorInterfaces",
@@ -5129,25 +5038,18 @@ cc_library(
name = "Support",
srcs = glob([
"lib/Support/*.cpp",
- "lib/Support/*.h",
]),
hdrs = glob(["include/mlir/Support/*.h"]),
includes = ["include"],
- deps = [
- "//llvm:Support",
- "//llvm:TargetParser",
- ],
+ deps = ["//llvm:Support"],
)
cc_library(
name = "Debug",
srcs = glob([
"lib/Debug/*.cpp",
- "lib/Debug/*.h",
"lib/Debug/BreakpointManagers/*.cpp",
- "lib/Debug/BreakpointManagers/*.h",
"lib/Debug/Observers/*.cpp",
- "lib/Debug/Observers/*.h",
]),
hdrs = glob([
"include/mlir/Debug/*.h",
@@ -5179,7 +5081,6 @@ cc_library(
deps = [
":Support",
"//llvm:Support",
- "//llvm:TargetParser",
],
)
@@ -5189,8 +5090,6 @@ cc_library(
[
"lib/Tools/mlir-lsp-server/*.cpp",
"lib/Tools/mlir-lsp-server/*.h",
- "lib/Tools/mlir-lsp-server/lsp/*.cpp",
- "lib/Tools/mlir-lsp-server/lsp/*.h",
],
),
hdrs = glob(
@@ -5256,7 +5155,6 @@ cc_library(
":IR",
":Support",
"//llvm:Support",
- "//llvm:TargetParser",
],
)
@@ -5264,16 +5162,12 @@ cc_library(
name = "BytecodeReader",
srcs = glob([
"lib/Bytecode/Reader/*.cpp",
- "lib/Bytecode/Reader/*.h",
- "lib/Bytecode/*.h",
- ]),
- hdrs = glob([
- "include/mlir/Bytecode/*.h",
]),
+ hdrs = ["include/mlir/Bytecode/BytecodeReader.h"],
includes = ["include"],
deps = [
":AsmParser",
- ":BytecodeOpInterfaceIncGen",
+ ":BytecodeOpInterface",
":IR",
":Support",
"//llvm:Support",
@@ -5285,14 +5179,11 @@ cc_library(
srcs = glob([
"lib/Bytecode/Writer/*.cpp",
"lib/Bytecode/Writer/*.h",
- "lib/Bytecode/*.h",
- ]),
- hdrs = glob([
- "include/mlir/Bytecode/*.h",
]),
+ hdrs = ["include/mlir/Bytecode/BytecodeWriter.h"],
includes = ["include"],
deps = [
- ":BytecodeOpInterfaceIncGen",
+ ":BytecodeOpInterface",
":IR",
":Support",
"//llvm:Support",
@@ -5303,7 +5194,6 @@ cc_library(
name = "Parser",
srcs = glob([
"lib/Parser/*.cpp",
- "lib/Parser/*.h",
]),
hdrs = glob([
"include/mlir/Parser/*.h",
@@ -5313,7 +5203,6 @@ cc_library(
":AsmParser",
":BytecodeReader",
":IR",
- ":Support",
"//llvm:Support",
],
)
@@ -5385,11 +5274,10 @@ cc_library(
"include/mlir/Dialect/LLVMIR/VCIX*.h",
"include/mlir/Dialect/LLVMIR/*X86Vector*.h",
],
- ) + [
- "include/mlir/Transforms/Mem2Reg.h",
- ],
+ ),
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":CallOpInterfaces",
":ControlFlowInterfaces",
":DataLayoutInterfaces",
@@ -5434,19 +5322,17 @@ cc_library(
name = "LLVMIRTransforms",
srcs = glob([
"lib/Dialect/LLVMIR/Transforms/*.cpp",
- "lib/Dialect/LLVMIR/Transforms/*.h",
]),
hdrs = glob(["include/mlir/Dialect/LLVMIR/Transforms/*.h"]),
includes = ["include"],
deps = [
":FuncDialect",
- ":GPUDialect",
":IR",
":LLVMDialect",
":LLVMPassIncGen",
":NVVMDialect",
":Pass",
- ":Transforms",
+ ":TransformUtils",
"//llvm:BinaryFormat",
"//llvm:Support",
],
@@ -5587,7 +5473,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/GPU/IR/*.cpp",
- "lib/Dialect/GPU/IR/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/GPU/IR/*.h"]),
@@ -5595,6 +5480,7 @@ cc_library(
deps = [
":ArithDialect",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
":ControlFlowInterfaces",
":DLTIDialect",
":FunctionInterfaces",
@@ -5605,7 +5491,6 @@ cc_library(
":InferIntRangeInterface",
":InferTypeOpInterface",
":InliningUtils",
- ":LLVMDialect",
":MemRefDialect",
":SCFDialect",
":SideEffectInterfaces",
@@ -5654,8 +5539,8 @@ cc_library(
":ArithToLLVM",
":FuncDialect",
":FuncToLLVM",
- ":GPUCommonTransforms",
":GPUDialect",
+ ":GPUToGPURuntimeTransforms",
":GPUToNVVMTransforms",
":GPUTransforms",
":IndexToLLVM",
@@ -5681,7 +5566,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/GPU/Transforms/*.cpp",
- "lib/Dialect/GPU/Transforms/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/GPU/Transforms/*.h"]),
@@ -5719,6 +5603,7 @@ cc_library(
":SideEffectInterfaces",
":Support",
":ToLLVMIRTranslation",
+ ":TransformUtils",
":Transforms",
":VCIXToLLVMIRTranslation",
":VectorDialect",
@@ -5775,12 +5660,10 @@ cc_library(
deps = [
":AffineDialect",
":ArithDialect",
- ":AsmParser",
- ":ControlFlowDialect",
":DialectUtils",
":FuncDialect",
- ":GPUCommonTransforms",
":GPUDialect",
+ ":GPUToGPURuntimeTransforms",
":GPUToNVVMTransforms",
":GPUTransformOpsIncGen",
":GPUTransforms",
@@ -5788,9 +5671,7 @@ cc_library(
":LLVMCommonConversion",
":MemRefDialect",
":NVVMDialect",
- ":Parser",
":SCFDialect",
- ":SideEffectInterfaces",
":Support",
":TransformDialect",
":TransformDialectInterfaces",
@@ -5828,25 +5709,16 @@ td_library(
cc_library(
name = "GPUCommonTransforms",
- srcs = [
- "lib/Conversion/GPUCommon/GPUOpsLowering.cpp",
- ],
hdrs = [
- "include/mlir/Conversion/GPUCommon/GPUCommonPass.h",
"lib/Conversion/GPUCommon/GPUOpsLowering.h",
"lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h",
"lib/Conversion/GPUCommon/OpToFuncCallLowering.h",
],
deps = [
- ":ConversionPassIncGen",
- ":FuncDialect",
":GPUDialect",
- ":GPUTransforms",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
- ":Support",
- "//llvm:Support",
],
)
@@ -5871,14 +5743,12 @@ cc_library(
name = "GPUToNVVMTransforms",
srcs = glob([
"lib/Conversion/GPUToNVVM/*.cpp",
- "lib/Conversion/GPUToNVVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/GPUToNVVM/*.h",
]),
includes = ["include"],
deps = [
- ":ArithDialect",
":ArithToLLVM",
":ControlFlowDialect",
":ControlFlowToLLVM",
@@ -5887,6 +5757,7 @@ cc_library(
":FuncToLLVM",
":GPUCommonTransforms",
":GPUDialect",
+ ":GPUToGPURuntimeTransforms",
":GPUToNVVMGen",
":GPUTransforms",
":IR",
@@ -5896,10 +5767,8 @@ cc_library(
":MemRefDialect",
":MemRefToLLVM",
":NVVMDialect",
- ":Pass",
- ":Transforms",
+ ":TransformUtils",
":VectorToLLVM",
- "//llvm:Support",
],
)
@@ -5907,7 +5776,6 @@ cc_library(
name = "AMDGPUToROCDL",
srcs = glob([
"lib/Conversion/AMDGPUToROCDL/*.cpp",
- "lib/Conversion/AMDGPUToROCDL/*.h",
]) + ["include/mlir/Conversion/GPUToROCDL/Runtimes.h"],
hdrs = glob([
"include/mlir/Conversion/AMDGPUToROCDL/*.h",
@@ -5922,8 +5790,6 @@ cc_library(
":LLVMDialect",
":Pass",
":ROCDLDialect",
- ":Support",
- ":Transforms",
"//llvm:Support",
],
)
@@ -5932,7 +5798,6 @@ cc_library(
name = "NVGPUToNVVM",
srcs = glob([
"lib/Conversion/NVGPUToNVVM/*.cpp",
- "lib/Conversion/NVGPUToNVVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/NVGPUToNVVM/*.h",
@@ -5941,7 +5806,6 @@ cc_library(
deps = [
":ArithDialect",
":ConversionPassIncGen",
- ":GPUCommonTransforms",
":GPUDialect",
":GPUToGPURuntimeTransforms",
":IR",
@@ -5960,7 +5824,6 @@ cc_library(
name = "VectorToSPIRV",
srcs = glob([
"lib/Conversion/VectorToSPIRV/*.cpp",
- "lib/Conversion/VectorToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/VectorToSPIRV/*.h",
@@ -5974,7 +5837,7 @@ cc_library(
":SPIRVConversion",
":SPIRVDialect",
":Support",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
"//llvm:Support",
],
@@ -6016,6 +5879,7 @@ cc_library(
":FuncToLLVM",
":GPUCommonTransforms",
":GPUDialect",
+ ":GPUToGPURuntimeTransforms",
":GPUToROCDLTGen",
":GPUTransforms",
":IR",
@@ -6026,10 +5890,10 @@ cc_library(
":MemRefToLLVM",
":Pass",
":ROCDLDialect",
+ ":TransformUtils",
":Transforms",
":VectorDialect",
":VectorToLLVM",
- ":VectorToSCF",
"//llvm:Support",
],
)
@@ -6059,6 +5923,7 @@ cc_library(
cc_library(
name = "GPUToGPURuntimeTransforms",
srcs = [
+ "lib/Conversion/GPUCommon/GPUOpsLowering.cpp",
"lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp",
],
hdrs = ["include/mlir/Conversion/GPUCommon/GPUCommonPass.h"],
@@ -6072,6 +5937,7 @@ cc_library(
":ConvertToLLVM",
":ConvertToLLVMInterface",
":FuncToLLVM",
+ ":GPUCommonTransforms",
":GPUDialect",
":GPUTransforms",
":IR",
@@ -6089,7 +5955,6 @@ cc_library(
name = "GPUToSPIRV",
srcs = glob([
"lib/Conversion/GPUToSPIRV/*.cpp",
- "lib/Conversion/GPUToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/GPUToSPIRV/*.h",
@@ -6106,14 +5971,11 @@ cc_library(
":GPUDialect",
":IR",
":MemRefToSPIRV",
- ":Pass",
- ":SCFDialect",
":SCFToSPIRV",
":SPIRVConversion",
":SPIRVDialect",
":Support",
- ":Transforms",
- ":VectorToSPIRV",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -6164,7 +6026,7 @@ cc_library(
":SPIRVDialect",
":SPIRVUtils",
":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -6309,6 +6171,7 @@ cc_library(
includes = ["include"],
deps = [
":BasicPtxBuilderInterface",
+ ":BytecodeOpInterface",
":ConvertToLLVMInterface",
":DialectUtils",
":GPUDialect",
@@ -6533,13 +6396,10 @@ cc_library(
":ConversionPassIncGen",
":ConvertToLLVMInterface",
":FuncDialect",
- ":GPUDialect",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
- ":MemRefDialect",
":NVVMDialect",
- ":NVVMOpsIncGen",
":Pass",
":Support",
"//llvm:Support",
@@ -6552,6 +6412,7 @@ cc_library(
hdrs = ["include/mlir/Dialect/LLVMIR/ROCDLDialect.h"],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":GPUDialect",
":IR",
":LLVMDialect",
@@ -6659,13 +6520,13 @@ cc_library(
name = "PDLDialect",
srcs = glob([
"lib/Dialect/PDL/IR/*.cpp",
- "lib/Dialect/PDL/IR/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/PDL/IR/*.h",
]),
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":IR",
":InferTypeOpInterface",
":PDLOpsIncGen",
@@ -6734,20 +6595,19 @@ cc_library(
name = "PDLInterpDialect",
srcs = glob([
"lib/Dialect/PDLInterp/IR/*.cpp",
- "lib/Dialect/PDLInterp/IR/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/PDLInterp/IR/*.h",
]),
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":FunctionInterfaces",
":IR",
":InferTypeOpInterface",
":PDLDialect",
":PDLInterpOpsIncGen",
":SideEffectInterfaces",
- "//llvm:Support",
],
)
@@ -6947,6 +6807,8 @@ cc_library(
]),
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
+ ":CallOpInterfaces",
":CommonFolders",
":ControlFlowInterfaces",
":FunctionInterfaces",
@@ -6960,10 +6822,8 @@ cc_library(
":SPIRVAvailabilityIncGen",
":SPIRVCanonicalizationIncGen",
":SPIRVOpsIncGen",
- ":SPIRVSerializationGen",
":SideEffectInterfaces",
":Support",
- ":Transforms",
":UBDialect",
"//llvm:Support",
],
@@ -7006,10 +6866,7 @@ cc_library(
"include/mlir/Dialect/SPIRV/Utils/*.h",
]),
includes = ["include"],
- deps = [
- ":SPIRVDialect",
- "//llvm:Support",
- ],
+ deps = [":SPIRVDialect"],
)
cc_library(
@@ -7031,7 +6888,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/SPIRV/Transforms/*.cpp",
- "lib/Dialect/SPIRV/Transforms/*.h",
],
exclude = ["lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp"],
),
@@ -7049,7 +6905,7 @@ cc_library(
":SPIRVPassIncGen",
":SPIRVUtils",
":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -7061,7 +6917,7 @@ cc_library(
deps = [
":IR",
":SPIRVDialect",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -7070,7 +6926,6 @@ cc_library(
name = "MathToSPIRV",
srcs = glob([
"lib/Conversion/MathToSPIRV/*.cpp",
- "lib/Conversion/MathToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/MathToSPIRV/*.h",
@@ -7087,8 +6942,7 @@ cc_library(
":SPIRVCommonConversion",
":SPIRVConversion",
":SPIRVDialect",
- ":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -7097,7 +6951,6 @@ cc_library(
name = "FuncToEmitC",
srcs = glob([
"lib/Conversion/FuncToEmitC/*.cpp",
- "lib/Conversion/FuncToEmitC/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/FuncToEmitC/*.h",
@@ -7110,12 +6963,8 @@ cc_library(
":ConversionPassIncGen",
":EmitCDialect",
":FuncDialect",
- ":IR",
":Pass",
- ":Support",
":TransformUtils",
- ":Transforms",
- "//llvm:Support",
],
)
@@ -7123,7 +6972,6 @@ cc_library(
name = "FuncToSPIRV",
srcs = glob([
"lib/Conversion/FuncToSPIRV/*.cpp",
- "lib/Conversion/FuncToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/FuncToSPIRV/*.h",
@@ -7133,20 +6981,16 @@ cc_library(
"lib/Conversion/FuncToSPIRV",
],
deps = [
- ":ControlFlowToSPIRV",
":ConversionPassIncGen",
":FuncDialect",
":IR",
- ":MathToSPIRV",
":Pass",
":SPIRVCommonConversion",
":SPIRVConversion",
":SPIRVDialect",
":SPIRVUtils",
":Support",
- ":TensorDialect",
- ":Transforms",
- ":VectorDialect",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -7155,7 +6999,6 @@ cc_library(
name = "TensorToLinalg",
srcs = glob([
"lib/Conversion/TensorToLinalg/*.cpp",
- "lib/Conversion/TensorToLinalg/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TensorToLinalg/*.h",
@@ -7167,15 +7010,13 @@ cc_library(
deps = [
":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":LinalgDialect",
":LinalgTransforms",
":Pass",
":Support",
":TensorDialect",
- ":Transforms",
- ":VectorDialect",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -7184,7 +7025,6 @@ cc_library(
name = "TensorToSPIRV",
srcs = glob([
"lib/Conversion/TensorToSPIRV/*.cpp",
- "lib/Conversion/TensorToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TensorToSPIRV/*.h",
@@ -7195,12 +7035,9 @@ cc_library(
],
deps = [
":ArithToSPIRV",
- ":ControlFlowToSPIRV",
":ConversionPassIncGen",
- ":FuncDialect",
":FuncToSPIRV",
":IR",
- ":MathToSPIRV",
":Pass",
":SPIRVCommonConversion",
":SPIRVConversion",
@@ -7208,8 +7045,7 @@ cc_library(
":SPIRVUtils",
":Support",
":TensorDialect",
- ":Transforms",
- ":VectorDialect",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -7220,11 +7056,8 @@ cc_library(
hdrs = ["include/mlir/Target/SPIRV/SPIRVBinaryUtils.h"],
includes = ["include"],
deps = [
- ":SPIRVAttrUtilsGen",
":SPIRVDialect",
- ":SPIRVOpsIncGen",
":Support",
- "//llvm:Support",
],
)
@@ -7240,13 +7073,10 @@ cc_library(
includes = ["include"],
deps = [
":IR",
- ":SPIRVAttrUtilsGen",
":SPIRVBinaryUtils",
":SPIRVDialect",
- ":SPIRVOpsIncGen",
":SPIRVSerializationGen",
":Support",
- ":Transforms",
"//llvm:Support",
],
)
@@ -7261,13 +7091,10 @@ cc_library(
includes = ["include"],
deps = [
":IR",
- ":SPIRVAttrUtilsGen",
":SPIRVBinaryUtils",
":SPIRVDialect",
- ":SPIRVOpsIncGen",
":SPIRVSerializationGen",
":Support",
- ":Transforms",
"//llvm:Support",
],
)
@@ -7372,6 +7199,7 @@ cc_library(
":ArithDialect",
":ArithUtils",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
":CastInterfaces",
":ComplexDialect",
":ControlFlowInterfaces",
@@ -7407,7 +7235,6 @@ cc_library(
":IR",
":InferTypeOpInterface",
":TensorDialect",
- "//llvm:Support",
],
)
@@ -7429,7 +7256,6 @@ cc_library(
":TensorUtils",
":TilingInterface",
":ValueBoundsOpInterface",
- "//llvm:Support",
],
)
@@ -7445,7 +7271,6 @@ cc_library(
":DialectUtils",
":TensorDialect",
":ValueBoundsOpInterface",
- "//llvm:Support",
],
)
@@ -7470,7 +7295,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Tensor/Transforms/*.cpp",
- "lib/Dialect/Tensor/Transforms/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/Tensor/Transforms/*.h"]),
@@ -7496,7 +7320,7 @@ cc_library(
":TensorPassIncGen",
":TensorUtils",
":TilingInterface",
- ":Transforms",
+ ":TransformUtils",
":ValueBoundsOpInterface",
":VectorDialect",
"//llvm:Support",
@@ -7575,34 +7399,37 @@ cc_library(
cc_library(
name = "TransformUtils",
srcs = glob(
- include = [
- "lib/Transforms/Utils/*.cpp",
- "lib/Transforms/Utils/*.h",
- ],
+ include = ["lib/Transforms/Utils/*.cpp"],
exclude = ["lib/Transforms/Utils/InliningUtils.cpp"],
- ),
- hdrs = glob(
- include = ["include/mlir/Transforms/*.h"],
- exclude = [
- "include/mlir/Transforms/InliningUtils.h",
- "include/mlir/Transforms/Passes.h",
- ],
- ),
+ ) + [
+ ],
+ hdrs = [
+ "include/mlir/Transforms/CFGToSCF.h",
+ "include/mlir/Transforms/CommutativityUtils.h",
+ "include/mlir/Transforms/ControlFlowSinkUtils.h",
+ "include/mlir/Transforms/DialectConversion.h",
+ "include/mlir/Transforms/FoldUtils.h",
+ "include/mlir/Transforms/GreedyPatternRewriteDriver.h",
+ "include/mlir/Transforms/Inliner.h",
+ "include/mlir/Transforms/LoopInvariantCodeMotionUtils.h",
+ "include/mlir/Transforms/OneToNTypeConversion.h",
+ "include/mlir/Transforms/RegionUtils.h",
+ "include/mlir/Transforms/TopologicalSortUtils.h",
+ ],
includes = ["include"],
deps = [
":Analysis",
+ ":CallOpInterfaces",
":ControlFlowInterfaces",
":FunctionInterfaces",
":IR",
":InliningUtils",
":LoopLikeInterface",
- ":MemorySlotInterfaces",
":Pass",
":Rewrite",
":SideEffectInterfaces",
":SubsetOpInterface",
":Support",
- ":TransformsPassIncGen",
":config",
"//llvm:Support",
],
@@ -7633,7 +7460,6 @@ cc_library(
deps = [
":DerivedAttributeOpInterfaceIncGen",
":IR",
- "//llvm:Support",
],
)
@@ -7691,7 +7517,6 @@ cc_library(
deps = [
":IR",
":InferIntRangeInterfaceIncGen",
- "//llvm:Support",
],
)
@@ -7915,25 +7740,32 @@ cc_library(
name = "Transforms",
srcs = glob([
"lib/Transforms/*.cpp",
- "lib/Transforms/*.h",
]),
- hdrs = glob(["include/mlir/Transforms/*.h"]),
+ hdrs = [
+ "include/mlir/Transforms/CSE.h",
+ "include/mlir/Transforms/EndomorphismSimplification.h",
+ "include/mlir/Transforms/HomomorphismSimplification.h",
+ "include/mlir/Transforms/LocationSnapshot.h",
+ "include/mlir/Transforms/Mem2Reg.h",
+ "include/mlir/Transforms/Passes.h",
+ "include/mlir/Transforms/SROA.h",
+ "include/mlir/Transforms/ViewOpGraph.h",
+ ],
includes = ["include"],
deps = [
":Analysis",
+ ":CallOpInterfaces",
":ControlFlowInterfaces",
":FunctionInterfaces",
":IR",
- ":InliningUtils",
":LoopLikeInterface",
":MemorySlotInterfaces",
":Pass",
- ":Rewrite",
":RuntimeVerifiableOpInterface",
+ ":SideEffectInterfaces",
":Support",
":TransformUtils",
":TransformsPassIncGen",
- ":config",
"//llvm:Support",
],
)
@@ -7961,7 +7793,6 @@ cc_library(
":ArithDialect",
":ComplexDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":FunctionInterfaces",
":GPUDialect",
":GPUTransforms",
@@ -7984,16 +7815,12 @@ cc_library(
includes = ["include"],
deps = [
":ArithDialect",
- ":ControlFlowInterfaces",
":ConversionPassIncGen",
":EmitCDialect",
":IR",
- ":Pass",
":SCFDialect",
- ":Support",
":TransformUtils",
":Transforms",
- "//llvm:Support",
],
)
@@ -8001,17 +7828,14 @@ cc_library(
name = "SCFToSPIRV",
srcs = glob([
"lib/Conversion/SCFToSPIRV/*.cpp",
- "lib/Conversion/SCFToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/SCFToSPIRV/*.h",
]),
includes = ["include"],
deps = [
- ":AffineDialect",
":ArithToSPIRV",
":ConversionPassIncGen",
- ":FuncDialect",
":FuncToSPIRV",
":IR",
":IndexToSPIRV",
@@ -8020,9 +7844,7 @@ cc_library(
":SCFDialect",
":SPIRVConversion",
":SPIRVDialect",
- ":Support",
":TransformUtils",
- ":Transforms",
"//llvm:Support",
],
)
@@ -8039,15 +7861,13 @@ cc_library(
":Analysis",
":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":LLVMDialect",
":MemRefDialect",
":OpenMPDialect",
":Pass",
":SCFDialect",
- ":Support",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -8062,12 +7882,8 @@ cc_library(
":ArithDialect",
":ControlFlowDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
- ":LLVMDialect",
- ":Pass",
":SCFDialect",
- ":Support",
":TransformUtils",
":Transforms",
],
@@ -8081,9 +7897,11 @@ cc_library(
hdrs = glob(["include/mlir/Conversion/LLVMCommon/*.h"]),
includes = ["include"],
deps = [
+ ":DataLayoutInterfaces",
":IR",
":LLVMDialect",
":Support",
+ ":TransformUtils",
":Transforms",
"//llvm:Core",
"//llvm:Support",
@@ -8128,9 +7946,7 @@ cc_library(
":LLVMDialect",
":Pass",
":Rewrite",
- ":Support",
":TransformUtils",
- "//llvm:Support",
],
)
@@ -8187,10 +8003,8 @@ cc_library(
":LLVMDialect",
":Pass",
":SCFDialect",
- ":Transforms",
+ ":TransformUtils",
":UBDialect",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -8204,24 +8018,14 @@ cc_library(
],
includes = ["include"],
deps = [
- ":Analysis",
- ":ArithToLLVM",
":ControlFlowDialect",
":ConversionPassIncGen",
":ConvertToLLVMInterface",
- ":DataLayoutInterfaces",
- ":DialectUtils",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
- ":MathDialect",
- ":MemRefDialect",
- ":Parser",
":Pass",
- ":Support",
":TransformUtils",
- ":Transforms",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -8241,12 +8045,34 @@ cc_library(
":SPIRVDialect",
":SPIRVUtils",
":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
cc_library(
+ name = "MemRefToEmitC",
+ srcs = glob([
+ "lib/Conversion/MemRefToEmitC/*.cpp",
+ ]),
+ hdrs = glob([
+ "include/mlir/Conversion/MemRefToEmitC/*.h",
+ ]),
+ includes = [
+ "include",
+ "lib/Conversion/MemRefToEmitC",
+ ],
+ deps = [
+ ":ConversionPassIncGen",
+ ":EmitCDialect",
+ ":IR",
+ ":MemRefDialect",
+ ":Pass",
+ ":TransformUtils",
+ ],
+)
+
+cc_library(
name = "MemRefToLLVM",
srcs = glob(["lib/Conversion/MemRefToLLVM/*.cpp"]),
hdrs = glob(["include/mlir/Conversion/MemRefToLLVM/*.h"]),
@@ -8256,7 +8082,6 @@ cc_library(
":ArithDialect",
":ConversionPassIncGen",
":ConvertToLLVMInterface",
- ":DataLayoutInterfaces",
":FuncDialect",
":IR",
":LLVMCommonConversion",
@@ -8265,7 +8090,6 @@ cc_library(
":MemRefUtils",
":Pass",
":Support",
- ":Transforms",
"//llvm:Support",
],
)
@@ -8274,7 +8098,6 @@ cc_library(
name = "MemRefToSPIRV",
srcs = glob([
"lib/Conversion/MemRefToSPIRV/*.cpp",
- "lib/Conversion/MemRefToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/MemRefToSPIRV/*.h",
@@ -8286,7 +8109,6 @@ cc_library(
deps = [
":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":FunctionInterfaces",
":IR",
":MemRefDialect",
@@ -8294,7 +8116,7 @@ cc_library(
":SPIRVConversion",
":SPIRVDialect",
":Support",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -8320,12 +8142,10 @@ cc_library(
":ArithDialect",
":ArithUtils",
":ConversionPassIncGen",
- ":ConvertToLLVMInterface",
":IR",
- ":LLVMDialect",
":Pass",
":Support",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
],
)
@@ -8334,7 +8154,6 @@ cc_library(
name = "ArithToArmSME",
srcs = glob([
"lib/Conversion/ArithToArmSME/*.cpp",
- "lib/Conversion/ArithToArmSME/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ArithToArmSME/*.h",
@@ -8344,10 +8163,8 @@ cc_library(
":ArithDialect",
":ArmSMEDialect",
":ConversionPassIncGen",
- ":IR",
":Pass",
- ":Transforms",
- "//llvm:Support",
+ ":TransformUtils",
],
)
@@ -8355,7 +8172,6 @@ cc_library(
name = "ArithToEmitC",
srcs = glob([
"lib/Conversion/ArithToEmitC/*.cpp",
- "lib/Conversion/ArithToEmitC/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ArithToEmitC/*.h",
@@ -8368,12 +8184,8 @@ cc_library(
":ArithDialect",
":ConversionPassIncGen",
":EmitCDialect",
- ":IR",
":Pass",
- ":Support",
":TransformUtils",
- ":Transforms",
- "//llvm:Support",
],
)
@@ -8383,7 +8195,6 @@ cc_library(
hdrs = glob(["include/mlir/Conversion/ArithToLLVM/*.h"]),
includes = ["include"],
deps = [
- ":Analysis",
":ArithAttrToLLVMConversion",
":ArithDialect",
":ConversionPassIncGen",
@@ -8392,8 +8203,6 @@ cc_library(
":LLVMCommonConversion",
":LLVMDialect",
":Pass",
- ":Support",
- ":Transforms",
],
)
@@ -8405,14 +8214,11 @@ cc_library(
deps = [
":ArithDialect",
":ConversionPassIncGen",
- ":FuncToSPIRV",
":IR",
":Pass",
":SPIRVCommonConversion",
":SPIRVConversion",
":SPIRVDialect",
- ":Support",
- ":Transforms",
"//llvm:Support",
],
)
@@ -8423,18 +8229,14 @@ cc_library(
hdrs = glob(["include/mlir/Conversion/MathToLLVM/*.h"]),
includes = ["include"],
deps = [
- ":Analysis",
":ArithAttrToLLVMConversion",
":ConversionPassIncGen",
":ConvertToLLVMInterface",
- ":DataLayoutInterfaces",
":IR",
":LLVMCommonConversion",
":LLVMDialect",
":MathDialect",
":Pass",
- ":Support",
- ":Transforms",
],
)
@@ -8454,7 +8256,7 @@ cc_library(
":MathDialect",
":Pass",
":SCFDialect",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
":VectorUtils",
"//llvm:Support",
@@ -8478,6 +8280,23 @@ gentbl_cc_library(
deps = [":BytecodeOpInterfaceTdFiles"],
)
+cc_library(
+ name = "BytecodeOpInterface",
+ srcs = ["lib/Bytecode/BytecodeOpInterface.cpp"],
+ hdrs = [
+ "include/mlir/Bytecode/BytecodeImplementation.h",
+ "include/mlir/Bytecode/BytecodeOpInterface.h",
+ "include/mlir/Bytecode/Encoding.h",
+ ],
+ includes = ["include"],
+ deps = [
+ ":BytecodeOpInterfaceIncGen",
+ ":IR",
+ ":Support",
+ "//llvm:Support",
+ ],
+)
+
gentbl_cc_library(
name = "CallOpInterfacesIncGen",
tbl_outs = [
@@ -8532,7 +8351,6 @@ cc_library(
deps = [
":CastInterfacesIncGen",
":IR",
- "//llvm:Support",
],
)
@@ -8646,9 +8464,7 @@ cc_library(
srcs = glob(
[
"lib/Analysis/*.cpp",
- "lib/Analysis/*.h",
"lib/Analysis/*/*.cpp",
- "lib/Analysis/*/*.h",
],
),
hdrs = glob(
@@ -8680,16 +8496,25 @@ cc_library(
)
cc_library(
+ name = "ParseUtilities",
+ hdrs = ["include/mlir/Tools/ParseUtilities.h"],
+ includes = ["include"],
+ deps = [
+ ":IR",
+ ":Parser",
+ ],
+)
+
+cc_library(
name = "TranslateLib",
srcs = glob([
"lib/Tools/mlir-translate/*.cpp",
- ]) + [
- "include/mlir/Tools/ParseUtilities.h",
- ],
+ ]),
hdrs = glob(["include/mlir/Tools/mlir-translate/*.h"]),
includes = ["include"],
deps = [
":IR",
+ ":ParseUtilities",
":Parser",
":Support",
"//llvm:Support",
@@ -8699,6 +8524,7 @@ cc_library(
cc_library(
name = "ToLLVMIRTranslation",
srcs = [
+ "lib/Target/LLVMIR/AttrKindDetail.h",
"lib/Target/LLVMIR/DebugTranslation.cpp",
"lib/Target/LLVMIR/DebugTranslation.h",
"lib/Target/LLVMIR/LoopAnnotationTranslation.cpp",
@@ -8711,7 +8537,6 @@ cc_library(
"include/mlir/Target/LLVMIR/LLVMTranslationInterface.h",
"include/mlir/Target/LLVMIR/ModuleTranslation.h",
"include/mlir/Target/LLVMIR/TypeToLLVM.h",
- "lib/Target/LLVMIR/AttrKindDetail.h",
],
includes = ["include"],
deps = [
@@ -8877,8 +8702,6 @@ cc_library(
deps = [
":IR",
":ToLLVMIRTranslation",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -8954,7 +8777,6 @@ cc_library(
":IR",
":LLVMDialect",
":OpenACCDialect",
- ":OpenACCToLLVM",
":OpenMPCommon",
":Support",
":ToLLVMIRTranslation",
@@ -8977,6 +8799,7 @@ cc_library(
":OpenMPDialect",
":Support",
":ToLLVMIRTranslation",
+ ":TransformUtils",
":Transforms",
"//llvm:Core",
"//llvm:FrontendOpenMP",
@@ -9033,9 +8856,11 @@ cc_library(
cc_library(
name = "FromLLVMIRTranslation",
srcs = [
+ "lib/Target/LLVMIR/AttrKindDetail.h",
"lib/Target/LLVMIR/DataLayoutImporter.cpp",
"lib/Target/LLVMIR/DataLayoutImporter.h",
"lib/Target/LLVMIR/DebugImporter.cpp",
+ "lib/Target/LLVMIR/DebugImporter.h",
"lib/Target/LLVMIR/LoopAnnotationImporter.cpp",
"lib/Target/LLVMIR/LoopAnnotationImporter.h",
"lib/Target/LLVMIR/ModuleImport.cpp",
@@ -9046,12 +8871,11 @@ cc_library(
"include/mlir/Target/LLVMIR/LLVMImportInterface.h",
"include/mlir/Target/LLVMIR/ModuleImport.h",
"include/mlir/Target/LLVMIR/TypeFromLLVM.h",
- "lib/Target/LLVMIR/AttrKindDetail.h",
- "lib/Target/LLVMIR/DebugImporter.h",
],
includes = ["include"],
deps = [
":DLTIDialect",
+ ":DataLayoutInterfaces",
":IR",
":LLVMConversionIncGen",
":LLVMDialect",
@@ -9145,7 +8969,6 @@ cc_library(
includes = ["include"],
deps = [
":IR",
- ":Pass",
":Support",
"//llvm:Support",
],
@@ -9153,18 +8976,15 @@ cc_library(
cc_library(
name = "MlirOptLib",
- srcs = [
- "include/mlir/Tools/ParseUtilities.h",
- "lib/Tools/mlir-opt/MlirOptMain.cpp",
- ],
+ srcs = ["lib/Tools/mlir-opt/MlirOptMain.cpp"],
hdrs = ["include/mlir/Tools/mlir-opt/MlirOptMain.h"],
includes = ["include"],
deps = [
- ":BytecodeReader",
":BytecodeWriter",
":Debug",
":IR",
":IRDLDialect",
+ ":ParseUtilities",
":Parser",
":Pass",
":PluginsLib",
@@ -9319,7 +9139,6 @@ cc_library(
":SCFToGPU",
":SCFTransformOps",
":SCFTransforms",
- ":SDBM",
":SPIRVDialect",
":SPIRVPassIncGen",
":SPIRVTarget",
@@ -9430,7 +9249,6 @@ cc_binary(
"//mlir/test:TestTosaDialect",
"//mlir/test:TestTransformDialect",
"//mlir/test:TestTransforms",
- "//mlir/test:TestTypeDialect",
"//mlir/test:TestVector",
"//mlir/test:TestVectorToSPIRV",
],
@@ -9439,10 +9257,7 @@ cc_binary(
cc_library(
name = "MlirJitRunner",
srcs = ["lib/ExecutionEngine/JitRunner.cpp"],
- hdrs = [
- "include/mlir/ExecutionEngine/JitRunner.h",
- "include/mlir/Tools/ParseUtilities.h",
- ],
+ hdrs = ["include/mlir/ExecutionEngine/JitRunner.h"],
includes = ["include"],
deps = [
":AllPassesAndDialects",
@@ -9453,6 +9268,7 @@ cc_library(
":LLVMToLLVMIRTranslation",
":OpenACCToLLVMIRTranslation",
":OpenMPToLLVMIRTranslation",
+ ":ParseUtilities",
":Parser",
":SCFToControlFlow",
":Support",
@@ -9586,7 +9402,6 @@ cc_library(
deps = [
":mlir_c_runner_utils",
":mlir_float16_utils",
- "//llvm:Support",
],
)
@@ -9695,13 +9510,7 @@ cc_library(
"manual", # External dependency
],
deps = [
- ":FuncDialect",
- ":IR",
- ":Pass",
- ":SPIRVDialect",
- ":SideEffectInterfaces",
":Support",
- "//llvm:Support",
"@vulkan_headers",
"@vulkan_sdk//:sdk",
],
@@ -9881,7 +9690,6 @@ cc_library(
":AtomicInterfacesIncGen",
":ControlFlowInterfaces",
":IR",
- "//llvm:Support",
],
)
@@ -10048,7 +9856,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/OpenACC/IR/*.cpp",
- "lib/Dialect/OpenACC/IR/*.h",
],
),
hdrs = glob(
@@ -10067,6 +9874,7 @@ cc_library(
deps = [
":AtomicInterfaces",
":AtomicInterfacesIncGen",
+ ":BytecodeOpInterface",
":ControlFlowInterfaces",
":IR",
":LLVMDialect",
@@ -10076,7 +9884,8 @@ cc_library(
":OpenACCOpsInterfacesIncGen",
":OpenACCTypeInterfacesIncGen",
":OpenACCTypesIncGen",
- ":Transforms",
+ ":SideEffectInterfaces",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -10102,14 +9911,12 @@ cc_library(
srcs = glob(
[
"lib/Dialect/OpenACC/Transforms/*.cpp",
- "lib/Dialect/OpenACC/Transforms/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/OpenACC/Transforms/*.h"]),
includes = ["include"],
deps = [
":FuncDialect",
- ":LLVMIRTransforms",
":OpenACCDialect",
":OpenACCPassIncGen",
":Pass",
@@ -10266,7 +10073,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/OpenMP/IR/*.cpp",
- "lib/Dialect/OpenMP/IR/*.h",
],
),
hdrs = glob(
@@ -10292,6 +10098,7 @@ cc_library(
":OpenMPInterfacesIncGen",
":OpenMPOpsIncGen",
":OpenMPTypeInterfacesIncGen",
+ ":SideEffectInterfaces",
":Support",
"//llvm:FrontendOpenMP",
"//llvm:Support",
@@ -10302,7 +10109,6 @@ cc_library(
name = "OpenACCToSCF",
srcs = glob([
"lib/Conversion/OpenACCToSCF/*.cpp",
- "lib/Conversion/OpenACCToSCF/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/OpenACCToSCF/*.h",
@@ -10311,38 +10117,11 @@ cc_library(
deps = [
":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":OpenACCDialect",
- ":OpenACCOpsIncGen",
- ":OpenACCTypesIncGen",
":Pass",
":SCFDialect",
- ":Transforms",
- ],
-)
-
-cc_library(
- name = "OpenACCToLLVM",
- srcs = glob([
- "lib/Conversion/OpenACCToLLVM/*.cpp",
- "lib/Conversion/OpenACCToLLVM/*.h",
- ]),
- hdrs = glob([
- "include/mlir/Conversion/OpenACCToLLVM/*.h",
- ]),
- includes = ["include"],
- deps = [
- ":ConversionPassIncGen",
- ":FuncDialect",
- ":IR",
- ":LLVMCommonConversion",
- ":LLVMDialect",
- ":OpenACCDialect",
- ":Pass",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
+ ":TransformUtils",
],
)
@@ -10350,7 +10129,6 @@ cc_library(
name = "OpenMPToLLVM",
srcs = glob([
"lib/Conversion/OpenMPToLLVM/*.cpp",
- "lib/Conversion/OpenMPToLLVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/OpenMPToLLVM/*.h",
@@ -10360,17 +10138,12 @@ cc_library(
":ArithToLLVM",
":ControlFlowToLLVM",
":ConversionPassIncGen",
- ":FuncDialect",
":FuncToLLVM",
- ":IR",
":LLVMCommonConversion",
":LLVMDialect",
":MemRefToLLVM",
":OpenMPDialect",
":Pass",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -10455,16 +10228,13 @@ cc_library(
],
includes = ["include"],
deps = [
- ":ArithDialect",
- ":FuncDialect",
+ ":BytecodeOpInterface",
":IR",
":InferTypeOpInterface",
- ":Pass",
":QuantDialectBytecodeGen",
":QuantOpsIncGen",
":SideEffectInterfaces",
":Support",
- ":TransformUtils",
"//llvm:Support",
],
)
@@ -10560,25 +10330,18 @@ cc_library(
name = "IndexToLLVM",
srcs = glob([
"lib/Conversion/IndexToLLVM/*.cpp",
- "lib/Conversion/IndexToLLVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/IndexToLLVM/*.h",
]),
includes = ["include"],
deps = [
- ":Analysis",
":ConversionPassIncGen",
":ConvertToLLVMInterface",
- ":IR",
":IndexDialect",
":LLVMCommonConversion",
":LLVMDialect",
":Pass",
- ":Support",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -10586,7 +10349,6 @@ cc_library(
name = "IndexToSPIRV",
srcs = glob([
"lib/Conversion/IndexToSPIRV/*.cpp",
- "lib/Conversion/IndexToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/IndexToSPIRV/*.h",
@@ -10594,16 +10356,11 @@ cc_library(
includes = ["include"],
deps = [
":ConversionPassIncGen",
- ":IR",
":IndexDialect",
":Pass",
":SPIRVCommonConversion",
":SPIRVConversion",
":SPIRVDialect",
- ":Support",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -10613,6 +10370,7 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/Index/IR/*.h"]),
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":CastInterfaces",
":ConvertToLLVMInterface",
":IR",
@@ -10621,6 +10379,7 @@ cc_library(
":InferIntRangeCommon",
":InferIntRangeInterface",
":InferTypeOpInterface",
+ ":SideEffectInterfaces",
"//llvm:Support",
],
)
@@ -10906,6 +10665,36 @@ gentbl_cc_library(
)
td_library(
+ name = "BufferViewFlowOpInterfaceTdFiles",
+ srcs = [
+ "include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td",
+ ],
+ includes = ["include"],
+ deps = [
+ ":OpBaseTdFiles",
+ ],
+)
+
+gentbl_cc_library(
+ name = "BufferViewFlowOpInterfaceIncGen",
+ tbl_outs = [
+ (
+ ["-gen-op-interface-decls"],
+ "include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h.inc",
+ ),
+ (
+ ["-gen-op-interface-defs"],
+ "include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp.inc",
+ ),
+ ],
+ tblgen = ":mlir-tblgen",
+ td_file = "include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td",
+ deps = [
+ ":BufferViewFlowOpInterfaceTdFiles",
+ ],
+)
+
+td_library(
name = "SubsetOpInterfaceTdFiles",
srcs = [
"include/mlir/Interfaces/SubsetOpInterface.td",
@@ -10944,9 +10733,7 @@ cc_library(
":DestinationStyleOpInterface",
":IR",
":SubsetOpInterfaceIncGen",
- ":Support",
":ValueBoundsOpInterface",
- "//llvm:Support",
],
)
@@ -10977,7 +10764,6 @@ cc_library(
name = "LinalgToStandard",
srcs = glob([
"lib/Conversion/LinalgToStandard/*.cpp",
- "lib/Conversion/LinalgToStandard/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/LinalgToStandard/*.h",
@@ -10987,17 +10773,13 @@ cc_library(
":AffineDialect",
":ConversionPassIncGen",
":FuncDialect",
- ":IR",
":LLVMDialect",
":LinalgDialect",
":LinalgTransforms",
":MemRefDialect",
":Pass",
":SCFDialect",
- ":Support",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
+ ":TransformUtils",
],
)
@@ -11011,14 +10793,13 @@ cc_library(
":ArithDialect",
":ArithUtils",
":AsmParser",
- ":BufferizationDialect",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
":ComplexDialect",
":ControlFlowInterfaces",
":CopyOpInterface",
":DestinationStyleOpInterface",
":DialectUtils",
- ":FuncDialect",
":FunctionInterfaces",
":IR",
":InferTypeOpInterface",
@@ -11123,7 +10904,6 @@ cc_library(
name = "LinalgUtils",
srcs = glob([
"lib/Dialect/Linalg/Utils/*.cpp",
- "lib/Dialect/Linalg/Utils/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/Linalg/Utils/*.h",
@@ -11152,7 +10932,6 @@ cc_library(
name = "LinalgTransforms",
srcs = glob([
"lib/Dialect/Linalg/Transforms/*.cpp",
- "lib/Dialect/Linalg/Transforms/*.h",
]),
hdrs = [
"include/mlir/Dialect/Linalg/Passes.h",
@@ -11167,7 +10946,6 @@ cc_library(
":AffineUtils",
":Analysis",
":ArithDialect",
- ":ArithTransforms",
":ArithUtils",
":BufferizationDialect",
":BufferizationInterfaces",
@@ -11184,9 +10962,7 @@ cc_library(
":LinalgPassIncGen",
":LinalgStructuredOpsIncGen",
":LinalgUtils",
- ":LoopLikeInterface",
":MaskableOpInterface",
- ":MathDialect",
":MemRefDialect",
":MemRefTransforms",
":MeshDialect",
@@ -11205,7 +10981,6 @@ cc_library(
":TensorUtils",
":TilingInterface",
":TransformUtils",
- ":Transforms",
":ValueBoundsOpInterface",
":VectorDialect",
":VectorToSCF",
@@ -11255,7 +11030,6 @@ cc_library(
":Analysis",
":DestinationStyleOpInterface",
":IR",
- ":Support",
":ValueBoundsOpInterfaceIncGen",
":ViewLikeInterface",
"//llvm:Support",
@@ -11269,7 +11043,6 @@ cc_library(
includes = ["include"],
deps = [
":ArithDialect",
- ":IR",
":ValueBoundsOpInterface",
],
)
@@ -11285,7 +11058,6 @@ cc_library(
":Support",
":TilingInterfaceIncGen",
":ViewLikeInterface",
- "//llvm:Support",
],
)
@@ -11482,8 +11254,6 @@ cc_library(
":IR",
":MaskableOpInterfaceIncGen",
":MaskingOpInterface",
- ":Support",
- "//llvm:Support",
],
)
@@ -11495,8 +11265,6 @@ cc_library(
deps = [
":IR",
":MaskingOpInterfaceIncGen",
- ":Support",
- "//llvm:Support",
],
)
@@ -11504,7 +11272,6 @@ cc_library(
name = "VectorToLLVM",
srcs = glob([
"lib/Conversion/VectorToLLVM/*.cpp",
- "lib/Conversion/VectorToLLVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/VectorToLLVM/*.h",
@@ -11517,12 +11284,9 @@ cc_library(
":ArithDialect",
":ArithUtils",
":ArmNeonDialect",
- ":ArmSMEDialect",
- ":ArmSMETransforms",
":ArmSVEDialect",
":ArmSVETransforms",
":ConversionPassIncGen",
- ":DialectUtils",
":FuncDialect",
":IR",
":LLVMCommonConversion",
@@ -11530,14 +11294,12 @@ cc_library(
":MaskableOpInterface",
":MemRefDialect",
":Pass",
- ":Support",
":ToLLVMIRTranslation",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
":VectorTransforms",
":X86VectorDialect",
":X86VectorTransforms",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -11546,7 +11308,6 @@ cc_library(
name = "VectorToArmSME",
srcs = glob([
"lib/Conversion/VectorToArmSME/*.cpp",
- "lib/Conversion/VectorToArmSME/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/VectorToArmSME/*.h",
@@ -11558,7 +11319,7 @@ cc_library(
":IR",
":MemRefDialect",
":Pass",
- ":Transforms",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -11567,7 +11328,6 @@ cc_library(
name = "VectorToGPU",
srcs = glob([
"lib/Conversion/VectorToGPU/*.cpp",
- "lib/Conversion/VectorToGPU/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/VectorToGPU/*.h",
@@ -11579,23 +11339,19 @@ cc_library(
":ArithDialect",
":ConversionPassIncGen",
":DialectUtils",
- ":FuncDialect",
- ":FuncToLLVM",
":GPUDialect",
":IR",
- ":LLVMDialect",
":MemRefDialect",
":NVGPUDialect",
":NVGPUUtils",
- ":NVVMDialect",
":Pass",
":SCFDialect",
":Support",
+ ":TransformUtils",
":Transforms",
":VectorDialect",
":VectorTransforms",
":VectorUtils",
- "//llvm:Core",
"//llvm:Support",
],
)
@@ -11604,7 +11360,6 @@ cc_library(
name = "VectorToSCF",
srcs = glob([
"lib/Conversion/VectorToSCF/*.cpp",
- "lib/Conversion/VectorToSCF/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/VectorToSCF/*.h",
@@ -11612,23 +11367,17 @@ cc_library(
includes = ["include"],
deps = [
":AffineDialect",
- ":AffineUtils",
":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
- ":FuncToLLVM",
":IR",
- ":LLVMDialect",
":MemRefDialect",
":Pass",
":SCFDialect",
- ":Support",
":TensorDialect",
+ ":TransformUtils",
":Transforms",
":VectorDialect",
":VectorTransforms",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -11754,7 +11503,6 @@ cc_library(
name = "TosaDialect",
srcs = glob([
"lib/Dialect/Tosa/IR/*.cpp",
- "lib/Dialect/Tosa/IR/*.h",
"lib/Dialect/Tosa/Utils/*.cpp",
"lib/Dialect/Tosa/Transforms/*.cpp",
]),
@@ -11765,8 +11513,8 @@ cc_library(
]),
includes = ["include"],
deps = [
- ":Analysis",
":ArithDialect",
+ ":BytecodeOpInterface",
":Dialect",
":DialectUtils",
":FuncDialect",
@@ -11778,6 +11526,7 @@ cc_library(
":MeshShardingInterface",
":Pass",
":QuantOps",
+ ":SideEffectInterfaces",
":Support",
":TensorDialect",
":TosaDialectBytecodeGen",
@@ -11794,7 +11543,6 @@ cc_library(
name = "TosaToArith",
srcs = glob([
"lib/Conversion/TosaToArith/*.cpp",
- "lib/Conversion/TosaToArith/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TosaToArith/*.h",
@@ -11806,11 +11554,10 @@ cc_library(
deps = [
":ArithDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":Pass",
":TosaDialect",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -11818,7 +11565,6 @@ cc_library(
name = "TosaToLinalg",
srcs = glob([
"lib/Conversion/TosaToLinalg/*.cpp",
- "lib/Conversion/TosaToLinalg/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TosaToLinalg/*.h",
@@ -11835,13 +11581,13 @@ cc_library(
":FuncDialect",
":IR",
":LinalgDialect",
- ":LinalgUtils",
":MathDialect",
":Pass",
":SCFDialect",
":TensorDialect",
":TensorUtils",
":TosaDialect",
+ ":TransformUtils",
":Transforms",
"//llvm:Support",
],
@@ -11851,7 +11597,6 @@ cc_library(
name = "TosaToMLProgram",
srcs = glob([
"lib/Conversion/TosaToMLProgram/*.cpp",
- "lib/Conversion/TosaToMLProgram/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TosaToMLProgram/*.h",
@@ -11862,12 +11607,11 @@ cc_library(
],
deps = [
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":MLProgramDialect",
":Pass",
":TosaDialect",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -11875,7 +11619,6 @@ cc_library(
name = "TosaToSCF",
srcs = glob([
"lib/Conversion/TosaToSCF/*.cpp",
- "lib/Conversion/TosaToSCF/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TosaToSCF/*.h",
@@ -11892,7 +11635,7 @@ cc_library(
":SCFDialect",
":TensorDialect",
":TosaDialect",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -11900,7 +11643,6 @@ cc_library(
name = "TosaToTensor",
srcs = glob([
"lib/Conversion/TosaToTensor/*.cpp",
- "lib/Conversion/TosaToTensor/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/TosaToTensor/*.h",
@@ -11913,13 +11655,12 @@ cc_library(
":ArithDialect",
":ArithUtils",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":Pass",
":TensorDialect",
":TensorUtils",
":TosaDialect",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -11973,17 +11714,17 @@ gentbl_cc_library(
[
"-gen-op-interface-decls",
],
- "include/mlir/Dialect/Transform/IR/MatchInterfaces.h.inc",
+ "include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.h.inc",
),
(
[
"-gen-op-interface-defs",
],
- "include/mlir/Dialect/Transform/IR/MatchInterfaces.cpp.inc",
+ "include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.cpp.inc",
),
],
tblgen = ":mlir-tblgen",
- td_file = "include/mlir/Dialect/Transform/IR/MatchInterfaces.td",
+ td_file = "include/mlir/Dialect/Transform/Interfaces/MatchInterfaces.td",
deps = [
":TransformDialectInterfacesIncGen",
":TransformDialectTdFiles",
@@ -12093,7 +11834,9 @@ cc_library(
":SideEffectInterfaces",
":Support",
":TransformDialectInterfacesIncGen",
+ ":TransformDialectMatchInterfacesIncGen",
":TransformDialectUtils",
+ ":TransformUtils",
":Transforms",
"//llvm:Support",
],
@@ -12121,10 +11864,10 @@ cc_library(
":TransformDialectEnumsIncGen",
":TransformDialectIncGen",
":TransformDialectInterfaces",
- ":TransformDialectMatchInterfacesIncGen",
":TransformDialectUtils",
":TransformOpsIncGen",
":TransformTypesIncGen",
+ ":TransformUtils",
":Transforms",
"//llvm:Support",
],
@@ -12263,6 +12006,7 @@ cc_library(
":TransformDialect",
":TransformDialectInterfaces",
":TransformLoopExtensionOpsIncGen",
+ ":TransformUtils",
":Transforms",
"//llvm:Support",
],
@@ -12318,7 +12062,6 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/Transform/Utils/*.h"]),
includes = ["include"],
deps = [
- ":DialectUtils",
":IR",
":Support",
":ViewLikeInterface",
@@ -12405,13 +12148,13 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Complex/IR/*.cpp",
- "lib/Dialect/Complex/IR/*.h",
],
),
hdrs = ["include/mlir/Dialect/Complex/IR/Complex.h"],
includes = ["include"],
deps = [
":ArithDialect",
+ ":BytecodeOpInterface",
":ComplexAttributesIncGen",
":ComplexBaseIncGen",
":ComplexOpsIncGen",
@@ -12428,7 +12171,6 @@ cc_library(
name = "ComplexToLLVM",
srcs = glob([
"lib/Conversion/ComplexToLLVM/*.cpp",
- "lib/Conversion/ComplexToLLVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ComplexToLLVM/*.h",
@@ -12440,15 +12182,9 @@ cc_library(
":ComplexDialect",
":ConversionPassIncGen",
":ConvertToLLVMInterface",
- ":FuncDialect",
- ":IR",
":LLVMCommonConversion",
":LLVMDialect",
":Pass",
- ":Support",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -12456,7 +12192,6 @@ cc_library(
name = "ComplexToLibm",
srcs = glob([
"lib/Conversion/ComplexToLibm/*.cpp",
- "lib/Conversion/ComplexToLibm/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ComplexToLibm/*.h",
@@ -12465,14 +12200,10 @@ cc_library(
deps = [
":ComplexDialect",
":ConversionPassIncGen",
- ":DialectUtils",
":FuncDialect",
":IR",
":Pass",
- ":Support",
- ":Transforms",
- "//llvm:Core",
- "//llvm:Support",
+ ":TransformUtils",
],
)
@@ -12480,7 +12211,6 @@ cc_library(
name = "ComplexToSPIRV",
srcs = glob([
"lib/Conversion/ComplexToSPIRV/*.cpp",
- "lib/Conversion/ComplexToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ComplexToSPIRV/*.h",
@@ -12489,14 +12219,10 @@ cc_library(
deps = [
":ComplexDialect",
":ConversionPassIncGen",
- ":IR",
":Pass",
- ":SPIRVCommonConversion",
":SPIRVConversion",
":SPIRVDialect",
- ":Support",
- ":Transforms",
- "//llvm:Core",
+ ":TransformUtils",
"//llvm:Support",
],
)
@@ -12505,7 +12231,6 @@ cc_library(
name = "ComplexToStandard",
srcs = glob([
"lib/Conversion/ComplexToStandard/*.cpp",
- "lib/Conversion/ComplexToStandard/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/ComplexToStandard/*.h",
@@ -12515,11 +12240,10 @@ cc_library(
":ArithDialect",
":ComplexDialect",
":ConversionPassIncGen",
- ":FuncDialect",
":IR",
":MathDialect",
":Pass",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -12681,6 +12405,8 @@ cc_library(
":ArithOpsIncGen",
":ArithOpsInterfacesIncGen",
":BufferizationInterfaces",
+ ":BytecodeOpInterface",
+ ":CallOpInterfaces",
":CastInterfaces",
":CommonFolders",
":ControlFlowInterfaces",
@@ -12692,6 +12418,7 @@ cc_library(
":InferTypeOpInterface",
":InliningUtils",
":Pass",
+ ":SideEffectInterfaces",
":Support",
":UBDialect",
":ValueBoundsOpInterfaceIncGen",
@@ -12720,7 +12447,6 @@ cc_library(
name = "ArithTransforms",
srcs = glob([
"lib/Dialect/Arith/Transforms/*.cpp",
- "lib/Dialect/Arith/Transforms/*.h",
]),
hdrs = glob([
"include/mlir/Dialect/Arith/Transforms/*.h",
@@ -12737,13 +12463,11 @@ cc_library(
":FuncDialect",
":FuncTransforms",
":IR",
- ":InferIntRangeInterface",
":MemRefDialect",
":Pass",
":Support",
":TensorDialect",
":TransformUtils",
- ":Transforms",
":ValueBoundsOpInterface",
":VectorDialect",
"//llvm:Support",
@@ -12842,7 +12566,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Math/IR/*.cpp",
- "lib/Dialect/Math/IR/*.h",
],
),
hdrs = [
@@ -12851,6 +12574,7 @@ cc_library(
includes = ["include"],
deps = [
":ArithDialect",
+ ":BytecodeOpInterface",
":CommonFolders",
":ConvertToLLVMInterface",
":IR",
@@ -12861,7 +12585,6 @@ cc_library(
":SideEffectInterfaces",
":UBDialect",
":VectorInterfaces",
- "//llvm:Support",
],
)
@@ -12869,7 +12592,6 @@ cc_library(
name = "MathTransforms",
srcs = glob([
"lib/Dialect/Math/Transforms/*.cpp",
- "lib/Dialect/Math/Transforms/*.h",
]),
hdrs = glob(["include/mlir/Dialect/Math/Transforms/*.h"]),
includes = ["include"],
@@ -12881,7 +12603,7 @@ cc_library(
":MathPassIncGen",
":Pass",
":SCFDialect",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
":VectorUtils",
":X86VectorDialect",
@@ -12893,7 +12615,6 @@ cc_library(
name = "MathToLibm",
srcs = glob([
"lib/Conversion/MathToLibm/*.cpp",
- "lib/Conversion/MathToLibm/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/MathToLibm/*.h",
@@ -12908,12 +12629,8 @@ cc_library(
":LLVMDialect",
":MathDialect",
":Pass",
- ":Support",
- ":Transforms",
+ ":TransformUtils",
":VectorDialect",
- ":VectorUtils",
- "//llvm:Core",
- "//llvm:Support",
],
)
@@ -12984,7 +12701,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/MemRef/IR/*.cpp",
- "lib/Dialect/MemRef/IR/*.h",
],
),
hdrs = [
@@ -12997,9 +12713,9 @@ cc_library(
":AllocationOpInterface",
":ArithDialect",
":ArithUtils",
- ":BufferizationInterfaces",
+ ":BytecodeOpInterface",
+ ":CallOpInterfaces",
":CastInterfaces",
- ":ComplexDialect",
":ControlFlowInterfaces",
":ConvertToLLVMInterface",
":CopyOpInterface",
@@ -13012,11 +12728,11 @@ cc_library(
":MemorySlotInterfaces",
":RuntimeVerifiableOpInterface",
":ShapedOpInterfaces",
+ ":SideEffectInterfaces",
":Support",
":ValueBoundsOpInterface",
":ViewLikeInterface",
"//llvm:Support",
- "//llvm:TargetParser",
],
)
@@ -13060,7 +12776,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/MemRef/Transforms/*.cpp",
- "lib/Dialect/MemRef/Transforms/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/MemRef/Transforms/*.h"]),
@@ -13074,6 +12789,7 @@ cc_library(
":ArithTransforms",
":ArithUtils",
":BufferizationDialect",
+ ":BufferizationInterfaces",
":ControlFlowDialect",
":DialectUtils",
":FuncDialect",
@@ -13090,7 +12806,7 @@ cc_library(
":SCFDialect",
":Support",
":TensorDialect",
- ":Transforms",
+ ":TransformUtils",
":ValueBoundsOpInterface",
":VectorDialect",
"//llvm:Support",
@@ -13136,6 +12852,7 @@ cc_library(
":AffineDialect",
":Analysis",
":ArithDialect",
+ ":BytecodeOpInterface",
":IR",
":LLVMCommonConversion",
":LoopLikeInterface",
@@ -13259,6 +12976,8 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/MLProgram/IR/*.h"]),
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
+ ":CallOpInterfaces",
":ControlFlowInterfaces",
":FunctionInterfaces",
":IR",
@@ -13266,9 +12985,7 @@ cc_library(
":MLProgramAttributesIncGen",
":MLProgramOpsIncGen",
":MLProgramTypesIncGen",
- ":Pass",
- ":Support",
- ":Transforms",
+ ":SideEffectInterfaces",
"//llvm:Support",
],
)
@@ -13283,7 +13000,6 @@ cc_library(
]),
includes = ["include"],
deps = [
- ":BufferizationDialect",
":BufferizationInterfaces",
":FuncDialect",
":IR",
@@ -13403,14 +13119,12 @@ cc_library(
hdrs = glob(["include/mlir/Dialect/MPI/IR/*.h"]),
includes = ["include"],
deps = [
- ":Dialect",
+ ":BytecodeOpInterface",
":IR",
- ":InferTypeOpInterface",
":MPIAttrsIncGen",
":MPIIncGen",
":MPIOpsIncGen",
":MPITypesIncGen",
- ":SideEffectInterfaces",
"//llvm:Support",
],
)
@@ -13464,6 +13178,7 @@ td_library(
includes = ["include"],
deps = [
":AllocationOpInterfaceTdFiles",
+ ":BufferViewFlowOpInterfaceTdFiles",
":BufferizableOpInterfaceTdFiles",
":CopyOpInterfaceTdFiles",
":DestinationStyleOpInterfaceTdFiles",
@@ -13571,16 +13286,14 @@ cc_library(
":BufferizationInterfaces",
":BufferizationTransformOpsIncGen",
":BufferizationTransforms",
+ ":BytecodeOpInterface",
":FunctionInterfaces",
":IR",
":LinalgDialect",
":MemRefDialect",
- ":Parser",
- ":SideEffectInterfaces",
":TensorDialect",
":TransformDialect",
":TransformDialectInterfaces",
- "//llvm:Support",
],
)
@@ -13610,11 +13323,13 @@ cc_library(
],
hdrs = [
"include/mlir/Dialect/Bufferization/IR/BufferDeallocationOpInterface.h",
+ "include/mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.h",
"include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h",
],
includes = ["include"],
deps = [
":BufferDeallocationOpInterfaceIncGen",
+ ":BufferViewFlowOpInterfaceIncGen",
":BufferizableOpInterfaceIncGen",
":BufferizationEnumsIncGen",
":IR",
@@ -13627,6 +13342,7 @@ cc_library(
name = "BufferizationDialect",
srcs = [
"lib/Dialect/Bufferization/IR/BufferDeallocationOpInterface.cpp",
+ "lib/Dialect/Bufferization/IR/BufferViewFlowOpInterface.cpp",
"lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp",
"lib/Dialect/Bufferization/IR/BufferizationDialect.cpp",
"lib/Dialect/Bufferization/IR/BufferizationOps.cpp",
@@ -13641,13 +13357,14 @@ cc_library(
deps = [
":AffineDialect",
":AllocationOpInterface",
- ":Analysis",
":ArithDialect",
":BufferDeallocationOpInterfaceIncGen",
+ ":BufferViewFlowOpInterfaceIncGen",
":BufferizableOpInterfaceIncGen",
":BufferizationBaseIncGen",
":BufferizationInterfaces",
":BufferizationOpsIncGen",
+ ":BytecodeOpInterface",
":ControlFlowInterfaces",
":CopyOpInterface",
":DestinationStyleOpInterface",
@@ -13659,7 +13376,6 @@ cc_library(
":MemRefDialect",
":SparseTensorDialect",
":SubsetOpInterface",
- ":Support",
":TensorDialect",
"//llvm:Support",
],
@@ -13686,7 +13402,6 @@ cc_library(
srcs = glob(
[
"lib/Dialect/Bufferization/Transforms/*.cpp",
- "lib/Dialect/Bufferization/Transforms/*.h",
],
),
hdrs = glob(["include/mlir/Dialect/Bufferization/Transforms/*.h"]),
@@ -13698,9 +13413,11 @@ cc_library(
":BufferizationDialect",
":BufferizationInterfaces",
":BufferizationPassIncGen",
+ ":CallOpInterfaces",
":ControlFlowDialect",
":ControlFlowInterfaces",
":FuncDialect",
+ ":FunctionInterfaces",
":IR",
":LoopLikeInterface",
":MemRefDialect",
@@ -13711,6 +13428,7 @@ cc_library(
":SubsetOpInterface",
":Support",
":TensorDialect",
+ ":TransformUtils",
":Transforms",
":ViewLikeInterface",
"//llvm:Support",
@@ -13737,7 +13455,7 @@ cc_library(
":Pass",
":SCFDialect",
":Support",
- ":Transforms",
+ ":TransformUtils",
],
)
@@ -13747,9 +13465,7 @@ cc_library(
hdrs = ["include/mlir/Dialect/Bufferization/Pipelines/Passes.h"],
includes = ["include"],
deps = [
- ":BufferizationDialect",
":BufferizationInterfaces",
- ":BufferizationToMemRef",
":BufferizationTransforms",
":FuncDialect",
":MemRefTransforms",
@@ -13842,13 +13558,11 @@ cc_library(
cc_library(
name = "MlirReduceLib",
srcs = ["lib/Tools/mlir-reduce/MlirReduceMain.cpp"],
- hdrs = [
- "include/mlir/Tools/ParseUtilities.h",
- "include/mlir/Tools/mlir-reduce/MlirReduceMain.h",
- ],
+ hdrs = ["include/mlir/Tools/mlir-reduce/MlirReduceMain.h"],
includes = ["include"],
deps = [
":IR",
+ ":ParseUtilities",
":Parser",
":Pass",
":Reducer",
@@ -13878,7 +13592,6 @@ cc_library(
srcs = glob(
[
"lib/Tools/PDLL/ODS/*.cpp",
- "lib/Tools/PDLL/ODS/*.h",
],
),
hdrs = glob(["include/mlir/Tools/PDLL/ODS/*.h"]),
@@ -13886,7 +13599,6 @@ cc_library(
deps = [
":Support",
"//llvm:Support",
- "//llvm:TargetParser",
],
)
@@ -13903,7 +13615,6 @@ cc_library(
deps = [
":Support",
"//llvm:Support",
- "//llvm:TargetParser",
],
)
@@ -13912,7 +13623,6 @@ cc_library(
srcs = glob(
[
"lib/Tools/PDLL/CodeGen/*.cpp",
- "lib/Tools/PDLL/CodeGen/*.h",
],
),
hdrs = glob(["include/mlir/Tools/PDLL/CodeGen/*.h"]),
@@ -14059,6 +13769,7 @@ cc_library(
hdrs = ["include/mlir/Dialect/UB/IR/UBOps.h"],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":ConvertToLLVMInterface",
":IR",
":InliningUtils",
@@ -14073,7 +13784,6 @@ cc_library(
name = "UBToLLVM",
srcs = glob([
"lib/Conversion/UBToLLVM/*.cpp",
- "lib/Conversion/UBToLLVM/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/UBToLLVM/*.h",
@@ -14094,7 +13804,6 @@ cc_library(
name = "UBToSPIRV",
srcs = glob([
"lib/Conversion/UBToSPIRV/*.cpp",
- "lib/Conversion/UBToSPIRV/*.h",
]),
hdrs = glob([
"include/mlir/Conversion/UBToSPIRV/*.h",
@@ -14106,7 +13815,6 @@ cc_library(
":SPIRVConversion",
":SPIRVDialect",
":UBDialect",
- "//llvm:Core",
],
)
@@ -14116,6 +13824,7 @@ cc_library(
hdrs = ["include/mlir/Dialect/LLVMIR/VCIXDialect.h"],
includes = ["include"],
deps = [
+ ":BytecodeOpInterface",
":GPUDialect",
":IR",
":LLVMDialect",
diff --git a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch4/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch4/BUILD.bazel
index b8c5a59cd14d..68639df2aa2c 100644
--- a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch4/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch4/BUILD.bazel
@@ -101,6 +101,7 @@ cc_binary(
"//mlir:CastInterfaces",
"//mlir:FunctionInterfaces",
"//mlir:IR",
+ "//mlir:InliningUtils",
"//mlir:Parser",
"//mlir:Pass",
"//mlir:SideEffectInterfaces",
diff --git a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch5/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch5/BUILD.bazel
index ce48e249489d..9ce23b5d9754 100644
--- a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch5/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch5/BUILD.bazel
@@ -102,11 +102,13 @@ cc_binary(
"//mlir:Analysis",
"//mlir:ArithDialect",
"//mlir:BytecodeReader",
+ "//mlir:CallOpInterfaces",
"//mlir:CastInterfaces",
"//mlir:FuncDialect",
"//mlir:FuncExtensions",
"//mlir:FunctionInterfaces",
"//mlir:IR",
+ "//mlir:InliningUtils",
"//mlir:MemRefDialect",
"//mlir:Parser",
"//mlir:Pass",
diff --git a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch6/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch6/BUILD.bazel
index 286c08065645..86925aa0662c 100644
--- a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch6/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch6/BUILD.bazel
@@ -108,6 +108,7 @@ cc_binary(
"//mlir:ArithToLLVM",
"//mlir:BuiltinToLLVMIRTranslation",
"//mlir:BytecodeReader",
+ "//mlir:CallOpInterfaces",
"//mlir:CastInterfaces",
"//mlir:ControlFlowToLLVM",
"//mlir:ExecutionEngine",
@@ -117,6 +118,7 @@ cc_binary(
"//mlir:FuncToLLVM",
"//mlir:FunctionInterfaces",
"//mlir:IR",
+ "//mlir:InliningUtils",
"//mlir:LLVMCommonConversion",
"//mlir:LLVMDialect",
"//mlir:LLVMIRTransforms",
diff --git a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch7/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch7/BUILD.bazel
index f4037cab03f6..bf9ab79529b8 100644
--- a/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch7/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/examples/toy/Ch7/BUILD.bazel
@@ -108,6 +108,7 @@ cc_binary(
"//mlir:ArithToLLVM",
"//mlir:BuiltinToLLVMIRTranslation",
"//mlir:BytecodeReader",
+ "//mlir:CallOpInterfaces",
"//mlir:CastInterfaces",
"//mlir:ControlFlowToLLVM",
"//mlir:ExecutionEngine",
@@ -117,6 +118,7 @@ cc_binary(
"//mlir:FuncToLLVM",
"//mlir:FunctionInterfaces",
"//mlir:IR",
+ "//mlir:InliningUtils",
"//mlir:LLVMCommonConversion",
"//mlir:LLVMDialect",
"//mlir:LLVMIRTransforms",
diff --git a/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel
index 0c3ed22e7360..d6b0832f4c1a 100644
--- a/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/python/BUILD.bazel
@@ -493,6 +493,44 @@ filegroup(
)
##---------------------------------------------------------------------------##
+# Index dialect.
+##---------------------------------------------------------------------------##
+
+gentbl_filegroup(
+ name = "IndexOpsPyGen",
+ tbl_outs = [
+ (
+ [
+ "-gen-python-enum-bindings",
+ "-bind-dialect=index",
+ ],
+ "mlir/dialects/_index_enum_gen.py",
+ ),
+ (
+ [
+ "-gen-python-op-bindings",
+ "-bind-dialect=index",
+ ],
+ "mlir/dialects/_index_ops_gen.py",
+ ),
+ ],
+ tblgen = "//mlir:mlir-tblgen",
+ td_file = "mlir/dialects/IndexOps.td",
+ deps = [
+ "//mlir:IndexOpsTdFiles",
+ "//mlir:OpBaseTdFiles",
+ ],
+)
+
+filegroup(
+ name = "IndexOpsPyFiles",
+ srcs = [
+ "mlir/dialects/index.py",
+ ":IndexOpsPyGen",
+ ],
+)
+
+##---------------------------------------------------------------------------##
# Math dialect.
##---------------------------------------------------------------------------##
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 771cbcc4eea0..30130131c465 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -94,12 +94,14 @@ cc_library(
"//mlir:AffineAnalysis",
"//mlir:AffineDialect",
"//mlir:Analysis",
+ "//mlir:CallOpInterfaces",
"//mlir:ControlFlowInterfaces",
"//mlir:FuncDialect",
"//mlir:FunctionInterfaces",
"//mlir:IR",
"//mlir:MemRefDialect",
"//mlir:Pass",
+ "//mlir:SideEffectInterfaces",
"//mlir:Support",
],
)
@@ -356,6 +358,7 @@ cc_library(
deps = [
":TestTransformDialectExtensionIncGen",
"//llvm:Support",
+ "//mlir:BytecodeOpInterface",
"//mlir:IR",
"//mlir:PDLDialect",
"//mlir:Pass",
@@ -386,6 +389,8 @@ cc_library(
":TestTypeDefsIncGen",
"//llvm:Support",
"//mlir:ArithDialect",
+ "//mlir:BytecodeOpInterface",
+ "//mlir:CallOpInterfaces",
"//mlir:ControlFlowInterfaces",
"//mlir:CopyOpInterface",
"//mlir:DLTIDialect",
@@ -409,6 +414,7 @@ cc_library(
"//mlir:SideEffectInterfaces",
"//mlir:Support",
"//mlir:TensorDialect",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:ViewLikeInterface",
],
@@ -436,6 +442,7 @@ cc_library(
"//llvm:Support",
"//mlir:Analysis",
"//mlir:ArithDialect",
+ "//mlir:BytecodeOpInterface",
"//mlir:BytecodeReader",
"//mlir:BytecodeWriter",
"//mlir:FuncDialect",
@@ -534,6 +541,7 @@ cc_library(
"//mlir:PDLInterpDialect",
"//mlir:Pass",
"//mlir:Support",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -564,6 +572,7 @@ cc_library(
"//mlir:ArithDialect",
"//mlir:FuncDialect",
"//mlir:IR",
+ "//mlir:InliningUtils",
"//mlir:MathDialect",
"//mlir:PDLDialect",
"//mlir:PDLInterpDialect",
@@ -571,7 +580,9 @@ cc_library(
"//mlir:Pass",
"//mlir:SCFDialect",
"//mlir:SPIRVDialect",
+ "//mlir:SideEffectInterfaces",
"//mlir:Support",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -602,6 +613,7 @@ cc_library(
"//mlir:Pass",
"//mlir:SCFDialect",
"//mlir:SCFTransforms",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -614,6 +626,7 @@ cc_library(
"//mlir:FuncDialect",
"//mlir:Pass",
"//mlir:SPIRVDialect",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
"//mlir:VectorToSPIRV",
@@ -641,6 +654,7 @@ cc_library(
"//mlir:SCFDialect",
"//mlir:Support",
"//mlir:TensorDialect",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:ValueBoundsOpInterface",
"//mlir:VectorDialect",
@@ -697,6 +711,7 @@ cc_library(
"//mlir:SCFToControlFlow",
"//mlir:SPIRVDialect",
"//mlir:ToLLVMIRTranslation",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
"//mlir:VectorToLLVM",
@@ -729,6 +744,7 @@ cc_library(
"//mlir:SCFTransforms",
"//mlir:TensorDialect",
"//mlir:TensorTransforms",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
"//mlir:VectorToSCF",
@@ -770,6 +786,7 @@ cc_library(
"//mlir:MathTransforms",
"//mlir:Pass",
"//mlir:SCFDialect",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
"//mlir:X86VectorDialect",
@@ -786,6 +803,7 @@ cc_library(
"//mlir:IR",
"//mlir:MathDialect",
"//mlir:Pass",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VCIXDialect",
"//mlir:VectorDialect",
@@ -805,6 +823,7 @@ cc_library(
"//mlir:MemRefDialect",
"//mlir:MemRefTransforms",
"//mlir:Pass",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
"//mlir:VectorTransforms",
@@ -814,7 +833,6 @@ cc_library(
cc_library(
name = "TestMesh",
srcs = glob(["lib/Dialect/Mesh/**/*.cpp"]),
- hdrs = glob(["lib/Dialect/Mesh/**/*.h"]),
includes = ["lib/Dialect/Test"],
deps = [
":TestDialect",
@@ -827,6 +845,7 @@ cc_library(
"//mlir:Pass",
"//mlir:SPIRVDialect",
"//mlir:Support",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -850,6 +869,7 @@ cc_library(
"//mlir:Pass",
"//mlir:SCFDialect",
"//mlir:Support",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -869,6 +889,7 @@ cc_library(
"//mlir:SCFDialect",
"//mlir:SCFTransforms",
"//mlir:SCFUtils",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -884,6 +905,7 @@ cc_library(
"//mlir:LLVMDialect",
"//mlir:MathDialect",
"//mlir:Pass",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
],
@@ -900,6 +922,7 @@ cc_library(
"//mlir:IR",
"//mlir:Pass",
"//mlir:Support",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -945,6 +968,7 @@ cc_library(
"//mlir:LLVMCommonConversion",
"//mlir:LLVMDialect",
"//mlir:Pass",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -994,6 +1018,7 @@ cc_library(
"//mlir:FuncTransforms",
"//mlir:IR",
"//mlir:Pass",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -1011,6 +1036,7 @@ cc_library(
"//mlir:TensorTransforms",
"//mlir:TransformDialect",
"//mlir:TransformDialectInterfaces",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -1034,6 +1060,7 @@ cc_library(
"//mlir:SCFDialect",
"//mlir:Support",
"//mlir:TensorDialect",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
"//mlir:VectorDialect",
"//mlir:VectorToSCF",
@@ -1043,18 +1070,6 @@ cc_library(
)
cc_library(
- name = "TestTypeDialect",
- srcs = glob([
- "lib/Dialect/LLVMIR/*.cpp",
- ]),
- deps = [
- ":TestDialect",
- "//mlir:IR",
- "//mlir:LLVMDialect",
- ],
-)
-
-cc_library(
name = "TestTosaDialect",
srcs = glob([
"lib/Dialect/Tosa/*.cpp",
@@ -1065,6 +1080,7 @@ cc_library(
"//mlir:Pass",
"//mlir:TensorDialect",
"//mlir:TosaDialect",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
@@ -1120,6 +1136,7 @@ cc_library(
"//mlir:Parser",
"//mlir:Pass",
"//mlir:Support",
+ "//mlir:TransformUtils",
"//mlir:Transforms",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/unittests/BUILD.bazel
index 252b9ec951f6..7172beb4de9a 100644
--- a/utils/bazel/llvm-project-overlay/mlir/unittests/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/unittests/BUILD.bazel
@@ -16,7 +16,6 @@ cc_test(
size = "small",
srcs = glob([
"Debug/*.cpp",
- "Debug/*.h",
]),
deps = [
"//llvm:Support",
@@ -35,11 +34,11 @@ cc_test(
size = "small",
srcs = glob([
"IR/*.cpp",
- "IR/*.h",
]),
deps = [
"//llvm:Support",
"//mlir:BytecodeReader",
+ "//mlir:CallOpInterfaces",
"//mlir:FunctionInterfaces",
"//mlir:IR",
"//mlir:Parser",
@@ -56,7 +55,6 @@ cc_test(
size = "small",
srcs = glob([
"Interfaces/*.cpp",
- "Interfaces/*.h",
]),
deps = [
"//llvm:Support",
@@ -81,7 +79,6 @@ cc_test(
size = "small",
srcs = glob([
"Support/*.cpp",
- "Support/*.h",
]),
deps = [
"//llvm:Support",
@@ -97,7 +94,6 @@ cc_test(
size = "small",
srcs = glob([
"Pass/*.cpp",
- "Pass/*.h",
]),
deps = [
"//llvm:Support",
@@ -117,7 +113,6 @@ cc_test(
size = "small",
srcs = glob([
"Rewrite/*.cpp",
- "Rewrite/*.h",
]),
deps = [
"//mlir:IR",
@@ -133,7 +128,6 @@ cc_test(
size = "small",
srcs = glob([
"Dialect/*.cpp",
- "Dialect/*.h",
]),
deps = [
"//llvm:Support",
@@ -148,7 +142,6 @@ cc_test(
size = "small",
srcs = glob([
"Dialect/MemRef/*.cpp",
- "Dialect/MemRef/*.h",
]),
deps = [
"//llvm:TestingSupport",
@@ -161,26 +154,10 @@ cc_test(
)
cc_test(
- name = "quantops_tests",
- size = "small",
- srcs = glob([
- "Dialect/Quant/*.cpp",
- "Dialect/Quant/*.h",
- ]),
- deps = [
- "//llvm:TestingSupport",
- "//mlir:QuantOps",
- "//mlir:Transforms",
- "//third-party/unittest:gtest_main",
- ],
-)
-
-cc_test(
name = "scf_tests",
size = "small",
srcs = glob([
"Dialect/SCF/*.cpp",
- "Dialect/SCF/*.h",
]),
deps = [
"//mlir:ArithDialect",
@@ -199,7 +176,6 @@ cc_test(
size = "small",
srcs = glob([
"Dialect/SparseTensor/*.cpp",
- "Dialect/SparseTensor/*.h",
]),
deps = [
"//llvm:Support",
@@ -217,7 +193,6 @@ cc_test(
size = "small",
srcs = glob([
"Dialect/SPIRV/*.cpp",
- "Dialect/SPIRV/*.h",
]),
deps = [
"//llvm:Support",
@@ -237,7 +212,6 @@ cc_test(
size = "small",
srcs = glob([
"Dialect/Transform/*.cpp",
- "Dialect/Transform/*.h",
]),
deps = [
"//llvm:Support",
@@ -263,7 +237,6 @@ cc_test(
size = "small",
srcs = glob([
"Dialect/Utils/*.cpp",
- "Dialect/Utils/*.h",
]),
deps = [
"//llvm:Support",
@@ -316,7 +289,6 @@ cc_test(
size = "small",
srcs = glob([
"TableGen/*.cpp",
- "TableGen/*.h",
]) + [
"TableGen/EnumsGenTest.cpp.inc",
"TableGen/EnumsGenTest.h.inc",
@@ -342,7 +314,6 @@ cc_test(
size = "small",
srcs = glob([
"Transforms/*.cpp",
- "Transforms/*.h",
]),
deps = [
"//mlir:AffineAnalysis",
@@ -362,8 +333,6 @@ cc_test(
name = "analysis_tests",
size = "small",
srcs = glob([
- "Analysis/*.cpp",
- "Analysis/*.h",
"Analysis/*/*.cpp",
"Analysis/*/*.h",
]),
@@ -386,9 +355,6 @@ cc_test(
size = "small",
srcs = glob([
"Bytecode/*.cpp",
- "Bytecode/*.h",
- "Bytecode/*/*.cpp",
- "Bytecode/*/*.h",
]),
deps = [
"//llvm:Support",
@@ -407,10 +373,7 @@ cc_test(
name = "conversion_tests",
size = "small",
srcs = glob([
- "Conversion/*.cpp",
- "Conversion/*.h",
"Conversion/*/*.cpp",
- "Conversion/*/*.h",
]),
deps = [
"//mlir:ArithDialect",
diff --git a/utils/bazel/third_party_build/pfm.BUILD b/utils/bazel/third_party_build/pfm.BUILD
index fe908d4744da..dba7464d17df 100644
--- a/utils/bazel/third_party_build/pfm.BUILD
+++ b/utils/bazel/third_party_build/pfm.BUILD
@@ -14,8 +14,12 @@ make_variant(
copts = ["-w"],
lib_name = "libpfm",
lib_source = ":sources",
+ target_compatible_with = select({
+ "@platforms//os:linux": [],
+ "//conditions:default": ["@platforms//:incompatible"],
+ }),
toolchain = "@rules_foreign_cc//toolchains:preinstalled_autoconf_toolchain",
- visibility = ["//visibility:public"],
+ visibility = ["//visibility:private"],
)
alias(
@@ -27,5 +31,9 @@ alias(
cc_library(
name = "pfm_system",
linkopts = ["-lpfm"],
+ target_compatible_with = select({
+ "@platforms//os:linux": [],
+ "//conditions:default": ["@platforms//:incompatible"],
+ }),
visibility = ["//visibility:public"],
)